__author__ = 'Sergei'
from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_contact_first_last(self, Contact):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.first_n)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.last_n)
def create_first_last(self, Contact):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_link_text("add new").click()
self.fill_contact_first_last(Contact)
if wd.find_element_by_name("submit").click():
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_contact_full(self, Contact):
wd = self.app.wd
self.change_field_value("firstname",Contact.first_n)
self.change_field_value("middlename",Contact.mid_n)
self.change_field_value("lastname",Contact.last_n)
self.change_field_value("nickname",Contact.nick_n)
self.change_field_value("company",Contact.company)
self.change_field_value("address",Contact.address)
self.change_field_value("home",Contact.home_ph)
self.change_field_value("mobile",Contact.cell_ph)
self.change_field_value("email",Contact.email)
def create_c(self,contacts):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_link_text("add new").click()
self.fill_contact_full(contacts)
if wd.find_element_by_name("submit").click():
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def select_contact_by_index(self,index):
wd = self.app.wd
# self.open_contact_page()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_css_selector("img[alt=\"Edit\"]").click()
def open_contact_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("http://localhost/addressbook/")):
wd.get("http://localhost/addressbook/")
def contact_delete_by_index(self,index):
wd = self.app.wd
self.open_contact_page()
self.select_contact_by_index(index)
wd.find_element_by_name("update[value=\"Delete\"]").click()
# wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# wd.switch_to_alert().accept()
self.contact_cache = None
def contact_modify_by_index(self,index,cont):
wd = self.app.wd
# self.open_contact_page()
self.select_contact_by_index(index)
# wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.fill_first_last_name(cont)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def fill_first_last_name(self, Contact):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.first_n)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.last_n)
def modify_first_contact(self, cont, index):
wd = self.app.wd
self.open_contact_page()
self.select_contact_by_index(index)
# wd.find_element_by_css_selector("img[alt=\"Edit\"]")[index].click()
self.fill_first_last_name(cont)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def contact_delete(self):
self.contact_delete_by_index(0)
self.contact_cache = None
    def contact_first_modify(self, cont):
        self.contact_modify_by_index(0, cont)
        self.contact_cache = None
def test_edit_contact(self, Contact):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_name("selected[]").click()
wd.find_element_by_css_selector("img[alt=\"Edit\"]").click()
self.fill_contact_full(Contact)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def count(self):
wd = self.app.wd
return len(wd.find_elements_by_name("selected[]"))
def count_first(self):
wd = self.app.wd
self.open_contact_page()
# wd.find_elements_by_name('entry')
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contact_page()
self.contact_cache = []
for element in wd.find_elements_by_name('entry'):
id = element.find_element_by_name("selected[]").get_attribute("value")
text = element.text
self.contact_cache.append(Contact( id=id, first_n=text))
return list(self.contact_cache)
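# Illustrative usage only (not part of the original helper). The `app` fixture object
# and its `wd` webdriver attribute are assumptions inferred from how self.app.wd is
# used above; in a course-style test suite this helper is normally driven like:
#
#   contact = Contact(first_n="John", last_n="Doe")
#   helper = ContactHelper(app)          # `app` exposes a selenium webdriver as app.wd
#   helper.create_first_last(contact)
#   assert helper.count() > 0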
| python |
"""
twtxt.models
~~~~~~~~~~~~
This module implements the main models used in twtxt.
:copyright: (c) 2016 by buckket.
:license: MIT, see LICENSE for more details.
"""
from datetime import datetime, timezone
import humanize
from dateutil.tz import tzlocal
class Tweet:
"""A :class:`Tweet` represents a single tweet.
:param str text: text of the tweet in raw format
:param ~datetime.datetime created_at: (optional) when the tweet was created, defaults to :meth:`~datetime.datetime.now` when no value is given
:param Source source: (optional) the :class:`Source` the tweet is from
"""
def __init__(self, text, created_at=None, source=None):
if text:
self.text = text
else:
raise ValueError("empty text")
if created_at is None:
created_at = datetime.now(tzlocal())
try:
self.created_at = created_at.replace(microsecond=0)
except AttributeError:
raise TypeError("created_at is of invalid type")
self.source = source
@staticmethod
def _is_valid_operand(other):
return (hasattr(other, "text") and
hasattr(other, "created_at"))
def __lt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at < other.created_at
def __le__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at < other.created_at or (self.created_at == other.created_at and self.text == other.text)
def __gt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at > other.created_at
def __ge__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at > other.created_at or (self.created_at == other.created_at and self.text == other.text)
def __eq__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at == other.created_at and self.text == other.text
def __str__(self):
return "{created_at}\t{text}".format(created_at=self.created_at.isoformat(), text=self.text)
@property
def relative_datetime(self):
"""Return human-readable relative time string."""
now = datetime.now(timezone.utc)
tense = "from now" if self.created_at > now else "ago"
return "{0} {1}".format(humanize.naturaldelta(now - self.created_at), tense)
@property
def absolute_datetime(self):
"""Return human-readable absolute time string."""
return self.created_at.strftime("%a, %d %b %Y %H:%M:%S")
class Source:
"""A :class:`Source` represents a twtxt feed, remote as well as local.
:param str nick: nickname of twtxt user
:param str url: URL to remote twtxt file
:param str file: path to local twtxt file
"""
def __init__(self, nick, url=None, file=None):
self.nick = nick.lower()
self.url = url
self.file = file
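# Illustrative usage (not part of the original module): a tiny self-check that only
# relies on the classes defined above and can be run directly.
if __name__ == "__main__":
    src = Source(nick="Buckket", url="https://example.org/twtxt.txt")
    t1 = Tweet("hello world", created_at=datetime(2016, 1, 1, tzinfo=timezone.utc), source=src)
    t2 = Tweet("a later tweet", created_at=datetime(2016, 1, 2, tzinfo=timezone.utc))
    assert t1 < t2                   # ordering is by created_at
    print(t1)                        # 2016-01-01T00:00:00+00:00<TAB>hello world
    print(t1.relative_datetime)      # e.g. "... ago"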
| python |
# %% [markdown]
## Accessing all parameters (generic)
# %%
def todos_params(*posicionais, **nomeados):
    print(f'Positional: {posicionais}')
    print(f'Keyword: {nomeados}\n')
todos_params(1,2,3)  # 3 positional arguments and no keyword arguments
todos_params(1,2,3, nome='Victor', solteiro=True)  # 3 positional arguments and 2 keyword arguments
todos_params(nome='Victor', idade=26, solteiro=True)  # no positional arguments and 3 keyword arguments
todos_params([1,2,3], 'a', 'b', 'c', nome='Victor', solteiro=True)  # 4 positional arguments and 2 keyword arguments
# todos_params(nome='Victor', solteiro=True, 1, 2, 3)  # SyntaxError: positional arguments must come before keyword arguments
| python |
import numpy as np
from ivory.callbacks.results import concatenate
def test_libraries(runs):
for run in runs.values():
run.start("both")
for mode in ["val", "test"]:
outputs = []
for run in runs.values():
outputs.append(run.results[mode].output)
for output in outputs[1:]:
assert np.allclose(outputs[0], output)
def callback(index, output, target):
return index, 2 * output, target
gen = (run.results for run in runs.values())
results = concatenate(gen, reduction="mean", callback=callback)
assert np.allclose(2 * outputs[0], results.test.output)
| python |
from sqlalchemy.orm import Session
from apps.crud.pusher import get_pushers_by_token, get_pushers_by_token_and_type
from apps.serializer.record import RecordSerializer
from apps.pusher import test_wechat, official_wechat, e_mail, android, wechat, qq
type_func_dict = {
1: test_wechat.send_msg,
2: official_wechat.send_msg,
3: e_mail.send_msg,
4: android.send_msg,
5: wechat.send_msg,
6: qq.send_msg,
}
def send_msg(session: Session, record: RecordSerializer):
if record.push_type is not None:
pusher = get_pushers_by_token_and_type(session=session, token=record.token, push_type=record.push_type)
type_func_dict[pusher.push_type](title=record.title, content=record.content, to_user=pusher.params1)
else:
pushers = get_pushers_by_token(session=session, token=record.token)
for p in pushers:
type_func_dict[p.push_type](title=record.title, content=record.content, to_user=p.params1)
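# Illustrative only (not part of the original module): how the dispatch table above is
# typically exercised. The RecordSerializer fields (token, title, content, push_type)
# are inferred from the attribute accesses in send_msg; the session comes from the caller.
#
#   record = RecordSerializer(token="abc123", title="Build finished",
#                             content="All tests passed", push_type=3)
#   send_msg(session, record)   # push_type=3 routes the message to e_mail.send_msg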
| python |
from __future__ import print_function
import logging
import pandas as pd
import numpy as np
import scipy.stats as stats
from matplotlib.backends.backend_pdf import PdfPages
import os.path
from .storemanager import StoreManager
from .condition import Condition
from .constants import WILD_TYPE_VARIANT
from .sfmap import sfmap_plot
from .dataframe import singleton_dataframe
from .random_effects import rml_estimator
class Experiment(StoreManager):
"""
    Class for coordinating multiple :py:class:`~.selection.Selection`
objects. Creating an
:py:class:`~experiment.Experiment` requires a valid *config* object,
usually from a ``.json`` configuration file.
"""
store_suffix = "exp"
treeview_class_name = "Experiment"
def __init__(self):
StoreManager.__init__(self)
self.conditions = list()
self._wt = None
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__))
@property
def wt(self):
if self.has_wt_sequence():
if self._wt is None:
self._wt = self.selection_list()[0].wt.duplicate(self.name)
return self._wt
else:
if self._wt is not None:
raise ValueError(
"Experiment should not contain wild type "
"sequence [{}]".format(self.name)
)
else:
return None
def configure(self, cfg, configure_children=True):
"""
Set up the :py:class:`~experiment.Experiment` using the *cfg* object,
usually from a ``.json`` configuration file.
"""
StoreManager.configure(self, cfg)
self.logger = logging.getLogger(
"{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
)
if configure_children:
if "conditions" not in cfg:
raise KeyError(
"Missing required config value {} [{}]"
"".format("conditions", self.name)
)
for cnd_cfg in cfg["conditions"]:
cnd = Condition()
cnd.configure(cnd_cfg)
self.add_child(cnd)
selection_names = [x.name for x in self.selection_list()]
if len(set(selection_names)) != len(selection_names):
raise ValueError("Non-unique selection names [{}]" "".format(self.name))
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
"""
cfg = StoreManager.serialize(self)
cfg["conditions"] = [child.serialize() for child in self.children]
return cfg
def _children(self):
"""
Method bound to the ``children`` property. Returns a list of all
:py:class:`~condition.Condition` objects belonging to this object,
sorted by name.
"""
return sorted(self.conditions, key=lambda x: x.name)
def add_child(self, child):
"""
Add a selection.
"""
if child.name in self.child_names():
raise ValueError(
"Non-unique condition name '{}' [{}]" "".format(child.name, self.name)
)
child.parent = self
self.conditions.append(child)
def remove_child_id(self, tree_id):
"""
Remove the reference to a :py:class:`~condition.Condition` with
Treeview id *tree_id*.
"""
self.conditions = [x for x in self.conditions if x.treeview_id != tree_id]
def selection_list(self):
"""
Return the :py:class:`~selection.Selection` objects as a list.
"""
selections = list()
for cnd in self.children:
selections.extend(cnd.children)
return selections
def validate(self):
"""
Calls validate on all child Conditions. Also checks the wild type
sequence status.
"""
# check the wild type sequences
if self.has_wt_sequence():
for child in self.selection_list()[1:]:
if self.selection_list()[0].wt != child.wt:
self.logger.warning("Inconsistent wild type sequences")
break
for child in self.children:
child.validate()
def is_coding(self):
"""
        Return ``True`` if all :py:class:`~.selection.Selection` objects in the
:py:class:`~experiment.Experiment` count protein-coding variants, else
``False``.
"""
return all(x.is_coding() for x in self.selection_list())
def has_wt_sequence(self):
"""
        Return ``True`` if all :py:class:`~.selection.Selection` objects in the
:py:class:`~experiment.Experiment` have a wild type sequence, else
``False``.
"""
return all(x.has_wt_sequence() for x in self.selection_list())
def calculate(self):
"""
Calculate scores for all :py:class:`~selection.Selection` objects.
"""
if len(self.labels) == 0:
raise ValueError(
"No data present across all conditions [{}]" "".format(self.name)
)
for s in self.selection_list():
s.calculate()
self.combine_barcode_maps()
for label in self.labels:
self.calc_counts(label)
if self.scoring_method != "counts":
self.calc_shared_full(label)
self.calc_shared(label)
self.calc_scores(label)
if label != "barcodes":
self.calc_pvalues_wt(label)
def combine_barcode_maps(self):
"""
Combine all barcode maps for :py:class:`~selection.Selection` objects
into a single data frame and store it in ``'/main/barcodemap'``.
If multiple variants or IDs map to the same barcode, only the first one
will be present in the barcode map table.
The ``'/main/barcodemap'`` table is not created if no
:py:class:`~selection.Selection` has barcode map information.
"""
if self.check_store("/main/barcodemap"):
return
bcm = None
for sel in self.selection_list():
if "/main/barcodemap" in sel.store.keys():
if bcm is None:
bcm = sel.store["/main/barcodemap"]
else:
bcm = bcm.join(
sel.store["/main/barcodemap"], rsuffix=".drop", how="outer"
)
new = bcm.loc[pd.isnull(bcm)["value"]]
bcm.loc[new.index, "value"] = new["value.drop"]
bcm.drop("value.drop", axis="columns", inplace=True)
if bcm is not None:
bcm.sort_values("value", inplace=True)
self.store.put(
"/main/barcodemap", bcm, format="table", data_columns=bcm.columns
)
def calc_counts(self, label):
"""
Create a data frame of all counts in this Experiment. This data frame
is not used for any calculations, but is provided to facilitate
exploration of the data set.
"""
if self.check_store("/main/{}/counts".format(label)):
return
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
self.logger.info("Creating column multi-index for counts ({})".format(label))
conditions_index = list()
selections_index = list()
values_index = list()
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(sel.timepoints))
selections_index.extend([sel.name] * len(sel.timepoints))
values_index.extend(["c_{}".format(x) for x in sorted(sel.timepoints)])
columns = pd.MultiIndex.from_tuples(
zip(conditions_index, selections_index, values_index),
names=["condition", "selection", "timepoint"],
)
# create union index
self.logger.info("Creating row index for counts ({})".format(label))
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
"/main/{}/counts_unfiltered" "".format(label), "columns='index'"
).index
first = False
else:
combined = combined.join(
s.store.select(
"/main/{}/counts_unfiltered".format(label), "columns='index'"
).index,
how="outer",
)
# create and fill the data frames
self.logger.info(
"Populating Experiment data frame with counts ({})".format(label)
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select(
"/main/{}/counts_unfiltered" "".format(label)
)
for tp in sel.timepoints:
data.loc[:][cnd.name, sel.name, "c_{}".format(tp)] = sel_data[
"c_{}".format(tp)
]
self.store.put("/main/{}/counts".format(label), data, format="table")
def calc_shared_full(self, label):
"""
Use joins to create a data frame containing all scores across all
Selections in the Experiment.
"""
if self.check_store("/main/{}/scores_shared_full".format(label)):
return
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
self.logger.info("Creating column multi-index for scores ({})")
conditions_index = list()
selections_index = list()
values_index = list()
if self.scoring_method == "simple":
values_list = ["score"]
else:
values_list = ["score", "SE"]
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(values_list))
selections_index.extend([sel.name] * len(values_list))
values_index.extend(sorted(values_list))
columns = pd.MultiIndex.from_tuples(
zip(conditions_index, selections_index, values_index),
names=["condition", "selection", "value"],
)
# create union index
self.logger.info("Creating row index for scores ({})".format(label))
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
"/main/{}/scores".format(label), "columns='index'"
).index
first = False
else:
combined = combined.join(
s.store.select(
"/main/{}/scores".format(label), "columns='index'"
).index,
how="outer",
)
# create and fill the data frames
self.logger.info(
"Populating Experiment data frame with scores ({})".format(label)
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select("/main/{}/scores".format(label))
for v in values_list:
data.loc[:, (cnd.name, sel.name, v)] = sel_data[v]
self.store.put(
"/main/{}/scores_shared_full".format(label), data, format="table"
)
def calc_shared(self, label):
"""
Get the subset of scores that are shared across all Selections in each
Condition.
"""
if self.check_store("/main/{}/scores_shared".format(label)):
return
idx = pd.IndexSlice
self.logger.info(
"Identifying subset shared across all Selections ({})".format(label)
)
data = self.store.select("/main/{}/scores_shared_full".format(label))
# identify variants found in all selections in at least one condition
complete = np.full(len(data.index), False, dtype=bool)
for cnd in data.columns.levels[0]:
complete = np.logical_or(
complete, data.loc[:, idx[cnd, :, :]].notnull().all(axis="columns")
)
data = data.loc[complete]
self.store.put("/main/{}/scores_shared".format(label), data, format="table")
def calc_scores(self, label):
"""
Combine the scores and standard errors within each condition.
"""
if self.check_store("/main/{}/scores".format(label)):
return
self.logger.info("Calculating per-condition scores ({})".format(label))
# set up new data frame
shared_index = self.store.select(
"/main/{}/scores_shared" "".format(label), columns="index"
).index
columns = pd.MultiIndex.from_product(
[sorted(self.child_names()), sorted(["score", "SE", "epsilon"])],
names=["condition", "value"],
)
data = pd.DataFrame(np.nan, index=shared_index, columns=columns)
del shared_index
del columns
# set up local variables
idx = pd.IndexSlice
score_df = self.store.select("/main/{}/scores_shared".format(label))
if self.scoring_method == "simple":
# special case for simple ratios that have no SE
# calculates the average score
for cnd in score_df.columns.levels[0]:
data.loc[:, idx[cnd, "score"]] = score_df.loc[
:, idx[cnd, :, "score"]
].mean(axis=1)
else:
for cnd in score_df.columns.levels[0]:
y = np.array(score_df.loc[:, idx[cnd, :, "score"]].values).T
sigma2i = np.array(score_df.loc[:, idx[cnd, :, "SE"]].values ** 2).T
# single replicate of the condition
if y.shape[0] == 1:
data.loc[:, idx[cnd, "score"]] = y.ravel()
data.loc[:, idx[cnd, "SE"]] = np.sqrt(sigma2i).ravel()
data.loc[:, idx[cnd, "epsilon"]] = 0.0
# multiple replicates
else:
betaML, var_betaML, eps = rml_estimator(y, sigma2i)
data.loc[:, idx[cnd, "score"]] = betaML
data.loc[:, idx[cnd, "SE"]] = np.sqrt(var_betaML)
data.loc[:, idx[cnd, "epsilon"]] = eps
# special case for normalized wild type variant
if self.logr_method == "wt" and WILD_TYPE_VARIANT in data.index:
data.loc[WILD_TYPE_VARIANT, idx[:, "SE"]] = 0.0
data.loc[WILD_TYPE_VARIANT, idx[:, "score"]] = 0.0
data.loc[WILD_TYPE_VARIANT, idx[:, "epsilon"]] = 0.0
# store the data
self.store.put("/main/{}/scores".format(label), data, format="table")
def calc_pvalues_wt(self, label):
"""
Calculate uncorrected pvalue for each variant compared to wild type.
"""
if self.check_store("/main/{}/scores_pvalues_wt".format(label)):
return
idx = pd.IndexSlice
wt = self.store.select(
"/main/{}/scores".format(label), "index=WILD_TYPE_VARIANT"
)
if len(wt) == 0: # no wild type score
self.logger.info(
"Failed to find wild type score, skipping wild type p-value calculations"
)
return
data = self.store.select(
"/main/{}/scores".format(label), "index!=WILD_TYPE_VARIANT"
)
columns = pd.MultiIndex.from_product(
[sorted(self.child_names()), sorted(["z", "pvalue_raw"])],
names=["condition", "value"],
)
result_df = pd.DataFrame(index=data.index, columns=columns)
condition_labels = data.columns.levels[0]
for cnd in condition_labels:
result_df.loc[:, idx[cnd, "z"]] = np.absolute(
wt.loc[WILD_TYPE_VARIANT, idx[cnd, "score"]]
- data.loc[:, idx[cnd, "score"]]
) / np.sqrt(
wt.loc[WILD_TYPE_VARIANT, idx[cnd, "SE"]] ** 2
+ data.loc[:, idx[cnd, "SE"]] ** 2
)
result_df.loc[:, idx[cnd, "pvalue_raw"]] = 2 * stats.norm.sf(
result_df.loc[:, idx[cnd, "z"]]
)
self.store.put(
"/main/{}/scores_pvalues_wt".format(label), result_df, format="table"
)
def calc_pvalues_pairwise(self, label):
"""
Calculate pvalues for each variant in each pair of Conditions.
"""
if self.check_store("/main/{}/scores_pvalues".format(label)):
return
data = self.store["/main/{}/scores".format(label)]
cnd1_index = list()
cnd2_index = list()
values_index = list()
values_list = ["z", "pvalue_raw"]
condition_labels = data.columns.levels[0]
for i, cnd1 in enumerate(condition_labels):
for cnd2 in condition_labels[i + 1 :]:
cnd1_index.extend([cnd1] * len(values_list))
cnd2_index.extend([cnd2] * len(values_list))
values_index.extend(sorted(values_list))
columns = pd.MultiIndex.from_tuples(
zip(cnd1_index, cnd2_index, values_index),
names=["condition1", "condition2", "value"],
)
idx = pd.IndexSlice
result_df = pd.DataFrame(np.nan, index=data.index, columns=columns)
for i, cnd1 in enumerate(condition_labels):
for cnd2 in condition_labels[i + 1 :]:
result_df.loc[:, idx[cnd1, cnd2, "z"]] = np.absolute(
data.loc[:, idx[cnd1, "score"]] - data.loc[:, idx[cnd2, "score"]]
) / np.sqrt(
data.loc[:, idx[cnd1, "SE"]] ** 2
+ data.loc[:, idx[cnd2, "SE"]] ** 2
)
result_df.loc[:, idx[cnd1, cnd2, "pvalue_raw"]] = 2 * stats.norm.sf(
result_df.loc[:, idx[cnd1, cnd2, "z"]]
)
self.store.put(
"/main/{}/scores_pvalues".format(label), result_df, format="table"
)
def make_plots(self):
if self.plots_requested:
self.logger.info("Creating plots")
# sequence-function maps
if self.scoring_method != "counts":
if "synonymous" in self.labels:
pdf = PdfPages(
os.path.join(self.plot_dir, "sequence_function_map_aa.pdf")
)
for condition in self.children:
self.sfmap_wrapper(
condition=condition.name, pdf=pdf, coding=True
)
pdf.close()
if "variants" in self.labels:
pdf = PdfPages(
os.path.join(self.plot_dir, "sequence_function_map_nt.pdf")
)
for condition in self.children:
self.sfmap_wrapper(
condition=condition.name, pdf=pdf, coding=False
)
pdf.close()
for s in self.selection_list():
s.make_plots()
def write_tsv(self):
"""
Write each table from the store to its own tab-separated file.
Files are written to a ``tsv`` directory in the default output
location. File names are the HDF5 key with ``'_'`` substituted for
``'/'``.
"""
if self.tsv_requested:
self.logger.info("Generating tab-separated output files")
for k in self.store.keys():
self.write_table_tsv(k)
for s in self.selection_list():
s.write_tsv()
def sfmap_wrapper(self, condition, pdf, coding):
"""
Create a sequence function map for scores in *condition*.
Uses :py:func:`~sfmap.sfmap_plot` for the plotting.
"""
plot_options = self.get_root().plot_options
if coding:
label = "amino acid"
else:
label = "nucleotide"
self.logger.info(
"Creating sequence-function map ({}, {})".format(condition, label)
)
idx = pd.IndexSlice
if coding:
df_name = "/main/synonymous/scores"
else:
df_name = "/main/variants/scores"
if plot_options is not None:
data, wtseq = singleton_dataframe(
self.store[df_name][idx[condition, "score"]],
self.wt,
coding=coding,
aa_list=plot_options["aa_list"],
)
data_se, _ = singleton_dataframe(
self.store[df_name][idx[condition, "SE"]],
self.wt,
coding=coding,
aa_list=plot_options["aa_list"],
)
else:
data, wtseq = singleton_dataframe(
self.store[df_name][idx[condition, "score"]], self.wt, coding=coding
)
data_se, _ = singleton_dataframe(
self.store[df_name][idx[condition, "SE"]], self.wt, coding=coding
)
# format the title
if coding:
title = "Amino Acid"
else:
title = "Nucleotide"
if self.scoring_method in ("WLS", "OLS"):
title += " Sequence-Function Map\n{} ({} Slope)".format(
condition, self.scoring_method
)
elif self.scoring_method == "ratios":
title += " Sequence-Function Map\n{} ({})".format(
condition, "Enrich2 Ratio"
)
elif self.scoring_method == "simple":
title += " Sequence-Function Map\n{} ({})".format(
condition, "Simplified Ratio"
)
else:
raise ValueError("Invalid scoring method", self.name)
if plot_options is not None:
sfmap_plot(
df=data,
pdf=pdf,
style="scores",
df_se=data_se,
dimensions="tall",
wt=wtseq,
title=title,
aa_list=plot_options["aa_list"],
aa_label_groups=plot_options["aa_label_groups"],
)
else:
sfmap_plot(
df=data,
pdf=pdf,
style="scores",
df_se=data_se,
dimensions="tall",
wt=wtseq,
title=title,
)
def correlation_plot(self, pdf, label):
"""
Create a triangular heatmap showing the Pearson correlation coefficient
for each pairwise comparison of replicate scores.
"""
pass
| python |
import sys
import os
import glob
import shutil
import xml.etree.ElementTree as ET
if not os.path.exists("../results/"):
os.makedirs("../results/")
if os.path.exists("../results/detection/"):
shutil.rmtree("../results/detection/")
os.makedirs("../results/detection/")
# create VOC format files
xml_list = [f for f in os.listdir('../predictions') if f.endswith('xml')]
if len(xml_list) == 0:
print("Error: no .xml files found in predictions")
sys.exit()
for tmp_file in xml_list:
print(tmp_file)
with open(os.path.join('../results/detection', tmp_file.replace(".xml", ".txt")), "a") as new_f:
root = ET.parse(os.path.join('../predictions', tmp_file)).getroot()
for obj in root.findall('object'):
obj_name = obj.find('name').text.replace(' ', '_').rstrip().lower()
bndbox = obj.find('bndbox')
left = bndbox.find('xmin').text
top = bndbox.find('ymin').text
right = bndbox.find('xmax').text
bottom = bndbox.find('ymax').text
conf = obj.find('difficult').text
new_f.write("%s %s %s %s %s %s\n" % (obj_name, conf, left, top, right, bottom))
print("Conversion completed!")
| python |
"""
Calculate the number of proteins per kingdom / phylum / genus / species per genera for the phages
"""
import os
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calculate the kingdom / phylum / genus / species per genera for the phages")
parser.add_argument('-d', help='directory with phage flat files, one file per phage', required=True)
parser.add_argument('-i', help='file with id, taxid, taxonomy (just kingdom / phylum / genus / species). Output from blast_tax_to_genera.py', required=True)
parser.add_argument('-l', help='file with location in body (default: phage_host_location.txt)', default='phage_host_location.txt')
parser.add_argument('-b', help='Only print phages for which we have a body site associated with the host', action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
bodysite={}
with open(args.l, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
bodysite[p[0]] = p[3]
genome = {} # this is a hash of proteins -> genomes
count = {}
proteins = {} # list of proteins in this genome
for f in os.listdir(args.d):
if args.v:
sys.stderr.write("Reading genome {}\n".format(f))
with open(os.path.join(args.d, f), 'r') as fin:
for l in fin:
p=l.strip().split("\t")
genome[p[5]] = p[0]
if p[0] not in proteins:
proteins[p[0]] = set()
proteins[p[0]].add(p[5])
count[p[5]] = [set(), set(), set(), set()]
seen = set()
with open(args.i, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
if p[2] not in ['Archaea', 'Bacteria']:
continue
seen.add(p[0])
        # skip malformed lines (need id, taxid and the four taxonomy ranks)
        if len(p) < 6:
            sys.stderr.write("Not enough elements in {}\n".format("|".join(p)))
            continue
        for i in range(4):
            count[p[0]][i].add(p[i+2])
genomeavs = {}
for i in seen:
g = genome[i]
if g not in genomeavs:
genomeavs[g] = [[], [], [], []]
for j in range(4):
genomeavs[g][j].append(len(count[i][j]))
for g in genomeavs:
sys.stdout.write(g)
if g in bodysite:
sys.stdout.write("\t{}".format(bodysite[g]))
else:
sys.stdout.write("\t-")
sys.stdout.write("\t{}\t".format(len(proteins[g])))
sys.stdout.write("\t".join(genomeavs[g]))
sys.stdout.write("\n") | python |
from flask_restful import Resource, reqparse, request
from lib.objects.namespace import Namespace
from lib.objects.lock import Lock
class LockController(Resource):
# TODO Check access as separate method or decorator
# https://flask-restful.readthedocs.io/en/latest/extending.html#resource-method-decorators
parser = reqparse.RequestParser()
parser.add_argument(
"ttl", type=int, default=60, help="Time for lock to live without refreshes"
)
def __init__(self, storage):
self.storage = storage
def put(self, namespace_id: str, lock_id: str):
namespace = Namespace(storage=self.storage, id=namespace_id)
if not namespace.validate_id():
return {"message": "Wrong namespace"}, 400
if not namespace.read():
return {"message": "Namespace not found", "lock": None}, 404
token = request.headers.get("X-Getlock-Auth")
if token != namespace.token:
return {"message": "Provided wrong auth token"}, 403
args = self.parser.parse_args(strict=True)
lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
if not lock.validate_id():
return {"message": "Wrong lock", "lock": None}, 400
if not lock.read():
message = "Lock created"
lock._load(**args)
lock.create()
else:
message = "Lock updated"
lock._load_self()
lock._load(**args)
lock.update()
return {"message": message, "lock": lock._dump()}, 201
def get(self, namespace_id: str, lock_id: str):
namespace = Namespace(storage=self.storage, id=namespace_id)
if not namespace.validate_id():
return {"message": "Wrong namespace"}, 400
if not namespace.read():
return {"message": "Namespace not found", "lock": None}, 404
lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
if not lock.validate_id():
return {"message": "Wrong lock", "lock": None}, 400
if not lock.read():
return {"message": "Lock not found", "lock": None}, 404
lock._load_self()
if lock.expired:
return {"message": "Lock has expired", "lock": lock._dump()}, 410
return {"message": "Lock found", "lock": lock._dump()}, 200
def delete(self, namespace_id: str, lock_id: str):
namespace = Namespace(storage=self.storage, id=namespace_id)
if not namespace.validate_id():
return {"message": "Wrong namespace"}, 400
if not namespace.read():
return {"message": "Namespace not found", "lock": None}, 404
token = request.headers.get("X-Getlock-Auth")
if token != namespace.token:
return {"message": "Provided wrong auth token"}, 403
lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
if not lock.validate_id():
return {"message": "Wrong lock", "lock": None}, 400
if not lock.read():
return {"message": "Lock not found", "lock": None}, 404
lock.delete()
return {"message": "Lock removed", "lock": lock._dump()}, 200
| python |
__author__ = "Polymathian"
__version__ = "0.3.0"
| python |
# coding=utf-8
"""
The MIT License
Copyright (c) 2013 Mustafa İlhan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class Globals:
_1_DAY = 86400 # 24 * 60 * 60 seconds
_1_WEEK = 604800 # 7 * 24 * 60 * 60 seconds
_1_MONTH = 2592000 # 30 * 24 * 60 * 60 seconds
_10_MINUTES = 600 # seconds
DEFAULT_LIMIT = 15
MAX_REQUESTS = 15
REGIONS = [
1, 23424969
] # regions = [('tr', '23424969'), ('usa', '23424977'), ('world', '1')]
DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE = 0 # Cache in both memcache and cachepy by default
SINGLE_LAYER_MEMCACHE_ONLY = 1
SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY = 2
| python |
# This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "quantile_0"
prob = None
opt_val = None
# Variable declarations
# Generate data
np.random.seed(0)
m = 400
n = 10
k = 100
p = 1
sigma = 0.1
x = np.random.rand(m)*2*np.pi*p
y = np.sin(x) + sigma*np.sin(x)*np.random.randn(m)
alphas = np.linspace(1./(k+1), 1-1./(k+1), k)
# RBF features
mu_rbf = np.array([np.linspace(-1, 2*np.pi*p+1, n)])
mu_sig = (2*np.pi*p+2)/n
X = np.exp(-(mu_rbf.T - x).T**2/(2*mu_sig**2))
# Problem construction
Theta = cp.Variable(n,k)
def quantile_loss(alphas, Theta, X, y):
m, n = X.shape
k = len(alphas)
Y = np.tile(y.flatten(), (k, 1)).T
A = np.tile(alphas, (m, 1))
Z = X*Theta - Y
return cp.sum_entries(
cp.max_elemwise(
cp.mul_elemwise( -A, Z),
cp.mul_elemwise(1-A, Z)))
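# The expression above is the standard quantile ("pinball") loss applied elementwise:
# with residual z = prediction - target and quantile level a, the per-entry loss is
#   max(-a * z, (1 - a) * z) = max(a * (target - prediction), (a - 1) * (target - prediction)),
# which penalizes under- and over-prediction asymmetrically for every level in `alphas`.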
f = quantile_loss(alphas, Theta, X, y)
C = [X*(Theta[:,1:] - Theta[:,:-1]) >= 0]
prob = cp.Problem(cp.Minimize(f), C)
# Problem collection
# Single problem collection
problemDict = {
"problemID" : problemID,
"problem" : prob,
"opt_val" : opt_val
}
problems = [problemDict]
# For debugging individual problems:
if __name__ == "__main__":
def printResults(problemID = "", problem = None, opt_val = None):
print(problemID)
problem.solve()
print("\tstatus: {}".format(problem.status))
print("\toptimal value: {}".format(problem.value))
print("\ttrue optimal value: {}".format(opt_val))
printResults(**problems[0])
| python |
from starlette.config import Config
# Configuration from environment variables or '.env' file.
config = Config(".env")
DB_NAME = config("DB_NAME")
TEST_DB_NAME = config("TEST_DB_NAME")
DB_USER = config("DB_USER")
DB_PASSWORD = config("DB_PASSWORD")
DB_HOST = config("DB_HOST")
DB_PORT = config("DB_PORT")
SECRET_KEY = config("SECRET_KEY")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
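# A minimal example '.env' file matching the keys read above (all values are placeholders):
#
#   DB_NAME=app_db
#   TEST_DB_NAME=app_test_db
#   DB_USER=app_user
#   DB_PASSWORD=change-me
#   DB_HOST=localhost
#   DB_PORT=5432
#   SECRET_KEY=generate-a-long-random-string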
| python |
"""Migration for the Submitty system."""
import os
def up(config):
"""
Run up migration.
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
"""
os.system("apt install -qy python3-numpy")
os.system("apt install -qy python3-opencv")
os.system("apt-get update")
def down(config):
"""
Run down migration (rollback).
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
"""
pass
| python |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, Integer, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from src.model.base import Base
from src.model.EstacaoZona import EstacaoZona
class Zona(Base):
__tablename__ = 'Zona'
Zona_id = Column(Integer, primary_key=True)
Nome = Column(String)
Raio = Column(Float)
Latitude = Column(Float)
Longitude = Column(Float)
Estacoes = relationship('Estacao', secondary=EstacaoZona)
Tags = relationship('Tag')
def format(self):
return {
"Zona_id": f'{self.Zona_id}',
"Nome": self.Nome,
"Raio": f'{self.Raio}',
"Latitude": f'{self.Latitude}',
"Longitude": f'{self.Longitude}'
        }
| python |
import matplotlib.pyplot as plt
from playLA.Matrix import Matrix
from playLA.Vector import Vector
import math
if __name__ == "__main__":
points = [[0, 0], [0, 5], [3, 5], [3, 4], [1, 4],
[1, 3], [2, 3], [2, 2], [1, 2], [1, 0]]
x = [point[0] for point in points]
y = [point[1] for point in points]
plt.figure(figsize=(5, 5))
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.plot(x, y)
# plt.show()
P = Matrix(points)
# print(P)
# T = Matrix([[2, 0], [0, 1.5]]) # T: 2 * 2 P: 10 * 2
# T = Matrix([[1, 0], [0, -1]])
# T = Matrix([[-1, 0], [0, 1]])
# T = Matrix([[-1, 0], [0, -1]])
# T = Matrix([[1, 1], [0, 1]])
# T = Matrix([[1, 0], [1, 1]])
# T = Matrix([[1, 0.5], [1, 1]])
# T = Matrix([[1, 0.5], [1, 1]])
theta = math.pi / 3
T = Matrix([[math.cos(theta), math.sin(theta)],
[-math.sin(theta), math.cos(theta)]])
P2 = T.dot(P.T()) # P2: 2 * 10
# print(P2)
plt.plot([P2.col_vector(i)[0] for i in range(P2.col_num())],
[P2.col_vector(i)[1] for i in range(P2.col_num())])
plt.show()
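# Note on the transform above: for column vectors, [[cos t, -sin t], [sin t, cos t]]
# rotates counter-clockwise by t, so the transpose used here,
# [[cos t, sin t], [-sin t, cos t]], rotates the figure clockwise by t (pi/3 = 60 degrees).
# Quick sanity check with the unit vector (1, 0):
#   T . (1, 0)^T = (cos t, -sin t) ~= (0.5, -0.866) for t = pi/3.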
| python |
import ast
import json
import os
from base_automation import report
# ---------------------------- terminal ------------------------------------#
@report.utils.step('send terminal command: {command}')
def terminal_command(command):
try:
step_data(f"send command to terminal:\n{command}")
return os.system(command)
except Exception as e:
step_data(e)
# ---------------------------- environment ------------------------------------#
@report.utils.step("get environment items")
def get_environment_items():
return os.environ.items()
@report.utils.step("get environment variable: {key}")
def get_environment_variable(key):
return os.environ.get(key)
@report.utils.step("set environment variable: {key}, {value}")
def set_environment_variable(key, value):
os.environ.setdefault(key, value)
# ---------------------------- report data ------------------------------------#
@report.utils.step('{step_description}')
def step_data(step_description):
pass
@report.utils.step("assert validation - {step_description}")
def compare_data(first_condition, second_condition, step_description=None, positive_test=True):
if positive_test:
assert first_condition == second_condition
else:
assert first_condition != second_condition
# ---------------------------- files actions ------------------------------------#
@report.utils.step("dict to json")
def dict_to_json(string_content):
return json.dumps(str_to_dict(string_content))
@report.utils.step("str to dict")
def str_to_dict(string_content):
return ast.literal_eval(str(string_content))
@report.utils.step("load json")
def load_json(json_content):
return json.loads(json_content)
@report.utils.step("create temp json")
def create_temp_json(file_path, data):
json_file = open(file_path, "w")
json_file.write(data)
json_file.close()
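# Illustrative usage (not part of the original helpers); the file path is hypothetical:
#
#   payload = "{'status': 'ok', 'count': 3}"
#   as_dict = str_to_dict(payload)                 # -> {'status': 'ok', 'count': 3}
#   as_json = dict_to_json(payload)                # -> '{"status": "ok", "count": 3}'
#   compare_data(as_dict["count"], 3, step_description="count matches")
#   create_temp_json("/tmp/result.json", as_json)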
| python |
# PLUGIN MADE BY DANGEROUSJATT
# KEEP CREDIT
# MADE FOR HELLBOT
# BY TEAM HELLBOT
# NOW IN darkbot
import math
import mpmath  # provides the csc, sec and cot functions used below
from darkbot.utils import admin_cmd, sudo_cmd, edit_or_reply
from userbot import CmdHelp
from userbot import bot as darkbot
@darkbot.on(admin_cmd(pattern="sin ?(.*)"))
@darkbot.on(sudo_cmd(pattern="sin ?(.*)", allow_sudo=True))
async def findsin(event):
input_str = int(event.pattern_match.group(1))
output = math.sin(input_str)
await event.edit(f"**Value of Sin** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cos ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cos ?(.*)", allow_sudo=True))
async def find_cos(event):
input_str = int(event.pattern_match.group(1))
output = math.cos(input_str)
await event.edit(f"**Value of Cos** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="tan ?(.*)"))
@darkbot.on(sudo_cmd(pattern="tan ?(.*)", allow_sudo=True))
async def find_tan(event):
input_str = int(event.pattern_match.group(1))
output = math.tan(input_str)
await event.edit(f"**Value of Tan** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cosec ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cosec ?(.*)", allow_sudo=True))
async def find_csc(event):
input_str = float(event.pattern_match.group(1))
output = mpmath.csc(input_str)
await event.edit(f"**Value of Cosec** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="sec ?(.*)"))
@darkbot.on(sudo_cmd(pattern="sec ?(.*)", allow_sudo=True))
async def find_sec(event):
input_str = float(event.pattern_match.group(1))
output = mpmath.sec(input_str)
await event.edit(f"**Value of Sec** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cot ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cot ?(.*)", allow_sudo=True))
async def find_cot(event):
input_str = float(event.pattern_match.group(1))
output = mpmath.cot(input_str)
await event.edit(f"**Value of Cot** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="square ?(.*)"))
@darkbot.on(sudo_cmd(pattern="square ?(.*)", allow_sudo=True))
async def square(event):
input_str = float(event.pattern_match.group(1))
output = input_str * input_str
await event.edit(f"**Square of** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cube ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cube ?(.*)", allow_sudo=True))
async def cube(event):
input_str = float(event.pattern_match.group(1)) # DANGEROUSJATT
output = input_str * input_str * input_str
await event.edit(f"**Cube of** `{input_str}`\n== `{output}`")
CmdHelp("maths").add_command(
"cube", "<query>", "Gives the cube of given number"
).add_command(
"square", "<query>", "Gives the square of given number"
).add_command(
"cot", "<query>", "Gives the cot of given query"
).add_command(
"sec", "<query>", "Gives the sec of given query"
).add_command(
"cosec", "<query>", "Gives the cosec of given query"
).add_command(
"tan", "<query>", "Gives the tan of given query"
).add_command(
"sin", "<query>", "Gives the sin of given query"
).add_command(
"cos", "<query>", "Gives the cos of given query"
).add()
| python |
# Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from typing import Dict, List, Set
from dlk.core.base_module import SimpleModule, BaseModuleConfig
from . import embedding_register, embedding_config_register
from dlk.core.modules import module_config_register, module_register
@embedding_config_register("pretrained_transformers")
class PretrainedTransformersConfig(BaseModuleConfig):
"""Config for PretrainedTransformers
Config Example1:
>>> {
>>> "module": {
>>> "_base": "roberta",
>>> },
>>> "config": {
>>> "pretrained_model_path": "*@*",
>>> "input_map": {
>>> "input_ids": "input_ids",
>>> "attention_mask": "attention_mask",
>>> "type_ids": "type_ids",
>>> },
>>> "output_map": {
>>> "embedding": "embedding",
>>> },
>>> "dropout": 0, //dropout rate
>>> "embedding_dim": "*@*",
>>> },
>>> "_link": {
>>> "config.pretrained_model_path": ["module.config.pretrained_model_path"],
>>> },
>>> "_name": "pretrained_transformers",
>>> }
Config Example2:
>>> for gather embedding
>>> {
>>> "module": {
>>> "_base": "roberta",
>>> },
>>> "config": {
>>> "pretrained_model_path": "*@*",
>>> "input_map": {
>>> "input_ids": "input_ids",
>>> "attention_mask": "subword_mask",
>>> "type_ids": "type_ids",
>>> "gather_index": "gather_index",
>>> },
>>> "output_map": {
>>> "embedding": "embedding",
>>> },
>>> "embedding_dim": "*@*",
>>> "dropout": 0, //dropout rate
>>> },
>>> "_link": {
>>> "config.pretrained_model_path": ["module.config.pretrained_model_path"],
>>> },
>>> "_name": "pretrained_transformers",
>>> }
"""
def __init__(self, config: Dict):
super(PretrainedTransformersConfig, self).__init__(config)
self.pretrained_transformers_config = config["module"]
self.post_check(config['config'], used=[
"pretrained_model_path",
"embedding_dim",
"output_map",
"input_map",
"dropout",
"return_logits",
])
@embedding_register("pretrained_transformers")
class PretrainedTransformers(SimpleModule):
"""Wrap the hugingface transformers
"""
def __init__(self, config: PretrainedTransformersConfig):
super(PretrainedTransformers, self).__init__(config)
self._provide_keys = {'embedding'}
self._required_keys = {'input_ids', 'attention_mask'}
self.config = config
self.pretrained_transformers = module_register.get(config.pretrained_transformers_config['_name'])(module_config_register.get(config.pretrained_transformers_config['_name'])(config.pretrained_transformers_config))
def init_weight(self, method):
"""init the weight of submodules by 'method'
Args:
method: init method
Returns:
None
"""
self.pretrained_transformers.init_weight(method)
def forward(self, inputs: Dict[str, torch.Tensor])->Dict[str, torch.Tensor]:
"""get the transformers output as embedding
Args:
inputs: one mini-batch inputs
Returns:
one mini-batch outputs
"""
input_ids = inputs[self.get_input_name('input_ids')] if "input_ids" in self.config._input_map else None
attention_mask = inputs[self.get_input_name('attention_mask')] if "attention_mask" in self.config._input_map else None
        type_ids = inputs[self.get_input_name('type_ids')] if "type_ids" in self.config._input_map else None
inputs_embeds = inputs[self.get_input_name('inputs_embeds')] if "inputs_embeds" in self.config._input_map else None
if (input_ids is None and inputs_embeds is None) or (input_ids is not None and inputs_embeds is not None):
            raise PermissionError("exactly one of input_ids and inputs_embeds must be provided; set the other to None")
sequence_output, all_hidden_states, all_self_attentions = self.pretrained_transformers(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": type_ids,
"inputs_embeds": inputs_embeds,
}
)
if 'gather_index' in self.config._input_map:
# gather_index.shape == bs*real_sent_len
gather_index = inputs[self.get_input_name("gather_index")]
g_bs, g_seq_len = gather_index.shape
bs, seq_len, hid_size = sequence_output.shape
assert g_bs == bs
assert g_seq_len <= seq_len
sequence_output = torch.gather(sequence_output[:, :, :], 1, gather_index.unsqueeze(-1).expand(bs, g_seq_len, hid_size))
inputs[self.get_output_name('embedding')] = sequence_output
if self._logits_gather.layer_map:
inputs.update(self._logits_gather(all_hidden_states))
return inputs
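# Illustration of the optional gather step above (token values are made up): with
# sequence_output of shape (bs, seq_len, hid) and gather_index of shape (bs, real_len),
# torch.gather keeps, for every batch row, the hidden state at each listed sub-token
# position. For tokens ["[CLS]", "play", "##ing", "ball"], gather_index = [[0, 1, 3]]
# selects the embeddings of "[CLS]", "play" (first sub-token of "playing") and "ball",
# giving one vector per word instead of one per sub-token.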
| python |
import os
import os.path
from os.path import exists
import hashlib
import json
import uuid
import pprint
import unittest
from pathlib import Path
from collections import defaultdict
import settings
import pathlib
from cromulent import model, vocab, reader
from cromulent.model import factory
from pipeline.util import CromObjectMerger
from pipeline.projects.sales import SalesPipeline
from pipeline.projects.people import PeoplePipeline
from pipeline.projects.knoedler import KnoedlerPipeline
from pipeline.projects.aata import AATAPipeline
from pipeline.projects.sales.util import SalesTree
from pipeline.nodes.basic import Serializer, AddArchesModel
MODELS = {
'Bidding': 'model-bidding',
'Acquisition': 'model-acquisition',
'Activity': 'model-activity',
'SaleActivity': 'model-sale-activity',
'Event': 'model-event',
'Group': 'model-groups',
'HumanMadeObject': 'model-object',
'LinguisticObject': 'model-lo',
'Person': 'model-person',
'Place': 'model-place',
'ProvenanceEntry': 'model-activity',
'Production': 'model-production',
'Set': 'model-set',
'VisualItem': 'model-visual-item',
'Inventorying': 'model-inventorying'
}
class TestWriter():
'''
Deserialize the output of each resource and store in memory.
Merge data for multiple serializations of the same resource.
'''
def __init__(self):
self.output = {}
self.merger = CromObjectMerger()
super().__init__()
def __call__(self, data: dict, *args, **kwargs):
d = data['_OUTPUT']
dd = json.loads(d)
dr = data['_ARCHES_MODEL']
if dr not in self.output:
self.output[dr] = {}
uu = data.get('uuid')
if 'id' in dd:
uu = hashlib.sha256(dd['id'].encode('utf-8')).hexdigest()
elif not uu and 'uri' in data:
uu = hashlib.sha256(data['uri'].encode('utf-8')).hexdigest()
# print(f'*** No UUID in top-level resource. Using a hash of top-level URI: {uu}')
if not uu:
uu = str(uuid.uuid4())
# print(f'*** No UUID in top-level resource;')
# print(f'*** Using an assigned UUID filename for the content: {uu}')
fn = '%s.json' % uu
data = json.loads(d)
if fn in self.output[dr]:
r = reader.Reader()
model_object = r.read(d)
merger = self.merger
content = self.output[dr][fn]
try:
m = r.read(content)
if m == model_object:
self.output[dr][fn] = data
return
else:
merger.merge(m, model_object)
self.output[dr][fn] = json.loads(factory.toString(m, False))
return
except model.DataError:
print(f'Exception caught while merging data from {fn}:')
print(d)
print(content)
raise
else:
self.output[dr][fn] = data
def process_model(self, model):
data = {v['id']: v for v in model.values()}
return data
def process_output(self, output):
data = {k: self.process_model(v) for k, v in output.items()}
return data
def processed_output(self):
return self.process_output(self.output)
##########################################################################################
class SalesTestPipeline(SalesPipeline):
'''
Test Provenance pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, catalogs, auction_events, contents, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, catalogs, auction_events, contents, **kwargs)
self.writer = writer
self.prev_post_sales_map = {}
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
services.update({
'problematic_records': {},
'location_codes': {}
})
return services
def run(self, **options):
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
post_map = services['post_sale_map']
self.generate_prev_post_sales_data(post_map)
def load_prev_post_sales_data(self):
return {}
def persist_prev_post_sales_data(self, post_sale_rewrite_map):
self.prev_post_sales_map = post_sale_rewrite_map
def load_sales_tree(self):
return SalesTree()
def persist_sales_tree(self, g):
self.sales_tree = g
class TestSalesPipelineOutput(unittest.TestCase):
'''
Parse test CSV data and run the Provenance pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/sales'))
self.catalogs = {
'header_file': 'tests/data/sales/sales_catalogs_info_0.csv',
'files_pattern': 'tests/data/sales/empty.csv',
}
self.contents = {
'header_file': 'tests/data/sales/sales_contents_0.csv',
'files_pattern': 'tests/data/sales/empty.csv',
}
self.auction_events = {
'header_file': 'tests/data/sales/sales_descriptions_0.csv',
'files_pattern': 'tests/data/sales/empty.csv',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
catalogs = self.catalogs.copy()
events = self.auction_events.copy()
contents = self.contents.copy()
tests_path = Path(f'tests/data/sales/{test_name}')
catalog_files = list(tests_path.rglob('sales_catalogs_info*'))
event_files = list(tests_path.rglob('sales_descriptions*'))
content_files = list(tests_path.rglob('sales_contents*'))
if catalog_files:
if exists(str(tests_path / 'sales_catalogs_info_0.csv')):
catalogs['header_file'] = str(tests_path / 'sales_catalogs_info_0.csv')
catalogs['files_pattern'] = str(tests_path / 'sales_catalogs_info_[!0]*')
if event_files:
if exists(str(tests_path / 'sales_descriptions_0.csv')):
events['header_file'] = str(tests_path / 'sales_descriptions_0.csv')
events['files_pattern'] = str(tests_path / 'sales_descriptions_[!0]*')
if content_files:
if exists(str(tests_path / 'sales_contents_0.csv')):
contents['header_file'] = str(tests_path / 'sales_contents_0.csv')
contents['files_pattern'] = str(tests_path / 'sales_contents_[!0]*')
writer = TestWriter()
pipeline = SalesTestPipeline(
writer,
input_path,
catalogs=catalogs,
auction_events=events,
contents=contents,
models=MODELS,
limit=100,
debug=True
)
pipeline.run()
self.prev_post_sales_map = pipeline.prev_post_sales_map
return writer.processed_output()
##########################################################################################
class AATATestPipeline(AATAPipeline):
'''
    Test AATA pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, *args, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, *args, **kwargs)
self.writer = writer
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
# services.update({
# })
return services
def run(self, **options):
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
class TestAATAPipelineOutput(unittest.TestCase):
'''
    Parse test XML data and run the AATA pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/aata'))
self.patterns = {
'abstracts_pattern': 'tests/data/aata/empty.xml',
'journals_pattern': 'tests/data/aata/empty.xml',
'series_pattern': 'tests/data/aata/empty.xml',
'people_pattern': 'tests/data/aata/empty.xml',
'corp_pattern': 'tests/data/aata/empty.xml',
'geog_pattern': 'tests/data/aata/empty.xml',
'subject_pattern': 'tests/data/aata/empty.xml',
'tal_pattern': 'tests/data/aata/empty.xml',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
tests_path = Path(f'tests/data/aata/{test_name}')
patterns = {
'abstracts_pattern': 'AATA_[0-9]*.xml',
'journals_pattern': 'AATA*Journal.xml',
'series_pattern': 'AATA*Series.xml',
'people_pattern': 'Auth_person.xml',
'corp_pattern': 'Auth_corp.xml',
'geog_pattern': 'Auth_geog.xml',
'subject_pattern': 'Auth_subject.xml',
'tal_pattern': 'Auth_TAL.xml'
}
kwargs = self.patterns.copy()
for k, pattern in patterns.items():
files = list(tests_path.rglob(pattern))
if files:
kwargs[k] = str(tests_path / pattern)
writer = TestWriter()
pipeline = AATATestPipeline(
writer,
input_path,
models=MODELS,
limit=100,
debug=True,
**kwargs,
)
pipeline.run()
return writer.processed_output()
def verify_content(self, data, **kwargs):
for k, expected in kwargs.items():
self.assertIn(k, data)
got = data.get(k)
if isinstance(got, list):
values = [g['content'] for g in got]
self.assertIn(expected, values)
else:
value = got['content']
self.assertEqual(value, expected)
def verify_property(self, data, property, **kwargs):
for k, expected in kwargs.items():
self.assertIn(k, data)
got = data.get(k)
if isinstance(got, list):
values = [g[property] for g in got]
self.assertIn(expected, values)
else:
value = got[property]
self.assertEqual(value, expected)
def get_classification_labels(self, data):
cl = data.get('classified_as', [])
for c in cl:
clabel = c['_label']
yield clabel
def get_typed_referrers(self, data):
return self.get_typed_content('referred_to_by', data)
def get_typed_identifiers(self, data):
return self.get_typed_content('identified_by', data)
def get_typed_content(self, prop, data):
identified_by = data.get(prop, [])
identifiers = defaultdict(set)
for i in identified_by:
content = i['content']
for clabel in self.get_classification_labels(i):
identifiers[clabel].add(content)
for k in identifiers.keys():
if len(identifiers[k]) == 1:
identifiers[k] = identifiers[k].pop()
return dict(identifiers)
def verify_place_hierarchy(self, places, place, expected_names):
while place:
expected = expected_names.pop(0)
self.verify_content(place, identified_by=expected)
place = place.get('part_of', [])
if place:
i = place[0]['id']
place = places.get(i)
self.assertEqual(len(expected_names), 0)
##########################################################################################
class KnoedlerTestPipeline(KnoedlerPipeline):
'''
	Test Knoedler pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, data, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, data, **kwargs)
self.writer = writer
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
services.update({
'problematic_records': {},
'location_codes': {},
})
return services
def run(self, **options):
vocab.conceptual_only_parts()
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
class TestKnoedlerPipelineOutput(unittest.TestCase):
'''
	Parse test CSV data and run the Knoedler pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/knoedler'))
# os.environ['GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH'] = 'data/common'
self.data = {
'header_file': 'tests/data/knoedler/knoedler_0.csv',
'files_pattern': 'knoedler.csv',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
data = self.data.copy()
tests_path = Path(f'tests/data/knoedler/{test_name}')
files = list(tests_path.rglob('knoedler_ar*'))
if files:
data['files_pattern'] = str(tests_path / 'knoedler_ar*')
writer = TestWriter()
pipeline = KnoedlerTestPipeline(
writer,
input_path,
data=data,
models=MODELS,
limit=100,
debug=True
)
pipeline.run()
return writer.processed_output()
##########################################################################################
class PeopleTestPipeline(PeoplePipeline):
'''
	Test People pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, data, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, data, **kwargs)
self.writer = writer
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
services.update({
'problematic_records': {},
'location_codes': {},
})
return services
def run(self, **options):
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
class TestPeoplePipelineOutput(unittest.TestCase):
'''
	Parse test CSV data and run the People pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/people'))
# os.environ['GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH'] = 'data/common'
self.data = {
'header_file': 'tests/data/people/people_authority_0.csv',
'files_pattern': 'people_authority.csv',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
data = self.data.copy()
tests_path = Path(f'tests/data/people/{test_name}')
files = list(tests_path.rglob('people_authority_ar*'))
if files:
data['files_pattern'] = str(tests_path / 'people_authority_ar*')
writer = TestWriter()
pipeline = PeopleTestPipeline(
writer,
input_path,
data=data,
models=MODELS,
limit=100,
debug=True
)
pipeline.run()
return writer.processed_output()
##########################################################################################
def classified_identifiers(data, key='identified_by'):
classified_identifiers = {}
identifiers = [(i['content'], i.get('classified_as', [])) for i in data.get(key, [])]
for (content, classification) in identifiers:
if len(classification):
for cl in classification:
label = cl['_label']
classified_identifiers[label] = content
else:
classified_identifiers[None] = content
return classified_identifiers
def classified_identifier_sets(data, key='identified_by'):
classified_identifiers = defaultdict(set)
identifiers = [(i.get('content'), i.get('classified_as', [])) for i in data.get(key, [])]
for (content, classification) in identifiers:
if content:
if len(classification):
for cl in classification:
label = cl['_label']
classified_identifiers[label].add(content)
else:
classified_identifiers[None].add(content)
return classified_identifiers
def classification_sets(data, key='_label'):
classification_set = set()
classification = data.get('classified_as', [])
if len(classification):
for cl in classification:
label = cl[key]
classification_set.add(label)
return classification_set
def classification_tree(data, key='_label'):
tree = {}
classification = data.get('classified_as', [])
if len(classification):
for cl in classification:
label = cl[key]
tree[label] = classification_tree(cl, key=key)
return tree
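# Illustrative sketch (not part of the original test suite): the helpers above flatten
# the 'identified_by' / 'classified_as' structures of a Linked Art JSON fragment.
# The sample data below is hypothetical.
#
#   data = {
#       'identified_by': [
#           {'content': 'K-123', 'classified_as': [{'_label': 'Stock Number'}]},
#           {'content': 'Untitled', 'classified_as': [{'_label': 'Title'}]},
#       ]
#   }
#   classified_identifiers(data)   # -> {'Stock Number': 'K-123', 'Title': 'Untitled'}
#   classification_sets({'classified_as': [{'_label': 'Painting'}]})   # -> {'Painting'}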
import logging
from datalad_lgpdextension.utils.dataframe import Dataframe
from datalad_lgpdextension.writers.dataframe import Dataframe as dfutils
from datalad_lgpdextension.utils.folder import Folder
from datalad_lgpdextension.runner.actions import Actions
from datalad_lgpdextension.utils.generate_config import GenerateConfig
lgr = logging.getLogger('datalad.lgpdextension.lgpd_extension.writers.dataframe')
class Main:
def __init__(self,filename=f"{Folder().getcurrent()}/_settings.json"):
self.filename = filename
def update_file(self,settings):
        # Template for the warning raised when a required settings section is missing.
        default_field = "Added the '{{FIELD}}' field. YOU NEED TO CONFIGURE THE '{{FIELD}}' FIELD IN THE SETTINGS JSON."
        msgs = ""
        if not settings.get("ofuscation",None):
            msg = default_field.replace("{{FIELD}}","OFUSCATION")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["ofuscation"] = GenerateConfig().addExampleOfuscation()
        if not settings.get("tokenization",None):
            msg = default_field.replace("{{FIELD}}","TOKENIZATION")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["tokenization"] = GenerateConfig().addExampleTokenization()
        if not settings.get("file",None):
            msg = default_field.replace("{{FIELD}}","FILE")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["file"] = GenerateConfig().addExampleFile()
        if not settings.get("columns",None):
            msg = default_field.replace("{{FIELD}}","COLUMNS")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["columns"] = GenerateConfig().addExampleColumn()
Folder(self.filename).save(settings)
if msgs != "":
raise Exception(msgs)
return settings
def run(self):
if not Folder(self.filename).exists():
settings = self.update_file(dict())
else:
fld = Folder(self.filename)
settings = self.update_file(fld.read())
dataframe = dfutils().read(settings)
for colname,value in settings["columns"].items():
if value.get("enable",None) == "true":
Actions(colname,settings,dataframe,self.filename).run(value["actions"])
        return True
class LinkedListNode:
def __init__(self, data):
self.data = data
self.next = None
class Stack:
def __init__(self):
self.num_elements = 0
self.head = None
def push(self, data):
new_node = LinkedListNode(data)
if self.head is None:
self.head = new_node
else:
new_node.next = self.head
self.head = new_node
self.num_elements += 1
def pop(self):
if self.is_empty():
return None
temp = self.head.data
self.head = self.head.next
self.num_elements -= 1
return temp
def top(self):
if self.head is None:
return None
return self.head.data
def size(self):
return self.num_elements
def is_empty(self):
return self.num_elements == 0
def evaluate_post_fix(input_list):
    stack = Stack()
    operators = ['*', '/', '-', '+']
    for element in input_list:
        # print(stack)
        if element in operators:
            # Pop the two most recent operands; the order matters for '-' and '/'.
            first = int(stack.pop())
            second = int(stack.pop())
            print(first, second, element)
            # Compare strings with '==' ('is' checks object identity, not equality).
            if element == '+':
                stack.push(second + first)
            if element == '/':
                stack.push(int(second / first))
            if element == '*':
                stack.push(int(second * first))
            if element == '-':
                stack.push(second - first)
        else:
            stack.push(element)
    # print(stack.head.data)
    return stack.head.data

print(evaluate_post_fix(["4", "13", "5", "/", "+"]))
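# Worked example for the call above: "4", "13", "5" are pushed; "/" pops 5 and 13 and
# pushes int(13 / 5) = 2; "+" pops 2 and 4 and pushes 6, so the final printed value is 6.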
import numpy as np
def project(W, X, mu=None):
if mu is None:
return np.dot(X,W)
return np.dot(X - mu, W)
def reconstruct(W, Y, mu=None):
if mu is None:
return np.dot(Y,W.T)
return np.dot(Y, W.T) + mu
def pca(X, y, num_components=0):
[n,d] = X.shape
if (num_components <= 0) or (num_components>n):
num_components = n
mu = X.mean(axis=0)
X = X - mu
if n>d:
C = np.dot(X.T,X)
[eigenvalues,eigenvectors] = np.linalg.eigh(C)
else:
C = np.dot(X,X.T)
[eigenvalues,eigenvectors] = np.linalg.eigh(C)
eigenvectors = np.dot(X.T,eigenvectors)
        for i in range(n):
eigenvectors[:,i] = eigenvectors[:,i]/np.linalg.norm(eigenvectors[:,i])
# or simply perform an economy size decomposition
# eigenvectors, eigenvalues, variance = np.linalg.svd(X.T, full_matrices=False)
# sort eigenvectors descending by their eigenvalue
idx = np.argsort(-eigenvalues)
eigenvalues = eigenvalues[idx]
eigenvectors = eigenvectors[:,idx]
# select only num_components
eigenvalues = eigenvalues[0:num_components].copy()
eigenvectors = eigenvectors[:,0:num_components].copy()
return [eigenvalues, eigenvectors, mu]
def lda(X, y, num_components=0):
y = np.asarray(y)
[n,d] = X.shape
c = np.unique(y)
    if (num_components <= 0) or (num_components > (len(c)-1)):
num_components = (len(c)-1)
meanTotal = X.mean(axis=0)
Sw = np.zeros((d, d), dtype=np.float32)
Sb = np.zeros((d, d), dtype=np.float32)
for i in c:
Xi = X[np.where(y==i)[0],:]
meanClass = Xi.mean(axis=0)
Sw = Sw + np.dot((Xi-meanClass).T, (Xi-meanClass))
        # np.outer is needed here: with 1-D mean vectors, np.dot would collapse to a scalar.
        Sb = Sb + n * np.outer(meanClass - meanTotal, meanClass - meanTotal)
    # Use a matrix product for inv(Sw) @ Sb; '*' on ndarrays is element-wise.
    eigenvalues, eigenvectors = np.linalg.eig(np.dot(np.linalg.inv(Sw), Sb))
idx = np.argsort(-eigenvalues.real)
eigenvalues, eigenvectors = eigenvalues[idx], eigenvectors[:,idx]
eigenvalues = np.array(eigenvalues[0:num_components].real, dtype=np.float32, copy=True)
eigenvectors = np.array(eigenvectors[0:,0:num_components].real, dtype=np.float32, copy=True)
return [eigenvalues, eigenvectors]
def fisherfaces(X,y,num_components=0):
y = np.asarray(y)
[n,d] = X.shape
c = len(np.unique(y))
[eigenvalues_pca, eigenvectors_pca, mu_pca] = pca(X, y, (n-c))
[eigenvalues_lda, eigenvectors_lda] = lda(project(eigenvectors_pca, X, mu_pca), y, num_components)
eigenvectors = np.dot(eigenvectors_pca,eigenvectors_lda)
return [eigenvalues_lda, eigenvectors, mu_pca]
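# Minimal usage sketch (an illustration, not part of the original module): project a
# small random dataset onto its first two principal components and reconstruct it.
# The shapes and class labels below are made up purely for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 5)          # 20 samples, 5 features
    y_demo = [0] * 10 + [1] * 10      # two hypothetical class labels
    evals, evecs, mu = pca(X_demo, y_demo, num_components=2)
    Y_demo = project(evecs, X_demo, mu)        # shape (20, 2)
    X_back = reconstruct(evecs, Y_demo, mu)    # approximate reconstruction, shape (20, 5)
    print(Y_demo.shape, X_back.shape)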
import pytest
from copy import deepcopy
import mosdef_cassandra as mc
import unyt as u
from mosdef_cassandra.tests.base_test import BaseTest
from mosdef_cassandra.writers.inp_functions import generate_input
from mosdef_cassandra.writers.writers import write_mcfs
from mosdef_cassandra.utils.tempdir import *
class TestInpFunctions(BaseTest):
@pytest.fixture
def onecomp_system(self, methane_oplsaa, box):
system = mc.System([box], [methane_oplsaa], mols_to_add=[[10]])
moveset = mc.MoveSet("nvt", [methane_oplsaa])
return system, moveset
@pytest.fixture
def twocomp_system(self, methane_oplsaa, butane_oplsaa, box):
system = mc.System(
[box], [methane_oplsaa, butane_oplsaa], mols_to_add=[[10, 100]]
)
moveset = mc.MoveSet("nvt", [methane_oplsaa, butane_oplsaa])
return system, moveset
@pytest.fixture
def twobox_system(self, methane_oplsaa, box):
system = mc.System(
[box, box], [methane_oplsaa], mols_to_add=[[10], [5]]
)
moveset = mc.MoveSet("gemc", [methane_oplsaa])
return system, moveset
@pytest.fixture
def twocomptwobox_system(self, methane_oplsaa, butane_oplsaa, box):
system = mc.System(
[box, box],
[methane_oplsaa, butane_oplsaa],
mols_to_add=[[10, 100], [1, 5]],
)
moveset = mc.MoveSet("gemc_npt", [methane_oplsaa, butane_oplsaa])
return system, moveset
@pytest.fixture
def gcmc_system(
self, methane_oplsaa, fixed_lattice_compound, fixed_lattice_trappe
):
box_list = [fixed_lattice_compound]
species_list = [fixed_lattice_trappe, methane_oplsaa]
system = mc.System(
box_list,
species_list,
mols_in_boxes=[[1, 0]],
mols_to_add=[[0, 10]],
)
moveset = mc.MoveSet("gcmc", species_list)
return system, moveset
def test_invalid_kwargs(self, onecomp_system):
(system, moveset) = onecomp_system
with pytest.raises(ValueError, match=r"Invalid input argument"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
random_arg=1,
)
def test_run_name(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
run_name="test name",
)
assert "# Run_Name\ntest-name.out" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
run_name="test_name",
)
assert "# Run_Name\ntest_name.out" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Name\nnvt.out" in inp_data
with pytest.raises(TypeError, match=r"must be a string"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
run_name=1,
)
def test_sim_type(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Sim_Type\nnvt" in inp_data
with pytest.raises(ValueError, match=r"Unsupported sim_type"):
inp_data = mc.writers.inp_functions.get_sim_type("gccmc")
def test_nbr_species(self, onecomp_system, twocomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Nbr_Species\n1" in inp_data
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Nbr_Species\n2" in inp_data
def test_vdw_style(self, twocomp_system, twobox_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# VDW_Style\nlj cut_tail 12.0" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
vdw_style="none",
)
assert "# VDW_Style\nnone\n" in inp_data
with pytest.raises(ValueError, match=r"Unsupported vdw_style"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
vdw_style="cutoff",
vdw_cutoff=12.0 * u.angstrom,
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut",
vdw_cutoff=15.0 * u.angstrom,
)
assert "# VDW_Style\nlj cut 15.0" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_shift",
vdw_cutoff=15.0 * u.angstrom,
)
assert "# VDW_Style\nlj cut_shift 15.0" in inp_data
with pytest.raises(ValueError, match=r"Only one box"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
vdw_cutoff_box2=10.0 * u.angstrom,
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_switch",
vdw_cutoff=[12.0 * u.angstrom, 15.0 * u.angstrom],
)
assert "# VDW_Style\nlj cut_switch 12.0 15.0" in inp_data
with pytest.raises(ValueError, match=r"requires an inner"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_switch",
vdw_cutoff=12.0 * u.angstrom,
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# VDW_Style\nlj cut_tail 12.0\nlj cut_tail 12.0" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_switch",
vdw_cutoff_box1=[12.0 * u.angstrom, 15.0 * u.angstrom],
vdw_cutoff_box2=[11.0 * u.angstrom, 13.0 * u.angstrom],
)
assert (
"# VDW_Style\nlj cut_switch 12.0 15.0\nlj cut_switch 11.0 13.0"
in inp_data
)
with pytest.raises(ValueError, match=r"Unsupported cutoff style"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cutoff",
vdw_cutoff=12.0 * u.angstrom,
)
def test_charge_style(self, twocomp_system, twobox_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Charge_Style\ncoul ewald 12.0 1e-05\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="cut",
)
assert "# Charge_Style\ncoul cut 12.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="dsf",
)
assert "# Charge_Style\ncoul dsf 12.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="dsf",
dsf_damping=0.2,
)
assert "# Charge_Style\ncoul dsf 12.0 0.2\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="none",
)
assert "# Charge_Style\nnone\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_cutoff=15.0 * u.angstrom,
ewald_accuracy=5e-6,
)
assert "# Charge_Style\ncoul ewald 15.0 5e-06\n" in inp_data
with pytest.raises(ValueError, match=r"Only one box"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_cutoff_box2=1.0 * u.angstrom,
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_cutoff_box2=30.0 * u.angstrom,
ewald_accuracy=5e-6,
)
assert (
"# Charge_Style\ncoul ewald 12.0 5e-06\ncoul ewald 30.0 5e-06\n"
in inp_data
)
def test_mixing_rule(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Mixing_Rule\nlb\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="geometric",
)
assert "# Mixing_Rule\ngeometric\n" in inp_data
mixing_dict = {"ls_138_s1 ls_140_s1": "1.0 1.0"}
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="custom",
custom_mixing_dict=mixing_dict,
)
assert (
"# Mixing_Rule\ncustom\nls_138_s1 ls_140_s1 1.0 1.0\n" in inp_data
)
with pytest.raises(
ValueError, match=r"Custom mixing rule requested but"
):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="custom",
)
with pytest.raises(ValueError, match=r"Unsupported mixing rule"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="other",
)
def test_seeds(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Seed_Info\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
seeds=[1, 2],
)
assert "# Seed_Info\n1 2\n" in inp_data
with pytest.raises(TypeError, match=r"argument should be a list"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
seeds=100,
)
with pytest.raises(ValueError, match=r"must be integers"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
seeds=[100, -1],
)
def test_rcut_min(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Rcutoff_Low\n1.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
rcut_min=10.0 * u.angstrom,
)
assert "# Rcutoff_Low\n10.0\n" in inp_data
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
rcut_min="hello",
)
def test_pair_energy(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
rcut_min=10.0 * u.angstrom,
)
assert "# Pair_Energy\ntrue\n" in inp_data
with pytest.raises(TypeError, match=r"be of type boolean"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pair_energy=1,
)
def test_max_molecules(self, twocomp_system, gcmc_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Molecule_Files\nspecies1.mcf 10\nspecies2.mcf 100" in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
max_molecules=[100, 1000],
)
assert (
"# Molecule_Files\nspecies1.mcf 100\nspecies2.mcf 1000" in inp_data
)
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
assert (
"# Molecule_Files\nspecies1.mcf 1\nspecies2.mcf 2010\n" in inp_data
)
(system, moveset) = twocomp_system
with pytest.raises(TypeError, match=r"should be a list"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
max_molecules=100,
)
with pytest.raises(ValueError, match=r"Length of list specified"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
max_molecules=[100],
)
def test_boxes(self, onecomp_system, twobox_system, gcmc_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Box_Info\n1\ncubic\n50.0\n" in inp_data
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Box_Info\n2\ncubic\n50.0\n\ncubic\n50.0\n" in inp_data
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
assert "# Box_Info\n1\ncubic\n29.84\n" in inp_data
def test_temperature(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=200.0 * u.K,
)
assert "# Temperature_Info\n200.0\n" in inp_data
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=200.0 * u.K,
)
assert "# Temperature_Info\n200.0\n200.0\n" in inp_data
with pytest.raises(ValueError, match=r"less than zero"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=-300.0 * u.K,
)
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature="hi",
)
def test_pressure(self, twocomptwobox_system):
(system, moveset) = twocomptwobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=2.0 * u.bar,
)
assert "# Pressure_Info\n2.0\n2.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=2.0 * u.bar,
pressure_box2=10.0 * u.bar,
)
assert "# Pressure_Info\n2.0\n10.0\n" in inp_data
with pytest.raises(ValueError, match=r"Pressure must be specified"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure="string",
)
def test_chempot(self, gcmc_system):
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
assert "# Chemical_Potential_Info\nnone 10.0 \n" in inp_data
with pytest.raises(
ValueError, match=r"Chemical potential information"
):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", "string"],
)
def test_moveset_formatting(self, onecomp_system):
# Invalid keyword
with pytest.raises(
ValueError, match="Invalid probability info section"
):
fake_prob_dict = {"trans": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Translate
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"translate": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"translate": [0.1, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"translate": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"translate": [0.1, [5.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Rotate
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"rotate": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"rotate": [0.1, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"rotate": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"rotate": [0.1, [5.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Angle
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"angle": [14.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"angle": 14.0}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Dihedral
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"dihed": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"dihed": [0.1, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"dihed": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"dihed": [0.1, [5.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Regrow
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"regrow": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"regrow": ["test", 0.1, 0.2]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating"):
fake_prob_dict = {"regrow": ["test", [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"regrow": [0.3, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating"):
fake_prob_dict = {"regrow": [0.3, ["string"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"regrow": [0.3, [1.0]]}
# Vol
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"volume": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"volume": [0.1, 100.0, 0.2]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"volume": ["test", [100.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"volume": [0.1, 100.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"volume": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"volume": [0.1, [100.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Insertable
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"insert": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"insert": [0.1, True, True]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"insert": ["test", [True]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"insert": [0.1, True]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a boolean value"):
fake_prob_dict = {"insert": [0.1, [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"insert": [0.1, [True]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Swap
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, [True], [0.5]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"swap": ["test", [True], [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, True, [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a boolean value"):
fake_prob_dict = {"swap": [0.1, [1.0], [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, [True], 0.5, [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"swap": [0.1, [True], ["test"], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, [True], [0.5], 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"swap": [0.1, [True], [0.5], ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"swap": [0.1, [True], [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"swap": [0.1, [True], [0.5], None]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"swap": [0.1, [True], None, None]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
def test_moveset_onecomp(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.33\n2.0 \n" in inp_data
assert "# Prob_Rotation\n0.33\n30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.34\n1.0 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
moveset.prob_angle = 0.1
moveset.prob_translate = 0.3
moveset.prob_rotate = 0.3
moveset.prob_regrow = 0.3
moveset.max_translate[0][0] = 10.0 * u.angstrom
moveset.max_rotate[0][0] = 10.0 * u.degree
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n10.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n10.0 \n" in inp_data
assert "# Prob_Angle\n0.1\n" in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.3\n1.0 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
def test_moveset_twocomp(self, twocomp_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.33\n2.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.33\n30.0 30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.34\n0.5 0.5 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
moveset.prob_angle = 0.1
moveset.prob_translate = 0.3
moveset.prob_rotate = 0.3
moveset.prob_regrow = 0.26
moveset.max_translate[0][0] = 10.0 * u.angstrom
moveset.max_rotate[0][0] = 10.0 * u.degree
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n10.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n10.0 30.0 \n" in inp_data
assert "# Prob_Angle\n0.1\n" in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.26\n0.5 0.5 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
def test_moveset_twobox(self, twobox_system):
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n2.0 \n2.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n30.0 \n30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.295\n1.0 \n" in inp_data
assert "# Prob_Volume\n0.005\n500.0\n" in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert (
"# Prob_Swap\n0.1\ncbmc \nprob_swap_species 1.0 \nprob_swap_from_box 0.5 0.5 \n"
in inp_data
)
assert "# Prob_Ring" not in inp_data
def test_moveset_twocomptwobox(self, twocomptwobox_system):
(system, moveset) = twocomptwobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1.0 * u.bar,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n2.0 2.0 \n2.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n30.0 30.0 \n30.0 30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.295\n0.5 0.5 \n" in inp_data
assert "# Prob_Volume\n0.005\n500.0\n5000.0\n" in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert (
"# Prob_Swap\n0.1\ncbmc cbmc \nprob_swap_species 0.5 0.5 \nprob_swap_from_box 0.5 0.5 \n"
in inp_data
)
assert "# Prob_Ring" not in inp_data
def test_moveset_gcmc(self, gcmc_system):
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 1.0 * (u.kJ / u.mol)],
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.25\n0.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.25\n0.0 30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.3\n0.0 1.0 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion\n0.1\nnone cbmc" in inp_data
assert "# Prob_Deletion\n0.1\n" in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
def test_start_type(
self,
onecomp_system,
twocomp_system,
twobox_system,
twocomptwobox_system,
gcmc_system,
):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Start_Type\nmake_config 10\n" in inp_data
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Start_Type\nmake_config 10 100\n" in inp_data
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Start_Type\nmake_config 10\nmake_config 5\n" in inp_data
(system, moveset) = twocomptwobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1.0 * u.bar,
)
assert (
"# Start_Type\nmake_config 10 100\nmake_config 1 5\n" in inp_data
)
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 1.0 * (u.kJ / u.mol)],
)
assert "# Start_Type\nadd_to_config 1 0 box1.in.xyz 0 10\n" in inp_data
# HACK to test read config
system_copy = deepcopy(system)
system_copy._mols_to_add = [[0, 0], [0, 0]]
inp_data = generate_input(
system=system_copy,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 1.0 * (u.kJ / u.mol)],
)
assert "# Start_Type\nread_config 1 0 box1.in.xyz\n" in inp_data
def test_run_type(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Type\nequilibration 1000 \n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="production",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Type\nproduction 1000 \n" in inp_data
with pytest.raises(ValueError, match=r"Invalid run type"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="pro",
run_length=500,
temperature=300.0 * u.K,
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Type\nequilibration 1000 100\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
thermal_stat_freq=100,
vol_stat_freq=50,
)
assert "# Run_Type\nequilibration 100 50\n" in inp_data
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
thermal_stat_freq=10.2,
vol_stat_freq=50,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
thermal_stat_freq=10,
vol_stat_freq=1.2,
)
def test_length_info(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Simulation_Length_Info\nunits steps\nprop_freq 500\ncoord_freq 5000\nrun 500"
in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
steps_per_sweep=10,
units="sweeps",
)
assert (
"# Simulation_Length_Info\nunits sweeps\nprop_freq 500\ncoord_freq 5000\nrun 500\nsteps_per_sweep 10\n"
in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
block_avg_freq=10,
)
assert (
"# Simulation_Length_Info\nunits steps\nprop_freq 500\ncoord_freq 5000\nrun 500\nblock_averages 10\n"
in inp_data
)
with pytest.raises(ValueError, match=r"Invalid units"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
units="stweeps",
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
prop_freq=1.2,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
coord_freq=1.2,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=5.2,
temperature=300.0 * u.K,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
block_avg_freq=10.2,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
steps_per_sweep=10.2,
)
def test_property_info(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Property_Info 1\nenergy_total\nenergy_intra\nenergy_inter\nenthalpy\npressure\nvolume\nnmols\nmass_density\n"
in inp_data
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Property_Info 1\nenergy_total\nenergy_intra\nenergy_inter\nenthalpy\npressure\nvolume\nnmols\nmass_density\n\n# Property_Info 2\nenergy_total\nenergy_intra\nenergy_inter\nenthalpy\npressure\nvolume\nnmols\nmass_density\n"
in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
properties=["energy_total", "enthalpy", "density"],
)
assert (
"# Property_Info 1\nenergy_total\nenthalpy\ndensity\n\n# Property_Info 2\nenergy_total\nenthalpy\ndensity\n"
in inp_data
)
with pytest.raises(ValueError, match=r"Invalid property"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
properties=["temperature"],
)
def test_fragment_files(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Fragment_Files\n" in inp_data
def test_verbose_log(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
verbose_log=True,
)
assert "# Verbose_Logfile\ntrue\n" in inp_data
with pytest.raises(TypeError, match=r"Verbosity must be"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
verbose_log="true",
)
def test_cbmc_info(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# CBMC_Info\nkappa_ins 10\nkappa_dih 10\nrcut_cbmc 6.0\n"
in inp_data
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# CBMC_Info\nkappa_ins 10\nkappa_dih 10\nrcut_cbmc 6.0 6.0\n"
in inp_data
)
(system, moveset) = onecomp_system
moveset.cbmc_rcut = [0.45 * u.nm]
moveset.cbmc_n_insert = 2
moveset.cbmc_n_dihed = 5
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
print(inp_data)
assert (
"# CBMC_Info\nkappa_ins 2\nkappa_dih 5\nrcut_cbmc 4.5\n"
in inp_data
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 1.0 * u.angstrom),
("cylinder", 1.0 * u.angstrom),
("sphere", 1.0 * u.angstrom),
("interface", [1.0 * u.angstrom, 2.0 * u.angstrom]),
],
)
def test_write_restricted_gcmc(self, gcmc_system, typ, value):
(system, moveset) = gcmc_system
moveset.add_restricted_insertions(
system.species_topologies, [[None, typ]], [[None, value]]
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
if typ == "interface":
assert (
"\nrestricted_insertion {} {:0.1f} {:0.1f}\n".format(
typ, value[0].to_value(), value[1].to_value()
)
in inp_data
)
else:
assert (
"\nrestricted_insertion {} {:0.1f}\n".format(
typ, value.to_value()
)
in inp_data
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 30 * u.angstrom),
("cylinder", 30 * u.angstrom),
("sphere", 30 * u.angstrom),
("interface", [30 * u.angstrom, 50 * u.angstrom]),
],
)
def test_fail_restricted_gcmc(self, gcmc_system, typ, value):
(system, moveset) = gcmc_system
moveset.add_restricted_insertions(
system.species_topologies, [[None, typ]], [[None, value]]
)
with pytest.raises(ValueError, match=r"Restricted insertion"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 10.0 * u.angstrom),
("cylinder", 10.0 * u.angstrom),
("sphere", 10.0 * u.angstrom),
("interface", [10.0 * u.angstrom, 20.0 * u.angstrom]),
],
)
def test_write_restricted_gemc_npt(self, twocomptwobox_system, typ, value):
(system, moveset) = twocomptwobox_system
moveset.add_restricted_insertions(
system.species_topologies,
[[None, None], [None, typ]],
[[None, None], [None, value]],
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1 * u.bar,
)
if typ == "interface":
assert (
"\nrestricted_insertion {} {:0.1f} {:0.1f}\n".format(
typ, value[0].to_value(), value[1].to_value()
)
in inp_data
)
else:
assert (
"\nrestricted_insertion {} {:0.1f}\n".format(
typ, value.to_value()
)
in inp_data
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 60 * u.angstrom),
("cylinder", 60 * u.angstrom),
("sphere", 60 * u.angstrom),
("interface", [10 * u.angstrom, 70 * u.angstrom]),
],
)
def test_fail_restricted_gemc_npt(self, twocomptwobox_system, typ, value):
(system, moveset) = twocomptwobox_system
moveset.add_restricted_insertions(
system.species_topologies,
[[None, None], [None, typ]],
[[None, None], [None, value]],
)
with pytest.raises(ValueError, match=r"Restricted insertion"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1 * u.bar,
)
@pytest.mark.parametrize(
"angle_style", [["fixed"], ["harmonic"], "fixed", "harmonic"]
)
def test_onecomp_angle_style(self, onecomp_system, angle_style):
with temporary_directory() as tmp_dir:
with temporary_cd(tmp_dir):
(system, moveset) = onecomp_system
write_mcfs(system, angle_style=angle_style)
@pytest.mark.parametrize("angle_style", ["fixed", "harmonic"])
def test_twocomp_angle_style(self, twocomp_system, angle_style):
with temporary_directory() as tmp_dir:
with temporary_cd(tmp_dir):
(system, moveset) = twocomp_system
write_mcfs(system, angle_style=[angle_style, angle_style])
def test_angle_style_error(self, onecomp_system):
(system, moveset) = onecomp_system
with pytest.raises(ValueError, match="Invalid"):
write_mcfs(system, angle_style=["charmm"])
from typing import List


class Solution:
    def largestPerimeter(self, A: List[int]) -> int:
        A.sort()
        # Walk from the largest value down: the first triple that satisfies the
        # triangle inequality gives the largest possible perimeter.
        for i in range(len(A) - 1, 1, -1):
            if A[i - 2] + A[i - 1] > A[i]:
                return A[i - 2] + A[i - 1] + A[i]
        return 0
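# Example (hypothetical input): Solution().largestPerimeter([2, 3, 4, 100]) returns 9,
# since 100 cannot be the longest side of any triangle here, but (2, 3, 4) can form one.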
class Permissions(object):
# ccpo permissions
VIEW_AUDIT_LOG = "view_audit_log"
VIEW_CCPO_USER = "view_ccpo_user"
CREATE_CCPO_USER = "create_ccpo_user"
EDIT_CCPO_USER = "edit_ccpo_user"
DELETE_CCPO_USER = "delete_ccpo_user"
# base portfolio perms
VIEW_PORTFOLIO = "view_portfolio"
# application management
VIEW_APPLICATION = "view_application"
EDIT_APPLICATION = "edit_application"
CREATE_APPLICATION = "create_application"
DELETE_APPLICATION = "delete_application"
VIEW_APPLICATION_MEMBER = "view_application_member"
EDIT_APPLICATION_MEMBER = "edit_application_member"
DELETE_APPLICATION_MEMBER = "delete_application_member"
CREATE_APPLICATION_MEMBER = "create_application_member"
VIEW_ENVIRONMENT = "view_environment"
EDIT_ENVIRONMENT = "edit_environment"
CREATE_ENVIRONMENT = "create_environment"
DELETE_ENVIRONMENT = "delete_environment"
ASSIGN_ENVIRONMENT_MEMBER = "assign_environment_member"
VIEW_APPLICATION_ACTIVITY_LOG = "view_application_activity_log"
# funding
VIEW_PORTFOLIO_FUNDING = "view_portfolio_funding" # TO summary page
CREATE_TASK_ORDER = "create_task_order" # create a new TO
VIEW_TASK_ORDER_DETAILS = "view_task_order_details" # individual TO page
EDIT_TASK_ORDER_DETAILS = (
"edit_task_order_details" # edit TO that has not been finalized
)
# reporting
VIEW_PORTFOLIO_REPORTS = "view_portfolio_reports"
# portfolio admin
VIEW_PORTFOLIO_ADMIN = "view_portfolio_admin"
VIEW_PORTFOLIO_NAME = "view_portfolio_name"
EDIT_PORTFOLIO_NAME = "edit_portfolio_name"
VIEW_PORTFOLIO_USERS = "view_portfolio_users"
EDIT_PORTFOLIO_USERS = "edit_portfolio_users"
CREATE_PORTFOLIO_USERS = "create_portfolio_users"
VIEW_PORTFOLIO_ACTIVITY_LOG = "view_portfolio_activity_log"
VIEW_PORTFOLIO_POC = "view_portfolio_poc"
# portfolio POC
EDIT_PORTFOLIO_POC = "edit_portfolio_poc"
ARCHIVE_PORTFOLIO = "archive_portfolio"
'''
Arbitrary accumulation
Description
Complete the code according to the programming template to compute the product of
an arbitrary number of input values.
Note: only one or more lines of code need to be added where ... is marked.
'''
def cmul(a, *b):
    # b collects every argument after a, so the running product covers all inputs.
    m = a
    for i in b:
        m *= i
    return m
print(eval("cmul({})".format(input())))
'''
Two points are worth noting in this program:
1. How to define a function with an unlimited number of arguments: in cmul, b stands
   for all of the input arguments other than a;
2. How to call a function from a string: the combination of "cmul()" and eval()
   provides a lot of flexibility.
'''
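# A small illustration of the same two ideas (hypothetical values, not from the
# original exercise):
#   cmul(2, 3, 4)                         # -> 24; inside cmul, b == (3, 4)
#   eval("cmul({})".format("2, 3, 4"))    # -> 24; the call is built from a string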
from src.preprocessor import preprocessor as preprocessor
from src.error import ApplicationError, error_list
from src.aggregator import Aggregator
from src.constants import MIN_CONTENT_LEN
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import io
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
router = Blueprint(__name__, "router")
limiter = Limiter(
key_func=get_remote_address,
default_limits=["2000 per day", "500 per hour"]
)
@router.route('/', methods=['GET'])
@cross_origin()
def index():
return "Hello"
@router.errorhandler(429)
@cross_origin()
def ratelimit_handler(e):
return return_result(ApplicationError(*error_list["RATE_LIMIT_EXCEEDED"]))
@router.route('/api/url', methods=['POST'])
@limiter.limit('60/minute')
@cross_origin()
def parse_url():
print("Got request", request.args)
# No URL found. Raise error
url = request.args.get('url', None)
print(url)
try:
if url is None:
raise ApplicationError(*error_list["URL_NT_FND"])
except ApplicationError as error:
return return_result(error)
# TODO: Throwing error not added
news_obj, twitter_obj, error = preprocessor(url, published=True)
if error is not None:
return return_result(error)
if len(news_obj.content.split(' ')) < MIN_CONTENT_LEN:
return return_result(ApplicationError(*error_list["CONTENT_TOO_SHORT"]))
aggregator = Aggregator(news=news_obj, tweet=twitter_obj, is_twitter=twitter_obj is not None)
try:
aggregator.run_models()
except ApplicationError as error:
return return_result(error)
return return_result(error, True, aggregator, twitter_obj, news_obj)
@router.route('/api/file', methods=['POST'])
@limiter.limit('60/minute')
@cross_origin()
def parse_file():
print("Got request", request.args)
# If file not found, raise error
try:
if 'file' not in request.files:
raise ApplicationError(*error_list["FILE_NT_FND"])
else:
filest = request.files['file']
if not filest.filename.endswith('doc') and not filest.filename.endswith('docx'):
raise ApplicationError(*error_list["FILE_NT_SUP"])
else:
file_obj = io.BytesIO(filest.read())
except ApplicationError as error:
return return_result(error)
news_obj, twitter_obj, error = preprocessor(file_obj, published=False)
if error is not None:
return return_result(error)
if len(news_obj.content.split(' ')) < MIN_CONTENT_LEN:
return return_result(ApplicationError(*error_list["CONTENT_TOO_SHORT"]))
aggregator = Aggregator(news=news_obj, tweet=twitter_obj, is_twitter=False)
try:
aggregator.run_models()
except ApplicationError as error:
return return_result(error)
# TODO: returning result
return return_result(error, False, aggregator, twitter_obj, news_obj)
def return_result(error: ApplicationError, published=None, aggregator=None, tweet=None, news_obj=None):
if error is None:
agg_dict = aggregator.to_dict() if aggregator is not None else None
news_dict = news_obj.to_dict() if news_obj is not None else None
tweet_dict = tweet.to_dict() if tweet is not None else None
if published:
input_type = 'Twitter' if tweet is not None else "NonTwitter"
else:
input_type = "UnPub"
return jsonify({
"input_type": input_type,
"models": agg_dict,
"details": news_dict,
"metrics": tweet_dict,
"error": ""
})
else:
return jsonify({"error": error.to_dict()})
| python |
'''
@author: Sergio Rojas
@contact: [email protected]
--------------------------
Content released under the
Attribution-NonCommercial-ShareAlike 3.0 Venezuela license (CC BY-NC-SA 3.0 VE)
http://creativecommons.org/licenses/by-nc-sa/3.0/ve/
Created on April 23, 2016
'''
import matplotlib.pyplot as plt
x = [1.5, 2.7, 3.8, 9.5,12.3]
y = [3.8,-2.4, 0.35,6.2,1.5]
fig = plt.figure()
#---
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title('Label of plot 1', fontsize = 10)
ax1.set_xlabel('Label of the x1 axis', fontsize = 12)
ax1.set_ylabel('Label of the y1 axis', fontsize = 15)
ax1.plot(x, y, 'ro', label='y Vs x')
ax1.legend(loc='best')
#---
ax2 = fig.add_subplot(1, 2, 2)
ax2.plot(y, x, 'bx-', label='x Vs y', markersize=20, linewidth=2)
ax2.set_title('Label of plot 2', fontsize = 10)
ax2.set_xlabel('Label of the x2 axis', fontsize = 12)
ax2.set_ylabel('Label of the y2 axis', fontsize = 15)
ax2.legend(loc=0)
fig.tight_layout()
fig.savefig("fig2.png")
plt.show()
| python |
import numpy as np
from Activations import Activations
class Layer:
def __init__(self, nNeurons, activation=Activations.linear, input=np.array([0.0])):
if type(input) == Layer:
self.inputs = input.forward()
self.inputLayer = input
else:
self.inputs = np.array([input])
self.inputLayer = None
self.weights = (np.random.random((nNeurons, len(self.inputs[0]))) * 2) - 1
self.biases = (np.random.random((1,nNeurons)) * 2) - 1
self.activation = activation
self.output = np.nan
self.target = None
self.outputLayer = None
def setInput(self, input):
if type(input) == Layer:
inputs = input.forward()
self.inputLayer = input
self.inputLayer.outputLayer = self
else:
inputs = np.array([input])
self.inputLayer = None
if len(inputs[0])-len(self.inputs[0]) != 0:
self.weights = (np.random.random((len(self.biases[0]), len(inputs[0]))) * 2) - 1
self.inputs = inputs
return self.inputs
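    # forward() refreshes the inputs from the upstream layer (if one is attached)
    # and applies weights, bias and the activation function to produce the output.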
def forward(self):
if self.inputLayer != None:
self.inputs = self.inputLayer.forward()
self.output = self.activation(np.dot(self.weights, self.inputs.T).T + self.biases)
return self.output
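    # calcDeriv() backpropagates the error: the output layer returns (output - target),
    # while hidden layers chain the derivative through the weights and activation
    # derivative of the layer that follows them.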
def calcDeriv(self):
deriv = []
if self.outputLayer == None and type(self.target) == np.ndarray:
deriv = self.output-self.target
else:
if self.outputLayer != None:
outDeriv = self.outputLayer.calcDeriv()
outputs = self.forward()
for i in range(len(self.biases[0])):
deriv.append([])
for j in range(len(self.outputLayer.biases[0])):
wno = self.outputLayer.weights[j][i]
bo = self.outputLayer.biases[0][j]
deriv[len(deriv)-1].append(Activations.getDerivative(self.outputLayer.activation)(outputs[0][i]*wno+bo)*wno)
deriv = np.array(deriv).dot(outDeriv.T).T
        return deriv
| python |
from baconian.common.special import *
from baconian.core.core import EnvSpec
from copy import deepcopy
import typeguard as tg
from baconian.common.error import *
class SampleData(object):
def __init__(self, env_spec: EnvSpec = None, obs_shape=None, action_shape=None):
if env_spec is None and (obs_shape is None or action_shape is None):
raise ValueError('At least env_spec or (obs_shape, action_shape) should be passed in')
self.env_spec = env_spec
self.obs_shape = env_spec.obs_shape if env_spec else obs_shape
self.action_shape = env_spec.action_shape if env_spec else action_shape
def reset(self):
raise NotImplementedError
def append(self, *args, **kwargs):
raise NotImplementedError
def union(self, sample_data):
raise NotImplementedError
def get_copy(self):
raise NotImplementedError
def __call__(self, set_name, **kwargs):
raise NotImplementedError
def append_new_set(self, name, data_set: (list, np.ndarray), shape: (tuple, list)):
raise NotImplementedError
def sample_batch(self, *args, **kwargs):
raise NotImplementedError
def apply_transformation(self, set_name, func, *args, **kwargs):
raise NotImplementedError
def apply_op(self, set_name, func, *args, **kwargs):
raise NotImplementedError
class TransitionData(SampleData):
def __init__(self, env_spec: EnvSpec = None, obs_shape=None, action_shape=None):
super(TransitionData, self).__init__(env_spec=env_spec, obs_shape=obs_shape, action_shape=action_shape)
self.cumulative_reward = 0.0
self.step_count_per_episode = 0
assert isinstance(self.obs_shape, (list, tuple))
assert isinstance(self.action_shape, (list, tuple))
self.obs_shape = list(self.obs_shape)
self.action_shape = list(self.action_shape)
self._internal_data_dict = {
'state_set': [np.empty([0] + self.obs_shape), self.obs_shape],
'new_state_set': [np.empty([0] + self.obs_shape), self.obs_shape],
'action_set': [np.empty([0] + self.action_shape), self.action_shape],
'reward_set': [np.empty([0]), []],
'done_set': [np.empty([0], dtype=bool), []]
}
self.current_index = 0
def __len__(self):
return len(self._internal_data_dict['state_set'][0])
def __call__(self, set_name, **kwargs):
if set_name not in self._allowed_data_set_keys:
raise ValueError('pass in set_name within {} '.format(self._allowed_data_set_keys))
return make_batch(self._internal_data_dict[set_name][0],
original_shape=self._internal_data_dict[set_name][1])
def reset(self):
for key, data_set in self._internal_data_dict.items():
self._internal_data_dict[key][0] = np.empty([0, *self._internal_data_dict[key][1]])
self.cumulative_reward = 0.0
self.step_count_per_episode = 0
def append(self, state: np.ndarray, action: np.ndarray, new_state: np.ndarray, done: bool, reward: float):
self._internal_data_dict['state_set'][0] = np.concatenate(
(self._internal_data_dict['state_set'][0], np.reshape(state, [1] + self.obs_shape)), axis=0)
self._internal_data_dict['new_state_set'][0] = np.concatenate(
(self._internal_data_dict['new_state_set'][0], np.reshape(new_state, [1] + self.obs_shape)), axis=0)
self._internal_data_dict['reward_set'][0] = np.concatenate(
(self._internal_data_dict['reward_set'][0], np.reshape(reward, [1])), axis=0)
self._internal_data_dict['done_set'][0] = np.concatenate(
(self._internal_data_dict['done_set'][0], np.reshape(np.array(done, dtype=bool), [1])), axis=0)
self._internal_data_dict['action_set'][0] = np.concatenate(
(self._internal_data_dict['action_set'][0], np.reshape(action, [1] + self.action_shape)), axis=0)
self.cumulative_reward += reward
def union(self, sample_data):
assert isinstance(sample_data, type(self))
self.cumulative_reward += sample_data.cumulative_reward
self.step_count_per_episode += sample_data.step_count_per_episode
for key, val in self._internal_data_dict.items():
assert self._internal_data_dict[key][1] == sample_data._internal_data_dict[key][1]
self._internal_data_dict[key][0] = np.concatenate(
(self._internal_data_dict[key][0], sample_data._internal_data_dict[key][0]), axis=0)
def get_copy(self):
obj = TransitionData(env_spec=self.env_spec, obs_shape=self.obs_shape, action_shape=self.action_shape)
for key in self._internal_data_dict:
obj._internal_data_dict[key] = deepcopy(self._internal_data_dict[key])
return obj
def append_new_set(self, name, data_set: (list, np.ndarray), shape: (tuple, list)):
assert len(data_set) == len(self)
assert len(np.array(data_set).shape) - 1 == len(shape)
if len(shape) > 0:
assert np.equal(np.array(data_set).shape[1:], shape).all()
shape = tuple(shape)
self._internal_data_dict[name] = [np.array(data_set), shape]
def sample_batch(self, batch_size, shuffle_flag=True, **kwargs) -> dict:
if shuffle_flag is False:
raise NotImplementedError
total_num = len(self)
id_index = np.random.randint(low=0, high=total_num, size=batch_size)
batch_data = dict()
for key in self._internal_data_dict.keys():
batch_data[key] = self(key)[id_index]
return batch_data
def get_mean_of(self, set_name):
return self.apply_op(set_name=set_name, func=np.mean)
def get_sum_of(self, set_name):
return self.apply_op(set_name=set_name, func=np.sum)
def apply_transformation(self, set_name, func, direct_apply=False, **func_kwargs):
data = make_batch(self._internal_data_dict[set_name][0],
original_shape=self._internal_data_dict[set_name][1])
transformed_data = make_batch(func(data, **func_kwargs),
original_shape=self._internal_data_dict[set_name][1])
if transformed_data.shape != data.shape:
raise TransformationResultedToDifferentShapeError()
elif direct_apply is True:
self._internal_data_dict[set_name][0] = transformed_data
return transformed_data
def apply_op(self, set_name, func, **func_kwargs):
data = make_batch(self._internal_data_dict[set_name][0],
original_shape=self._internal_data_dict[set_name][1])
applied_op_data = np.array(func(data, **func_kwargs))
return applied_op_data
def shuffle(self, index: list = None):
if not index:
index = np.arange(len(self._internal_data_dict['state_set'][0]))
np.random.shuffle(index)
for key in self._internal_data_dict.keys():
self._internal_data_dict[key][0] = self._internal_data_dict[key][0][index]
@property
def _allowed_data_set_keys(self):
return list(self._internal_data_dict.keys())
@property
def state_set(self):
return self('state_set')
@property
def new_state_set(self):
return self('new_state_set')
@property
def action_set(self):
return self('action_set')
@property
def reward_set(self):
return self('reward_set')
@property
def done_set(self):
return self('done_set')
class TrajectoryData(SampleData):
def __init__(self, env_spec=None, obs_shape=None, action_shape=None):
super(TrajectoryData, self).__init__(env_spec=env_spec, obs_shape=obs_shape, action_shape=action_shape)
self.trajectories = []
def reset(self):
self.trajectories = []
def append(self, transition_data: TransitionData):
self.trajectories.append(transition_data)
def union(self, sample_data):
if not isinstance(sample_data, type(self)):
raise TypeError()
self.trajectories += sample_data.trajectories
def return_as_transition_data(self, shuffle_flag=False) -> TransitionData:
transition_set = self.trajectories[0].get_copy()
for i in range(1, len(self.trajectories)):
transition_set.union(self.trajectories[i])
if shuffle_flag is True:
transition_set.shuffle()
return transition_set
def get_mean_of(self, set_name):
tran = self.return_as_transition_data()
return tran.get_mean_of(set_name)
def get_sum_of(self, set_name):
tran = self.return_as_transition_data()
return tran.get_sum_of(set_name)
def __len__(self):
return len(self.trajectories)
def get_copy(self):
tmp_traj = TrajectoryData(env_spec=self.env_spec, obs_shape=self.obs_shape, action_shape=self.action_shape)
for traj in self.trajectories:
tmp_traj.append(transition_data=traj.get_copy())
return tmp_traj
def apply_transformation(self, set_name, func, direct_apply=False, **func_kwargs):
# TODO unit test
for traj in self.trajectories:
traj.apply_transformation(set_name, func, direct_apply, **func_kwargs)
def apply_op(self, set_name, func, **func_kwargs):
# TODO unit test
res = []
for traj in self.trajectories:
res.append(traj.apply_op(set_name, func, **func_kwargs))
return np.array(res)
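# Minimal usage sketch (the shapes below are made up for illustration only):
#   buffer = TransitionData(obs_shape=(4,), action_shape=(2,))
#   buffer.append(state=np.zeros(4), action=np.zeros(2),
#                 new_state=np.ones(4), done=False, reward=1.0)
#   batch = buffer.sample_batch(batch_size=1)   # dict keyed by 'state_set', 'action_set', ...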
| python |
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import time
import utils
import settings
from config import PortConf
from settings import NICS, LOG_NAME_SEP, get_netdev
from project_dpdk import DPDKdut
from dut import Dut
from net_device import GetNicObj
from net_device import RemoveNicObj
class VirtDut(DPDKdut):
"""
A connection to the CRB under test.
This class sends commands to the CRB and validates the responses. It is
implemented using either ssh for linuxapp or the terminal server for
baremetal.
All operations are in fact delegated to an instance of either CRBLinuxApp
or CRBBareMetal.
"""
def __init__(self, hyper, crb, serializer, virttype, vm_name, suite, cpu_topo):
self.vm_name = vm_name
self.hyper = hyper
self.cpu_topo = cpu_topo
self.vm_ip = crb['IP']
self.NAME = 'virtdut' + LOG_NAME_SEP + '%s' % self.vm_ip
super(Dut, self).__init__(crb, serializer, self.NAME)
# load port config from suite cfg
self.suite = suite
self.number_of_cores = 0
self.tester = None
self.cores = []
self.architecture = None
self.ports_info = None
self.ports_map = []
self.virttype = virttype
def init_log(self):
self.logger.config_suite(self.host_dut.test_classname, 'virtdut')
def close(self, force=False):
if self.session:
self.session.close(force)
self.session = None
if self.alt_session:
self.alt_session.close(force)
self.alt_session = None
RemoveNicObj(self)
def set_nic_type(self, nic_type):
"""
        Set CRB NICs ready to be validated.
"""
self.nic_type = nic_type
# vm_dut config will load from vm configuration file
def load_portconf(self):
"""
Load port config for this virtual machine
"""
self.conf = PortConf()
self.conf.load_ports_config(self.vm_name)
self.ports_cfg = self.conf.get_ports_config()
return
def create_portmap(self):
        # if no ports are configured in the vm port config file, use ping6 to build the portmap
if not self.ports_cfg:
self.map_available_ports()
port_num = len(self.ports_info)
self.ports_map = [-1] * port_num
for key in self.ports_cfg.keys():
index = int(key)
if index >= port_num:
print utils.RED("Can not found [%d ]port info" % index)
continue
if 'peer' in self.ports_cfg[key].keys():
tester_pci = self.ports_cfg[key]['peer']
# find tester_pci index
pci_idx = self.tester.get_local_index(tester_pci)
self.ports_map[index] = pci_idx
def set_target(self, target, bind_dev=True):
"""
        Set environment variables; these have to be set up all the time because
        some tests compile example apps by themselves and will fail otherwise.
        Set hugepages on the DUT and install the modules required by DPDK.
        Configure the default ixgbe PMD function.
"""
self.set_toolchain(target)
# set env variable
# These have to be setup all the time. Some tests need to compile
# example apps by themselves and will fail otherwise.
self.send_expect("export RTE_TARGET=" + target, "#")
self.send_expect("export RTE_SDK=`pwd`", "#")
if not self.skip_setup:
self.build_install_dpdk(target)
self.setup_memory(hugepages=1024)
self.setup_modules(target)
if bind_dev:
self.bind_interfaces_linux('igb_uio')
def prerequisites(self, pkgName, patch):
"""
        Prerequisite function that should be called before executing any test case.
        It first scans all lcore information on the DUT,
        then calls the PCI scan function to collect NIC device information,
        and finally sets up the DUT environment for validation.
"""
if not self.skip_setup:
self.prepare_package()
self.send_expect("cd %s" % self.base_dir, "# ")
self.send_expect("alias ls='ls --color=none'", "#")
if self.get_os_type() == 'freebsd':
self.send_expect('alias make=gmake', '# ')
self.send_expect('alias sed=gsed', '# ')
self.init_core_list()
self.pci_devices_information()
# scan ports before restore interface
self.scan_ports()
# update with real numa id
self.update_ports()
# restore dut ports to kernel
if self.virttype != 'XEN':
self.restore_interfaces()
else:
self.restore_interfaces_domu()
# rescan ports after interface up
self.rescan_ports()
        # no need to rescan ports for a guest OS that has just booted up
        # load port info from config file
self.load_portconf()
# enable tester port ipv6
self.host_dut.enable_tester_ipv6()
self.mount_procfs()
self.create_portmap()
# disable tester port ipv6
self.host_dut.disable_tester_ipv6()
# print latest ports_info
for port_info in self.ports_info:
self.logger.info(port_info)
def init_core_list(self):
self.cores = []
cpuinfo = self.send_expect("grep --color=never \"processor\""
" /proc/cpuinfo", "#", alt_session=False)
cpuinfo = cpuinfo.split('\r\n')
if self.cpu_topo != '':
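            # cpu_topo is expected in the form "<sockets>S/<cores>C/<threads>T",
            # e.g. "1S/2C/2T" means 1 socket * 2 cores * 2 threads = 4 logical cores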
topo_reg = r"(\d)S/(\d)C/(\d)T"
m = re.match(topo_reg, self.cpu_topo)
if m:
socks = int(m.group(1))
cores = int(m.group(2))
threads = int(m.group(3))
total = socks * cores * threads
cores_persock = cores * threads
total_phycores = socks * cores
# cores should match cpu_topo
if total != len(cpuinfo):
print utils.RED("Core number not matched!!!")
else:
for core in range(total):
thread = core / total_phycores
phy_core = core % total_phycores
# if this core is hyper core
if thread:
idx = core % total_phycores
socket = idx / cores
else:
socket = core / cores
# tricky here, socket must be string
self.cores.append({'thread': core,
'socket': str(socket),
'core': phy_core})
self.number_of_cores = len(self.cores)
return
# default core map
for line in cpuinfo:
m = re.search("processor\t: (\d+)", line)
if m:
thread = m.group(1)
socket = 0
core = thread
self.cores.append(
{'thread': thread, 'socket': socket, 'core': core})
self.number_of_cores = len(self.cores)
def restore_interfaces_domu(self):
"""
Restore Linux interfaces.
"""
for port in self.ports_info:
pci_bus = port['pci']
pci_id = port['type']
driver = settings.get_nic_driver(pci_id)
if driver is not None:
addr_array = pci_bus.split(':')
domain_id = addr_array[0]
bus_id = addr_array[1]
devfun_id = addr_array[2]
port = GetNicObj(self, domain_id, bus_id, devfun_id)
itf = port.get_interface_name()
self.send_expect("ifconfig %s up" % itf, "# ")
time.sleep(30)
print self.send_expect("ip link ls %s" % itf, "# ")
else:
self.logger.info(
"NOT FOUND DRIVER FOR PORT (%s|%s)!!!" % (pci_bus, pci_id))
def pci_devices_information(self):
self.pci_devices_information_uncached()
def get_memory_channels(self):
"""
Virtual machine has no memory channel concept, so always return 1
"""
return 1
def check_ports_available(self, pci_bus, pci_id):
"""
        Check whether auto-scanned ports are ready to use
"""
pci_addr = "%s:%s" % (pci_bus, pci_id)
if pci_id == "8086:100e":
return False
return True
    # loading vm port conf needs another function
    # need to add virtual function devices into NICS
def scan_ports(self):
"""
Scan ports information, for vm will always scan
"""
self.scan_ports_uncached()
def scan_ports_uncached(self):
"""
        Scan ports and collect each port's PCI id, MAC address and IPv6 address.
"""
scan_ports_uncached = getattr(
self, 'scan_ports_uncached_%s' % self.get_os_type())
return scan_ports_uncached()
def update_ports(self):
"""
        Update port information according to the host PCI mapping
"""
for port in self.ports_info:
vmpci = port['pci']
for pci_map in self.hyper.pci_maps:
                # search pci mapping structure
if vmpci == pci_map['guestpci']:
hostpci = pci_map['hostpci']
# search host port info structure
for hostport in self.host_dut.ports_info:
# update port numa
if hostpci == hostport['pci']:
port['numa'] = hostport['numa']
port['port'].socket = hostport['numa']
break
if 'sriov_vfs_pci' in hostport and \
hostpci in hostport['sriov_vfs_pci']:
port['numa'] = hostport['numa']
port['port'].socket = hostport['numa']
break
def map_available_ports(self):
"""
Load or generate network connection mapping list.
"""
self.map_available_ports_uncached()
self.logger.warning("VM DUT PORT MAP: " + str(self.ports_map))
def map_available_ports_uncached(self):
"""
Generate network connection mapping list.
"""
nrPorts = len(self.ports_info)
if nrPorts == 0:
return
remove = []
self.ports_map = [-1] * nrPorts
hits = [False] * len(self.tester.ports_info)
for vmPort in range(nrPorts):
vmpci = self.ports_info[vmPort]['pci']
peer = self.get_peer_pci(vmPort)
# if peer pci configured
if peer is not None:
for remotePort in range(len(self.tester.ports_info)):
if self.tester.ports_info[remotePort]['pci'] == peer:
hits[remotePort] = True
self.ports_map[vmPort] = remotePort
break
if self.ports_map[vmPort] == -1:
self.logger.error("CONFIGURED TESTER PORT CANNOT FOUND!!!")
else:
continue # skip ping6 map
# strip pci address on host for pass-through device
hostpci = 'N/A'
for pci_map in self.hyper.pci_maps:
if vmpci == pci_map['guestpci']:
hostpci = pci_map['hostpci']
break
# auto ping port map
for remotePort in range(len(self.tester.ports_info)):
            # two vfs may connect to the same tester port, so we need to
            # skip pinging from devices that sit on the same pf device
remotepci = self.tester.ports_info[remotePort]['pci']
port_type = self.tester.ports_info[remotePort]['type']
# IXIA port should not check whether has vfs
if port_type != 'ixia':
remoteport = self.tester.ports_info[remotePort]['port']
vfs = []
# vm_dut and tester in same dut
host_ip = self.crb['IP'].split(':')[0]
if self.crb['tester IP'] == host_ip:
vfs = remoteport.get_sriov_vfs_pci()
# if hostpci is vf of tester port
if hostpci == remotepci or hostpci in vfs:
print utils.RED("Skip ping from same PF device")
continue
ipv6 = self.get_ipv6_address(vmPort)
if ipv6 == "Not connected":
continue
out = self.tester.send_ping6(
remotePort, ipv6, self.get_mac_address(vmPort))
if ('64 bytes from' in out):
self.logger.info(
"PORT MAP: [dut %d: tester %d]" % (vmPort, remotePort))
self.ports_map[vmPort] = remotePort
hits[remotePort] = True
continue
| python |
#!/usr/bin/python
# Copyright (c)2012 EMC Corporation
# All Rights Reserved
# This software contains the intellectual property of EMC Corporation
# or is licensed to EMC Corporation from third parties. Use of this
# software and the intellectual property contained therein is expressly
# limited to the terms and conditions of the License Agreement under which
# it is provided by or on behalf of EMC.
import json
import common
from common import SOSError
class VcenterDatacenter(object):
'''
The class definition for operations on 'VcenterDatacenter'.
'''
# Commonly used URIs for the 'vcenterdatacenters' module
URI_SERVICES_BASE = ''
URI_RESOURCE_DEACTIVATE = '{0}/deactivate'
URI_VCENTER = URI_SERVICES_BASE + '/compute/vcenters/{0}'
URI_VCENTER_DATACENTERS = URI_VCENTER + '/vcenter-data-centers'
URI_DATACENTERS = URI_SERVICES_BASE + '/compute/vcenter-data-centers'
URI_DATACENTER = URI_SERVICES_BASE + '/compute/vcenter-data-centers/{0}'
URI_DATACENTER_CLUSTERS = URI_DATACENTER + '/clusters'
URI_DATACENTER_HOSTS = URI_DATACENTER + '/hosts'
URI_DATACENTERS_CREATE_CLUSTER = \
URI_DATACENTERS + "/{0}/create-vcenter-cluster"
URI_DATACENTERS_UPDATE_CLUSTER = \
URI_DATACENTERS + "/{0}/update-vcenter-cluster"
    DATACENTERS_FROM_ALL_TENANTS = "No-Filter"
    DATACENTERS_WITH_NO_TENANTS = "Not-Assigned"
def __init__(self, ipAddr, port):
'''
Constructor: takes IP address and port of the ViPR instance. These are
needed to make http requests for REST API
'''
self.__ipAddr = ipAddr
self.__port = port
def vcenterdatacenter_query(self, name, vcenter, tenantname):
'''
Returns the UID of the vcenterdatacenter specified by the name
'''
if (common.is_uri(name)):
return name
vcenterdatacenters = self.vcenterdatacenter_list(vcenter, tenantname)
for vcenterdatacenter in vcenterdatacenters:
if (vcenterdatacenter['name'] == name):
return vcenterdatacenter['id']
raise SOSError(SOSError.NOT_FOUND_ERR,
"vcenterdatacenter " + name + ": not found")
def vcenterdatacenter_list(self, vcenter, tenantname):
'''
Returns all the vcenterdatacenters in a vdc
Parameters:
Returns:
JSON payload of vcenterdatacenter list
'''
from vcenter import VCenter
obj = VCenter(self.__ipAddr, self.__port)
uri = obj.vcenter_query(vcenter, tenantname)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "GET",
VcenterDatacenter.URI_VCENTER_DATACENTERS.format(uri), VcenterDatacenter.DATACENTERS_FROM_ALL_TENANTS)
o = common.json_decode(s)
return o['vcenter_data_center']
def vcenterdatacenter_get_clusters(self, label, vcenter,
tenantname, xml=False):
'''
Makes a REST API call to retrieve details of a vcenterdatacenter
based on its UUID
'''
uri = self.vcenterdatacenter_query(label, vcenter, tenantname)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "GET",
VcenterDatacenter.URI_DATACENTER_CLUSTERS.format(uri),
None, None, xml)
o = common.json_decode(s)
from cluster import Cluster
obj = Cluster(self.__ipAddr, self.__port)
dtlslst = obj.cluster_get_details_list(o['cluster'])
return dtlslst
def vcenterdatacenter_get_hosts(self, label,
vcenter, tenantname, xml=False):
'''
Makes a REST API call to retrieve details of a
vcenterdatacenter based on its UUID
'''
uri = self.vcenterdatacenter_query(label, vcenter, tenantname)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "GET",
VcenterDatacenter.URI_DATACENTER_HOSTS.format(uri),
None, None, xml)
from host import Host
obj = Host(self.__ipAddr, self.__port)
o = common.json_decode(s)
hostsdtls = obj.show(o['host'])
return hostsdtls
def vcenterdatacenter_show(self, label, vcenter, tenantname, xml=False):
'''
Makes a REST API call to retrieve details of a vcenterdatacenter
based on its UUID
'''
uri = self.vcenterdatacenter_query(label, vcenter, tenantname)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "GET",
VcenterDatacenter.URI_DATACENTER.format(uri),
None, None, xml)
if(not xml):
o = common.json_decode(s)
if('inactive' in o):
if(o['inactive']):
return None
else:
return s
return o
def vcenterdatacenter_show_by_uri(self, uri, xml=False):
'''
Makes a REST API call to retrieve details of a vcenterdatacenter
based on its UUID
'''
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "GET",
VcenterDatacenter.URI_DATACENTER.format(uri),
None, None, xml)
if(not xml):
o = common.json_decode(s)
if('inactive' in o):
if(o['inactive']):
return None
else:
return s
return o
def vcenterdatacenter_create(self, label, vcenter, tenantname):
'''
creates a vcenterdatacenter
parameters:
label: label of the vcenterdatacenter
Returns:
JSON payload response
'''
try:
check = self.vcenterdatacenter_show(label, vcenter, tenantname)
if(not check):
raise SOSError(SOSError.NOT_FOUND_ERR,
"vcenterdatacenter " + label + ": not found")
except SOSError as e:
if(e.err_code == SOSError.NOT_FOUND_ERR):
from vcenter import VCenter
obj = VCenter(self.__ipAddr, self.__port)
vcenteruri = obj.vcenter_query(vcenter, tenantname)
var = dict()
params = dict()
params['name'] = label
body = json.dumps(params)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "POST",
VcenterDatacenter.URI_VCENTER_DATACENTERS.format(
vcenteruri), body)
o = common.json_decode(s)
return o
else:
raise e
if(check):
raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
"vcenterdatacenter with name " + label +
" already exists")
def vcenterdatacenter_delete(self, label, vcenter, tenantname):
'''
Makes a REST API call to delete a vcenterdatacenter by its UUID
'''
uri = self.vcenterdatacenter_query(label, vcenter, tenantname)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "POST",
self.URI_RESOURCE_DEACTIVATE.format(
VcenterDatacenter.URI_DATACENTER.format(uri)), None)
return str(s) + " ++ " + str(h)
def vcenterdatacenter_get_details(self, vcenterdatacenters):
lst = []
for iter in vcenterdatacenters:
dtls = self.vcenterdatacenter_show_by_uri(iter['id'])
if(dtls):
lst.append(dtls)
return lst
'''
Create a new vCenter cluster with all hosts and datastores
'''
def vcenterdatacenter_create_cluster(self, name, vcenter, cluster,
tenantname):
from cluster import Cluster
        cl_uri = Cluster(self.__ipAddr, self.__port).cluster_query(cluster, name, vcenter, tenantname)
dc_uri = self.vcenterdatacenter_query(name, vcenter, tenantname)
params = {'id': cl_uri}
body = json.dumps(params)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port,
"POST",
VcenterDatacenter.URI_DATACENTERS_CREATE_CLUSTER.format(dc_uri),
body)
return common.json_decode(s)
'''
Updates an existing vCenter cluster with new hosts and datastores
'''
def vcenterdatacenter_update_cluster(self, name, vcenter, cluster,
tenantname):
from cluster import Cluster
cl_uri = Cluster(self.__ipAddr, self.__port).cluster_query(cluster, name, vcenter, tenantname)
dc_uri = self.vcenterdatacenter_query(name, vcenter, tenantname)
params = {'id': cl_uri}
body = json.dumps(params)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port,
"POST",
VcenterDatacenter.URI_DATACENTERS_UPDATE_CLUSTER.format(dc_uri),
body)
return common.json_decode(s)
def vcenterdatacenter_update(self, label, vcenter, tenantname, newtenantname):
'''
updates a vcenterdatacenter
parameters:
label: label of the vcenterdatacenter
Returns:
JSON payload response
'''
try:
check = self.vcenterdatacenter_show(label, vcenter, tenantname)
if check:
raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
"vcenterdatacenter " + label + ": found")
except SOSError as e:
if e.err_code == SOSError.ENTRY_ALREADY_EXISTS_ERR:
uri = self.vcenterdatacenter_query(label, vcenter, VcenterDatacenter.DATACENTERS_FROM_ALL_TENANTS)
params = dict()
params['name'] = label
if newtenantname is not None and newtenantname != 'null':
from tenant import Tenant
obj = Tenant(self.__ipAddr, self.__port)
params['tenant'] = obj.tenant_query(newtenantname)
elif newtenantname is not None:
params['tenant'] = newtenantname
body = json.dumps(params)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port, "PUT",
VcenterDatacenter.URI_DATACENTER.format(uri), body)
o = common.json_decode(s)
return o
else:
raise e
if not check:
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "vcenterdatacenter with name " + label +
                           " does not exist")
# datacenter Create routines
def create_parser(subcommand_parsers, common_parser):
# create command parser
create_parser = subcommand_parsers.add_parser(
'create',
description='ViPR vcenterdatacenter Create CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Create a vcenterdatacenter')
mandatory_args = create_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='Name of vcenterdatacenter',
metavar='<vcenterdatacentername>',
dest='name',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
create_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
create_parser.set_defaults(func=vcenterdatacenter_create)
def vcenterdatacenter_create(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_create(args.name,
args.vcenter, args.tenant)
except SOSError as e:
common.format_err_msg_and_raise("create", "vcenterdatacenter",
e.err_text, e.err_code)
# datacenter Delete routines
def delete_parser(subcommand_parsers, common_parser):
# delete command parser
delete_parser = subcommand_parsers.add_parser(
'delete',
description='ViPR vcenterdatacenter Delete CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Delete a vcenterdatacenter')
mandatory_args = delete_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='name of vcenterdatacenter',
dest='name',
metavar='<vcenterdatacentername>',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
delete_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
delete_parser.set_defaults(func=vcenterdatacenter_delete)
def vcenterdatacenter_delete(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_delete(args.name,
args.vcenter, args.tenant)
except SOSError as e:
common.format_err_msg_and_raise("delete", "vcenterdatacenter",
e.err_text, e.err_code)
# datacenter Show routines
def show_parser(subcommand_parsers, common_parser):
# show command parser
show_parser = subcommand_parsers.add_parser(
'show',
description='ViPR vcenterdatacenter Show CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Show a vcenterdatacenter')
mandatory_args = show_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='name of vcenterdatacenter',
dest='name',
metavar='<vcenterdatacentername>',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
show_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
show_parser.add_argument('-xml',
dest='xml',
action='store_true',
help='XML response')
show_parser.set_defaults(func=vcenterdatacenter_show)
def vcenterdatacenter_show(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_show(args.name, args.vcenter,
args.tenant, args.xml)
if(not res):
raise SOSError(SOSError.NOT_FOUND_ERR,
"vcenterdatacenter " + args.name + ": not found")
if(args.xml):
return common.format_xml(res)
return common.format_json_object(res)
except SOSError as e:
common.format_err_msg_and_raise("show", "vcenterdatacenter",
e.err_text, e.err_code)
# datacenter get hosts routines
def get_hosts_parser(subcommand_parsers, common_parser):
# show command parser
get_hosts_parser = subcommand_parsers.add_parser(
'get-hosts',
description='ViPR vcenterdatacenter get hosts CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Show the hosts of a vcenterdatacenter')
mandatory_args = get_hosts_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='name of vcenterdatacenter',
dest='name',
metavar='<vcenterdatacentername>',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
get_hosts_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
get_hosts_parser.add_argument(
'-long', '-l',
action='store_true',
help='List vcenters with more details in tabular form',
dest='long')
get_hosts_parser.add_argument('-verbose', '-v',
action='store_true',
help='List vcenters with details',
dest='verbose')
get_hosts_parser.set_defaults(func=vcenterdatacenter_get_hosts)
def vcenterdatacenter_get_hosts(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_get_hosts(args.name,
args.vcenter, args.tenant)
if(len(res) > 0):
if(args.verbose):
return common.format_json_object(res)
elif(args.long):
from common import TableGenerator
TableGenerator(res, ['name', 'type', 'job_discovery_status',
'job_metering_status']).printTable()
else:
from common import TableGenerator
TableGenerator(res, ['name']).printTable()
except SOSError as e:
common.format_err_msg_and_raise("get hosts", "vcenterdatacenter",
e.err_text, e.err_code)
# datacenter get clusters routines
def get_clusters_parser(subcommand_parsers, common_parser):
# show command parser
get_clusters_parser = subcommand_parsers.add_parser(
'get-clusters',
description='ViPR vcenterdatacenter get clusters CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Show the clusters of a vcenterdatacenter')
mandatory_args = get_clusters_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='name of vcenterdatacenter',
dest='name',
metavar='<vcenterdatacentername>',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
get_clusters_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
get_clusters_parser.add_argument(
'-long', '-l',
action='store_true',
help='List vcenters with more details in tabular form',
dest='long')
get_clusters_parser.add_argument('-verbose', '-v',
action='store_true',
help='List vcenters with details',
dest='verbose')
get_clusters_parser.set_defaults(func=vcenterdatacenter_get_clusters)
def vcenterdatacenter_get_clusters(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_get_clusters(args.name,
args.vcenter, args.tenant)
if(len(res) > 0):
if(args.verbose):
return common.format_json_object(res)
elif(args.long):
from common import TableGenerator
TableGenerator(res, ['name']).printTable()
else:
from common import TableGenerator
TableGenerator(res, ['name']).printTable()
except SOSError as e:
common.format_err_msg_and_raise("get clusters", "vcenterdatacenter",
e.err_text, e.err_code)
# datacenter Query routines
def query_parser(subcommand_parsers, common_parser):
# query command parser
query_parser = subcommand_parsers.add_parser(
'query',
description='ViPR vcenterdatacenter Query CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Query a vcenterdatacenter')
mandatory_args = query_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='name of vcenterdatacenter',
dest='name',
metavar='<vcenterdatacentername>',
required=True)
query_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
query_parser.set_defaults(func=vcenterdatacenter_query)
def vcenterdatacenter_query(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_query(args.name, args.tenant)
return common.format_json_object(res)
except SOSError as e:
if(e.err_code == SOSError.NOT_FOUND_ERR):
raise SOSError(SOSError.NOT_FOUND_ERR,
"vcenterdatacenter query failed: " + e.err_text)
else:
raise e
# datacenter List routines
def list_parser(subcommand_parsers, common_parser):
# list command parser
list_parser = subcommand_parsers.add_parser(
'list',
description='ViPR vcenterdatacenter List CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='List of vcenterdatacenters')
mandatory_args = list_parser.add_argument_group('mandatory arguments')
list_parser.add_argument('-verbose', '-v',
action='store_true',
help='List vcenterdatacenters with details',
dest='verbose')
list_parser.add_argument(
'-long', '-l',
action='store_true',
help='List vcenterdatacenters with more details in tabular form',
dest='long')
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
list_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
list_parser.set_defaults(func=vcenterdatacenter_list)
def vcenterdatacenter_list(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
uris = obj.vcenterdatacenter_list(args.vcenter, args.tenant)
output = []
outlst = []
for uri in uris:
temp = obj.vcenterdatacenter_show_by_uri(uri['id'], False)
if(temp):
output.append(temp)
if(len(output) > 0):
if(args.verbose):
return common.format_json_object(output)
elif(args.long):
from common import TableGenerator
TableGenerator(output,
['name', 'auto_san_zoning',
'auto_tier_policy']).printTable()
else:
from common import TableGenerator
TableGenerator(output, ['name']).printTable()
except SOSError as e:
raise e
# datacenter Create cluster routines
def create_cluster_parser(subcommand_parsers, common_parser):
create_parser = subcommand_parsers.add_parser(
'create-cluster',
description='ViPR vcenterdatacenter Create-cluster CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Create a new vCenter cluster')
mandatory_args = create_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='Name of vcenterdatacenter',
metavar='<vcenterdatacentername>',
dest='name',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
mandatory_args.add_argument('-cluster',
help='name of cluster',
dest='cluster',
metavar='<cluster>',
required=True)
create_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
create_parser.set_defaults(func=vcenterdatacenter_create_cluster)
def vcenterdatacenter_create_cluster(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_create_cluster(args.name,
args.vcenter,
args.cluster,
args.tenant)
except SOSError as e:
common.format_err_msg_and_raise("create-cluster", "vcenterdatacenter",
e.err_text, e.err_code)
# datacenter Create cluster routines
def update_cluster_parser(subcommand_parsers, common_parser):
create_parser = subcommand_parsers.add_parser(
'update-cluster',
description='ViPR vcenterdatacenter Update-cluster CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
        help='Update an existing vCenter cluster')
mandatory_args = create_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='Name of vcenterdatacenter',
metavar='<vcenterdatacentername>',
dest='name',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
mandatory_args.add_argument('-cluster',
help='name of cluster',
dest='cluster',
metavar='<cluster>',
required=True)
create_parser.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
default=None)
create_parser.set_defaults(func=vcenterdatacenter_update_cluster)
def vcenterdatacenter_update_cluster(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_update_cluster(args.name,
args.vcenter,
args.cluster,
args.tenant)
except SOSError as e:
common.format_err_msg_and_raise("update-cluster", "vcenterdatacenter",
e.err_text, e.err_code)
#
# vcenterdatacenter update routines
#
def update_parser(subcommand_parsers, common_parser):
# create command parser
update_parser = subcommand_parsers.add_parser(
'update',
description='ViPR vCenterDataCenter Update CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Update a vCenterDataCenter')
mandatory_args = update_parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument('-name', '-n',
help='Name of vCenterDataCenter',
metavar='<vcenterdatacentername>',
dest='name',
required=True)
mandatory_args.add_argument('-vcenter',
help='vcenter',
dest='vcenter',
metavar='<vcenter>',
required=True)
mandatory_args.add_argument('-tenant', '-tn',
help='Name of Tenant',
metavar='<tenant>',
dest='tenant',
required=True)
update_parser.add_argument('-newtenant', '-ntn',
                               help='Name of the new Tenant to be updated. Provide null if you want to remove the existing tenant from the datacenter',
metavar='<newtenant>',
dest='newtenant',
default=None)
update_parser.set_defaults(func=vcenterdatacenter_update)
def vcenterdatacenter_update(args):
obj = VcenterDatacenter(args.ip, args.port)
try:
res = obj.vcenterdatacenter_update(args.name,
args.vcenter, args.tenant, args.newtenant)
except SOSError as e:
common.format_err_msg_and_raise("update", "vcenterdatacenter",
e.err_text, e.err_code)
#
# vcenterdatacenter Main parser routine
#
def vcenterdatacenter_parser(parent_subparser, common_parser):
# main vcenterdatacenter parser
parser = parent_subparser.add_parser(
'vcenterdatacenter',
description='ViPR vcenterdatacenter CLI usage',
parents=[common_parser],
conflict_handler='resolve',
help='Operations on vcenterdatacenter')
subcommand_parsers = parser.add_subparsers(help='Use One Of Commands')
# create command parser
create_parser(subcommand_parsers, common_parser)
# delete command parser
delete_parser(subcommand_parsers, common_parser)
# show command parser
show_parser(subcommand_parsers, common_parser)
# list command parser
list_parser(subcommand_parsers, common_parser)
# get clusters parser
get_clusters_parser(subcommand_parsers, common_parser)
# get hosts parser
get_hosts_parser(subcommand_parsers, common_parser)
# create vcenter cluster parser
create_cluster_parser(subcommand_parsers, common_parser)
# update vcenter cluster parser
update_cluster_parser(subcommand_parsers, common_parser)
# update vcenter datacenter parser
update_parser(subcommand_parsers, common_parser)
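# Example invocations (a sketch; the executable name and the common authentication
# flags come from the wider ViPR CLI framework, so treat them as placeholders):
#   viprcli vcenterdatacenter create -n DC1 -vcenter vcenter1 -tn ProviderTenant
#   viprcli vcenterdatacenter get-hosts -n DC1 -vcenter vcenter1 -l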
| python |
from django.shortcuts import render
from django.shortcuts import get_object_or_404
# from rest_framework import status
# from rest_framework.permissions import IsAuthenticated, IsAdminUser
# from rest_framework.response import Response
# from rest_framework import viewsets
from findance import abstract
from .models import Currency
from .serializers import CurrencySerializer
class CurrencyAPI(abstract.BaseFindanceAPI):
serializer = CurrencySerializer
search_alternate = 'code'
| python |
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from urlparse import urlparse, parse_qs
import argparse
import concoction
class WebServer(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def do_GET(self):
self._set_headers()
if self.path[:9] != "/?recipe=":
self.wfile.write("You must give recipe parameter")
else:
query_components = parse_qs(urlparse(self.path).query)
if "recipe" not in query_components:
self.wfile.write("You must give recipe parameter")
self.wfile.write(concoction.Concoction().process(map(lambda x: x, str(query_components["recipe"]))))
def run(server_class=HTTPServer, handler_class=WebServer, port=80, verbose=False):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
if verbose:
print 'Starting httpd...'
httpd.serve_forever()
def parse_args():
# Parsing args
parser = argparse.ArgumentParser(description="Generate a Chef program")
main_group = parser.add_mutually_exclusive_group()
group_file = main_group.add_argument_group()
group = group_file.add_mutually_exclusive_group()
group.add_argument("-s", "--string", action="store", type=str, help="Set string as input", default="")
group.add_argument("-f", "--file", action="store", type=str, help="Set file as input")
group_file.add_argument("-o", "--out", action="store", type=str, help="Set file as output")
main_group.add_argument("-p", "--port", action="store", type=int, help="Start as web server", default=-1)
parser.add_argument("-v", "--verbose", action="store_true", help="Allow verbose")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
if args.port != -1:
run(port=args.port,verbose=args.verbose)
else:
my_concoction = concoction.Concoction(args.verbose)
my_output_file = "concoction.chef"
if args.out is not None:
my_output_file = args.out
my_input_text = ""
if args.string is not None and len(args.string) != 0:
my_input_text = args.string
else:
if args.file is not None:
my_input_text = my_concoction.read_file(args.file)
my_concoction.write_file(my_output_file,my_concoction.process(my_input_text))
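# Example runs (a sketch; the script name is simply whatever this file is saved as):
#   python thisscript.py -s "Hello" -o hello.chef    # compile a string into a Chef program
#   python thisscript.py -p 8080 -v                  # serve it, then GET /?recipe=Hello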
| python |
from flask import request
from app import newjson,jsonify
from . import api,base_dir
from ..model.live2d import live2dConfig,live2dModel
import os,json
@api.route("/live2d/config/get",endpoint="live2d-config-get",methods = ["GET","POST"])
def live2d_getConfig():
config = request.values.get("config","default",type=str)
tip = request.values.get("tip", "default", type=str)
model = request.values.get("model","kesshouban",type=str)
return newjson("1",data=live2dConfig(config,tip,model).dump())
@api.route("/live2d/model/get",endpoint="live2d-model-get",methods = ["GET","POST"])
def live2d_getModel():
id = request.values.get("id",1,type=int)
name = request.values.get("name","",type=str)
textureId = request.values.get("tid",0,type=int)
changeModel = request.values.get("cm", 0, type=int)
changeTexture = request.values.get("ct",0,type=int)
id += changeModel
textureId += changeTexture
if name != "":
model = live2dModel.initByName(name,textureId)
else:
model = live2dModel.initById(id,textureId)
return jsonify(model.dump())
@api.route("/live2d/model/change",endpoint="live2d-model-change",methods = ["GET","POST"])
def live2d_changeModel():
id = request.values.get("id",1,type=int)
name = request.values.get("name","",type=str)
textureId = request.values.get("tid",0,type=int)
changeModel = request.values.get("cm", 0, type=int)
changeTexture = request.values.get("ct",0,type=int)
id += changeModel
textureId += changeTexture
if name != "":
model = live2dModel.initByName(name,textureId)
else:
model = live2dModel.initById(id,textureId)
return newjson("1",data={"Id":model.id,
"TextureId":model.textureId,
"Name":model.name})
| python |
from django.apps import AppConfig
class FourAppConfig(AppConfig):
name = 'four_app'
| python |
# coding: latin-1
###############################################################################
# eVotUM - Electronic Voting System
#
# generateSecret-app.py
#
# Cripto-4.4.1 - Command line app to exemplify the usage of generateSecret
# function (see shamirsecret.py)
#
# Copyright (c) 2016 Universidade do Minho
# Developed by André Baptista - Devise Futures, Lda. ([email protected])
# Reviewed by Ricardo Barroso - Devise Futures, Lda. ([email protected])
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################################################
"""
Command line app that generates a random string with length characters.
"""
import sys
from eVotUM.Cripto import shamirsecret
def printUsage():
print("Usage: python generateSecret-app.py length")
def parseArgs():
if (len(sys.argv) != 2):
printUsage()
else:
length = int(sys.argv[1])
main(length)
def main(length):
sys.stdout.write("%s\n" % shamirsecret.generateSecret(length))
if __name__ == "__main__":
parseArgs()
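# Example run (a sketch):
#   python generateSecret-app.py 16
# prints a random 16-character secret produced by shamirsecret.generateSecret.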
| python |
import pandas as pd
from actymath.columns.base import Column
from actymath.calc import register
class TestColumn1(Column):
column_name = "q(x{life})"
parameters = {"life": "test"}
dependencies = []
class TestColumn2(Column):
column_name = "timestamp"
parameters = {}
dependencies = []
def test_column_parse_works_with_kwargs():
col = "q(x3)"
result = TestColumn1().parse_column(col)
assert result[0] == "q(x{life})"
assert result[1] == {"life": "3"}
# And now no match
col = "q(y1)"
result = TestColumn1().parse_column(col)
assert result is None
# Also no match - case sensitive
col = "Q(x3)"
result = TestColumn1().parse_column(col)
assert result is None
def test_column_parse_works_without_kwargs():
col = "timestamp"
result = TestColumn2().parse_column(col)
assert result[0] == "timestamp"
assert result[1] == {}
    # and now no match
col = "times"
result = TestColumn2().parse_column(col)
    assert result is None
| python |
#!/bin/env python
#===============================================================================
# NAME: test_api.py
#
# DESCRIPTION: A basic test framework for integration testing.
# AUTHOR: Kevin Dinkel
# EMAIL: [email protected]
# DATE CREATED: November 19, 2015
#
# Copyright 2015, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#===============================================================================
#
# Python standard modules
from fprime_gds.gse.utils.gse_api import GseApi
from fprime_gds.gse.utils.test_history import TestHistory
import signal
import time
import fprime.gse.utils.history as history
__author__ = "Kevin Dinkel"
__copyright__ = "Copyright 2015, California Institute of Technology."
__version__ = "1.0"
__email__ = "[email protected]"
class TestApi(TestHistory):
'''
------------------------------------------------------------------------------------------------
NOTE: This TestApi extends many super classes. To best see all its methods and descriptions run:
pydoc test_api
This will show you all the methods and derived methods included within this API in one place.
-------------------------------------------------------------------------------------------------
This TestAPI extends the GSE API by adding three main features:
1) A set of histories (dictionaries) which store incoming events and telemetry
2) The ability to assert truths about the state of these histories at any time
3) The ability to wait for truths about the state of these histories to become true before
a given timeout
This module is intended to be used for integration testing. A user will instantiate
an object of type TestAPI and use it (and the underlying GseApi object) to send commands
    to a running topology. The TestAPI will collect any outgoing telemetry and events.
The user can use the TestAPI to run assertions against the received telemetry and events
to check that the topology is running as expected.
There are a few things the user should be aware of: All received events and telemetry are
received on an incoming message queue. These events and telemetry are not stored into a
history for querying until a '*wait*" function in this API is run, in which case events and
telemetry are copied from the message queue and into the history until the "*wait*" function
returns. Optionally, the user may sleep an arbitrary amount of time, and then run update()
to force an update of the history from the message queue at a given time. The latter method
    is not as desirable for many reasons, because choosing an arbitrary sleep time can be difficult
    or error prone. After the histories are filled during a test, the user can run "*assert*" functions
    to check the state of the histories, without worrying about the histories updating as they check them.
Finally, the user can then (optionally) clear the history before sending more commands to the topology.
Here is a very basic test that someone might write using this API:
def test_single_command(api):
# This is a very basic test. Send a noop command and make sure it succeeds.
# Wait for FSW to be started, and clear the state of the api:
time.sleep(2)
api.reset()
# Send no-op and make sure we get a response within 5 seconds:
api.send("CMD_NO_OP") # Command is sent, this returns immediately
api.wait_assert_evr_size(1, "OpCodeCompleted") # Collect data in history until this evr is returned
# Assert that we got events signaling the success of the command:
api.assert_evr_size(1, "OpCodeDispatched") # Check that 1 event of these types have been received
api.assert_evr_size(1, "OpCodeCompleted")
api.assert_evr_size(1, "NoOpReceived")
# Assert that the correct command was executed:
noOpId = api.get_cmd_id("CMD_NO_OP") # get the command id (opcode) from the mnemonic,
# since the opcode is an event parameter we want to check
api.assert_evr([noOpId, api.ANYTHING], "OpCodeDispatched") # Check event with two arguments,
# but ignore the value of the second one
api.assert_evr([noOpId], "OpCodeCompleted") # Check event with single argument
api.assert_evr([noOpId], "OpCodeCompleted", index=api.ALL) # This is equivelant to the first command
# we are making sure all events of this type
# have this value
api.assert_evr([noOpId], "OpCodeCompleted", index=0) # Check only the first index
api.assert_evr([noOpId], "OpCodeCompleted", index=api.ANY) # Using api.ANY can be helpful if you want
        # to check that any index matches the expected value
# Assert that we got telemetry signaling the success of the command:
api.assert_tlm_size(1, "CommandsDispatched") # Check that one telemetry of this type has been received
# Size assertion functions also have an optional filterFunc argument that can be used
# to only count telemetry or events that pass a certain filter function. In this case
        # we would expect that there are 0 "CommandsDispatched" telemetry values that are greater
# than 1, since only a single command was sent.
api.assert_tlm_size(0, "CommandsDispatched", filterFunc=(lambda x: x > 1))
# Assert that the value of the telemetry point is 1:
api.assert_tlm(1, "CommandsDispatched") # Check that CommandsDispatched count has been
# incremented from 0 to 1
'''
###############################
# Public API methods:
###############################
def __init__(self, gse_api):
self.api = gse_api
super(TestApi, self).__init__()
###################################################################################
###################################################################################
## Sending Commands:
###################################################################################
###################################################################################
def send_wait_evr(self, cmd_name, evr_name, args=None, timeout=5):
'''
Send a command and update histories until a given event is received on the message queue
Note: no test assertions are thrown during the execution of this command, even in the
event of a timeout
@param cmd_name: the name (mnemonic) of the command to send
@param evr_name: the name of the event to wait for
@param args: (optional) arguments to pass with the command
@param timeout: (optional) timeout in seconds, default is 5 seconds
'''
status = self.send(cmd_name, args)
if status == -1:
return [], []
tlm_list, evr_list = self.api.wait_evr(evr_name, timeout)
self.__add_to_hist(tlm_list, evr_list)
return tlm_list, evr_list
def send_wait_tlm(self, cmd_name, tlm_name, args=None, timeout=5):
'''
Send a command and update histories until a given telemetry point is received on the message queue
Note: no test assertions are thrown during the execution of this command, even in the
event of a timeout
@param cmd_name: the name (mnemonic) of the command to send
@param tlm_name: the name of the tlm to wait for
@param args: (optional) arguments to pass with the command
@param timeout: (optional) timeout in seconds, default is 5 seconds
'''
status = self.send(cmd_name, args)
if status == -1:
return [], []
tlm_list, evr_list = self.api.wait_tlm(tlm_name, timeout)
self.__add_to_hist(tlm_list, evr_list)
return tlm_list, evr_list
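    # A minimal usage sketch (reusing the hypothetical mnemonics from the class docstring
    # above): send a command and block until its counter channel arrives, then assert on it.
    #
    #   tlm_list, evr_list = api.send_wait_tlm("CMD_NO_OP", "CommandsDispatched", timeout=10)
    #   api.assert_tlm(1, "CommandsDispatched")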
###################################################################################
###################################################################################
## Updating histories:
###################################################################################
###################################################################################
def wait_evr(self, evr_name, timeout=5):
'''
Update histories until a given event is received on the message queue
Note: no test assertions are thrown during the execution of this command, even in the
event of a timeout, use wait_assert* commands to achieve this.
@param evr_name: the name of the evr to wait for
@param timeout: (optional) timeout in seconds, default is 5 seconds
'''
tlm_list, evr_list = self.api.wait_evr(evr_name, timeout)
self.__add_to_hist(tlm_list, evr_list)
return tlm_list, evr_list
def wait_tlm(self, tlm_name, timeout=5):
'''
Update histories until a given telemetry point is received on the message queue
Note: no test assertions are thrown during the execution of this command, even in the
event of a timeout, use wait_assert* commands to achieve this.
@param tlm_name: the name of the tlm to wait for
@param timeout: (optional) timeout in seconds, default is 5 seconds
'''
tlm_list, evr_list = self.api.wait_tlm(tlm_name, timeout)
self.__add_to_hist(tlm_list, evr_list)
return tlm_list, evr_list
def update(self):
'''
Update histories right now. This takes any data sitting on the message queues and pushes
it into the histories. This function might be useful when running command, sleeping a
        predetermined amount of time, and then running update(). It is an alternative to
the "wait_*" and "wait_assert_*" functions in this API, but should be used sparingly
as it might create brittle tests.
Note: no test assertions are thrown during the execution of this command, even in the
event of a timeout, use wait_assert* commands to achieve this.
'''
tlm_list, evr_list = self.api.receive()
self.__add_to_hist(tlm_list, evr_list)
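    # A minimal sketch of the sleep-then-update pattern described above (same hypothetical
    # api fixture and mnemonics as the class docstring); prefer the wait_* methods where possible:
    #
    #   api.send("CMD_NO_OP")
    #   time.sleep(1)                            # arbitrary settle time, which makes tests brittle
    #   api.update()                             # drain the message queue into the histories
    #   api.assert_evr_size(1, "NoOpReceived")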
###################################################################################
###################################################################################
## Clear histories:
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# clear_evr(self)
# clear_tlm(self)
# clear(self)
#
# Reset API state:
def reset(self):
'''
Remove all events from the event history and remove all telemetry from the telemetry history
and remove any pending events or telemetry in the message queue. This gets rid of ALL the current
telemetry and event state, and should be useful in providing a clean slate during testing.
'''
self.clear()
self.api.flush()
###################################################################################
###################################################################################
## Print helpers:
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# print_evr(self)
# print_tlm(self)
# pretty_print(self)
#
###################################################################################
###################################################################################
## Test event size:
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# assert_evr_size(self, size, evr_name=None)
# assert_evr_size_eq(self, size, evr_name=None) # Same as above, but here for convenience
# assert_evr_size_ne(self, size, evr_name=None)
# assert_evr_size_lt(self, size, evr_name=None)
# assert_evr_size_le(self, size, evr_name=None)
# assert_evr_size_gt(self, size, evr_name=None)
# assert_evr_size_ge(self, size, evr_name=None)
#
###################################################################################
###################################################################################
## Test telemetry size:
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# assert_tlm_size(self, size, tlm_name=None)
# assert_tlm_size_eq(self, size, tlm_name=None) # Same as above, but here for convenience
# assert_tlm_size_ne(self, size, tlm_name=None)
# assert_tlm_size_lt(self, size, tlm_name=None)
# assert_tlm_size_le(self, size, tlm_name=None)
# assert_tlm_size_gt(self, size, tlm_name=None)
# assert_tlm_size_ge(self, size, tlm_name=None)
#
###################################################################################
###################################################################################
## Test event values:
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# assert_evr(self, value, evr_name=None, index=history.ALL)
# assert_evr_eq(self, value, evr_name=None, index=history.ALL) # Same as above, but here for convenience
# assert_evr_ne(self, value, evr_name=None, index=history.ALL)
# assert_evr_lt(self, value, evr_name=None, index=history.ALL)
# assert_evr_le(self, value, evr_name=None, index=history.ALL)
# assert_evr_gt(self, value, evr_name=None, index=history.ALL)
# assert_evr_ge(self, value, evr_name=None, index=history.ALL)
# assert_evr_is(self, value, evr_name=None, index=history.ALL)
# assert_evr_is_not(self, value, evr_name=None, index=history.ALL)
#
###################################################################################
###################################################################################
## Test telemetry values:
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# assert_tlm(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_eq(self, value, tlm_name=None, index=history.ALL) # Same as above, but here for convenience
# assert_tlm_ne(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_lt(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_le(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_gt(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_ge(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_is(self, value, tlm_name=None, index=history.ALL)
# assert_tlm_is_not(self, value, tlm_name=None, index=history.ALL)
#
###################################################################################
###################################################################################
## Test and wait for event size:
###################################################################################
###################################################################################
def wait_assert_evr_size(self, size, evr_name=None, filterFunc=None, timeout=5):
'''
Assert the number of events received is equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size(size, evr_name, filterFunc), timeout)
def wait_assert_evr_size_eq(self, size, evr_name=None, filterFunc=None, timeout=5): # Same as above, but here for convenience
'''
Assert the number of events received is equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size_eq(size, evr_name, filterFunc), timeout)
def wait_assert_evr_size_ne(self, size, evr_name=None, filterFunc=None, timeout=5):
'''
Assert the number of events received is not equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size_ne(size, evr_name, filterFunc), timeout)
def wait_assert_evr_size_lt(self, size, evr_name=None, filterFunc=None, timeout=5):
'''
Assert the number of events received is less than 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size_lt(size, evr_name, filterFunc), timeout)
def wait_assert_evr_size_le(self, size, evr_name=None, filterFunc=None, timeout=5):
'''
Assert the number of events received is less than or equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size_le(size, evr_name, filterFunc), timeout)
def wait_assert_evr_size_gt(self, size, evr_name=None, filterFunc=None, timeout=5):
'''
Assert the number of events received is greater than 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size_gt(size, evr_name, filterFunc), timeout)
def wait_assert_evr_size_ge(self, size, evr_name=None, filterFunc=None, timeout=5):
'''
Assert the number of events received is greater than or equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of events expected
@param evr_name: (optional) if provided, only check the size of events of
this type
@param filterFunc: (optional) if provided, only events arguments that return true when passed
into this function are counted. For example, to only count event arguments with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_size_ge(size, evr_name, filterFunc), timeout)
###################################################################################
###################################################################################
## Test and wait for telemetry size:
###################################################################################
###################################################################################
def wait_assert_tlm_size(self, size, tlm_name=None, filterFunc=None, timeout=5):
'''
Assert the number of telemetry received is equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size(size, tlm_name, filterFunc), timeout)
def wait_assert_tlm_size_eq(self, size, tlm_name=None, filterFunc=None, timeout=5): # Same as above, but here for convenience
'''
Assert the number of telemetry received is equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size_eq(size, tlm_name, filterFunc), timeout)
def wait_assert_tlm_size_ne(self, size, tlm_name=None, filterFunc=None, timeout=5):
'''
Assert the number of telemetry received is not equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size_ne(size, tlm_name, filterFunc), timeout)
def wait_assert_tlm_size_lt(self, size, tlm_name=None, filterFunc=None, timeout=5):
'''
Assert the number of telemetry received is less than 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size_lt(size, tlm_name, filterFunc), timeout)
def wait_assert_tlm_size_le(self, size, tlm_name=None, filterFunc=None, timeout=5):
'''
Assert the number of telemetry received is less than or equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size_le(size, tlm_name, filterFunc), timeout)
def wait_assert_tlm_size_gt(self, size, tlm_name=None, filterFunc=None, timeout=5):
'''
Assert the number of telemetry received is greater than 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size_gt(size, tlm_name, filterFunc), timeout)
def wait_assert_tlm_size_ge(self, size, tlm_name=None, filterFunc=None, timeout=5):
'''
Assert the number of telemetry received is greater than or equal to 'size' or wait until this
is true, otherwise timeout and assert failure.
@param size: the number of telemetry points expected
@param tlm_name: (optional) if provided, only check the size of telemetry of
this type
@param filterFunc: (optional) if provided, only telemetry values that return true when passed
into this function are counted. For example, to only count telemetry values with numerical values
greater than 5 you can pass in the function: filterFunc=(lambda x: x>5)
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_size_ge(size, tlm_name, filterFunc), timeout)
###################################################################################
###################################################################################
## Test and wait for event argument values:
###################################################################################
###################################################################################
def wait_assert_evr(self, value, evr_name=None, index=history.ALL, timeout=5):
'''
Assert the value of event arguments received is equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr(value, evr_name, index), timeout)
def wait_assert_evr_eq(self, value, evr_name=None, index=history.ALL, timeout=5): # Same as above, but here for convenience
'''
Assert the value of event arguments received is equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_eq(value, evr_name, index), timeout)
def wait_assert_evr_ne(self, value, evr_name=None, index=history.ALL, timeout=5):
'''
Assert the value of event arguments received is not equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_ne(value, evr_name, index), timeout)
def wait_assert_evr_lt(self, value, evr_name=None, index=history.ALL, timeout=5):
'''
Assert the value of event arguments received is less than 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_lt(value, evr_name, index), timeout)
def wait_assert_evr_le(self, value, evr_name=None, index=history.ALL, timeout=5):
'''
Assert the value of event arguments received is less than or equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_le(value, evr_name, index), timeout)
def wait_assert_evr_gt(self, value, evr_name=None, index=history.ALL, timeout=5):
'''
Assert the value of event arguments received is greater than 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_gt(value, evr_name, index), timeout)
def wait_assert_evr_ge(self, value, evr_name=None, index=history.ALL, timeout=5):
'''
Assert the value of event arguments received is greater than or equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the event arguments
@param evr_name: (optional) if provided, only check the value of events
of this type
@param index: (optional) if provided, only check the value of events of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if evr_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_evr_ge(value, evr_name, index), timeout)
###################################################################################
###################################################################################
    ## Test and wait for telemetry values:
###################################################################################
###################################################################################
def wait_assert_tlm(self, value, tlm_name=None, index=history.ALL, timeout=5):
'''
Assert the value of telemetry received is equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm(value, tlm_name, index), timeout)
def wait_assert_tlm_eq(self, value, tlm_name=None, index=history.ALL, timeout=5): # Same as above, but here for convenience
'''
Assert the value of telemetry received is equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_eq(value, tlm_name, index), timeout)
def wait_assert_tlm_ne(self, value, tlm_name=None, index=history.ALL, timeout=5):
'''
Assert the value of telemetry received is not equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_ne(value, tlm_name, index), timeout)
def wait_assert_tlm_lt(self, value, tlm_name=None, index=history.ALL, timeout=5):
'''
Assert the value of telemetry received is less than 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_lt(value, tlm_name, index), timeout)
def wait_assert_tlm_le(self, value, tlm_name=None, index=history.ALL, timeout=5):
'''
Assert the value of telemetry received is less than or equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_le(value, tlm_name, index), timeout)
def wait_assert_tlm_gt(self, value, tlm_name=None, index=history.ALL, timeout=5):
'''
Assert the value of telemetry received is greater than 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_gt(value, tlm_name, index), timeout)
def wait_assert_tlm_ge(self, value, tlm_name=None, index=history.ALL, timeout=5):
'''
Assert the value of telemetry received is greater than or equal to 'value' or
wait until this is true, otherwise timeout and assert failure
@param value: the expected value of the telemetry
@param tlm_name: (optional) if provided, only check the value of telemetry
of this type
@param index: (optional) if provided, only check the value of tlm of
this index in the history. Passing TestHistory.ALL will check all indexes for that value.
Passing TestHistory.ANY will check to make sure at least 1 value meets the comparison.
Note index will only be used if tlm_name is also specified.
@param timeout: (optional) timeout in seconds (default is 5 seconds).
'''
return self.__wait_assert(lambda: self.assert_tlm_ge(value, tlm_name, index), timeout)
###################################################################################
###################################################################################
## Retrieve dictionary elements
###################################################################################
###################################################################################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# get_evr_hist(self, evr_name=None, index=history.ALL)
# get_tlm_hist(self, tlm_name=None, index=history.ALL)
#
###############################
# Public class variables:
###############################
#
# Please see the TestHistory class for the definition of the following inherited methods:
#
# anything()
# near()
#
###############################
# Private methods:
###############################
def __wait_assert(self, f, timeout=None):
"""
        Continues to update the history until a function f no longer asserts or a timeout occurs
"""
def add_item_to_hist():
# Add a single item from the queue to the history
# Return true if item is added
tlm, evr = self.api._pop_queue()
if tlm is None and evr is None:
return False
tlm_list = []
evr_list = []
if tlm:
tlm_list.append(tlm)
if evr:
evr_list.append(evr)
self.__add_to_hist(tlm_list, evr_list)
return True
def fail(msg):
try:
f()
except AssertionError as e:
assert 0, msg + "\n\n\n" + e.args[0]
assert 0, msg + "\n\n\n" + self.get_pretty_print()
def assert_failing():
# As long as there is something to add to the hist keep trying
# the assertion, else return True. Return False if the assertion is met
while True:
try:
f()
except AssertionError:
if add_item_to_hist():
continue
else:
return True
except:
raise
return False
if timeout:
signal.signal(signal.SIGALRM, self._timeout_sig_handler)
signal.alarm(timeout)
try:
while assert_failing():
# Sleep a bit if there is nothing in the queue, and then try again:
if timeout:
time.sleep(0.1)
else:
# just check assertion once if a timeout is not set
fail('Unable to meet assertion.')
except GseApi.TimeoutException:
fail('Timeout reached, unable to meet assertion.')
except:
raise
if timeout:
signal.alarm(0)
def __add_to_hist(self, tlms=[], evrs=[]):
# Translate ids to names:
evrs = [(self.api.get_events().getNameDict()[p[0]],p[1]) for p in evrs]
tlms = [(self.api.get_channels().getNameDict()[p[0]],p[1]) for p in tlms]
super(TestApi, self).add(evrs, tlms)
####
# Inherited methods from GseApi now wrapped.
# **Ideally would not exist**
####
def create_downlink_subprocess(self):
'''
Start new process to listen for incoming files.
@return: Downlink Process
'''
return self.api.create_downlink_subprocess()
def create_uplink_suprocess(self, src_path, dest_path):
'''
Creates an uplink subprocess.
@param src_path: Source path of file to be sent
        @param dest_path: Destination path of file to be received by target application
@return: Uplink Process
'''
        return self.api.create_uplink_suprocess(src_path, dest_path)
def disconnect(self):
'''
        Disconnect from the socket
'''
return self.api.disconnect()
def flush(self):
'''
Clears the telemetry/event queue and drops all data within it.
'''
return self.api.flush()
def get_cmd_id(self, command_name):
'''
Given a command_name (mnemonic), return the corresponding command op code id
@param command_name: the name of a specific command (mnemonic)
@return: the id (op code) of command_name
'''
return self.api.get_cmd_id(command_name)
def get_cmd_name(self, command_id):
'''
Given a command_id (opcode), return the corresponding command name (mnemonic)
@param command_id: the id of a specific command (opcode)
@return: the name (mnemonic) of command_id
'''
return self.api.get_cmd_name(command_id)
def get_evr_id(self, evr_name):
'''
Given an evr name, return the corresponding evr id
@param evr_name: the name of a specific evr
@return: the id of evr_name
'''
return self.api.get_evr_id(evr_name)
def get_evr_name(self, evr_id):
'''
Given an evr id, return the corresponding evr name
        @param evr_id: the id of a specific evr
        @return: the name of evr_id
        '''
        return self.api.get_evr_name(evr_id)
def get_tlm_id(self, tlm_name):
'''
Given a tlm name, return the corresponding tlm id
@param tlm_name: the name of a specific tlm
@return: the id of tlm_name
'''
return self.api.get_tlm_id(tlm_name)
def get_tlm_name(self, tlm_id):
'''
Given a tlm id, return the corresponding tlm name
@param tlm_id: the id of a specific tlm
@return: the name of tlm_id
'''
return self.api.get_tlm_name(tlm_id)
def list(self, kind='cmds', ids=False):
'''
Return a list of available commands, EVRs, or Channels.
@param kind: kind of list desired: cmds, evrs, channels
        @param ids: if True return id numbers, else mnemonics
@return: list of items
'''
return self.api.list(kind=kind, ids=ids)
def monitor_evr(self, id=None, blocking=True):
'''
Monitors for log event messages from a listener thread
connected to the Threaded TCP Socket Server. The routine
uses the python logging module to display to stdout and
to a log file.
        @param id: This is either a None for displaying any event log message,
        or a list of id integers for the messages desired to be displayed,
        or a list of string names of the mnemonic for each message to be displayed.
        @param blocking: If True the routine blocks and waits for each message,
False it will poll for a message and display if one is present otherwise
return.
'''
return self.api.monitor_evr(id=id, blocking=blocking)
def monitor_tlm(self, id=None, blocking=True):
'''
Monitors for channel telemetry from a listener thread
connected to the Threaded TCP Socket Server. The routine
uses the python logging module to display to stdout and
to a log file.
        @param id: This is either a None for displaying any channel telemetry,
or a list of id integers for the channels desired to be displayed,
or a list of string names of the mnemonic for each channel to be displayed.
@param blocking: If True the routine blocks and waits for each channel update,
False it will poll for a channel value and display if one is present otherwise
return.
'''
        return self.api.monitor_tlm(id=id, blocking=blocking)
def receive(self):
'''
Grabs all telemetry and data in event listener's queue until the queue is emptied.
Return a list of telemetry and events found.
'''
return self.api.receive()
def recieve_file(self, src, dest):
'''
        Request a file from the target application.
        @param src: Source path
        @param dest: Destination path
        @return: DownlinkStatus
'''
return self.api.recieve_file(src, dest)
def send(self, cmd_name, args=None):
'''
        Send a command to the target application.
@param cmd_name: Valid command mnemonic.
@param args: Optional argument list for the command.
'''
return self.api.send(cmd_name, args=args)
def send_file(self, src_path, dest_path, offset=0, data_size=512):
'''
Send a file to the target application.
        The file is sent over the current socket connection.
        @param src_path: Source path of file to be sent.
        @param dest_path: Destination path of file to be received by target application.
        @param offset: Byte offset into the source file (0 by default).
        @param data_size: Size of data packets (in bytes) being sent to application (default = 512).
        @return: UplinkStatus
'''
return self.api.send_file(src_path, dest_path, offset=offset, data_size=data_size)
def _timeout_sig_handler(self, signum, frame):
raise GseApi.TimeoutException()
| python |
from textual import events
from textual.app import App
from textual.widgets import Header, Footer, Placeholder, ScrollView
import json
from rich.panel import Panel
from textual.app import App
from textual.reactive import Reactive
from textual.widget import Widget
import pandas as pd
import numpy as np
from rich.table import Table
from rich.tree import Tree
from csvdata import CSV
from view import View
import argparse
class Data(Widget):
def __init__(self, filename:str):
self.filename = filename
self.data = CSV(filename)
self.view = View(self.data.get_number_columns(), self.data.get_number_rows())
super().__init__()
async def action_toggle_bar(self) -> None:
self.refresh()
async def action_toggle_always_visible(self) -> None:
self.view.toggle_always_visible()
self.refresh()
async def action_nav(self, direction:str, amount:int) -> None:
self.view.navigate(direction, amount)
self.refresh()
async def action_col(self, operation:str, direction:str, amount:int) -> None:
if operation == 'width':
self.data.columns[self.view.column_select].adjust_width(direction, amount)
new_width = self.data.columns[self.view.column_select].width
self.view.update_column_width(self.view.column_select, new_width)
elif operation == 'hide':
self.data.columns[self.view.column_select].toggle_visibility()
elif operation == 'justify':
self.data.columns[self.view.column_select].toggle_justification()
self.refresh()
async def resize(self) -> None:
self.view.update_view_size(self._size)
self.refresh()
async def on_resize(self, event: events.Resize) -> None:
self.view.update_view_size(self._size)
self.refresh()
def render(self) -> Panel:
self.view.update_view_size(self._size)
table = Table(title=f'{self.filename}: {self._size.width}x{self._size.height} select {self.view.row_select},{self.view.column_select} top {self.view.row_top} bot {self.view.row_bottom} lft {self.view.column_left} rgt {self.view.column_right} {self.view.get_columns_width(self.view.column_left, self.view.column_right)} {self.view.width}')
for icol,col_is_selected in self.view.get_drawn_columns():
style = 'red' if col_is_selected else None
column = self.data.get_column(icol)
table.add_column(column.column_name, width=column.get_width()-3, header_style=style,no_wrap=True)
for irow, row_is_selected in self.view.get_drawn_rows():
table.add_row(*[ ('[red]' if row_is_selected or col_is_selected else '') + self.data.get_column(icol).get_value(irow) for icol,col_is_selected in self.view.get_drawn_columns()])
return Panel(table)
class ColumnList(Widget):
def __init__(self, data_widget):
self.data_widget = data_widget
super().__init__()
async def action_nav(self, direction:str, amount:int) -> None:
self.refresh()
async def action_col(self, operation:str, direction:str, amount:int) -> None:
self.refresh()
async def on_resize(self, event: events.Resize) -> None:
self.refresh()
def render(self) -> Panel:
tree = Tree('Columns')
for icol, col_is_selected in self.data_widget.view.get_all_columns():
column = self.data_widget.data.get_column(icol)
column_label = f'{column.column_name}'
if not column.visible:
column_label += ' [H]'
if col_is_selected:
subtree = tree.add(f'[red]{column_label}')
subtree.add(f'dtype: {str(column.column_dtype)}')
subtree.add(f'format: {column.format_string}')
else:
tree.add(column_label)
return Panel(tree)
class StatsView(Widget):
def __init__(self, data_widget):
self.data_widget = data_widget
super().__init__()
async def action_nav(self, direction:str, amount:int) -> None:
self.refresh()
async def action_col(self, operation:str, direction:str, amount:int) -> None:
self.refresh()
async def on_mount(self, event: events.Mount) -> None:
self.visible = False
async def on_resize(self, event: events.Resize) -> None:
self.refresh()
def render(self) -> Panel:
column = self.data_widget.data.get_column(self.data_widget.view.column_select)
stats = column.get_stats()
avail_width = self._size.width-20
stat_tree = Tree('Stats')
# make a histogram
if 'Quantiles' in stats.keys():
hist = column.get_histogram(avail_width)
#qtree = Tree('Quantiles')
levels = stats['Quantiles']['levels']
values = stats['Quantiles']['values']
#for l,v in zip(stats['Quantiles']['levels'], stats['Quantiles']['values']):
# qtree.add(f'P{l:0.2f} = {column.format_value(v)}')
x_axis, q_index = "", 0
while len(x_axis) < avail_width:
quantile = len(x_axis) / float(avail_width)
q_index = np.argmin(stats['Quantiles']['levels'] < quantile)
x_axis += f'|{column.format_value(stats["Quantiles"]["values"][q_index])} '
stat_tree.add(x_axis)
hist_str = ""
levels = 10
for ii in range(levels, 0, -1):
value = np.max(hist[0]) * (ii-1) / levels
hist_str += ''.join([ '#' if x > value else ' ' for x in hist[0]])+'\n'
stat_tree.add(Panel(hist_str))
# count most frequent
elif 'counts' in stats.keys():
ctree = Tree('Counts')
category_count = 0
            for key, count in zip(stats['counts'].index, stats['counts'].values):
                # stop once the list would overflow the panel height
                if category_count > self._size.height:
                    break
                ctree.add(f'{key} = {count}')
                category_count += 1
stat_tree.add(ctree)
return Panel(stat_tree)
class CSView(App):
#def __init__(self, filepath, **kwargs):
# self.filepath = filepath
# super().__init__(**kwargs)
#async def set_filepath(self, filepath):
# self.filepath = filepath
async def on_load(self, event: events.Load) -> None:
"""Bind keys with the app loads (but before entering application mode)"""
await self.bind("b", "toggle_columns()", "Toggle Columns")
await self.bind("s", "toggle_stats())", "Toggle Stats")
await self.bind("q", "quit", "Quit")
await self.bind("up", "nav('up',1)", "Up 1 row")
await self.bind("down", "nav('down',1)", "Down 1 row")
await self.bind("right", "nav('right',1)", "Right 1 column")
await self.bind("left", "nav('left',1)", "Left 1 column")
await self.bind("pageup", "nav('up',10)", "Up 10 rows")
await self.bind("pagedown", "nav('down',10)", "Down 10 rows")
await self.bind("ctrl+right", "nav('right',10)", "Right 10 columns")
await self.bind("ctrl+left", "nav('left',10)", "Left 10 columns")
#
await self.bind("shift+up","col('width','+',1)","Increase column width")
await self.bind("shift+down","col('width','-',1)","Decrease column width")
await self.bind("h","col('hide','',0)","toggle visible")
await self.bind("j","col('justify','',0)","toggle r/l justified")
await self.bind("v","toggle_always_visible()","toggle always visible")
async def action_nav(self, direction:str, amount:int) -> None:
await self.data.action_nav(direction, amount)
await self.statsview.action_nav(direction, amount)
await self.columnlist.action_nav(direction, amount)
async def action_col(self, operation:str, direction:str, amount:int) -> None:
await self.data.action_col(operation, direction, amount)
await self.columnlist.action_col(operation, direction, amount)
async def action_toggle_columns(self):
await self.view.action_toggle('columnsbar')
await self.data.action_toggle_bar()
async def action_toggle_stats(self):
await self.view.action_toggle('statsbar')
await self.data.action_toggle_bar()
async def action_toggle_always_visible(self):
await self.data.action_toggle_always_visible()
async def on_resize(self, event: events.Resize) -> None:
# redock to new view
await self.view.dock(Header(), edge="top")
await self.view.dock(Footer(), edge="bottom")
await self.view.dock(self.columnlist, edge="left", size=int(0.25*self.console.width), name="columnsbar")
await self.view.dock(self.statsview, edge="bottom", size=int(0.5*self.console.height), name="statsbar")
# Dock the body in the remaining space
#await self.data.resize()
await self.view.dock(self.data, edge="right")
async def on_mount(self, event: events.Mount) -> None:
"""Create and dock the widgets."""
self.data = Data(self.title.split(':')[-1])
self.columnlist = ColumnList(self.data)
self.statsview = StatsView(self.data)
# Header / footer / dock
await self.view.dock(Header(), edge="top")
await self.view.dock(Footer(), edge="bottom")
await self.view.dock(self.columnlist, edge="left", size=int(0.25*self.console.width), name="columnsbar")
await self.view.dock(self.statsview, edge="bottom", size=int(0.5*self.console.height), name="statsbar")
# Dock the body in the remaining space
await self.view.dock(self.data, edge="right")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("filepath", help="csv file to view", type=str)
args = parser.parse_args()
    # TODO: How do you create an app with a custom init?
    # hack solution: embed the filepath in the app title
CSView.run(title=f"CSView:{args.filepath}", log="textual.log")
| python |
import time
import pickle
import json
import numpy as np
from threading import Thread
from typing import Dict, List
from nxs_libs.queue import *
from azure.core import exceptions as AzureCoreException
from azure.storage.queue import (
QueueClient,
)
class NxsAzureQueuePuller(NxsQueuePuller):
def __init__(self, conn_str: str, queue_name: str, **kwargs) -> None:
super().__init__()
self._conn_str = conn_str
self._session_uuid = ""
if "session_uuid" in kwargs:
self._session_uuid: str = kwargs["session_uuid"]
self._queue_name = f"{queue_name}{self._session_uuid}"
self._queue_client = QueueClient.from_connection_string(
self._conn_str, self._queue_name
)
def pull(self) -> List:
results = []
# FIXME: Catch non-existing queue exception or any other exceptions
messages = self._queue_client.receive_messages()
for message in messages:
data = json.loads(message.content)
self._queue_client.delete_message(message)
results.append(data)
return results
def pull_buffered_and_close(self) -> List:
self._queue_client.close()
return []
def set_buf_size(self, size: int):
pass
def get_num_buffered_items(self):
properties = self._queue_client.get_queue_properties()
return properties.approximate_message_count
def set_num_partitions(self, num_partitions: int):
pass
class NxsAzureQueuePusher(NxsQueuePusher):
def __init__(self, conn_str: str) -> None:
super().__init__()
self._conn_str = conn_str
self._topic2client: Dict[str, QueueClient] = {}
def create_topic(self, topic: str) -> None:
if topic in self._topic2client:
return
client = QueueClient.from_connection_string(self._conn_str, topic)
try:
client.create_queue()
self._topic2client[topic] = client
except AzureCoreException.ResourceExistsError as e:
            # queue already exists - no need to create it
self._topic2client[topic] = client
except Exception as e:
raise NxsQueueExceptionFailedToCreateTopic
def push(self, topic: str, data) -> None:
if topic not in self._topic2client:
self.create_topic(topic)
queue_client = self._topic2client[topic]
queue_client.send_message(json.dumps(data))
def push_to_session(self, topic: str, session_uuid: str, data) -> None:
new_topic = f"{topic}{session_uuid}"
return self.push(new_topic, data)
def delete_topic(self, topic: str) -> None:
pass
def update_config(self, config: dict = {}):
pass
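# A minimal usage sketch (assumes a valid Azure Storage connection string in
# `conn_str`; the queue/topic name "tasks" is illustrative only):
#
#   pusher = NxsAzureQueuePusher(conn_str)
#   pusher.create_topic("tasks")             # no-op if the queue already exists
#   pusher.push("tasks", {"job_id": 1})
#
#   puller = NxsAzureQueuePuller(conn_str, "tasks")
#   for item in puller.pull():               # returns a list of decoded JSON payloads
#       print(item)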
| python |
from lib_rovpp import ROVPPV1SimpleAS, ROVPPV1LiteSimpleAS
from .trusted_server import TrustedServer
from lib_secure_monitoring_service.sim_logger import sim_logger as logger
from lib_secure_monitoring_service.report import Report
class ROVSMS(ROVPPV1LiteSimpleAS):
name="ROV V4"
__slots__ = tuple()
trusted_server = TrustedServer(0)
def __init__(self, *args, reset_trusted_server=True, **kwargs):
"""When everything is being reset, reset the trust server also"""
# logger.debug("Created ROVSMS {0}".format(kwargs['asn']))
# At the end of the graphing, everything should be reset
if reset_trusted_server:
self.trusted_server.__init__()
super(ROVSMS, self).__init__(*args, **kwargs)
def receive_ann(self, ann, *args, **kwargs):
"""Recieves ann and reports it"""
logger.debug(f"ASN {self.asn} inside receive_ann")
if ann.invalid_by_roa:
logger.debug(f"ASN {self.asn} sending report about {ann.prefix}")
adjusted_as_path = (self.asn,) + ann.as_path
report = Report(reporting_asn=self.asn, prefix=ann.prefix, as_path=adjusted_as_path)
self.trusted_server.recieve_report(report)
return super(ROVSMS, self).receive_ann(ann, *args, **kwargs)
def _force_add_blackholes_from_avoid_list(self, engine_input):
holes = []
logger.debug("Entered _force_add_blackholes_from_avoid_list")
for _, ann in self._local_rib.prefix_anns():
ann_holes = []
# For each hole in ann: (holes are invalid subprefixes)
for subprefix in engine_input.prefix_subprefix_dict[ann.prefix]:
if self.trusted_server.rec_blackhole(subprefix,
ann.as_path):
does_not_have_subprefix = True
# Check if AS already has blackhole
for _, rib_entry in self._local_rib.prefix_anns():
if rib_entry.prefix == subprefix:
logger.debug(f"Found subprefix in RIB of {self.asn}")
does_not_have_subprefix = False
                            assert rib_entry.blackhole == True, "The found subprefix does not have blackhole set to true"
                            assert rib_entry.traceback_end == True, "The found subprefix does not have traceback_end set to true"
if does_not_have_subprefix:
# We need to create our own subprefix ann
# Since we may not have actually received the hijack
# Since this policy is for hidden hijacks
blackhole_ann = ann.copy(
prefix=subprefix,
roa_valid_length=False,
roa_origin=engine_input.victim_asn,
blackhole=True,
traceback_end=True)
holes.append(blackhole_ann)
for hole in holes:
# Add blackhole ann to localRIB
self._local_rib.add_ann(hole)
class ROVSMSK1(ROVSMS):
name = "ROV V4 K1"
__slots__ = tuple()
trusted_server = TrustedServer(max_num_dishonest_nodes=1)
def __init__(self, *args, **kwargs):
super(ROVSMS, self).__init__(*args, **kwargs)
class ROVSMSK2(ROVSMS):
name = "ROV V4 K2"
__slots__ = tuple()
trusted_server = TrustedServer(max_num_dishonest_nodes=2)
def __init__(self, *args, **kwargs):
super(ROVSMS, self).__init__(*args, **kwargs)
class ROVSMSK3(ROVSMS):
name = "ROV V4 K3"
__slots__ = tuple()
trusted_server = TrustedServer(max_num_dishonest_nodes=3)
def __init__(self, *args, **kwargs):
super(ROVSMS, self).__init__(*args, **kwargs) | python |
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..linalg_builder import FuncRegistry, is_int, is_float, broadcast_type
from ..func_registry import add_func
import math
add_func(slice, "slice")
add_func(range, "range")
registry = FuncRegistry()
def register_func(name, orig_func=None):
global registry
return registry.register_func(name, orig_func)
@register_func("bool", bool)
def bool_cast_impl(builder, arg):
return builder.cast(arg, builder.bool)
@register_func("int", int)
def int_cast_impl(builder, arg):
return builder.cast(arg, builder.int64)
@register_func("float", float)
def float_cast_impl(builder, arg):
return builder.cast(arg, builder.float64)
@register_func("len", len)
def len_impl(builder, arg):
return builder.cast(len(arg), builder.int64)
def _get_type(builder, v):
if isinstance(v, float):
return builder.float64
elif isinstance(v, int):
return builder.int64
return v.type
@register_func("min", min)
def min_impl(builder, *args):
if len(args) > 2:
rhs = min_impl(builder, *args[1:])
else:
rhs = args[1]
lhs = args[0]
res_type = broadcast_type(
builder, (_get_type(builder, lhs), _get_type(builder, rhs))
)
lhs = builder.cast(lhs, res_type)
rhs = builder.cast(rhs, res_type)
cond = lhs < rhs
return builder.select(cond, lhs, rhs)
@register_func("max", max)
def max_impl(builder, *args):
if len(args) > 2:
rhs = max_impl(builder, *args[1:])
else:
rhs = args[1]
lhs = args[0]
res_type = broadcast_type(
builder, (_get_type(builder, lhs), _get_type(builder, rhs))
)
lhs = builder.cast(lhs, res_type)
rhs = builder.cast(rhs, res_type)
cond = lhs > rhs
return builder.select(cond, lhs, rhs)
def _gen_math_funcs():
def get_func(name, N):
def func(builder, *args):
if len(args) != N:
return None
t = args[0].type
if not is_int(t, builder) and not is_float(t, builder):
return None
for a in args[1:]:
if a.type != t:
return None
fname = name
if t == builder.float32:
fname = "f" + fname
elif t != builder.float64:
t = builder.float64
args = tuple(builder.cast(arg, builder.float64) for arg in args)
res = builder.cast(0, t)
return builder.external_call(fname, args, res, decorate=False)
return func
math_funcs = [
("log", 1),
("sqrt", 1),
("exp", 1),
("erf", 1),
("sin", 1),
("cos", 1),
("tanh", 1),
("atan2", 2),
]
for func, N in math_funcs:
fname = "math." + func
py_func = eval(fname)
register_func(fname, py_func)(get_func(func, N))
_gen_math_funcs()
del _gen_math_funcs
| python |
from keras.models import load_model
from keras.optimizers import SGD, Adam
from skimage.io import imshow
from cnnlevelset.pascalvoc_util import PascalVOC
from cnnlevelset.localizer import Localizer
from cnnlevelset.generator import pascal_datagen, pascal_datagen_singleobj
from cnnlevelset import config as cfg
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
tf.python.control_flow_ops = tf
nb_epoch = 160
pascal = PascalVOC(voc_dir=cfg.PASCAL_PATH)
if len(sys.argv) > 1:
if sys.argv[1] == 'test':
X_img_test, X_test, y_test = pascal.get_test_data(10, random=True)
localizer = Localizer(model_path=cfg.MODEL_PATH)
cls_preds, bbox_preds = localizer.predict(X_test)
for img, y, cls_pred, bbox_pred in zip(X_img_test, y_test, cls_preds, bbox_preds):
label = pascal.idx2label[np.argmax(cls_pred)]
print(label)
img = img.reshape(224, 224, 3)
imshow(pascal.draw_bbox(img, bbox_pred))
plt.show()
sys.exit(0)
X_train, y_train = pascal.load_features_trainset()
y_cls = y_train[:, :, 0]
y_reg = y_train[:, :, 1:]
idxes = np.argmax(y_cls, axis=1)
y_reg = y_reg[range(y_train.shape[0]), idxes]
y_train = [y_cls, y_reg]
localizer = Localizer()
localizer.train(X_train, y_train, nb_epoch=nb_epoch)
| python |
from .InteractionRedshift import InteractionRedshift | python |
N = int(raw_input())
if N < 0:
print N * -1
else:
print N
| python |
#!/usr/bin/python
#
# Nagios class.
#
version = "1.2.2"
from core import *
| python |
"""
Created on Wed Feb 5 13:04:17 2020
@author: matias
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
import emcee
import corner
from scipy.interpolate import interp1d
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_data import leer_data_taylor, leer_data_BAO
from funciones_BAO import params_to_chi2_taylor
np.random.seed(1)
#%%
os.chdir(path_git+'/Software/Estadística/Datos/BAO/')
dataset = []
archivo_BAO = ['datos_BAO_da.txt','datos_BAO_dh.txt','datos_BAO_dm.txt',
'datos_BAO_dv.txt','datos_BAO_H.txt']
for i in range(5):
aux = leer_data_BAO(archivo_BAO[i])
dataset.append(aux)
#%% Defaults:
omega_m_true = 0.24
b_true = 0.01
H0_true = 73.48 # Units of (km/s)/Mpc
n = 1
nll = lambda theta: params_to_chi2_taylor(theta, n, dataset)
initial = np.array([omega_m_true,b_true,H0_true])
bnds = ((0.1,0.4),(-1,1),(50,80))
soln = minimize(nll, initial, bounds=bnds)#, options = {'eps': 0.01})
omega_m_ml, b_ml, H0_ml = soln.x
print(omega_m_ml, b_ml, H0_ml)
os.chdir(path_git + '/Software/Estadística/Resultados_simulaciones/LCDM')
np.savez('valores_medios_HS_BAO_3params_taylor', sol=soln.x)
| python |
"""Linear Classifiers."""
import numpy as np
from abc import ABC, abstractmethod
from alchina.exceptions import InvalidInput, NotFitted
from alchina.metrics import accuracy_score
from alchina.optimizers import GradientDescent
from alchina.preprocessors import Standardization
from alchina.utils import check_dataset_consistency, features_reshape
class AbstractLinearClassifier(ABC):
"""Abstract class for linear classifiers algorithms."""
def __init__(self, *args, optimizer=None, standardize: bool = True, **kwargs):
self.standardize = Standardization() if standardize else None
self.optimizer = optimizer if optimizer else GradientDescent(*args, **kwargs)
self.optimizer.build(self.cost, self.gradient)
self.labels = None
@abstractmethod
def hypothesis(self, X, theta):
"""Hypothesis."""
pass # pragma: no cover
@abstractmethod
def cost(self, X, y, theta):
"""Cost function."""
pass # pragma: no cover
@abstractmethod
def gradient(self, X, y, theta):
"""Gradient."""
pass # pragma: no cover
@property
def parameters(self):
return self.optimizer.parameters
@property
def history(self):
return self.optimizer.history
def fit(self, X, y):
"""Fit the model."""
X = features_reshape(X)
if not check_dataset_consistency(X, y):
raise InvalidInput("the features set and target set must have as many rows")
if self.standardize is not None:
X = self.standardize(X)
X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
self.labels = np.unique(y)
n_labels = np.size(self.labels)
if n_labels < 2:
raise InvalidInput("target must have at least two different classes")
elif n_labels == 2:
self.optimizer(X, y)
else:
self.optimizer(X, (y == self.labels).astype(int))
def predict_probability(self, X):
"""Predict the probability of a target given features."""
if self.parameters is None or self.labels is None:
raise NotFitted("the model must be fitted before usage")
X = features_reshape(X)
if self.standardize is not None:
X = self.standardize(X)
X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
return self.hypothesis(X, self.parameters)
def predict(self, X):
"""Predict a target given features."""
probability = self.predict_probability(X)
if np.size(probability, axis=1) > 1:
return self.labels[np.argmax(probability, axis=1).reshape(-1, 1)]
return self.labels[np.around(probability).astype("int")]
def score(self, X, y):
"""Score of the model."""
if self.parameters is None or self.labels is None:
raise NotFitted("the model must be fitted before usage")
return accuracy_score(self.predict(X), y)
class LinearClassifier(AbstractLinearClassifier):
"""Linear classifier (logistic regressor)."""
def sigmoid(self, z):
"""Logistic function."""
return 1 / (1 + np.exp(-z))
def hypothesis(self, X, theta):
"""Logistic hypothesis."""
return self.sigmoid(np.dot(X, theta))
def cost(self, X, y, theta):
"""Cost function."""
return (
-y.T.dot(np.log(self.hypothesis(X, theta)))
- (1 - y).T.dot(np.log(1 - self.hypothesis(X, theta)))
).flat[0]
def gradient(self, X, y, theta):
"""Gradient."""
return X.T.dot(self.hypothesis(X, theta) - y)
class RidgeClassifier(AbstractLinearClassifier):
"""Regularized linear classifier."""
def __init__(self, *args, regularization: float = 1, **kwargs):
super().__init__(*args, **kwargs)
self.regularization = regularization
def sigmoid(self, z):
"""Logistic function."""
return 1 / (1 + np.exp(-z))
def hypothesis(self, X, theta):
"""Logistic hypothesis."""
return self.sigmoid(np.dot(X, theta))
def cost(self, X, y, theta):
"""Regularized cost function."""
return (
-y.T.dot(np.log(self.hypothesis(X, theta)))
- (1 - y).T.dot(np.log(1 - self.hypothesis(X, theta)))
).flat[0] + self.regularization * np.sum(np.square(theta[:, 1:]), axis=0)
def gradient(self, X, y, theta):
"""Regularized gradient."""
return (
X.T.dot(self.hypothesis(X, theta) - y)
+ self.regularization * np.c_[np.zeros((theta.shape[0], 1)), theta[:, 1:]]
)
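
# A minimal usage sketch (not part of the library above, shown only for
# illustration); it relies solely on the methods defined in this module and
# assumes the default GradientDescent optimizer converges on the toy data:
#
#   import numpy as np
#   X = np.array([[0.0], [1.0], [2.0], [3.0]])
#   y = np.array([[0], [0], [1], [1]])
#   clf = LinearClassifier()
#   clf.fit(X, y)
#   predictions = clf.predict(X)
#   accuracy = clf.score(X, y)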
| python |
"""
This is a crawler that downloads 'friends' screenplays.
"""
import re
import requests
from bs4 import BeautifulSoup
from seinfeld_laugh_corpus.corpus_creation.screenplay_downloader.screenplay_downloader import ScreenplayDownloader
def run(input_filename, output_filename):
    screenplay_downloader = FriendsScreenplayDownloader()
screenplay_downloader.download(input_filename, output_filename)
class FriendsScreenplayDownloader(ScreenplayDownloader):
friends_scripts_url = 'https://fangj.github.io/friends/season/'
def _download_screenplay(self, season_num, episode_num, is_double_episode):
screenplay_url = self._get_screenplay_url(season_num, episode_num)
url_content = self._get_content(screenplay_url)
# get text
soup = BeautifulSoup(url_content, 'lxml')
try:
header = soup.find_all("hr", limit=2)[-1]
except IndexError:
header = soup.find("p", class_="scene")
s = header.find_all_next("p")
s = [tag for tag in s if not ('align' in tag.attrs or 'class' in tag.attrs)]
screenplay_txt = "\n".join((line.get_text() for line in s if "transcribed by:" not in line.get_text().lower()))
result = screenplay_txt
if is_double_episode:
return [result, self._download_screenplay(season_num, episode_num + 1, False)[0]]
else:
return [result]
def _get_screenplay_url(self, season_num, episode_num):
return self.friends_scripts_url + "%02d%02d.html" % (season_num, episode_num)
def _cleanup(self, screenplay_txt):
lines = re.split(r"[\n\r\t]+", screenplay_txt)
lines = [l for l in lines if l]
lines = self._capitalize_all_character_names(lines)
lines = lines[:-1] if "end" in lines[-1].lower() else lines
return "\n".join(lines)
if __name__ == '__main__':
# test
downloader = FriendsScreenplayDownloader()
print(downloader.download("S10E01.mkv", "S10E01.screenplay"))
| python |
"a shared stack module"
stack = []
class error(Exception): pass
def push(obj):
global stack
stack = [obj] + stack
def pop():
global stack
if not stack:
raise error('stack underflow')
top, *stack = stack
return top
def top():
if not stack:
raise error('stack underflow')
return stack[0]
def empty():
return not stack
def member(obj):
return obj in stack
def item(offset):
return stack[offset]
def length():
return len(stack)
def dump():
print('<Stack:{}>'.format(stack))
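
# A minimal usage sketch of this shared-stack module (assuming it is imported
# as `stack`); the results follow directly from the functions above:
#
#   import stack
#   stack.push(1)
#   stack.push(2)
#   stack.top()      # -> 2
#   stack.pop()      # -> 2
#   stack.length()   # -> 1
#   stack.empty()    # -> False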
| python |
def solution(A): # O(N)
"""
Given a variable length array of integers, partition them such that the even
integers precede the odd integers in the array. Your must operate on the array
in-place, with a constant amount of extra space. The answer should scale
linearly in time with respect to the size of the array.
>>> solution([7, 7, 4, 0, 9, 8, 2, 4, 1, 9])
[4, 2, 4, 0, 8, 9, 7, 7, 1, 9]
"""
i = 0 # O(1)
j = len(A) - 1 # O(1)
while i < j: # O(<N)
if A[i] % 2 == 0: # O(1)
i += 1 # O(1)
if A[j] % 2 == 1: # O(1)
j -= 1 # O(1)
if A[i] % 2 == 1 and A[j] % 2 == 0: # O(1)
A[i], A[j] = A[j], A[i] # O(1)
i += 1 # O(1)
j -= 1 # O(1)
return A # O(1)
if __name__ == '__main__':
import doctest
doctest.testmod()
| python |
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""
Some simple unit tests of the Counter device, exercising the device from
the same host as the tests by using a DeviceTestContext.
"""
import logging
import time
import pytest
import tango
from tango.test_utils import DeviceTestContext
from ska_tango_examples.counter.Counter import Counter
@pytest.fixture
def counter(request):
"""Create DeviceProxy for tests"""
true_context = request.config.getoption("--true-context")
if not true_context:
with DeviceTestContext(Counter) as proxy:
yield proxy
else:
database = tango.Database()
instance_list = database.get_device_exported_for_class("Counter")
for instance in instance_list.value_string:
yield tango.DeviceProxy(instance)
break
def test_init(counter):
counter.Init()
print(counter.value)
assert counter.value == 0
def test_increment(counter):
counter.Init()
value_before_inc = counter.value
counter.increment()
assert value_before_inc == counter.value - 1
def test_decrement(counter):
counter.Init()
value_before_inc = counter.value
counter.decrement()
assert value_before_inc == counter.value + 1
def test_reset(counter):
counter.Init()
counter.CounterReset(1)
assert counter.value == 1
@pytest.mark.post_deployment
def test_polled_value(counter):
pytest.count = 0
def count_events(evt):
logging.info("%s", evt)
pytest.count += 1
counter.subscribe_event(
"polled_value",
tango.EventType.CHANGE_EVENT,
count_events,
)
counter.increment()
time.sleep(1)
counter.increment()
time.sleep(1)
counter.increment()
time.sleep(1)
assert pytest.count == 4 # 3 changes, 1 subscription
| python |
#!/bin/python3
# Imports
import math
import os
import random
import re
import sys
#
# Instructions
#
def solution_function(a, b):
# Write your code here
return [a, b]
if __name__ == '__main__':
a_count = int(input().strip())
a = []
for _ in range(a_count):
a_item = input()
a.append(a_item)
b_count = int(input().strip())
b = []
for _ in range(b_count):
b_item = input()
b.append(b_item)
result = solution_function(a, b)
print('\n'.join(map(str, result)))
print('\n')
| python |
# 2017-04-16
"""
Using first half of Knuth-Morris-Pratt (KMP) pattern-matching
for shortest repeating sub-pattern (SRSP) determination in O(n) time
Both the left edge and the right edge are "sacred" locations: if a repeating
sub-pattern covers the whole input string, it starts at the left edge and ends
at the right edge. There is always a repeating sub-pattern, even if it happens
to be the whole string of size n, and for the bulk of the algorithm we never
match the whole string with the first half of KMP. Three cases cover all
possibilities:

1. A smaller repeating sub-pattern exists, e.g. input string "abcabcabc" with
   sub-pattern "abc"; removing the maximum proper suffix from the whole string
   leaves the smallest repeating sub-pattern "abc".
2. The normal-prefix/proper-suffix overlap is non-empty but there is no smaller
   repeating sub-pattern, e.g. "abcpppabc"; removing the maximum proper suffix
   leaves "abcppp", but n % leftover_size = 9 % 6 != 0, so the smallest
   repeating sub-pattern is the whole string "abcpppabc".
3. The normal-prefix/proper-suffix overlap is empty and there is no smaller
   repeating sub-pattern, e.g. "abcpppppp"; removing the maximum proper suffix
   leaves "abcpppppp", so the smallest repeating sub-pattern is the whole
   string "abcpppppp".

The key is that these three situations cover the whole space of possible
situations, and the left and right edges are "sacred" because they are what
the first half of KMP (table building) works with: if a repeating pattern
exists, it exists at the left and right edges.
"""
"""
inspired by buge
"""
# first half of KMP
def KMPFailureFunction(pattern_str):
i = 1
j = 0
m = len(pattern_str)
f = [0] * m
while i < m:
if pattern_str[j] == pattern_str[i]:
f[i] = j + 1
i = i + 1
j = j + 1
elif j > 0:
j = f[j - 1]
else:
f[i] = 0
i = i + 1
return f
# uses first half of KMP
def SRSP(pattern_str):
if len(pattern_str) == 0:
return []
m = len(pattern_str)
f = KMPFailureFunction(pattern_str)
proper_suffix_size = f[m - 1]
left_piece_size = m - proper_suffix_size
if m % left_piece_size == 0:
return pattern_str[ : left_piece_size]
else:
return pattern_str
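
# Worked examples of the three cases described in the module docstring
# (the results follow from KMPFailureFunction/SRSP above):
#   SRSP("abcabcabc")  -> "abc"        # case 1: prefix/suffix overlap "abcabc", 9 % 3 == 0
#   SRSP("abcpppabc")  -> "abcpppabc"  # case 2: overlap "abc", but 9 % 6 != 0
#   SRSP("abcpppppp")  -> "abcpppppp"  # case 3: empty overlap, 9 % 9 == 0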
# second half of KMP
# retrieve index for beginning of first occurrence of P in T
def KMPMatch(T, P):
n = len(T)
m = len(P)
f = KMPFailureFunction(P)
i = 0
j = 0
while i < n:
if P[j] == T[i]:
if j == m - 1:
return i - m + 1
i = i + 1
j = j + 1
elif j > 0:
j = f[j - 1]
else:
i = i + 1
raise Exception("no substring of T matching P")
def main():
print SRSP("abcabcabc")
print KMPMatch("abacaabaccabacabaabb", "abacab") == 10
if __name__ == "__main__":
main()
| python |
"""
https://wiki.jikexueyuan.com/project/easy-learn-algorithm/floyd.html
"""
def floyd_warshall(edges, V):
    # dp: distance between each vertex pair (i, j)
dp = [[float('inf')] * V for _ in range(V)]
for i in range(V):
dp[i][i] = 0
    # initialize dp from the edge list
for u, v, w in edges:
dp[u][v] = w
    # introduce intermediate vertex k and update the i..j distances
for k in range(V):
        # the inner loops enumerate every vertex pair (i, j)
        # at this point dp already holds the optimum using intermediates 0,...,k-1
        # dp[i][k] is the best i->k distance with intermediates limited to 0,...,k-1
        # dp[k][j] is the best k->j distance with intermediates limited to 0,...,k-1
        # the key point: k is itself just one of the vertices, and the optimum for step k-1 is already known
for i in range(V):
for j in range(V):
dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
print(dp)
V = 4
edges = [
(0, 1, 2),
(0, 2, 6),
(0, 3, 4),
(1, 2, 3),
(2, 0, 7),
(2, 3, 1),
(3, 0, 5),
(3, 2, 12)
]
floyd_warshall(edges, V)
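# Hand-checked expected output for the example above (all-pairs shortest paths):
# [[0, 2, 5, 4],
#  [9, 0, 3, 4],
#  [6, 8, 0, 1],
#  [5, 7, 10, 0]]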
| python |
import logging
log = logging.getLogger('agents')
from enforce_typing import enforce_types
from typing import List, Dict
import random
import math
from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens
from engine.AgentBase import AgentBase
from web3tools.web3util import toBase18
from util.constants import S_PER_MONTH
@enforce_types
class PublicKnowledgeMarketAgent(AgentBase):
'''
Public knowledge market. Stores all private knowledge assets (data, algorithms, compute),
distributes rewards to asset owners, sends fees to DAOTreasury
Properties:
- collects/stores knowledge assets (and OCEAN)
- sends transaction fees to DAO Treasury & Stakers
- sends OCEAN to Researchers for publishing knowledge assets
- collects OCEAN (this will be a fixed ratio from the funding,
representing the researchers publishing their research papers on the platform
(basically the value of their research))
'''
def __init__(self, name: str, USD: float, OCEAN: float,
transaction_fees_percentage: float,
fee_receiving_agents=None):
"""receiving_agents -- [agent_n_name] : method_for_%_going_to_agent_n
The dict values are methods, not floats, so that the return value
can change over time. E.g. percent_burn changes.
"""
super().__init__(name, USD, OCEAN)
self._receiving_agents = fee_receiving_agents
#track amounts over time
self._USD_per_tick: List[float] = [] #the next tick will record what's in self
self._OCEAN_per_tick: List[float] = [] # ""
self.OCEAN_last_tick = 0.0
self.transaction_fees_percentage = transaction_fees_percentage
self.total_fees: float = 0.0
self.knowledge_assets: dict = {}
self.total_knowledge_assets = 0
self.types = ['algo', 'data', 'compute']
def _ToDistribute(self, state):
received = self.OCEAN() - self.OCEAN_last_tick
if received > 0:
fees = 0
OCEAN_to_self = 0
sum_OCEAN_received = 0.0
# iterate through all researchers
for researcher in state.researchers.keys():
r = state.getAgent(researcher)
# if r.last_tick_spent == (state.tick-1) or r.last_tick_spent == state.tick or r.last_tick_spent == (state.tick - 2):
# get the OCEAN received by this agent (add it to total for assertion later)
received_from_r = r.last_OCEAN_spent
if received_from_r != {}:
# make sure the researcher is really buying from this market
if received_from_r['market'] == 'private_market':
continue
assert received_from_r['market'] == 'public_market'
sum_OCEAN_received += received_from_r['spent']
ratio = received_from_r['ratio']
# print(f"RESEARCHER: {r.name} | received_from {received_from_r} | RATIO: {ratio}")
# new publishing functionality | if the researcher is publishing assets to the marketplace
if received_from_r['publish'] and r.research_type == 'public':
# add total knowledge_assets
self.total_knowledge_assets += r.proposal['assets_generated']
if r.asset_type not in self.knowledge_assets.keys():
self.knowledge_assets[r.asset_type] = r.proposal['assets_generated']
else:
self.knowledge_assets[r.asset_type] += r.proposal['assets_generated']
# calculate fee for this transaction
r_fee = received_from_r['spent'] * self.transaction_fees_percentage
fees += r_fee # append it to total fees
# to self
OCEAN_to_self += (received_from_r['spent'] - r_fee) * ratio
fees += received_from_r['spent'] - r_fee - OCEAN_to_self # since this is public, on top of the fees, the price for the asset also goes to the treasury
if round(sum_OCEAN_received, 5) != round(received, 5):
OCEAN_to_self += received - sum_OCEAN_received
sum_OCEAN_received += OCEAN_to_self
assert round(sum_OCEAN_received, 5) == round(received, 5) # sum of the OCEAN received from researchers must equal the total received
return fees, OCEAN_to_self
else:
return 0, 0
def _disburseFeesOCEAN(self, state, fee) -> None:
'''
Sends transaction fees to DAO Treasury and to Stakers
ratio of fees transferred is determined by the amount of OCEAN left in treasury vs. the amount
of OCEAN staked by Stakers
'''
self.total_fees += fee
total = 0
for percent in self._receiving_agents.values():
total += fee*percent
assert (round(total, 1) == round(fee, 1))
for name, computePercent in self._receiving_agents.items():
self._transferOCEAN(state.getAgent(name), computePercent * fee)
def takeStep(self, state):
fee, keep = self._ToDistribute(state)
if fee > 0:
self._disburseFeesOCEAN(state, fee)
#record what we had up until this point
self._USD_per_tick.append(self.USD())
self._OCEAN_per_tick.append(self.OCEAN())
self.OCEAN_last_tick = self.OCEAN() | python |
import os
import tempfile
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected as fc
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import timeline
batch_size = 100
inputs = tf.placeholder(tf.float32, [batch_size, 784])
targets = tf.placeholder(tf.float32, [batch_size, 10])
with tf.variable_scope("layer_1"):
fc_1_out = fc(inputs, num_outputs=500, activation_fn=tf.nn.sigmoid)
with tf.variable_scope("layer_2"):
fc_2_out = fc(fc_1_out, num_outputs=784, activation_fn=tf.nn.sigmoid)
with tf.variable_scope("layer_3"):
logits = fc(fc_2_out, num_outputs=10)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
if __name__ == '__main__':
mnist_save_dir = os.path.join(tempfile.gettempdir(), 'MNIST_data')
mnist = input_data.read_data_sets(mnist_save_dir, one_hot=True)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
for i in range(3):
batch_input, batch_target = mnist.train.next_batch(batch_size)
feed_dict = {inputs: batch_input,
targets: batch_target}
sess.run(train_op,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('timeline_02_step_%d.json' % i, 'w') as f:
f.write(chrome_trace)
| python |
from .model import FaPN | python |
import sys
import vnpy_chartwizard
sys.modules[__name__] = vnpy_chartwizard
| python |
level = 3
name = 'Arjasari'
capital = 'Patrolsari'
area = 64.98
| python |
"""
Module responsibilities:
1. Crawl all essays currently stored on the Pigai essay-grading site (批改网)
2. Clean and preprocess the data
3. Database keys: pid (essay id), title (essay title), abstract (summary),
   refer (reference answer, may be empty), spider_time (crawl time),
   source_href (URL of the answer page)
"""
from gevent import monkey
monkey.patch_all()
import json
import requests
from lxml import etree
import gevent
from gevent.queue import Queue
from fake_useragent import UserAgent
work_q = Queue()
pids = dict()
session = requests.session()
with open('../database/cookies.txt', 'r') as f:
# cookies_dict = json.loads(f.read())
# cookies = ';'.join(['{}:{}'.format(i['name'], i['value']) for i in json.loads(f.read())])
data = json.loads(f.read())
cookies_dict = dict(zip([i['name'] for i in data], [i['value'] for i in data]))
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
session.cookies = cookies
print(session.cookies)
def handle_html(url):
headers = {
'User-Agent': UserAgent().random,
'Host': 'tiku.pigai.org',
'DNT': '1',
# 'Cookie': cookies,
}
res = session.get(url, headers=headers)
if res.status_code == 200:
        print('>>> request succeeded')
tree = etree.HTML(res.text)
# print(res.text)
titles = tree.xpath("//li[@class='title']/text()")
for title in titles:
print(title)
def coroutine_engine():
while not work_q.empty():
url = work_q.get_nowait()
handle_html(url)
def coroutine_speed_up(power: int = 4):
task_list = []
for x in range(power):
task = gevent.spawn(coroutine_engine)
task_list.append(task)
gevent.joinall(task_list)
def run():
pass
if __name__ == '__main__':
handle_html('http://tiku.pigai.org/Home/Index/essayNormal/tp/0/yycd/1/grade/%E5%A4%A7%E5%AD%A6%E8%8B%B1%E8%AF%AD')
| python |
import threading
import csv
import re
from sqlalchemy import create_engine
from IPython.display import display, Javascript, HTML
from ..python_js.interface_js import load_js_scripts
def threaded(fn):
def wrapper(*args, **kwargs):
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
class HTMLTable(list):
"""
Creates an HTML table if pandas isn't installed.
The .empty attribute takes the place of df.empty,
and to_csv takes the place of df.to_csv.
"""
def __init__(self, data, id_):
self.id_ = id_
self.data = data
empty = []
def _repr_html_(self, n_rows=100, length=100, edit=False):
table = '<table id="table'+self.id_+'" width=100%>'
thead = '<thead><tr>'
tbody = '<tbody>'
j = 48
query_plan = False
for n,row in enumerate(self.data):
if n == 0:
if list(row):
query_plan = True if row[0] == 'QUERY PLAN' else False
if query_plan:
execution_time = re.findall('[0-9]{,}\.[0-9]{,}', str(self.data[-1][0]))
execution_time = execution_time if not execution_time else float(execution_time[0])
                thead += '<th>' + ' ' + '</th>' + ''.join([('<th>' + str(r) + '</th>') for r in row])
elif n > n_rows:
if not query_plan:
break
else:
if not query_plan:
if n > 50 and length > 100:
n = length - j
j -= 1
tbody += '<tr class="text-nowrap"><td>' + str(n) + '</td>' + ''.join([('<td tabindex="1" data-column="'+str(r).replace('\\', '\\\\')+'">' + str(r).replace('\\', '\\\\') + '</td>') for r in row]) + '</tr>'
else:
section_time = re.search('actual time=([0-9]{,}\.[0-9]{,})\.\.([0-9]{,}\.[0-9]{,})', str(row[0]))
background_color = ""
if section_time:
start_time = float(section_time.group(1))
stop_time = float(section_time.group(2))
if (stop_time - start_time) > (execution_time * 0.9):
background_color = "#800026"
elif (stop_time - start_time) > (execution_time * 0.8):
background_color = "#bd0026"
elif (stop_time - start_time) > (execution_time * 0.7):
background_color = "#e31a1c"
elif (stop_time - start_time) > (execution_time * 0.6):
background_color = "#fc4e2a"
elif (stop_time - start_time) > (execution_time * 0.5):
background_color = "#fd8d3c"
elif (stop_time - start_time) > (execution_time * 0.4):
background_color = "#feb24c"
elif (stop_time - start_time) > (execution_time * 0.3):
background_color = "#fed976"
elif (stop_time - start_time) > (execution_time * 0.2):
background_color = "#ffeda0"
elif (stop_time - start_time) > (execution_time * 0.1):
background_color = "#ffffcc"
else:
background_color = ""
td_row = '<tr><td>' + str(n) + '</td>' + ''.join([('<td>' + str(r).replace(' ', ' ') + '</td>') for r in row]) + '</tr>'
repl = '<b style="background-color:{color};">actual time</b>'.format(color=background_color)
td_row = re.sub('actual time', repl, td_row)
tbody += td_row
# tbody += '<tr style="height:40px;">' + ''.join([('<td></td>') for r in row]) + '</tr>' # for adding new row
thead += '</tr></thead>'
tbody += '</tbody>'
table += thead + tbody
return table
@threaded
def display(self, columns=[], msg=None):
data = self.data if len(self.data) <= 100 else self.data[:49] + [['...'] * (len(self.data[0]))] + self.data[-49:]
table_str = HTMLTable([columns] + data, self.id_)._repr_html_(n_rows=100, length=len(self.data))
table_str = table_str.replace('<table', '<table class="table-striped table-hover table-bordered"').replace("'", "\\'").replace('\n','')
display(
HTML(
"""
<script type="text/Javascript">
$('#dbinfo{id}').append('{msg}');
$('#table{id}').append('{table}');
</script>
""".format(msg=str(msg), table=table_str, id=self.id_)
)
)
def to_csv(self, path):
with open(path, 'w') as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(self.data)
def build_dict(output, row, __KERNEL_VARS__):
output[row.replace('%(','').replace(')s','')] = eval("__KERNEL_VARS__.get('"+row.replace('%(','').replace(')s','')+"')")
return output
def kill_last_pid(app=None, db=None):
connection = create_engine("postgresql://tdobbins:tdobbins@localhost:5432/"+db+"?application_name=garbage_collection")
try:
pid_sql = """
SELECT pid
FROM pg_stat_activity
where application_name = %(app)s
"""
pids = [i.pid for i in connection.execute(pid_sql, {
'app': app
}
)]
for pid in pids:
cancel_sql = "select pg_cancel_backend(%(pid)s);"
cancel_execute = [i for i in connection.execute(cancel_sql, {
'pid': pid
}
)]
print 'cancelled postgres job:', pid, 'application: ', app
return True
except Exception as e:
print e
return False
finally:
print 'closing DB connection....'
connection.dispose()
return True
class ParseNodes(object):
def __init__(self, obj):
self.obj = obj
def get_depth(self, itr=0, depth=[]):
if isinstance(self.obj, dict):
for k, v2 in self.obj.items():
if 'Plan' in k:
if k == 'Plans':
itr += 1
depth.append(itr)
ParseNodes(v2).get_depth(itr=itr, depth=depth)
elif isinstance(self.obj, list):
for i, v2 in enumerate(self.obj):
if 'Plans' in v2:
ParseNodes(v2).get_depth(itr=itr, depth=depth)
else:
depth.append(itr)
return depth
@staticmethod
def build_node(id_, node, xPos):
_node = {
'name': id_,
'nodetype': node.get('Plan', node).get('Node Type'),
'starttime': node.get('Plan', node).get('Actual Startup Time'),
'endtime': node.get('Plan', node).get('Actual Total Time'),
'subplan': node.get('Plan', node).get('Subplan Name'),
'display': str(node.get('Plan', node).get('Join Filter',
node.get('Filter',
node.get('Index Cond',
node.get('Hash Cond',
node.get('One-Time Filter',
node.get('Recheck Cond',
node.get('Group Key')
)
)
)
)
)
) or '') + (' using '
+ str(node.get('Index Name',
node.get('Relation Name',
node.get('Schema')))) + ' ' + str(node.get('Alias')or'')
if node.get('Index Name',
node.get('Relation Name',
node.get('Schema')))
else ''),
'rows': node.get('Plan', node).get('Plan Rows'),
'xPos': xPos
}
return _node
def node_walk(self, key, nodes={}, xPos=None):
if not nodes.get('nodes'):
nodes['nodes'] = []
nodes['links'] = []
nodes['executionTime'] = self.obj.get('Execution Time')
nodes['depth'] = 0
target = id(self.obj)
source_node = ParseNodes.build_node(target, self.obj, xPos)
xPos -= 1
if source_node not in nodes['nodes']:
nodes['nodes'].append(source_node)
for i in self.obj.get('Plan', self.obj)[key]:
source = id(i)
if isinstance(i, dict):
plans = i.get('Plans')
target_node = ParseNodes.build_node(source, i, xPos)
if target_node not in nodes['nodes']:
nodes['nodes'].append(target_node)
nodes['links'].append({'source':source, 'target':target,'value':i.get('Total Cost')})
if plans:
nodes['depth'] += 1
ParseNodes(i).node_walk('Plans', nodes, xPos)
return nodes
def load_js_files():
display(Javascript(
load_js_scripts()
))
return None | python |
from microbit import *
from math import sqrt
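# Map the magnitude of the acceleration vector (roughly 1000 milli-g at rest,
# which is what the formula below assumes) to a display row: row 2 at rest,
# one row per ~100 milli-g of change, clamped to the 5x5 LED matrix.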
while True:
x, y, z = accelerometer.get_values()
acc = sqrt(x*x + y*y + z*z)
y = int(2 + (acc - 1000) / 100)
display.clear()
if y < 0:
y = 0
if y > 4:
y = 4
for x in range(0, 5):
display.set_pixel(x, y, 9) | python |
from datetime import datetime, timedelta
from discord.ext import commands
from lib.mysqlwrapper import mysql
from lib.rediswrapper import Redis
from typing import Optional
import discord
import lib.embedder
import logging
import uuid
class FriendCode(commands.Cog):
def __init__(self, client):
self.client = client
# Set up the loggers
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
self.logger.info("Loading friendcode cog")
# Set up Redis
self.temp_redis = Redis("temp_message:friendcode")
def cog_unload(self):
self.logger.info("Unloading friendcode cog")
def is_guild_owner():
def predicate(ctx):
return ctx.guild is not None and \
ctx.guild.owner_id == ctx.author.id
return commands.check(predicate)
@commands.group(
name="friendcode",
aliases=["fc"],
brief="Friend Code Sharing System",
description="Cherubi Bot - Friend Code Sharing System",
usage="[tagged user] [filter] | <add | list | remove>",
help="You can run the command without a tagged user to bring up your \
info, tag a user to bring up theirs, or run one of the \
subcommands that are below.",
invoke_without_command=True
)
async def friendcode_group(
self,
ctx,
target: Optional[discord.Member],
filter=None
):
# If no target is given, use the user who wrote the command
target = target or ctx.author
db = mysql()
query = """
SELECT
up.home_guild AS home_guild,
up.fc_visibility AS visibility,
fc.identifier AS identifier,
fc.code AS code,
fc.main AS main
FROM friend_codes fc
LEFT JOIN user_preferences up ON up.user_id = fc.user_id
WHERE fc.user_id = %s
AND fc.identifier LIKE %s
ORDER BY fc.main DESC, fc.identifier ASC;
"""
results = db.query(query, [target.id, f"%{filter if filter else ''}%"])
db.close()
# Check if the target has any friend codes on file. If not, send a
# warning embed and return.
if not results:
if filter:
await ctx.send(embed=lib.embedder.make_embed(
type="warning",
title=f"{target.display_name}'s Friend Codes",
content=f"No friend codes were found for `{target.display_name}` with `{filter}` in it"
))
return
else:
await ctx.send(embed=lib.embedder.make_embed(
type="warning",
title=f"{target.display_name}'s Friend Codes",
content=f"Sadly `{target.display_name}` doesn't have any friend codes stored."
))
return
# Check if the user's visibility is hidden. If so, give an error and
# return.
if target.id != ctx.author.id and results[0]['visibility'] == "hidden":
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"{target.display_name}'s Friend Codes",
content=f"`{target.display_name}` has their friend code visibility set to hidden. Only they can send them."
))
return
# Check if they have a home server set. If not, give an error and
# return.
if target.id != ctx.author.id and not results[0]['home_guild']:
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"{target.display_name}'s Friend Codes",
content=f"`{target.display_name}` doesn't have a home server set.",
footer=f"They need to run !sethome"
))
return
# Check if the target is the original author,
# if not then check if their visibility is private,
# if it is then check if this is their home guild.
# If it isn't, send an error embed and return.
if (target.id != ctx.author.id
and (not results[0]['visibility'] or results[0]['visibility'] == "private")
and results[0]['home_guild'] != ctx.guild.id):
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"{target.display_name}'s Friend Codes",
content=f"This is not `{target.display_name}`'s home server and their visibility is set to private."
))
return
# Send the instructions message and store the info in Redis for cleanup
# later if needed
delete_delay = 60
message = await ctx.send(embed=lib.embedder.make_embed(
type="info",
title=f"F.C.'s for {target.display_name}",
content=f"The friend codes below are for `{target.display_name}`.\
\n\nThe codes below will auto-delete in 15 minutes. \
\n\nYou can copy-paste the message below right into Pokemon \
GO's Add Friend page, since Pokemon GO only uses the first \
12 characters in a paste to the Add Friend page.",
footer=f"This message will self-destruct in {delete_delay} seconds"
), delete_after=delete_delay)
expire_time = datetime.now() + timedelta(seconds=delete_delay)
self.temp_redis.set(
str(uuid.uuid4()),
f"{ctx.channel.id},{message.id},{expire_time}",
0
)
# For every result returned, send a message with the friend code. Also
# store the info in Redis for cleanup later if needed
delete_delay = 60 * 15
for result in results:
code = str(result['code']).zfill(12)
message = await ctx.send(
f"{code} <- {result['identifier']}{' (main)' if result['main'] else ''}",
delete_after=delete_delay
)
expire_time = datetime.now() + timedelta(seconds=delete_delay)
self.temp_redis.set(
str(uuid.uuid4()),
f"{ctx.channel.id},{message.id},{expire_time}",
0
)
# NOTE: This currently doesn't quite work because on IOS you can't
# copy from an embed's content, but on Android you can. So this is
# being disabled until Discord fixes that.
# delete_delay = 60 * 15
# url = f"https://chart.googleapis.com/chart?chs=300x300&cht=qr&chl={code}"
# message = await ctx.send(embed = lib.embedder.make_embed(
# type = "info",
# title = f"F.C. for {result['identifier']}",
# title_url = url,
# content = code,
# thumbnail = url,
# footer = f"Owned by {target.display_name}"
# ), delete_after=delete_delay)
#
# expire_time = datetime.now() + timedelta(seconds=delete_delay)
# self.temp_redis.set(
# str(uuid.uuid4()),
# f"{ctx.channel.id},{message.id},{expire_time}",
# 0
# )
@friendcode_group.command(
name="add",
aliases=["a"],
brief="Adds / edits a friend code on your list",
description="Cherubi Bot - Friend Code Sharing System",
usage="<trainer name> <friend code>",
help="This adds the given friend code to your list. If you run this \
again with the same trainer name, it'll change the friend code for it."
)
async def add_subcommand(
self,
ctx,
input_identifier,
code,
code_part2="",
code_part3=""
):
# Check that the user has their home guild set. If not, then set it.
# Check if this was invoked from a guild
if not isinstance(ctx.channel, discord.DMChannel):
db = mysql()
query = """
SELECT
user_id,
home_guild
FROM user_preferences
WHERE user_id = %s;
"""
results = db.query(query, [ctx.author.id])
db.close()
# If nothing was returned, then invoke the sethome command
if not results or not results[0]['home_guild']:
await ctx.invoke(self.client.get_command("sethome"))
# This and the additional two code "parts" are for if the user
# uses a separated version of the friend code.
if code_part2 != "" or code_part3 != "":
code = code + code_part2 + code_part3
# Checks if the identifier if over 16 characters long. If so then send
# an error embed and return.
if len(input_identifier) > 16:
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"Error Adding Friend Code",
content="The trainer name / identifier that you gave is longer than the maximum character limit."
))
return
# Check that the friend code was numbers and that it was 12 digits
# long, if it isn't then send an error embed and return
if not code.isdigit():
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"Error Adding Friend Code",
content="The given friend code isn't all numbers."
))
await ctx.send_help(str(ctx.command))
return
if len(code) != 12:
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"Error Adding Friend Code",
content="The given friend code isn't 12 digits long."
))
await ctx.send_help(str(ctx.command))
return
db = mysql()
query = """
INSERT INTO friend_codes (user_id, identifier, code, updated)
VALUES (%s, %s, %s, NOW())
ON DUPLICATE KEY UPDATE
code = VALUES(code),
updated = VALUES(updated);
"""
db.execute(query, [
ctx.message.author.id,
input_identifier,
code
])
db.close()
# Set up the output text ahead of time so that we can add in info if
# needed.
output = f"Added friend code `{code}` for `{input_identifier}`."
# Delete the user's command message, for privacy reasons
if not isinstance(ctx.message.channel, discord.DMChannel):
await ctx.message.delete()
output += "\n\nYour message was deleted for privacy reasons."
delete_delay = 120
message = await ctx.send(embed=lib.embedder.make_embed(
type="success",
title=f"Added Friend Code",
content=output,
footer=f"This message will self-destruct in {delete_delay} seconds"
), delete_after=delete_delay)
expire_time = datetime.now() + timedelta(seconds=delete_delay)
self.temp_redis.set(
str(uuid.uuid4()),
f"{ctx.channel.id},{message.id},{expire_time}",
0
)
@friendcode_group.group(
name="help",
brief="Runs the equivalent of \"help friendcode\"",
description="Cherubi Bot - Shiny Checklist System",
help="",
hidden=True
)
async def help_subcommand(self, ctx):
"""Just an alias for the help command for this
This is an alias for the help page for friendcode for if anyone types
it
"""
await ctx.send(f"_This is the equivalent of running:_\n`{ctx.prefix}help friendcode`")
await ctx.send_help("friendcode")
@friendcode_group.command(
name="list",
aliases=["l"],
brief="Lists all of your friend codes in a single message",
description="Cherubi Bot - Friend Code Sharing System",
help="This lists all of your friend codes in a single message. This \
command is not mobile friendly."
)
async def list_subcommand(self, ctx):
db = mysql()
query = """
SELECT
fc.identifier AS identifier,
fc.code AS code
FROM friend_codes fc
WHERE fc.user_id = %s
ORDER BY fc.identifier ASC;
"""
results = db.query(query, [ctx.author.id])
db.close()
# For every result returned, send an embed with the friend code and
fields = []
for result in results:
fields.append((result['identifier'], result['code'], True))
delete_delay = 60
message = await ctx.send(embed=lib.embedder.make_embed(
type="info",
title=f"F.C. List for {ctx.author.display_name}",
fields=fields,
footer=f"This message will self-destruct in {delete_delay} seconds"
), delete_after=delete_delay)
expire_time = datetime.now() + timedelta(seconds=delete_delay)
self.temp_redis.set(
str(uuid.uuid4()),
f"{ctx.channel.id},{message.id},{expire_time}",
0
)
@friendcode_group.command(
name="listall",
aliases=["list_all"],
brief="Lists all the server's friend codes",
description="Cherubi Bot - Friend Code Sharing System",
help="Lists all friend codes for everyone on your server. This \
command is not mobile friendly"
)
@commands.check_any(commands.is_owner(), is_guild_owner())
@commands.cooldown(1, 30, commands.BucketType.user)
async def listall_subcommand(self, ctx):
# This MySQL statement is janky, but it works. Plus it is just an
# admin command, so it doesn't really matter
db = mysql()
query = """
SELECT
fc.user_id AS user_id,
GROUP_CONCAT(CONCAT(fc.identifier, ': ', LPAD(fc.code, 12, '0')) SEPARATOR '\n') AS information
FROM friend_codes fc
LEFT JOIN user_preferences up ON up.user_id = fc.user_id
WHERE up.home_guild = %s
GROUP BY fc.user_id
ORDER BY fc.identifier ASC;
"""
results = db.query(query, [ctx.guild.id])
db.close()
# For every result returned, send an embed with the friend code and
fields = []
for result in results:
# This is here in case someone leaves the guild, but it is still
# set to their home guild
if ctx.guild.get_member(result['user_id']):
user_name = ctx.guild.get_member(result['user_id']).display_name
else:
user_name = self.client.get_user(result['user_id'])
fields.append((user_name, result['information'], True))
await ctx.send(embed=lib.embedder.make_embed(
type="info",
title=f"F.C. List for {ctx.guild.name}",
fields=fields
))
@friendcode_group.command(
name="remove",
aliases=["r", "delete", "d"],
brief="Removes a friend code from your list.",
description="Cherubi Bot - Friend Code Sharing System",
usage="<trainer name>",
help="Removes the given friend code from your list"
)
async def remove_subcommand(self, ctx, identifier):
db = mysql()
query = """
DELETE FROM friend_codes
WHERE user_id = %s
AND identifier = %s;
"""
db.execute(query, [ctx.author.id, identifier])
count = db.cursor.rowcount
db.close()
        if count == 0:
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"Error Removing Friend Code",
content=f"{identifier} not found on your list."
))
else:
await ctx.send(embed=lib.embedder.make_embed(
type="success",
title=f"Removed Friend Code",
content=f"Removed {identifier} from your list."
))
@friendcode_group.command(
name="setmain",
brief="Sets your main friend code.",
description="Cherubi Bot - Friend Code Sharing System",
usage="<trainer name>",
help="Changes your main friend code to being the given one."
)
async def setmain_subcommand(self, ctx, identifier):
db = mysql()
# Remove any friend codes that the user has set as their main
query = """
UPDATE friend_codes
SET main = 0
WHERE user_id = %s;
"""
db.execute(query, [ctx.author.id])
# Then set the new one
query = """
UPDATE friend_codes
SET main = 1
WHERE user_id = %s
AND identifier = %s;
"""
db.execute(query, [ctx.author.id, identifier])
db.close()
await ctx.send(embed=lib.embedder.make_embed(
type="success",
title="Changed Main Friend Code",
content=f"Changed your main friend code to {identifier}."
))
@friendcode_group.command(
name="visibility",
aliases=["vis", "v"],
brief="Changes your friend code visibility.",
description="Cherubi Bot - Friend Code Sharing System",
usage="<public | private | hidden>",
help="This lets you change your visiblity to either public, private, \
or hidden depending what you want.\n\n\
Public: lets anyone on any server you're in to tag you and see your friend \
codes.\n\n\
Private: lets only your home server see your friend codes.\n\n\
Hidden: lets no one tag you to see your friend codes. You have to invoke \
!friendcode yourself for them to show."
)
async def visibility_subcommand(self, ctx, visibility=None):
# If they don't give a visibility, tell them what their current
# setting is
if not visibility:
db = mysql()
query = """
SELECT fc_visibility
FROM user_preferences
WHERE user_id = %s;
"""
results = db.query(query, [ctx.author.id])
db.close()
if not results:
visibility = "private"
else:
visibility = results[0]['fc_visibility']
await ctx.send(embed=lib.embedder.make_embed(
type="info",
title=f"Your F.C. Visibility",
content=f"Your friend code visibility is currently set to `{visibility.title()}`"
))
return
# Normalize it to all lowercase
visibility = visibility.lower()
# List of available visibility settings
visibility_settings = ["public", "private", "hidden"]
# Check if the given one is within the list. If not, spit out an
# error embed and return
if visibility not in visibility_settings:
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title=f"Error Changing F.C. Visibility",
content=f"{visibility.title()} is not a valid option."
))
return
db = mysql()
query = """
INSERT INTO user_preferences (user_id, fc_visibility)
VALUES (%s, %s)
ON DUPLICATE KEY UPDATE fc_visibility = VALUES(fc_visibility);
"""
db.execute(query, [ctx.author.id, visibility])
db.close()
await ctx.send(embed=lib.embedder.make_embed(
type="success",
title=f"Changed F.C. Visibility",
content=f"Changed your friend code visibility to `{visibility.title()}`."
))
def setup(client):
client.add_cog(FriendCode(client))
| python |
# -*- coding: utf-8 -*-
# Scrapy settings for telesurscraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
import os
# Database
PRISMA_ENDPOINT = os.getenv('PRISMA_ENDPOINT', 'http://localhost:4466/')
PRISMA_TOKEN = os.getenv('PRISMA_TOKEN')
# Tenant
SERVICE_ID = os.getenv('SERVICE_ID')
# Broadcast schedules
SCHEDULE_URL = os.getenv('SCHEDULE_URL')
SCHEDULE_TIMEZONE = os.getenv('SCHEDULE_TIMEZONE')
# Article listings
JSPLISTING_PAGE_SIZE = os.getenv('JSPLISTING_PAGE_SIZE')
JSPLISTING_MAX_PAGES = os.getenv('JSPLISTING_MAX_PAGES')
JSPLISTING_START_PAGE = os.getenv('JSPLISTING_START_PAGE')
JSPLISTING_URL = os.getenv('JSPLISTING_URL')
BOT_NAME = 'telesurscraper'
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'telesurscraper.pipelines.PrismaArticlePipeline': 300,
}
SPIDER_MODULES = ['telesurscraper.spiders']
NEWSPIDER_MODULE = 'telesurscraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'telesur (+https://www.telesurtv.net)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'telesurscraper.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'telesurscraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'telesurscraper.extensions.telnet.TelnetConsole': None,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| python |
t = int(input())
for q in range(t):
#n,k=input().split()
#n,k=int(n),int(k)
#n,m,k=input().split()
#n,m,k=int(n),int(m),int(k)
#n=int(input())
#n=int(input())
#arr=list(map(int,input().split()))
num=int(input())
n=num%8
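    # Berth numbers repeat in blocks of 8: within a block, berths 1 & 4, 2 & 5
    # and 3 & 6 are partners (3 apart), while 7 & 8 pair with each other; the
    # printed suffix is presumably the berth type (LB/MB/UB/SL/SU).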
if(n==0):
print(num-1,"SL",sep="")
elif(n==7):
print(num+1,"SU",sep="")
elif(n==1):
print(num+3,"LB",sep="")
elif(n==4):
print(num-3,"LB",sep="")
elif(n==2):
print(num+3,"MB",sep="")
elif(n==5):
print(num-3,"MB",sep="")
elif(n==3):
print(num+3,"UB",sep="")
elif(n==6):
print(num-3,"UB",sep="")
| python |
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
class Clipper(BaseEstimator, TransformerMixin):
def __init__(self, params = {}):
super().__init__()
self.name = self.__class__.__name__
self.params = params
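    # fit() below records, per feature, the largest finite value among the
    # 99th, 98th, ... percentiles and the smallest finite value among the
    # 1st, 2nd, ... percentiles, so that transform() can clip +/-inf and
    # extreme outliers to finite per-feature bounds.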
def fit(self, X, y = None):
self.min_max = {}
for feature in X:
max_value = X[feature].max()
min_value = X[feature].min()
#TODO: check which one is better
for i in range(99, 0, -1):
max_value = np.percentile(X[feature].dropna(), i)
if max_value != np.inf and not np.isnan(max_value):
break
for i in range(1, 100):
min_value = np.percentile(X[feature].dropna(), i)
if min_value != np.NINF and not np.isnan(min_value):
break
self.min_max[feature] = {'min_value': min_value, 'max_value': max_value}
# values_no_inf = X[feature].dropna()
# values_median = values_no_inf.median()
# values_no_inf[values_no_inf == np.inf] = values_median
# values_no_inf[values_no_inf == np.NINF] = values_median
# self.min_max[feature] = {'min_value': values_no_inf.min(), 'max_value': values_no_inf.max()}
return self
def transform(self, X):
new_features = pd.DataFrame()
for feature in X:
new_features[feature] = np.clip(X[feature], self.min_max[feature]['min_value'], self.min_max[feature]['max_value'])
return new_features | python |
# MIT License
#
# Copyright (c) 2017 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2017-10-19
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class LSTM(object):
def __init__(self, input_length, input_dim, num_hidden, num_classes, batch_size):
self._input_length = input_length
self._input_dim = input_dim
self._num_hidden = num_hidden
self._num_classes = num_classes
self._batch_size = batch_size
initializer_weights = tf.variance_scaling_initializer()
initializer_biases = tf.constant_initializer(0.0)
# Dim of [h_{t-1}, x_t]
self._gate_inputs_dim = self._input_dim + self._num_hidden
# Input data [time, batch_size, input_dim]
self.inputs = tf.placeholder(dtype=tf.float32,
shape=[self._input_length, self._batch_size, self._input_dim],
name='inputs')
# Targets [batch_size, output_dim]
self.labels = tf.placeholder(dtype=tf.float32,
shape=[self._batch_size, self._num_classes],
name='labels')
with tf.variable_scope('lstm_cell'):
# Forget gate
self._Wf = tf.get_variable(name='W_f', shape=(self._gate_inputs_dim, self._num_hidden), dtype=tf.float32,
initializer=initializer_weights)
self._bf = tf.get_variable(name='b_f', shape=(self._num_hidden), dtype=tf.float32,
initializer=initializer_biases)
# Input gate
self._Wi = tf.get_variable(name='W_i', shape=(self._gate_inputs_dim, self._num_hidden), dtype=tf.float32,
initializer=initializer_weights)
self._bi = tf.get_variable(name='b_i', shape=(self._num_hidden), dtype=tf.float32,
initializer=initializer_biases)
self._Wg = tf.get_variable(name='W_g', shape=(self._gate_inputs_dim, self._num_hidden), dtype=tf.float32,
initializer=initializer_weights)
self._bg = tf.get_variable(name='b_g', shape=(self._num_hidden), dtype=tf.float32,
initializer=initializer_biases)
# Output gate
self._Wo = tf.get_variable(name='W_o', shape=(self._gate_inputs_dim, self._num_hidden), dtype=tf.float32,
initializer=initializer_weights)
self._bo = tf.get_variable(name='b_o', shape=(self._num_hidden), dtype=tf.float32,
initializer=initializer_biases)
# inputs (h_{t-1}, x_t): [batch_size, self.input_dim + self.num_hidden)
# Use less matmul ops as specified by Zaremba et. al 2014: https://arxiv.org/pdf/1409.2329.pdf
# Order: input gate (sigmoid), new candidates (tanh), forget gate (sigmoid), output gate (sigmoid)
# dim: [input_dim + num_hidden, 4 * num_hidden]
self._weights = tf.concat([self._Wi, self._Wg, self._Wf, self._Wo], axis=1)
# dim: [4 * num_hidden]
self._biases = tf.concat([self._bi, self._bg, self._bf, self._bo], axis=0)
# Logits
with tf.variable_scope('logits'):
self._Wout = tf.get_variable(name='W_out', shape=(self._num_hidden, self._num_classes), dtype=tf.float32,
initializer=initializer_weights)
self._bout = tf.get_variable(name='b_out', shape=(self._num_classes), dtype=tf.float32,
initializer=initializer_biases)
self.logits_op = self.compute_logits()
self.loss_op = self.compute_loss()
self.accuracy_op = self.accuracy()
# self.confusion_matrix_op = self.confusion_matrix()
def _lstm_step(self, lstm_state_tuple, x_t):
"""
Performs a single LSTM step
        Use this function with tf.scan to unroll the network and perform inference over a sequence of inputs.
        Follows the convention of Zaremba et al. 2014: https://arxiv.org/pdf/1409.2329.pdf
        :param lstm_state_tuple: previous LSTM state tuple (c_{t-1}, h_{t-1})
        :param x_t: input for the current step from the previous (input) layer. [batch_size, input_dim]
        :return: LSTM state tuple for the current step (c_t, h_t)
"""
# unstack LSTM state (c, h) from prev time step
c_prev, h_prev = tf.unstack(lstm_state_tuple, axis=0)
# forward pass
_inpt = tf.concat([h_prev, x_t], axis=1)
# preactivations: input gate, new candidates, forget gate, output gate
_gates = tf.matmul(_inpt, self._weights) + self._biases
i, g, f, o = tf.split(value=_gates, num_or_size_splits=4, axis=1)
# Update cell state and hidden state
next_c = tf.sigmoid(i) * tf.tanh(g) + tf.sigmoid(f) * c_prev
next_h = tf.tanh(next_c) * tf.sigmoid(o)
next_state = tf.stack((next_c, next_h), axis=0)
return next_state
@staticmethod
def _zero_state(hidden_dim, batch_size, dtype=tf.float32):
"""
        Returns a zero initial state for the LSTM, stacking the cell state c and the hidden state h
        :param hidden_dim: number of hidden units, int
        :param batch_size: batch size, int
        :param dtype: data type, float32 by default
        :return: a zero tensor of shape [2, batch_size, hidden_dim]
"""
return tf.stack(values=(tf.zeros(shape=(batch_size, hidden_dim), dtype=dtype),
tf.zeros(shape=(batch_size, hidden_dim), dtype=dtype)), axis=0)
def _get_hidden_states(self):
"""
        Unrolls the RNN and computes the stacked LSTM states (c, h) for each timestep of the self.inputs placeholder
        :return: stacked states for each time step. Float [time, 2, batch_size, hidden_dim]
"""
return tf.scan(fn=lambda lstm_state_tuple, x: self._lstm_step(lstm_state_tuple=lstm_state_tuple, x_t=x),
elems=self.inputs,
initializer=self._zero_state(hidden_dim=self._num_hidden,
batch_size=self._batch_size,
dtype=tf.float32),
parallel_iterations=10,
name='hidden_states')
def compute_logits(self):
"""
Forward propagates inputs, computes hidden states and then computes the outputs (logits) from the last hidden state
:return: logits. Float [batch_size, output_dim]
"""
# [time, batch_size, hidden_dim]
hidden_states = self._get_hidden_states()
last_hidden_state = hidden_states[-1]
c, h = tf.unstack(last_hidden_state, axis=0)
# h{T} => p{T}
logits = tf.add(tf.matmul(h, self._Wout), self._bout, name='logits')
# tf.summary.histogram('logits', logits)
return logits
def compute_loss(self):
"""
Computes the cross-entropy loss using the internal variable _logits
:return: loss, scalar float
"""
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=self.labels,
logits=self.logits_op,
name='softmax_cross_entropy_loss'
)
loss = tf.reduce_mean(loss, name='mean_cross_entropy_loss')
tf.summary.scalar('mean cross-entropy loss', loss)
return loss
def accuracy(self):
"""
Computes the prediction accuracy, i.e. the average of correct predictions
of the network.
As in self.loss above, you can use tf.summary.scalar to save
scalar summaries of accuracy for later use with the TensorBoard.
Args:
logits: 2D float Tensor of size [batch_size, self.n_classes].
The predictions returned through self.inference.
labels: 2D int Tensor of size [batch_size, self.n_classes]
with one-hot encoding. Ground truth labels for
each sample in the batch.
Returns:
accuracy: scalar float Tensor, the accuracy of predictions,
i.e. the average correct predictions over the whole batch.
"""
# Implement the accuracy of predicting the
# last digit over the current batch ...
predictions = tf.argmax(input=self.logits_op, axis=1, name='label_predictions')
class_labels = tf.argmax(input=self.labels, axis=1)
accuracy = tf.to_float(tf.equal(predictions, class_labels))
accuracy = tf.reduce_mean(accuracy, name='accuracy')
tf.summary.scalar('accuracy', accuracy)
# tf.summary.histogram('label predictions', predictions)
return accuracy
def confusion_matrix(self):
predictions = tf.argmax(input=self.logits_op, axis=1)
class_labels = tf.argmax(input=self.labels, axis=1)
confusion_matrix = tf.contrib.metrics.confusion_matrix(
labels=class_labels,
predictions=predictions,
num_classes=10,
dtype=tf.int32,
name='confusion_matrix')
# tf.summary.image('confusion_matrix', tf.reshape(tf.cast(confusion_matrix, dtype=tf.float32), [1, self._num_classes, self._num_classes, 1]))
return confusion_matrix
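# The snippet below is a minimal usage sketch (not part of the original module), assuming
# TF 1.x session-style execution and synthetic data shaped to match the placeholders above;
# names such as `train_inputs` and the hyper-parameter values are illustrative only.
if __name__ == '__main__':
    import numpy as np
    model = LSTM(input_length=20, input_dim=8, num_hidden=64, num_classes=10, batch_size=32)
    # Single optimizer step on the mean cross-entropy loss defined by the class
    train_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(model.loss_op)
    train_inputs = np.random.randn(20, 32, 8).astype(np.float32)
    train_labels = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=32)]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        _, loss, acc = sess.run([train_step, model.loss_op, model.accuracy_op],
                                feed_dict={model.inputs: train_inputs,
                                           model.labels: train_labels})
        print('loss: %.4f, accuracy: %.4f' % (loss, acc))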
| python |
from .plot import Plot
import matplotlib.pyplot as plt
from .plot_funcs import average_traits
import numpy as np
class AverageTraitTime(Plot):
def __init__(self):
self.avgtraits = {}
def plot(self, game:"Game", file_path:str, height:int, width:int) -> None:
"""Plot the game information saving the plot to the given
file path
Parameters
----------
game: Game
The object that holds all information about the simulation.
file_path: str
The file path to save the plot to.
"""
traits = average_traits(game)
        for key in traits:
            if key not in self.avgtraits:
                self.avgtraits[key] = [[], [], []]
            self.avgtraits[key][0].append(traits[key][0])
            self.avgtraits[key][1].append(traits[key][1])
            self.avgtraits[key][2].append(traits[key][2])
# Create the figure before plotting and set all non-variable params
fig = plt.figure(figsize=(height/96 ,width/96),dpi=120)
ax = fig.add_axes([0.3,0.2,0.6,0.6])
ax.set_xlabel('Time Step')
ax.set_ylabel('Trait Averages')
ax.set_title('Traits over Time')
plt.ylim((0.0,1.0))
for key in self.avgtraits:
x_vals_e = np.arange(len(self.avgtraits[key][0]))
x_vals_sp = np.arange(len(self.avgtraits[key][1]))
x_vals_se = np.arange(len(self.avgtraits[key][2]))
ax.plot(x_vals_e, self.avgtraits[key][0], color='red', label=key + '_Energy')
ax.plot(x_vals_sp, self.avgtraits[key][1], color='green', label=key + '_Speed')
ax.plot(x_vals_se, self.avgtraits[key][2], color='blue', label=key + '_Sense')
ax.legend(fontsize=4)
plt.savefig(file_path,dpi=96)
plt.close(fig)
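# Minimal usage sketch (not part of the original module): assuming `game` is the
# simulation's Game object and `step()` is a hypothetical method that advances it,
# one frame per time step could be rendered like this:
#
#   plotter = AverageTraitTime()
#   for step in range(100):
#       game.step()
#       plotter.plot(game, f"plots/traits_{step:03d}.png", height=600, width=800)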
| python |
#!/usr/bin/env python
# Copyright 2011-2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
File: mp_objdump2mpt
This script processes an objdump output and generates the corresponding
mpt file.
"""
# Futures
from __future__ import absolute_import, print_function
# Built-in modules
import gzip
import struct
import sys
# Third party modules
import six
# Own modules
from microprobe.code.address import Address
from microprobe.code.ins import instruction_to_definition
from microprobe.target import import_definition
from microprobe.utils.cmdline import CLI, existing_file, int_type, \
new_file_ext, print_error, print_info, print_warning
from microprobe.utils.misc import open_generic_fd
from microprobe.utils.mpt import mpt_configuration_factory, \
mpt_parser_factory, variable_to_test_definition
from microprobe.utils.objdump import interpret_objdump
# Constants
# Functions
def dump_mpt(input_file_fd, target, arguments):
"""
:param input_file_fd:
:type input_file_fd:
:param target:
:type target:
:param arguments:
:type arguments:
"""
try:
contents = input_file_fd.read()
if six.PY3 and not isinstance(contents, str):
contents = contents.decode()
except KeyboardInterrupt:
print_info("No input data provided. Exiting...")
exit(1)
print_info("Parsing input file...")
print_info("Sections to parse: %s" % arguments['sections'])
var_defs, req_defs, instr_defs = \
interpret_objdump(contents, target,
strict=arguments.get('strict', False),
sections=arguments['sections'],
start_address=arguments['from_address'],
end_address=arguments['to_address'])
print_info("Input file parsed")
print_info(
"%d instructions processed from the input file" % len(instr_defs)
)
if var_defs != []:
print_info(
"Variables referenced and detected in the dump: %s" %
','.join([var.name for var in var_defs])
)
if req_defs != []:
print_warning(
"Variables referenced and *NOT* detected in the dump: %s" %
','.join([var.name for var in req_defs])
)
print_warning(
"You might need to edit the generated MPT to fix the"
" declaration of such variables"
)
print_info("Generating the MPT contents...")
mpt_config = mpt_configuration_factory()
if 'default_code_address' in arguments:
mpt_config.set_default_code_address(arguments['default_code_address'])
else:
mpt_config.set_default_code_address(instr_defs[0].address.displacement)
if 'default_data_address' in arguments:
mpt_config.set_default_data_address(arguments['default_data_address'])
else:
mpt_config.set_default_data_address(0)
if arguments.get('elf_abi', False):
kwargs = {}
if "stack_name" in arguments:
kwargs["stack_name"] = arguments["stack_name"]
if "stack_address" in arguments:
kwargs["stack_address"] = Address(
base_address="code",
displacement=arguments["stack_address"]
)
variables, instructions = target.elf_abi(
arguments["stack_size"], arguments.get(
"start_symbol", None
), **kwargs
)
for variable in variables:
req_defs.append(variable_to_test_definition(variable))
address = instr_defs[0].address
for instr in reversed(instructions):
instr_defs = [instruction_to_definition(instr)] + instr_defs
address -= instr.architecture_type.format.length
if address.displacement < 0:
print_error(
"Default code address is below zero after"
" adding the initialization code."
)
print_error(
"Check/modify the objdump provided or do not use"
" the elf_abi flag."
)
exit(-1)
mpt_config.set_default_code_address(address.displacement)
instr = None
if "end_branch_to_itself" in arguments:
instr = target.branch_to_itself()
elif arguments.get('elf_abi', False):
instr = target.nop()
if instr is not None:
instr.set_label("ELF_ABI_EXIT")
instr_defs.append(instruction_to_definition(instr))
for var in var_defs + req_defs:
mpt_config.register_variable_definition(var)
mpt_config.register_instruction_definitions(instr_defs)
print_info("Dumping MPT to '%s'" % arguments['output_mpt_file'])
mpt_parser = mpt_parser_factory()
mpt_parser.dump_mpt_config(mpt_config, arguments['output_mpt_file'])
# Main
def main():
"""
Program main
"""
args = sys.argv[1:]
cmdline = CLI(
"Microprobe Objdump to MPT tool",
default_config_file="mp_objdump2mpt.cfg",
force_required=['target']
)
groupname = "Objdump to MPT arguments"
cmdline.add_group(
groupname, "Command arguments related to Objdump to MPT tool"
)
cmdline.add_option(
"input-objdump-file",
"i",
None,
"Objdump file to process, if not provided, the input is read from"
" standard input",
group=groupname,
opt_type=existing_file,
required=False
)
cmdline.add_option(
"output-mpt-file",
"O",
None,
"Output file name",
group=groupname,
opt_type=new_file_ext(".mpt"),
required=True
)
cmdline.add_flag(
"strict",
"S",
"Be strict when parsing objdump input, if not set, silently skip "
"unparsed elements",
group=groupname
)
cmdline.add_option(
"sections",
"s", ['.text'],
"Space separated CODE section names to interpret. "
"(default: '.text' section)",
group=groupname,
nargs='+',
required=False
)
cmdline.add_option(
"from-address",
"f",
0x0,
"If set, start interpreting from this address",
group=groupname,
opt_type=int_type(0, float('+inf')),
required=False
)
cmdline.add_option(
"to-address",
"t",
float('+inf'),
"If set, end interpreting at this address",
group=groupname,
opt_type=int_type(0, float('+inf')),
required=False
)
cmdline.add_option(
"default-code-address",
"X",
None,
"Default code address",
group=groupname,
opt_type=int_type(0, float('+inf')),
required=False
)
cmdline.add_option(
"default-data-address",
"D",
None,
"Default data address",
group=groupname,
opt_type=int_type(0, float('+inf')),
required=False
)
cmdline.add_flag(
"elf-abi",
None,
"Ensure ELF Application Binary Interface (e.g. define stack, stack"
" pointer, etc.)",
group=groupname
)
cmdline.add_option(
"stack-size",
None,
4096,
"Stack size in bytes (Default: 4096)",
group=groupname,
opt_type=int_type(0, float('+inf')),
required=False
)
cmdline.add_option(
"stack-name",
None,
None,
"Stack name (Default: microprobe_stack)",
group=groupname,
opt_type=str,
required=False
)
cmdline.add_option(
"stack-address",
None,
None,
"Stack address (Default: allocated in the data area)",
group=groupname,
opt_type=int_type(0, float('+inf')),
required=False
)
cmdline.add_option(
"start-symbol",
None,
None,
"Symbol to call after initializing the stack. If not specified, "
"no call is performed",
group=groupname,
opt_type=str,
required=False
)
cmdline.add_flag(
"end-branch-to-itself",
None,
"A branch to itself instruction will be added at the end of the test",
group=groupname
)
print_info("Processing input arguments...")
cmdline.main(args, _main)
def _main(arguments):
"""
Program main, after processing the command line arguments
:param arguments: Dictionary with command line arguments and values
:type arguments: :class:`dict`
"""
print_info("Arguments processed!")
print_info("Importing target definition...")
target = import_definition(arguments['target'])
if "input_objdump_file" in arguments:
print_info("Input file provided")
file_fd = open_generic_fd(arguments["input_objdump_file"], 'r')
else:
print_info("No input file provided, reading from standard input... ")
file_fd = sys.stdin
dump_mpt(file_fd, target, arguments)
if __name__ == '__main__': # run main if executed from the command line
# and the main method exists
if callable(locals().get('main')):
main()
exit(0)
| python |
"""Rx Workshop: Observables versus Events.
Part 2 - Dispose Example.
Usage:
python wksp3.py
"""
from __future__ import print_function
import rx
class Program:
"""Main Class.
"""
@staticmethod
def main():
"""Main Method.
"""
subject = rx.subjects.Subject()
subscription = subject.subscribe(lambda x: print(x))
subject.on_next(42)
subscription.dispose()
subject.on_next(43)
if __name__ == '__main__':
Program.main()
| python |
from __future__ import print_function
import numpy as np
import testing as tm
import unittest
import pytest
import xgboost as xgb
try:
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import scale
from regression_test_utilities import run_suite, parameter_combinations
except ImportError:
    pass
def is_float(s):
try:
float(s)
return 1
except ValueError:
return 0
def xgb_get_weights(bst):
return np.array([float(s) for s in bst.get_dump()[0].split() if
is_float(s)])
def assert_regression_result(results, tol):
regression_results = [r for r in results if
r["param"]["objective"] == "reg:squarederror"]
for res in regression_results:
X = scale(res["dataset"].X,
with_mean=isinstance(res["dataset"].X, np.ndarray))
y = res["dataset"].y
reg_alpha = res["param"]["alpha"]
reg_lambda = res["param"]["lambda"]
pred = res["bst"].predict(xgb.DMatrix(X))
weights = xgb_get_weights(res["bst"])[1:]
enet = ElasticNet(alpha=reg_alpha + reg_lambda,
l1_ratio=reg_alpha / (reg_alpha + reg_lambda))
enet.fit(X, y)
enet_pred = enet.predict(X)
assert np.isclose(weights, enet.coef_, rtol=tol,
atol=tol).all(), (weights, enet.coef_)
assert np.isclose(enet_pred, pred, rtol=tol, atol=tol).all(), (
res["dataset"].name, enet_pred[:5], pred[:5])
# TODO: More robust classification tests
def assert_classification_result(results):
classification_results = [r for r in results if
r["param"]["objective"] != "reg:squarederror"]
for res in classification_results:
# Check accuracy is reasonable
assert res["eval"][-1] < 0.5, (res["dataset"].name, res["eval"][-1])
class TestLinear(unittest.TestCase):
datasets = ["Boston", "Digits", "Cancer", "Sparse regression",
"Boston External Memory"]
@pytest.mark.skipif(**tm.no_sklearn())
def test_coordinate(self):
variable_param = {'booster': ['gblinear'], 'updater':
['coord_descent'], 'eta': [0.5], 'top_k':
[10], 'tolerance': [1e-5], 'nthread': [2],
'alpha': [.005, .1], 'lambda': [.005],
'feature_selector': ['cyclic', 'shuffle',
'greedy', 'thrifty']}
for param in parameter_combinations(variable_param):
results = run_suite(param, 150, self.datasets, scale_features=True)
assert_regression_result(results, 1e-2)
assert_classification_result(results)
@pytest.mark.skipif(**tm.no_sklearn())
def test_shotgun(self):
variable_param = {'booster': ['gblinear'], 'updater':
['shotgun'], 'eta': [0.5], 'top_k': [10],
'tolerance': [1e-5], 'nthread': [2],
'alpha': [.005, .1], 'lambda': [.005],
'feature_selector': ['cyclic', 'shuffle']}
for param in parameter_combinations(variable_param):
results = run_suite(param, 200, self.datasets, True)
assert_regression_result(results, 1e-2)
assert_classification_result(results)
| python |
import PIL
print(PIL.PILLOW_VERSION)
import load_data
from load_data import *
import gc
import matplotlib.pyplot as plt
from torch import autograd
import patch_config
plt.rcParams["axes.grid"] = False
plt.axis('off')
img_dir = "inria/Train/pos"
lab_dir = "inria/Train/pos/yolo-labels"
cfgfile = "cfg/yolov2.cfg"
weightfile = "weights/yolov2.weights"
printfile = "non_printability/30values.txt"
patch_size = 300
mode = "exp1"
config = patch_config.patch_configs[mode]()
print('LOADING MODELS')
darknet_model = Darknet(cfgfile)
darknet_model.load_weights(weightfile)
darknet_model = darknet_model.eval().cuda()
patch_applier = PatchApplier().cuda()
patch_transformer = PatchTransformer().cuda()
prob_extractor = MaxProbExtractor(0, 80, config).cuda()
nps_calculator = NPSCalculator(printfile, patch_size)
nps_calculator = nps_calculator.cuda()
total_variation = TotalVariation().cuda()
print('MODELS LOADED')
img_size = darknet_model.height
batch_size = 6 # 10#18
n_epochs = 10000
max_lab = 14
# Choose between initializing with gray or random
adv_patch_cpu = torch.full((3, patch_size, patch_size), 0.5)
# adv_patch_cpu = torch.rand((3,patch_size,patch_size))
patch_img = Image.open("saved_patches/patchnew0.jpg").convert('RGB')
tf = transforms.Resize((patch_size, patch_size))
patch_img = tf(patch_img)
tf = transforms.ToTensor()
adv_patch_cpu = tf(patch_img)
adv_patch_cpu.requires_grad_(True)
print('INITIALIZING DATALOADER')
train_loader = torch.utils.data.DataLoader(InriaDataset(img_dir, lab_dir, max_lab, img_size, shuffle=True),
batch_size=batch_size,
shuffle=True,
num_workers=10)
print('DATALOADER INITIALIZED')
optimizer = optim.Adam([adv_patch_cpu], lr=.03, amsgrad=True)
# try:
et0 = time.time()
for epoch in range(n_epochs):
ep_det_loss = 0
bt0 = time.time()
for i_batch, (img_batch, lab_batch) in enumerate(train_loader):
with autograd.detect_anomaly():
img_batch = img_batch.cuda()
lab_batch = lab_batch.cuda()
# print('TRAINING EPOCH %i, BATCH %i'%(epoch, i_batch))
adv_patch = adv_patch_cpu.cuda()
adv_batch_t = patch_transformer(adv_patch, lab_batch, img_size, do_rotate=True)
p_img_batch = patch_applier(img_batch, adv_batch_t)
p_img_batch = F.interpolate(p_img_batch, (darknet_model.height, darknet_model.width))
output = darknet_model(p_img_batch)
max_prob = prob_extractor(output)
nps = nps_calculator(adv_patch)
tv = total_variation(adv_patch)
det_loss = torch.mean(max_prob)
ep_det_loss += det_loss.detach().cpu().numpy()
'''
nps_loss = nps
tv_loss = tv*8
loss = nps_loss + (det_loss**3/tv_loss + tv_loss**3/det_loss)**(1/3)
loss.backward()
optimizer.step()
optimizer.zero_grad()
adv_patch_cpu.data.clamp_(0,1) #keep patch in image range
'''
nps_loss = nps * 0.01
tv_loss = tv * 2.5
loss = det_loss + nps_loss + tv_loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
adv_patch_cpu.data.clamp_(0, 1) # keep patch in image range
bt1 = time.time()
if i_batch % 5 == 0:
print('BATCH', i_batch, end='...\n')
im = transforms.ToPILImage('RGB')(adv_patch_cpu)
plt.imshow(im)
plt.show()
'''
print(' BATCH NR: ', i_batch)
print('BATCH LOSS: ', loss.detach().cpu().numpy())
print(' DET LOSS: ', det_loss.detach().cpu().numpy())
print(' NPS LOSS: ', nps_loss.detach().cpu().numpy())
print(' TV LOSS: ', tv_loss.detach().cpu().numpy())
print('BATCH TIME: ', bt1-bt0)
'''
if i_batch + 1 >= len(train_loader):
print('\n')
else:
del adv_batch_t, output, max_prob, det_loss, p_img_batch, nps_loss, tv_loss, loss
torch.cuda.empty_cache()
bt0 = time.time()
et1 = time.time()
ep_det_loss = ep_det_loss / len(train_loader)
ep_nps_loss = nps_loss.detach().cpu().numpy()
ep_tv_loss = tv_loss.detach().cpu().numpy()
tot_ep_loss = ep_det_loss + ep_nps_loss + ep_tv_loss
if True:
print(' EPOCH NR: ', epoch),
print('EPOCH LOSS: ', tot_ep_loss)
print(' DET LOSS: ', ep_det_loss)
print(' NPS LOSS: ', ep_nps_loss)
print(' TV LOSS: ', ep_tv_loss)
print('EPOCH TIME: ', et1 - et0)
im = transforms.ToPILImage('RGB')(adv_patch_cpu)
plt.imshow(im)
plt.show()
im.save("saved_patches/patchnew1.jpg")
del adv_batch_t, output, max_prob, det_loss, p_img_batch, nps_loss, tv_loss, loss
torch.cuda.empty_cache()
et0 = time.time() | python |
import time
import os
import getopt
import sys
import datetime
import numpy as np
from milvus import *
import config
import logging
import random
milvus = Milvus()
def is_normalized():
    # NL_FOLDER_NAME is expected to come from config (assumption), like the other folder settings
    filenames = os.listdir(config.NL_FOLDER_NAME)
    filenames.sort()
    vectors = load_vec_list(config.NL_FOLDER_NAME + '/' + filenames[0])
    for i in range(10):
        sqrt_sum = np.sum(np.power(vectors[i], 2))
        print(sqrt_sum)
def connect_server():
try:
status = milvus.connect(host=config.MILVUS_HOST, port=config.MILVUS_PORT)
# print(status)
except Exception as e:
logging.error(e)
def build_collection(collection_name,it):
connect_server()
if it == 'flat':
index_type = IndexType.FLAT
index_param = {'nlist': config.NLIST}
elif it == 'ivf_flat':
index_type = IndexType.IVF_FLAT
index_param = {'nlist': config.NLIST}
elif it == 'sq8':
index_type = IndexType.IVF_SQ8
index_param = {'nlist': config.NLIST}
elif it == 'sq8h':
index_type = IndexType.IVF_SQ8H
index_param = {'nlist': config.NLIST}
elif it == 'pq':
index_type = IndexType.IVF_PQ
index_param = {'nlist': config.NLIST, 'm':config.PQ_M}
elif it == 'nsg':
index_type = IndexType.RNSG
index_param = {'search_length': config.SEARCH_LENGTH, 'out_degree':config.OUT_DEGREE, 'candidate_pool_size':config.CANDIDATE_POOL, 'knng':config.KNNG}
elif it == 'hnsw':
index_type = IndexType.HNSW
index_param = {'M': config.HNSW_M, 'efConstruction':config.EFCONSTRUCTION}
else:
print("error index_type, only support these index: flat, ivf_flat, sq8, sq8h, pq, nsg, hnsw")
print("please try again!")
sys.exit(2)
print(collection_name, " ", index_type, " ", index_param)
status = milvus.create_index(collection_name,index_type,index_param)
print(status)
def search(collection_name,search_param):
connect_server()
performance_file = config.PERFORMANCE_FILE_NAME
nq_scope = config.nq_scope
topk_scope = config.topk_scope
if not os.path.exists(performance_file):
os.mkdir(performance_file)
filename = performance_file + '/' + collection_name + '_' + str(search_param) + '_performance.csv'
search_params = get_search_params(collection_name,search_param)
with open(filename,'w+') as f:
f.write("nq,topk,total_time,avg_time"+'\n')
for nq in nq_scope:
time_start = time.time()
query_list = load_nq_vec(nq)
print("load query:", len(query_list), "time_load = ", time.time() - time_start)
for topk in topk_scope:
time_start = time.time()
status,result = milvus.search(collection_name=collection_name, query_records=query_list, top_k=topk, params=search_params)
time_cost = time.time() - time_start
line = str(nq) + ',' + str(topk) + ',' + str(round(time_cost, 4)) + ',' + str(round(time_cost / nq, 4)) + '\n'
f.write(line)
print(nq, topk, time_cost)
f.write('\n')
# file.close()
print("search_vec_list done !")
def get_search_params(collection_name,search_param):
index_type = str(milvus.describe_index(collection_name)[1]._index_type)
if index_type == 'RNSG':
search_params = {'search_length':search_param}
elif index_type == 'HNSW':
        search_params = {'ef': search_param}
else:
search_params = {'nprobe': search_param}
return search_params
def load_nq_vec(nq):
vectors = []
length = 0
filenames = os.listdir(config.NQ_FOLDER_NAME)
filenames.sort()
for filename in filenames:
vec_list = load_vec_list(config.NQ_FOLDER_NAME + '/' + filename)
length += len(vec_list)
if length > nq:
num = nq % len(vec_list)
vec_list = vec_list[0:num]
vectors += vec_list
if len(vectors) == nq:
return vectors
def load_vec_list(file_name):
if config.IS_CSV:
import pandas as pd
data = pd.read_csv(file_name, header=None)
data = np.array(data)
else:
data = np.load(file_name)
# if config.IS_UINT8:
# data = (data + 0.5) / 255
vec_list = data.tolist()
return vec_list
def recall_test(collection_name,search_param):
connect_server()
vectors = load_vec_list(config.recall_vec_fname)
# for nq in config.nq_scope:
nq = config.recall_nq
query_list = []
rand = sorted(random.sample(range(0, len(vectors)), nq))
for i in rand:
query_list.append(vectors[i])
# print("load query:", len(query_list))
search_params = get_search_params(collection_name,search_param)
print("collection name:", collection_name, "query list:", len(query_list), "topk:", config.recall_topk, "search_params:", search_params)
time_start = time.time()
status, results = milvus.search_vectors(collection_name=collection_name, query_records=query_list, top_k=config.recall_topk, params=search_params)
# time_end = time.time()
time_cost = time.time() - time_start
print("time_search = ", time_cost)
save_re_to_file(collection_name, rand, results, search_param,nq)
compute_recall(collection_name,nq,results,search_param,rand)
def save_re_to_file(collection_name, rand, results, search_param, nq):
if not os.path.exists(config.recall_res_fname):
os.mkdir(config.recall_res_fname)
file_name = config.recall_res_fname + '/' + collection_name + '_' + str(search_param) + '_' + str(nq) + '_recall.txt'
with open(file_name, 'w') as f:
for i in range(len(results)):
for j in range(len(results[i])):
line = str(rand[i]) + ' ' + str(results[i][j].id) + ' ' + str(results[i][j].distance)
f.write(line + '\n')
f.write('\n')
f.close()
def compute_recall(collection_name,nq,results,search_param,rand):
ids = []
# dis = []
for nq_result in (results):
temp = []
for result in (nq_result):
temp.append(result.id)
ids.append(temp)
gt_ids = load_gt_ids()
for top_k in config.compute_recall_topk:
recalls, count_all = compare_correct(nq, top_k, rand, gt_ids, ids)
fname = config.recall_out_fname+ '/' + collection_name + '_' + str(search_param) + '_' + str(nq) + "_" + str(top_k) + ".csv"
with open(fname,'w') as f:
f.write('nq,topk,recall\n')
for i in range(nq):
line = str(i + 1) + ',' + str(top_k) + ',' + str(recalls[i] * 100) + "%"
f.write(line + '\n')
f.write("max, avarage, min\n")
f.write( str(max(recalls) * 100) + "%," + str(round(count_all / nq / top_k, 3) * 100) + "%," + str(min(recalls) * 100) + "%\n")
print("top_k=", top_k, ", total accuracy", round(count_all / nq / top_k, 3) * 100, "%")
def load_gt_ids():
file_name = config.GT_FNAME_NAME
gt_ids = []
result = []
with open(file_name, 'r') as f:
for line in f.readlines():
data = line.split()
if data:
result.append(int(data[0]))
else:
gt_ids.append(result)
result = []
return gt_ids
def compare_correct(nq, top_k, rand, gt_ids, ids):
recalls = []
count_all = 0
for i in range(nq):
milvus_results = []
ground_truth = []
for j in range(top_k):
milvus_results.append(ids[i][j])
ground_truth.append(gt_ids[int(rand[i])][j])
# ground_truth += gt_ids[int(rand[i * top_k]) * config.ground_truth_topk + j]
# print(milvus_results)
# print(ground_truth)
union = list(set(milvus_results).intersection(set(ground_truth)))
recalls.append(len(union) / top_k)
count_all += len(union)
# print("topk_ground_truth:", topk_ground_truth)
return recalls, count_all | python |
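# Typical flow (sketch, with a placeholder collection name and search parameter;
# the collection itself is assumed to already exist and contain vectors):
#
#   build_collection('sift_1m', 'ivf_flat')   # build an IVF_FLAT index on the collection
#   search('sift_1m', 32)                     # performance sweep with nprobe=32
#   recall_test('sift_1m', 32)                # recall against the ground-truth file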
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QDateTime, QTimer
# from openssl_lib import OpenSSLLib
from .set_csr import SetCSRView
class CSRData:
def __init__(self):
self.country_name = ''
self.state_name = ''
self.locality_name = ''
self.org_name = ''
self.org_unit_name = ''
self.common_name = ''
self.email = ''
class MainView(QMainWindow):
def __init__(self):
super().__init__()
# UI Component Init
self.pfx_path = QLineEdit()
self.crt_path = QLineEdit()
self.key_path = QLineEdit()
self.cert_contents = QTextEdit()
# Variable
self.csr_data = CSRData()
self.datetime = QDateTime.currentDateTime()
self.datetime_label = ''
self.init_ui()
def init_ui(self):
self.init_menu_bar()
self.init_widget()
# Status Bar #
self.set_current_time()
qtimer = QTimer(self)
qtimer.timeout.connect(self.set_current_time)
qtimer.start(1000)
# Window #
self.setWindowTitle('Certificates Tool(Developed by [email protected])')
self.resize(700, 600)
self.move_to_center()
self.setWindowIcon(QIcon('./image/icon.png'))
self.show()
def set_current_time(self):
current_date = QDateTime.currentDateTime()
self.datetime_label = f"Date : {current_date.toString('yyyy-MM-dd HH:mm:ss')}"
self.statusBar().showMessage(self.datetime_label)
def init_menu_bar(self):
# Top Menu Init #
exit_action = QAction('Exit', self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Exit application')
exit_action.triggered.connect(qApp.quit)
menu_bar = self.menuBar()
menu_bar.setNativeMenuBar(False)
file_menu = menu_bar.addMenu('&File')
file_menu.addAction(exit_action)
return
def init_widget(self):
self.setCentralWidget(QWidget())
cw = self.centralWidget()
grid = QGridLayout()
cw.setLayout(grid)
grid.addWidget(self.create_csr_group_layout(), 0, 0, 1, 6)
grid.addWidget(QLabel('PFX file : '), 1, 0, 1, 1)
grid.addWidget(QLabel('Crt file : '), 2, 0, 1, 1)
grid.addWidget(QLabel('Key file : '), 3, 0, 1, 1)
grid.addWidget(QLabel('Content : '), 4, 0, 1, 1)
self.pfx_path.setReadOnly(True)
self.crt_path.setReadOnly(True)
self.key_path.setReadOnly(True)
self.cert_contents.setReadOnly(True)
grid.addWidget(self.pfx_path, 1, 1, 1, 4)
grid.addWidget(self.crt_path, 2, 1, 1, 4)
grid.addWidget(self.key_path, 3, 1, 1, 4)
grid.addWidget(self.cert_contents, 6, 1, 1, 4)
        pfx_file_btn = QPushButton('File Select', self)
        pfx_file_btn.clicked.connect(self.onclick_pfx_file_open_btn)
crt_file_btn = QPushButton('File Select', self)
crt_file_btn.clicked.connect(self.onclick_crt_file_open_btn)
key_file_btn = QPushButton('File Select', self)
key_file_btn.clicked.connect(self.onclick_key_file_open_btn)
grid.addWidget(pfx_file_btn, 1, 5, 1, 1)
grid.addWidget(crt_file_btn, 2, 5, 1, 1)
grid.addWidget(key_file_btn, 3, 5, 1, 1)
return
def create_csr_group_layout(self):
groupbox = QGroupBox('CSR Setting')
hbox = QHBoxLayout()
set_csr_btn = QPushButton('Set CSR Attributes')
set_csr_btn.clicked.connect(self.onclick_set_csr_btn)
hbox.addWidget(set_csr_btn)
save_csr_btn = QPushButton('Save CSR')
hbox.addWidget(save_csr_btn)
groupbox.setLayout(hbox)
return groupbox
def move_to_center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def onclick_set_csr_btn(self):
set_csr_view = SetCSRView()
res = set_csr_view.show_modal()
if res:
self.csr_data.country_name = set_csr_view.country_name.text()
self.csr_data.state_name = set_csr_view.state_name.text()
self.csr_data.locality_name = set_csr_view.locality_name.text()
self.csr_data.org_name = set_csr_view.org_name.text()
self.csr_data.org_unit_name = set_csr_view.org_unit_name.text()
self.csr_data.common_name = set_csr_view.common_name.text()
self.csr_data.email = set_csr_view.email.text()
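    # Handler added so the PFX 'File Select' button fills pfx_path (assumption: it should
    # mirror the crt/key handlers below instead of reusing onclick_crt_file_open_btn).
    def onclick_pfx_file_open_btn(self):
        file_name = QFileDialog.getOpenFileName(self)
        if file_name[0]:
            self.pfx_path.setText(file_name[0])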
def onclick_crt_file_open_btn(self):
file_name = QFileDialog.getOpenFileName(self)
if file_name[0]:
self.crt_path.setText(file_name[0])
f = open(file_name[0], 'r')
with f:
data = f.read()
self.cert_contents.setText(data)
def onclick_key_file_open_btn(self):
file_name = QFileDialog.getOpenFileName(self)
if file_name[0]:
self.key_path.setText(file_name[0])
f = open(file_name[0], 'r')
with f:
data = f.read()
self.cert_contents.setText(data)
| python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Accelerating.
Provide auto accelerating for network, such as Less BN, Gradient Freeze.
"""
from .acc import *
from .base import *
from .less_batch_normalization import *
from .grad_freeze import *
__all__ = ['AutoAcc',
'OptimizerProcess', 'ParameterProcess',
'LessBN',
'GradientFreeze', 'FreezeOpt', 'freeze_cell',
'GradientAccumulation']
| python |
#Python program for continuous and discrete sine wave plot
import numpy as np
import scipy as sy
from matplotlib import pyplot as plt
t = np.arange(0,1,0.01)
#frequency = 2 Hz
f = 2
#Amplitude of sine wave = 1
a = np.sin(2*np.pi*f*t)
#Plot a continuous sine wave
fig, axs = plt.subplots(1,2)
axs[0].plot(t,a)
#Give a title for the sine wave
axs[0].set_title('Continuous Sine wave')
#X-axis label
axs[0].set(xlabel='Time')
#Y-axis label
axs[0].set(ylabel='Amplitude')
axs[0].grid(True, which='both')
axs[0].axhline(y=0, color='k')
axs[1].plot(t,a,'--r')
#Give a title for the sine wave
axs[1].set_title('Discrete Sine wave')
#X-axis label
axs[1].set(xlabel='Time')
#Y-axis label
axs[1].set(ylabel='Amplitude')
axs[1].grid(True, which='both')
axs[1].axhline(y=0, color='k')
#Display the sine wave
plt.show()
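# Alternative sketch for the discrete panel (assumption: a stem plot reads more clearly
# as a discrete-time signal than a dashed line):
#
#   axs[1].stem(t, a, linefmt='r-', markerfmt='ro', basefmt='k-')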
| python |
"""
Module containing NHL game objects
"""
from dataclasses import dataclass
from .flyweight import Flyweight
from .list import List
from .gameinfo import GameInfo
from .team import Team
from .venue import Venue
@dataclass(frozen=True)
class Game(Flyweight):
"""
NHL game object.
This is the detailed docstring.
"""
__slots__ = ["info", "home", "away", "players", "events"]
_instances = {}
info: GameInfo
"""GameInfo: Game info"""
home: Team
"""Team: Game home"""
away: Team
"""Team: Game away"""
players: List
"""List: """
events: List
"""List: """
@classmethod
def _key(cls, info, *args, **kwargs):
return info.id
@classmethod
def has_key(cls, id):
return super().has_key(id)
@classmethod
def from_key(cls, id):
return super().from_key(id)
def __repr__(self):
return "<nhl.Game: {}, {} ({}) at ({}) {}, {}, ID {}>".format(self.info.description, self.away.abbreviation, self.info.score[1], self.info.score[0], self.home.abbreviation, self.info.date, self.info.id)
# return "<nhl.Game: {} at {}, ID {}>".format(self.away.abbreviation, self.home.abbreviation, self.id)
@property
def skaters(self):
return self.players.filter("player.position", "G", "!=")
@property
def forwards(self):
return self.players.filter("player.position", ["LW", "C", "RW"], "in")
@property
def defensemen(self):
return self.players.filter("player.position", "D")
@property
def goalies(self):
return self.players.filter("player.position", "G")
| python |
#!/usr/bin/env python
"""Base class for model elements."""
from __future__ import annotations
import logging
import uuid
from typing import TYPE_CHECKING, Callable, Iterator, Protocol, TypeVar, overload
from gaphor.core.modeling.event import ElementUpdated
from gaphor.core.modeling.properties import (
attribute,
relation_many,
relation_one,
umlproperty,
)
if TYPE_CHECKING:
from gaphor.core.modeling.coremodel import Comment
from gaphor.core.modeling.diagram import Diagram
from gaphor.core.modeling.presentation import Presentation
__all__ = ["Element"]
log = logging.getLogger(__name__)
class UnlinkEvent:
"""Used to tell event handlers this element should be unlinked."""
def __init__(self, element: Element, diagram: Diagram | None = None):
self.element = element
self.diagram = diagram
Id = str
class Element:
"""Base class for all model data classes."""
note: attribute[str]
appliedStereotype: relation_many[Element]
comment: relation_many[Comment]
directedRelationship: relation_many[Presentation]
ownedElement: relation_many[Element]
owner: relation_one[Element]
presentation: relation_many[Presentation]
relationship: relation_many[Presentation]
ownedDiagram: relation_many[Diagram]
def __init__(self, id: Id | None = None, model: RepositoryProtocol | None = None):
"""Create an element. As optional parameters an id and model can be
given.
Id is a serial number for the element. The default id is None and will
result in an automatic creation of an id. An existing id (such as an
int or string) can be provided as well.
A model can be provided to refer to the model this element belongs to.
"""
self._id: Id = id or str(uuid.uuid1())
# The model this element belongs to.
self._model = model
self._unlink_lock = 0
@property
def id(self) -> Id:
"Id"
return self._id
@property
def model(self) -> RepositoryProtocol:
"""The owning model, raises AssertionError when model is not set."""
assert (
self._model
), "You can not retrieve the model since it's not set on construction"
return self._model
@classmethod
def umlproperties(class_) -> Iterator[umlproperty]:
"""Iterate over all properties."""
umlprop = umlproperty
for propname in dir(class_):
if not propname.startswith("_"):
prop = getattr(class_, propname)
if isinstance(prop, umlprop):
yield prop
def save(self, save_func):
"""Save the state by calling save_func(name, value)."""
for prop in self.umlproperties():
prop.save(self, save_func)
def load(self, name, value):
"""Loads value in name.
Make sure that for every load postload() should be called.
"""
prop = getattr(type(self), name)
prop.load(self, value)
def __str__(self):
return f"<{self.__class__.__module__}.{self.__class__.__name__} element {self._id}>"
__repr__ = __str__
def postload(self):
"""Fix up the odds and ends."""
for prop in self.umlproperties():
prop.postload(self)
def unlink(self):
"""Unlink the element. All the elements references are destroyed.
The unlink lock is acquired while unlinking this elements
properties to avoid recursion problems.
"""
self.inner_unlink(UnlinkEvent(self))
def inner_unlink(self, unlink_event):
if self._unlink_lock:
return
try:
self._unlink_lock += 1
for prop in self.umlproperties():
prop.unlink(self)
log.debug("unlinking %s", self)
self.handle(unlink_event)
self._model = None
finally:
self._unlink_lock -= 1
def handle(self, event):
"""Propagate incoming events."""
model = self._model
if model:
model.handle(event)
def watcher(self, default_handler: Handler | None = None) -> EventWatcherProtocol:
model = self._model
if model:
return model.watcher(self, default_handler)
else:
return DummyEventWatcher()
def isKindOf(self, class_: type[Element]) -> bool:
"""Returns true if the object is an instance of `class_`."""
return isinstance(self, class_)
def isTypeOf(self, other: Element) -> bool:
"""Returns true if the object is of the same type as other."""
return isinstance(self, type(other))
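# Minimal usage sketch (illustrative, outside the original module): an Element can be
# created without a model; the model-backed behaviour (handle/watcher) requires a
# RepositoryProtocol implementation such as Gaphor's ElementFactory (assumed here).
#
#   e = Element()
#   print(e.id)     # auto-generated uuid1 string
#   e.unlink()      # destroys all property references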
class DummyEventWatcher:
def watch(self, path: str, handler: Handler | None = None) -> DummyEventWatcher:
return self
def unsubscribe_all(self) -> None:
pass
T = TypeVar("T", bound=Element)
Handler = Callable[[ElementUpdated], None]
class RepositoryProtocol(Protocol):
def create(self, type: type[T]) -> T:
...
def create_as(self, type: type[T], id: str) -> T:
...
@overload
def select(self, expression: Callable[[Element], bool]) -> Iterator[Element]:
...
@overload
def select(self, expression: type[T]) -> Iterator[T]:
...
@overload
def select(self, expression: None) -> Iterator[Element]:
...
def lookup(self, id: str) -> Element | None:
...
def watcher(
self, element: Element, default_handler: Handler | None = None
) -> EventWatcherProtocol:
...
def handle(self, event: object) -> None:
...
class EventWatcherProtocol(Protocol):
def watch(self, path: str, handler: Handler | None = None) -> EventWatcherProtocol:
...
def unsubscribe_all(self) -> None:
...
| python |
#!/usr/bin/env python
"""
@script: DeployerApp.py
@purpose: Deployer for HomeSetup
@created: Nov 12, 2019
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
@mailto: [email protected]
@site: https://github.com/yorevs/homesetup
@license: Please refer to <https://opensource.org/licenses/MIT>
"""
# @verified versions: ???
import sys
from Versioner import Versioner
from GitUtils import GitUtils
from DocBuilder import Readme
from os import path, environ
from getopt import getopt
APP_NAME = path.basename(__file__)
# Version tuple: (major, minor, build)
APP_VERSION = (0, 9, 0)
# Usage message
APP_USAGE = """
Deployer for HomeSetup
Usage: {} [reset,build,minor,major]
""".format(APP_NAME)
# @purpose: Display the usage message and exit with the specified code ( or zero as default )
def usage(exit_code=0):
print(APP_USAGE)
quit_app(exit_code)
# @purpose: Display the current program version and exit
def version():
print('{} v{}.{}.{}'.format(APP_NAME, APP_VERSION[0], APP_VERSION[1], APP_VERSION[2]))
quit_app(0)
# @purpose: Quit the app.
def quit_app(exit_code=0, exit_message=''):
print(exit_message)
sys.exit(exit_code)
# @purpose: Parse the command line arguments and execute the program accordingly.
def main(argv):
if len(argv) > 0 and argv[0] in ['-h', '--help']:
usage()
elif len(argv) > 0 and argv[0] in ['-v', '--version']:
version()
opts, args = getopt(argv, 'hv', ['help', 'version'])
for opt, args in opts:
if opt in ('-h', '--help'):
usage()
elif opt in ('-v', '--version'):
version()
# print("--- VersionUtils ---")
# ver_field = 'patch' if len(argv) < 1 else argv[0].strip().lower()
# # ver_file = environ['HHS_HOME'] + '/.VERSION'
# ver_file = '../samples/.VERSION'
# ver = Versioner(ver_field, ver_file)
# print('Current version: {}\n'.format(ver.current()))
# ver.update_build()
# ver.update_version()
# print('After increase build version: {}\n'.format(ver.current()))
# ver.update_minor()
# ver.update_version()
# print('After increase build minor: {}\n'.format(ver.current()))
# ver.update_major()
# ver.update_version()
# print('After increase build major: {}\n'.format(ver.current()))
print("--- GitUtils ---")
# print("TopLevelDir: {}".format(GitUtils.top_level_dir()))
# print("CurrentBranch: {}".format(GitUtils.current_branch()))
# print("GitUserName: {}\n".format(GitUtils.username()))
# print("v1.3.0 Released at {}\n".format(GitUtils.release_date("v1.3.0")))
# print("Unreleased: ---- Current ---- \n{}\n".format(GitUtils.unreleased()))
print("ChangeLog: ---- v1.3.0 ---- \n{}\n".format(GitUtils.changelog("v1.3.0", "v1.4.0")))
# print("ChangeLog: ---- v1.2.0 ---- \n{}\n".format(GitUtils.changelog("v1.2.0", "v1.3.0")))
# print("ChangeLog: ---- v1.1.0 ---- \n{}\n".format(GitUtils.changelog("v1.1.0", "v1.2.0")))
# print("ChangeLog: ---- v1.0.0 ---- \n{}\n".format(GitUtils.changelog("v1.0.0", "v1.1.0")))
# print("ChangeLog: ---- v0.9.0 ---- \n{}\n".format(GitUtils.changelog("v0.9.0", "v1.0.0")))
print("--- DocUtils ---")
doc = Readme()
print(doc)
# Program entry point.
if __name__ == '__main__':
main(sys.argv[1:])
quit_app(0)
| python |
import numpy as np
from deep500.lv0.operators.operator_interface import CustomPythonOp
from deep500.frameworks.reference.custom_operators.python.conv_op_common import get_pad_shape, get_output_shape, get_fullconv_pad_shape, crosscorrelation, crosscorrelation_dilx_flipw, crosscorrelation_swap_axes
from deep500 import TensorDescriptor
class ConvOp(CustomPythonOp):
def __init__(
self,
input_descriptors,
output_descriptors,
auto_pad='NOTSET',
dilations=None,
group=1,
kernel_shape=None,
pads=None,
strides=None):
super(ConvOp, self).__init__(input_descriptors, output_descriptors)
self._input_desc = input_descriptors
self._output_desc = output_descriptors
self.auto_pad = auto_pad
self.kernel_shape = kernel_shape
#default values if not specified
temp_dilations = []
temp_pads = []
temp_strides = []
for i in range(0, len(kernel_shape)):
temp_dilations.append(1)
temp_pads.append(0)
temp_pads.append(0)
temp_strides.append(1)
self.dilations = temp_dilations if dilations is None else dilations
self.group = group
self.pads = temp_pads if pads is None else pads
self.strides = temp_strides if strides is None else strides
def forward(self, X, W, B=None):
if B is None:
#optional input B is not given:
B = np.zeros(W.shape[0], dtype=W.dtype)
if self.kernel_shape is None:
self.kernel_shape = W.shape[2:]
input_spatial_shape = X.shape[2:]
if self.auto_pad != 'NOTSET':
out_shape = get_output_shape(
self.auto_pad,
X.shape[2:],
self.kernel_shape,
self.dilations,
self.strides
)
else:
out_shape = [0] * len(input_spatial_shape)
for i in range(len(input_spatial_shape)):
'''
caffe implementation:
_const int input_dim = this->input_shape(i + 1);
const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1; // actual kernel size
const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent)
/ stride_data[i] + 1;
'''
out_shape[i] = int(
np.floor(
float(
input_spatial_shape[i] + \
self.pads[i] + \
self.pads[i + len(self.kernel_shape)] - \
(self.dilations[i] * (self.kernel_shape[i] - 1) + 1)
) / \
float(
self.strides[i]
)
) + 1
)
pad_shape = get_pad_shape(
self.auto_pad,
X.shape[2:],
self.kernel_shape,
self.dilations,
self.strides,
out_shape
)
pads_computed_before = [] #top, left, ...
pads_computed_after = [] #bottom, right, ...
if self.auto_pad == 'SAME_UPPER':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(pad_shape[i] // 2)
pads_computed_after.append(pad_shape[i] - (pad_shape[i] // 2))
elif self.auto_pad == 'SAME_LOWER':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(pad_shape[i] - (pad_shape[i] // 2))
pads_computed_after.append(pad_shape[i] // 2)
elif self.auto_pad == 'VALID':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(0)
pads_computed_after.append(0)
elif self.auto_pad == 'NOTSET':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(self.pads[i])
pads_computed_after.append(self.pads[i + len(self.kernel_shape)])
pad_shape[i] = self.pads[i] + self.pads[i + len(self.kernel_shape)]
return crosscorrelation(
input_spatial_shape,
self.kernel_shape,
self.group,
self.dilations,
self.strides,
pads_computed_before,
out_shape,
X,
W,
B)
def backward(self, grads, fwd_inputs, fwd_outputs):
X = fwd_inputs[0]
W = fwd_inputs[1]
Y = fwd_outputs[0]
grad_Y = grads[0]
if len(fwd_inputs) < 3:
B = np.zeros(fwd_inputs[1].shape[0], dtype=W.dtype)
else:
B = fwd_inputs[2]
grad_X = np.zeros(X.shape, dtype=X.dtype)
grad_W = np.zeros(W.shape, dtype=W.dtype)
#compute pads used in forward:
pad_shape = get_pad_shape(
self.auto_pad,
X.shape[2:],
self.kernel_shape,
self.dilations,
self.strides,
Y.shape
)
pads_computed_before = [] #top, left, ...
pads_computed_after = [] #bottom, right, ...
if self.auto_pad == 'SAME_UPPER':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(pad_shape[i] // 2)
pads_computed_after.append(pad_shape[i] - (pad_shape[i] // 2))
elif self.auto_pad == 'SAME_LOWER':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(pad_shape[i] - (pad_shape[i] // 2))
pads_computed_after.append(pad_shape[i] // 2)
elif self.auto_pad == 'VALID':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(0)
pads_computed_after.append(0)
elif self.auto_pad == 'NOTSET':
for i in range(0, len(X.shape) - 2 ):
pads_computed_before.append(self.pads[i])
pads_computed_after.append(self.pads[i + len(self.kernel_shape)])
pad_shape[i] = self.pads[i] + self.pads[i + len(self.kernel_shape)]
#in order to compute input gradient note:
#pad for 'full convolution'
#convolution (crosscorrelation )X * W = Y where W is flipped
#X = grad_Y
#W = W
#dilate W tensor with dilations
#dilate X tensor with strides
#no bias
#compute pads for full convolution
fullconv_pads_before, fullconv_pads_after = get_fullconv_pad_shape(
self.kernel_shape,
self.dilations,
self.strides)
for i in range(len(self.kernel_shape)):
fullconv_pads_before[i] -= pads_computed_before[i]
fullconv_pads_after[i] -= pads_computed_after[i]
#compute input gradient
grad_X = crosscorrelation_dilx_flipw(
grad_Y.shape,
self.kernel_shape,
self.group,
self.dilations,
[1, 1, 1],
fullconv_pads_before,
X.shape[2:],
grad_Y,
W,
self.strides
)
#in order to compute weight gradient note:
#swap dilations and strides:
temp_dilations = list(self.strides)
temp_strides = list(self.dilations)
#compute weight gradient, don't use bias
grad_W = crosscorrelation_swap_axes(
X.shape[2:],
Y.shape[2:],
self.group,
temp_dilations,
temp_strides,
pads_computed_before,
W.shape[2:],
X,
grads[0],
)
grad_X = np.reshape(grad_X, X.shape)
grad_W = np.reshape(grad_W, W.shape)
if len(fwd_inputs) > 2:
#compute bias gradient
grad_B = grad_Y
for i in range(2, len(Y.shape)):
grad_B = np.sum(grad_B, axis=2)
grad_B = np.sum(grad_B, axis=0)
return [grad_X, grad_W, grad_B]
else:
return [grad_X, grad_W] | python |
"""CelebA data-module."""
from typing import Any
import albumentations as A
import attr
from pytorch_lightning import LightningDataModule
from ranzen import implements
from conduit.data.datamodules.base import CdtDataModule
from conduit.data.datamodules.vision.base import CdtVisionDataModule
from conduit.data.datasets.vision.celeba import CelebA, CelebASplit, CelebAttr
from conduit.data.structures import TrainValTestSplit
__all__ = ["CelebADataModule"]
@attr.define(kw_only=True)
class CelebADataModule(CdtVisionDataModule):
"""Data-module for the CelebA dataset."""
image_size: int = 224
superclass: CelebAttr = CelebAttr.Smiling
subclass: CelebAttr = CelebAttr.Male
use_predefined_splits: bool = False
@implements(LightningDataModule)
def prepare_data(self, *args: Any, **kwargs: Any) -> None:
CelebA(root=self.root, download=True)
@property # type: ignore[misc]
@implements(CdtVisionDataModule)
def _default_train_transforms(self) -> A.Compose:
base_transforms = A.Compose(
[
A.Resize(self.image_size, self.image_size),
A.CenterCrop(self.image_size, self.image_size),
]
)
normalization = super()._default_train_transforms
return A.Compose([base_transforms, normalization])
@property # type: ignore[misc]
@implements(CdtVisionDataModule)
def _default_test_transforms(self) -> A.Compose:
return self._default_train_transforms
@implements(CdtDataModule)
def _get_splits(self) -> TrainValTestSplit:
# Split the data according to the pre-defined split indices
if self.use_predefined_splits:
train_data, val_data, test_data = (
CelebA(root=self.root, superclass=self.superclass, transform=None, split=split)
for split in CelebASplit
)
# Split the data randomly according to test- and val-prop
else:
all_data = CelebA(root=self.root, superclass=self.superclass, transform=None)
val_data, test_data, train_data = all_data.random_split(
props=(self.val_prop, self.test_prop)
)
return TrainValTestSplit(train=train_data, val=val_data, test=test_data)
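# Usage sketch (assumption: `root`, batch-size settings and the setup/dataloader plumbing
# come from the CdtVisionDataModule/LightningDataModule base classes):
#
#   dm = CelebADataModule(root="data", superclass=CelebAttr.Smiling, subclass=CelebAttr.Male)
#   dm.prepare_data()
#   dm.setup()
#   train_loader = dm.train_dataloader()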
| python |
import pytest
def test_repr(module):
v = module.Dict({"x": module.Int(min=0, max=100)}, nullable=True)
assert repr(v) == (
"<Dict(schema=frozendict({'x': <Int(min=0, max=100)>}), nullable=True)>"
)
v = module.Dict({"x": module.LazyRef("foo")})
assert repr(v) == "<Dict(schema=frozendict({'x': <LazyRef(use='foo')>}))>"
def test_load_dump(module):
data = {
"__class__": "Dict",
"schema": {
"x": {"__class__": "Int", "min": 0, "max": 10},
"y": {
"__class__": "List",
"item": {"__class__": "Int", "options": {1, 2, 3}},
"nullable": True,
},
},
"extra": [{"__class__": "Str"}, {"__class__": "Str"}],
}
v1 = module.Validator.load(data)
assert isinstance(v1, module.Dict)
assert isinstance(v1.schema["x"], module.Int)
assert isinstance(v1.schema["y"], module.List)
assert isinstance(v1.schema["y"].item, module.Int)
assert isinstance(v1.extra, tuple)
assert isinstance(v1.extra[0], module.Str)
assert isinstance(v1.extra[1], module.Str)
assert v1.schema["x"].min == 0
assert v1.schema["x"].max == 10
assert v1.schema["y"].nullable is True
assert v1.schema["y"].item.options == frozenset([1, 2, 3])
assert v1.dump() == data
def test_clone(module):
v = module.Int()
assert v.clone(nullable=True) == module.Int(nullable=True)
v = module.Dict({"x": module.Int()})
assert v.clone({"schema.x.nullable": True}) == (
module.Dict({"x": module.Int(nullable=True)})
)
v = module.Int(min=0, max=100)
assert v.clone({"-": ["min", "max"], "+": {"nullable": True}}) == (
module.Int(nullable=True)
)
v = module.Int(options=[1, 2, 3])
assert v.clone({"options+": [4, 5], "options-": [1, 2]}) == (
module.Int(options=[3, 4, 5])
)
v = module.Dict({"x": module.Int(options=[1, 2, 3])})
assert v.clone({"schema.x.options+": [4, 5], "schema.x.options-": [1, 2]}) == (
module.Dict({"x": module.Int(options=[3, 4, 5])})
)
v = module.OneOf(module.Int(), module.Float())
assert v.clone({"steps+": [module.Str()], "steps-": [module.Float()]}) == (
module.OneOf(module.Int(), module.Str())
)
# fmt: off
assert v.clone(
{
"steps+": [{"__class__": "Str"}],
"steps-": [{"__class__": "Float"}],
}
) == module.OneOf(module.Int(), module.Str())
# fmt: on
v = module.Dict({"x": module.Int()})
with pytest.raises(KeyError) as info:
v.clone({"schema-": ["y"]})
assert info.value.args == ("'y' is not in dict at 'schema'",)
v = module.Dict({"x": module.Int(options=[1, 2, 3])})
with pytest.raises(KeyError) as info:
v.clone({"schema.x.options-": [4]})
assert info.value.args == ("4 is not in set at 'schema.x.options'",)
v = module.Dict({"x": module.OneOf(module.Int(), module.Float())})
with pytest.raises(ValueError) as info:
v.clone({"schema.x.steps-": [module.Str()]})
assert info.value.args == ("<Str()> is not in list at 'schema.x.steps'",)
v = module.Dict({"x": module.Int()})
with pytest.warns(DeprecationWarning) as record:
assert v.clone(update={"/schema/x": {"nullable": True}}) == (
module.Dict({"x": module.Int(nullable=True)})
)
assert len(record) == 1
assert record[0].message.args[0] == (
"This syntax is deprecated. Consider to use 'schema.x+' instead."
)
v = module.Dict({"x": module.Int(options=[1, 2, 3])})
with pytest.warns(DeprecationWarning) as record:
assert v.clone(unset={"/schema/x/options": [3]}) == (
module.Dict({"x": module.Int(options=[1, 2])})
)
assert len(record) == 1
assert record[0].message.args[0] == (
"This syntax is deprecated. Consider to use 'schema.x.options-' instead "
"and place it into update param."
)
def test_alias(module):
v1 = module.Int(alias="foo")
assert module.instances.get("foo") is v1
with pytest.raises(AssertionError):
module.Str(alias="foo")
v2 = module.Str(alias="foo", replace=True)
assert module.instances.get("foo") is v2
assert module.Validator.load({"__use__": "foo"}) is v2
v3 = module.Validator.load({"__clone__": "foo", "update": {"nullable": True}})
assert v3 is not v2
assert isinstance(v3, module.Str)
assert v3.nullable is True
| python |
"""
Copyright 2021 Dynatrace LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional, Dict, List, Any
from requests import Response
from dynatrace.dynatrace_object import DynatraceObject
from dynatrace.http_client import HttpClient
class DeploymentService:
ENDPOINT_INSTALLER_AGENT = "/api/v1/deployment/installer/agent"
ENDPOINT_INSTALLER_GATEWAY = "/api/v1/deployment/installer/gateway"
ENDPOINT_BOSHRELEASE = "/api/v1/deployment/boshrelease"
ENDPOINT_LAMBDA = "/api/v1/deployment/lambda/agent/latest"
ENDPOINT_ORCHESTRATION = "/api/v1/deployment/orchestration/agent"
def __init__(self, http_client: HttpClient):
self.__http_client = http_client
def get_agent_installer_latest_metainfo(
self, os_type: str, installer_type: str, flavor: Optional[str] = None, arch: Optional[str] = None, bitness: Optional[str] = None
) -> "InstallerMetaInfoDto":
"""Returns the OneAgent version of the installer of the specified type.
Non-required parameters are only applicable to the paas and paas-sh installer types.
:param os_type: The operating system of the installer. Use one of: windows, unix, aix, solaris
:param installer_type: The type of installer. Use one of:
- default: Self-extracting installer for manual installation. Downloads an .exe file for Windows or an .sh file for Unix.
- paas: Code modules installer. Downloads a *.zip archive, containing the manifest.json file with meta information or a .jar file for z/OS.
- paas-sh: Code modules installer. Downloads a self-extracting shell script with the embedded tar.gz archive. \n
:param flavor: (only for paas and paas-sh) the flavor of your Linux distribution. Use one of:
- musl: for Linux distributions, which are using the musl C standard library, for example Alpine Linux.
- multidistro: for all Linux distributions which are using musl C and glibc standard library. \n
:param arch: (only for paas and paas-sh) the architecture of your OS. Use one of:
- all: Use this value for AIX and z/OS. Defaults to x86 for other OS types.
- x86: x86 architecture.
- ppc: PowerPC architecture, only supported for AIX and Linux.
- ppcle: PowerPC Little Endian architecture, only supported for Linux.
- sparc: Sparc architecture, only supported for Solaris.
- arm: ARM architecture, only supported for Linux.
- s390: S/390 architecture, only supported for Linux. \n
:param bitness: (only for paas and paas-sh) the bitness of your OS. Must be supported by the OS. Use one of:
- 32
- 64
- all \n
:returns InstallerMetaInfo: the latest version of the installer of that type
"""
params = {"flavor": flavor, "arch": arch, "bitness": bitness}
response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/{os_type}/{installer_type}/latest/metainfo", params=params)
return InstallerMetaInfoDto(raw_element=response.json())
def get_agent_installer(
self,
os_type: str,
installer_type: str,
version: str = "latest",
flavor: Optional[str] = None,
arch: Optional[str] = None,
bitness: Optional[str] = None,
include: Optional[List[str]] = None,
skip_metadata: Optional[bool] = None,
network_zone: Optional[str] = None,
if_none_match: Optional[str] = None,
) -> "Response":
"""Downloads OneAgent installer of the specified version.
        The installer is available in the "content" attribute of the response.
:param os_type: The operating system of the installer. Use one of: windows, unix, aix, solaris
:param installer_type: The type of installer. Use one of:
- default: Self-extracting installer for manual installation. Downloads an .exe file for Windows or an .sh file for Unix.
- paas: Code modules installer. Downloads a *.zip archive, containing the manifest.json file with meta information or a .jar file for z/OS.
- paas-sh: Code modules installer. Downloads a self-extracting shell script with the embedded tar.gz archive. \n
:param version: The exact version of the OneAgent installer. If none is provided, latest available is used.
:param flavor: (only for paas and paas-sh) the flavor of your Linux distribution. Use one of:
- musl: for Linux distributions, which are using the musl C standard library, for example Alpine Linux.
- multidistro: for all Linux distributions which are using musl C and glibc standard library. \n
:param arch: (only for paas and paas-sh) the architecture of your OS. Use one of:
- all: Use this value for AIX and z/OS. Defaults to x86 for other OS types.
- x86: x86 architecture.
- ppc: PowerPC architecture, only supported for AIX and Linux.
- ppcle: PowerPC Little Endian architecture, only supported for Linux.
- sparc: Sparc architecture, only supported for Solaris.
- arm: ARM architecture, only supported for Linux.
- s390: S/390 architecture, only supported for Linux. \n
:param bitness: (only for paas and paas-sh) the bitness of your OS. Must be supported by the OS. Use one of:
- 32
- 64
- all \n
:param include: (only for paas and paas-sh) the code modules to be included to the installer (e.g. ['java', 'apache'])
:param skip_metadata: (only for paas and paas-sh) set true to omit the OneAgent connectivity information from the installer.
:param network_zone: the network zone you want the result to be configured with.
:param if_none_match: The ETag of the previous request. Do not download if it matches the ETag of the installer.
The ETag is available in the headers of the response.
:returns Response: HTTP Response to the request. Can be written to file from the "content" attribute.
"""
if version != "latest":
version = "version/" + version
params = {
"flavor": flavor,
"arch": arch,
"bitness": bitness,
"include": "&include=".join(include) if include else None,
"skipMetadata": skip_metadata,
"networkZone": network_zone,
}
headers = {"If-None-Match": if_none_match} if if_none_match else None
return self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/{os_type}/{installer_type}/{version}", params=params, headers=headers)
def get_agent_installer_connection_info(self, network_zone: Optional[str] = "default", version: Optional[str] = None) -> "ConnectionInfo":
"""Gets the connectivity information for OneAgent.
:param network_zone: The network zone you want the result to be configured with.
:param version: The version of the OneAgent to which the result will be applied.
:returns ConnectionInfo: connectivity information
"""
params = {"networkZone": network_zone, "version": version}
response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/connectioninfo", params=params)
return ConnectionInfo(raw_element=response.json())
def get_agent_installer_connection_endpoints(self, network_zone: Optional[str] = "default") -> str:
"""Gets the list of the ActiveGate-Endpoints to be used for Agents.
        Ordered by network zone priority: highest priority first, separated by semicolons.
Responds with 404 if network zone is not known.
:param network_zone: The network zone you want the result to be configured with.
:returns str: ActiveGate Endpoints separated by semicolons
"""
params = {"networkZone": network_zone}
return self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/connectioninfo/endpoints", params=params).text
def list_agent_installer_versions(
self, os_type: str, installer_type: str, flavor: Optional[str] = None, arch: Optional[str] = None
) -> "AgentInstallerVersions":
"""Lists all available versions of OneAgent installer
:param os_type: The operating system of the installer. Use one of: windows, unix, aix, solaris
:param installer_type: The type of installer. Use one of:
- default: Self-extracting installer for manual installation. Downloads an .exe file for Windows or an .sh file for Unix.
- paas: Code modules installer. Downloads a *.zip archive, containing the manifest.json file with meta information or a .jar file for z/OS.
- paas-sh: Code modules installer. Downloads a self-extracting shell script with the embedded tar.gz archive. \n
:param flavor: (only for paas and paas-sh) the flavor of your Linux distribution. Use one of:
- musl: for Linux distributions, which are using the musl C standard library, for example Alpine Linux.
- multidistro: for all Linux distributions which are using musl C and glibc standard library. \n
:param arch: (only for paas and paas-sh) the architecture of your OS. Use one of:
- all: Use this value for AIX and z/OS. Defaults to x86 for other OS types.
- x86: x86 architecture.
- ppc: PowerPC architecture, only supported for AIX and Linux.
- ppcle: PowerPC Little Endian architecture, only supported for Linux.
- sparc: Sparc architecture, only supported for Solaris.
- arm: ARM architecture, only supported for Linux.
- s390: S/390 architecture, only supported for Linux. \n
:returns AgentInstallerVersions: list of available versions of the OneAgent installer
"""
params = {"flavor": flavor, "arch": arch}
response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/versions/{os_type}/{installer_type}", params=params)
return AgentInstallerVersions(raw_element=response.json())
def get_gateway_installer_connection_info(self, network_zone: Optional[str] = "default") -> "ActiveGateConnectionInfo":
"""Gets the connectivity information for Environment ActiveGate.
:param network_zone: The network zone you want the result to be configured with.
:returns ActiveGateConnectionInfo: connectivity information
"""
params = {"networkZone": network_zone}
response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_GATEWAY}/connectioninfo", params=params)
return ActiveGateConnectionInfo(raw_element=response.json())
def list_gateway_installer_versions(self, os_type: str) -> "ActiveGateInstallerVersions":
"""Lists all available versions of ActiveGate installer.
:param os_type: The operating system of the installer. Use one of:
- windows
- unix
:returns ActiveGateInstallerVersions: all available versions of the installer
"""
response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_GATEWAY}/versions/{os_type}")
return ActiveGateInstallerVersions(raw_element=response.json())
def get_gateway_installer(self, os_type: str, version: str = "latest", if_none_match: Optional[str] = None) -> "Response":
"""Downloads the configured standard ActiveGate installer.
:param os_type: The operating system of the installer. Use one of:
- windows
- unix \n
:param version: The required version of the ActiveGate installer, in 1.155.275.20181112-084458 format.
If none is specified, latest available version is used.
:param if_none_match: The ETag of the previous request. Do not download if it matches the ETag of the installer.
The ETag is available in the headers of the response.
:returns Response: HTTP Response to the request. Can be written to file from the "content" attribute.
"""
if version != "latest":
version = "version/" + version
headers = {"If-None-Match": if_none_match} if if_none_match else None
return self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_GATEWAY}/{os_type}/{version}", headers=headers)
def list_boshrelease_agent_versions(self, os_type: str) -> "BoshReleaseAvailableVersions":
"""Lists available OneAgent versions for BOSH release tarballs.
:param os_type: The operating system of the installer. Use one of:
- windows
- unix \n
:returns BoshReleaseAvailableVersions: available versions
"""
response = self.__http_client.make_request(path=f"{self.ENDPOINT_BOSHRELEASE}/versions/{os_type}")
return BoshReleaseAvailableVersions(raw_element=response.json())
def get_boshrelease_agent_checksum(
self, os_type: str, version: str, skip_metadata: Optional[bool] = None, network_zone: Optional[str] = None
) -> "BoshReleaseChecksum":
"""Gets the checksum of the specified BOSH release tarball.
        The checksum is the sha256 hash of the installer file. For SaaS, this only works on environment ActiveGates version 1.176 or higher.
:param os_type: The operating system of the installer. Use one of:
- windows
- unix \n
:param version: The required version of the OneAgent in the 1.155.275.20181112-084458 format.
:param skip_metadata: Set true to omit the OneAgent connectivity information from the installer. If not set, false is used.
:param network_zone: The network zone you want the result to be configured with.
:returns BoshReleaseChecksum: checksum of the BOSH release tarball
"""
params = {"skipMetadata": skip_metadata, "networkZone": network_zone}
response = self.__http_client.make_request(path=f"{self.ENDPOINT_BOSHRELEASE}/agent/{os_type}/version/{version}/checksum", params=params)
return BoshReleaseChecksum(raw_element=response.json())
def get_boshrelease_agent(self, os_type: str, version: str, skip_metadata: Optional[bool] = None, network_zone: Optional[str] = None) -> "Response":
"""Downloads the BOSH release tarballs of the specified version, OneAgent included.
For SaaS, the call is executed on an Environment ActiveGate. *Be sure to use the base URL of an ActiveGate, not the environment*
:param os_type: The operating system of the installer. Use one of:
- windows
- unix \n
:param version: The required version of the OneAgent in the 1.155.275.20181112-084458 format.
:param skip_metadata: Set true to omit the OneAgent connectivity information from the installer. If not set, false is used.
:param network_zone: The network zone you want the result to be configured with.
:returns Response: HTTP Response to the request. Can be written to file from the "content" attribute.
"""
params = {"skipMetadata": skip_metadata, "networkZone": network_zone}
return self.__http_client.make_request(path=f"{self.ENDPOINT_BOSHRELEASE}/agent/{os_type}/version/{version}", params=params)
def get_lambda_agent_versions(self) -> "LatestLambdaLayerNames":
"""Get the latest version names of the OneAgent for AWS Lambda.
        Version names are returned for the Java, Node.js, and Python AWS Lambda runtimes.
:returns LatestLambdaLayerNames: version names
"""
return LatestLambdaLayerNames(raw_element=self.__http_client.make_request(path=f"{self.ENDPOINT_LAMBDA}").json())
def get_orchestration_agent(self, orchestration_type: str, version: str = "latest") -> "Response":
"""Downloads the OneAgent deployment orchestration tarball.
:param orchestration_type: The Orchestration Type of the orchestration deployment script. Use one of:
- ansible
- puppet \n
:param version: The requested version of the OneAgent orchestration deployment tarball in 0.1.0.20200925-120822 format.
If none is provided, the latest available is used.
:returns Response: HTTP Response to the request. Can be written to file from the "content" attribute.
"""
if version != "latest":
version = "version/" + version
return self.__http_client.make_request(path=f"{self.ENDPOINT_ORCHESTRATION}/{orchestration_type}/{version}")
def get_orchestration_agent_signature(self, orchestration_type: str, version: str = "latest") -> "Response":
""" ""Downloads the signature matching the OneAgent deployment orchestration tarball.
:param orchestration_type: The Orchestration Type of the orchestration deployment script. Use one of:
- ansible
- puppet \n
:param version: The requested version of the OneAgent orchestration deployment tarball in 0.1.0.20200925-120822 format.
If none is provided, the latest available is used.
:returns Response: HTTP Response to the request. Can be written to file from the "content" attribute.
"""
if version != "latest":
version = "version/" + version
return self.__http_client.make_request(path=f"{self.ENDPOINT_ORCHESTRATION}/{orchestration_type}/{version}/signature")
class ConnectionInfo(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.tenant_uuid: str = raw_element["tenantUUID"]
self.tenant_token: str = raw_element["tenantToken"]
self.communication_endpoints: List[str] = raw_element.get("communicationEndpoints", [])
self.formatted_communication_endpoints: str = raw_element["formattedCommunicationEndpoints"]
class InstallerMetaInfoDto(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.latest_agent_version: str = raw_element["latestAgentVersion"]
class AgentInstallerVersions(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.available_versions: List[str] = raw_element["availableVersions"]
class ActiveGateConnectionInfo(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.tenant_uuid: str = raw_element["tenantUUID"]
self.tenant_token: str = raw_element["tenantToken"]
self.communication_endpoints: str = raw_element["communicationEndpoints"]
class ActiveGateInstallerVersions(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.available_versions: List[str] = raw_element["availableVersions"]
class BoshReleaseChecksum(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.sha_256: str = raw_element["sha256"]
class BoshReleaseAvailableVersions(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.available_versions: List[str] = raw_element["availableVersions"]
class LatestLambdaLayerNames(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.java: str = raw_element["java"]
self.python: str = raw_element["python"]
self.nodejs: str = raw_element["nodejs"]
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the main basic Camera interface
This defines the main basic camera interface from which all other interfaces that use a camera inherit.
"""
from pyrobolearn.tools.interfaces.interface import InputInterface
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "[email protected]"
__status__ = "Development"
class CameraInterface(InputInterface):
r"""Camera Interface.
    This is the abstract Camera Interface class, which is inherited by all the interfaces that use cameras,
such as webcams, kinects, asus xtion, etc.
"""
def __init__(self, use_thread=False, sleep_dt=0, verbose=False):
"""
Initialize the camera input interface.
Args:
            use_thread (bool): If True, it will run the interface in a thread separate from the main one.
The interface will update its data automatically.
sleep_dt (float): If :attr:`use_thread` is True, it will sleep the specified amount before acquiring or
setting the next sample.
            verbose (bool): If True, it will print information about the state of the interface. What exactly is
                printed is left to the programmer.
"""
super(CameraInterface, self).__init__(use_thread=use_thread, sleep_dt=sleep_dt, verbose=verbose)
self.frame = None
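# Minimal subclass sketch (illustrative only, not part of PyRoboLearn): a concrete camera interface
# is expected to fill `self.frame`. The OpenCV capture and the `step()` hook name used below are
# assumptions about how a webcam interface could be written.
#
#   import cv2
#
#   class WebcamInterface(CameraInterface):
#       def __init__(self, camera_id=0, use_thread=False, sleep_dt=0, verbose=False):
#           super(WebcamInterface, self).__init__(use_thread=use_thread, sleep_dt=sleep_dt,
#                                                 verbose=verbose)
#           self.capture = cv2.VideoCapture(camera_id)
#
#       def step(self):
#           ret, frame = self.capture.read()
#           if ret:
#               self.frame = frame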
| python |
# Kenny Sprite Sheet Slicer
# KennySpriteSlice.py
# Copyright Will Blankenship 2015
# This will attempt to correctly slice sprite sheets from the Kenny Donation Collection
import xml.etree.ElementTree
from PIL import Image
import shutil
import os
from .Sprite import Sprite
from .Error import Error
from .SpriteMetaFileData import create_meta_file
# Parse a .xml file that includes the sprite map information
def parse_xml(format_file, image_height):
sprites = []
for texture in xml.etree.ElementTree.parse(format_file).getroot().iter('SubTexture'):
sprite = Sprite(texture.attrib['name'].replace('.png', ''),
texture.attrib['x'],
texture.attrib['y'],
texture.attrib['width'],
texture.attrib['height'])
sprite.reverse_y(image_height)
sprites.append(sprite)
return sprites
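# Example of the XML layout parse_xml() expects (sketch inferred from the parsing code above;
# the root tag name and attribute values are illustrative):
#
#   <TextureAtlas imagePath="sheet.png">
#       <SubTexture name="ship.png" x="0" y="0" width="32" height="32"/>
#   </TextureAtlas>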
# Parse a .txt file that includes the sprite map information
def parse_text(format_file, image_height):
sprites = []
with open(format_file) as ff:
for line in ff:
name, x, y, width, height = line.replace(' =', '').split(' ')
sprite = Sprite(name, x, y, width, height.replace('\n', ''))
sprite.reverse_y(image_height)
sprites.append(sprite)
return sprites
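# Example of a .txt format line parse_text() expects (inferred from the parsing code above):
#
#   ship = 0 0 32 32
#
# i.e. "<name> = <x> <y> <width> <height>", one sprite per line.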
def kenny_sprite_slicer():
sprites = []
sprite_sheet = input('Where is the sprite sheet: ').replace('"', '').strip()
# Get image height
image_height = Image.open(sprite_sheet).size[1]
if input('Is there a format file?\n1)Yes\n2)No\n') == '1':
format_file = input('Where is the format file (.txt or .xml): ').replace('"', '').strip()
format_file_extension = os.path.splitext(format_file)[1]
if not os.path.isfile(format_file):
raise Error('Format file does not exist.')
if format_file_extension == '.xml':
sprites = parse_xml(format_file, image_height)
elif format_file_extension == '.txt':
sprites = parse_text(format_file, image_height)
else:
raise Error('Wrong format file type')
destination = input('Where is the destination: ').replace('"', '').strip()
sprite_sheet_name = os.path.split(sprite_sheet)[1]
if not os.path.isfile(sprite_sheet):
raise Error('Sprite sheet does not exist.')
# Create the meta file for the sprite sheet
create_meta_file(os.path.join(destination, sprite_sheet_name + ".meta"), sprites)
# Copy the sprite sheet over
shutil.copy(sprite_sheet, os.path.join(destination, sprite_sheet_name))
| python |
from quantitative_node import QuantitativeNode
from qualitative_node import QualitativeNode
from dataset import Dataset
from leaf_node import Leaf
from dparser import DParser
import numpy as np
import info_gain
import random
import time
import math
isBenchmark = False
def getMostFrequentClass(result_vector):
if result_vector.size > 0:
(values, counts) = np.unique(result_vector, return_counts=True)
ind = np.argmax(counts)
        return values[ind]
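# Example: getMostFrequentClass(np.array(['no', 'yes', 'yes'])) returns 'yes',
# the most frequent label in the result vector.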
def removeChosenAttribute(attributes, chosen_attribute, values_matrix):
chosen_attribute_index = attributes.index(chosen_attribute)
attributes.remove(chosen_attribute)
values_matrix = np.delete(values_matrix, chosen_attribute_index, axis=1)
return values_matrix
def generateNewValuesMatrix(attributes, chosen_attribute, values_matrix):
chosen_attribute_index = attributes.index(chosen_attribute)
new_values_matrix = np.delete(values_matrix, chosen_attribute_index, axis=1)
new_attributes = list(attributes)
new_attributes.remove(chosen_attribute)
return new_attributes, new_values_matrix
class DecisionTree:
def __init__(self, possibleAttributeValues, quantitativeAttrs, m=None, sampling=False):
self.possibleAttributeValues = possibleAttributeValues
self.quantitativeAttrs = quantitativeAttrs
self.m = m
self.sampling = sampling
def createQualitativeNode(self, attributes, values_matrix, classification,
chosen_attribute):
"""
"""
N = QualitativeNode(chosen_attribute)
chosen_attribute_index = attributes.index(chosen_attribute)
new_attributes, new_values_matrix = generateNewValuesMatrix(attributes, chosen_attribute, values_matrix)
# Splitting nodes
for value in self.possibleAttributeValues[chosen_attribute]:
# print('Attribute: ', chosen_attribute, '=', value)
Dv = values_matrix[values_matrix[:, attributes.index(chosen_attribute)] == value]
Dv = np.delete(Dv, chosen_attribute_index, axis=1) # Deletes the column of the attribute value
if Dv.size == 0:
mostFrequent = getMostFrequentClass(values_matrix[:, -1])
next_tree = Leaf(mostFrequent)
else:
dataset = Dataset(new_attributes, classification, Dv, Dv[:, -1],
self.possibleAttributeValues,
self.quantitativeAttrs)
next_tree = self.createDecisionTree(dataset)
N.add_child(value, next_tree)
return N
def createQuantitativeNode(self, attributes, values_matrix, classification,
chosen_attribute):
"""
"""
N = QuantitativeNode(chosen_attribute)
# print("====================")
# print(values_matrix)
attr_index = attributes.index(chosen_attribute)
new_attributes, new_values_matrix = generateNewValuesMatrix(attributes, chosen_attribute, values_matrix)
entr = info_gain.entropy_attr_quantitative(attributes, values_matrix,
values_matrix[:, -1], attr_index)
N.set_split_value(entr[0])
# print("Split Value:", entr[0], "[", attr_index, "]")
# x < SPLIT
select = values_matrix[:, attr_index].astype(float) > float(entr[0])
Dv = values_matrix[select]
Dv = np.delete(Dv, attr_index, axis=1) # Deletes the column of the attribute value
# print(">>>")
# print(select)
if len(Dv) == 0:
mostFrequent = getMostFrequentClass(values_matrix[:, -1])
next_tree = Leaf(mostFrequent)
else:
dataset = Dataset(new_attributes, classification, Dv, Dv[:, -1],
self.possibleAttributeValues,
self.quantitativeAttrs)
next_tree = self.createDecisionTree(dataset)
N.set_right(next_tree)
# x > SPLIT:
select = values_matrix[:, attr_index].astype(float) <= float(entr[0])
Dv = values_matrix[select]
Dv = np.delete(Dv, attr_index, axis=1) # Deletes the column of the attribute value
# print("<<<")
# print(select)
# print("====================")
if len(Dv) == 0:
mostFrequent = getMostFrequentClass(values_matrix[:, -1])
next_tree = Leaf(mostFrequent)
else:
dataset = Dataset(new_attributes, classification, Dv, Dv[:, -1],
self.possibleAttributeValues,
self.quantitativeAttrs)
next_tree = self.createDecisionTree(dataset)
N.set_left(next_tree)
return N
def createDecisionTree(self, dataset: Dataset):
attributes = dataset.attributes
classification = dataset.classification
values_matrix = dataset.values_matrix
result_vector = dataset.results_vector
quantitativeAttrs = dataset.quantitative
# values_matrix = np.append(values_matrix, result_vector[np.newaxis].T, axis=1)
        # Start m with the default value (sqrt of the number of attributes) when self.m was not provided.
        m = self.m if self.m is not None else math.ceil(math.sqrt(len(attributes)))
# There is only one predicted class in the training set
if len(np.unique(result_vector)) == 1:
tree = Leaf(result_vector[0])
# print("Creating Leaf: '{}' [{}]".format(result_vector[0],
# len(result_vector)))
return tree
# There are no predictive attributes
elif len(attributes) == 0:
# print('There are no predictive attributes. Predicted class:',
# getMostFrequentClass(result_vector))
tree = Leaf(getMostFrequentClass(result_vector))
return tree
else:
            # Attribute sampling (select m features at random):
if self.sampling is True:
attribute_sample = random.sample(attributes, m)
attributes_index = []
for attribute in attribute_sample:
attributes_index.append(attributes.index(attribute))
v_matrix_sample = values_matrix[:, attributes_index]
else:
attribute_sample = attributes
v_matrix_sample = values_matrix
# print('\n\nSelected from the m-features sampling: ', attribute_sample)
attr_entropy = info_gain.entropy_all(attribute_sample,
v_matrix_sample, result_vector, quantitativeAttrs)
chosen_attribute = max(attr_entropy, key=attr_entropy.get)
global isBenchmark
if isBenchmark:
print("--------------------------------------------------")
print("Gain/Parameter:")
for key, value in attr_entropy.items():
print(" {}: {:.3f}".format(key, value))
print("Selected:", chosen_attribute)
N = None
v_matrix_sample = np.append(v_matrix_sample, result_vector[np.newaxis].T, axis=1)
if chosen_attribute in quantitativeAttrs:
N = self.createQuantitativeNode(attribute_sample, v_matrix_sample,
classification, chosen_attribute)
else:
N = self.createQualitativeNode(attribute_sample, v_matrix_sample,
classification, chosen_attribute)
return N
def print_decision_tree(tree, count=0):
children = tree.get_children()
space = ''
for i in range(count):
space += ' '
count += 1
print(space, '\033[94m', ' >>>', tree.attrName, '\033[0m', sep='')
for key in children:
print(space, '\033[92m', key, '\033[0m', sep='')
if children[key].is_leaf():
            print(space, 'Plays: ', '\033[91m', children[key].value, '\033[0m', sep='')
else:
print_decision_tree(children[key], count)
print(space, '\033[94m', ' <<<', tree.attrName, '\033[0m', '\n', sep='')
def main():
global isBenchmark
isBenchmark=True
dparser = DParser("dataset/dadosBenchmark_validacaoAlgoritmoAD.csv", ";", [])
dataset = Dataset(dparser.attributes, dparser.classification, dparser.values_matrix,
dparser.result_vector, dparser.uniqueValues, dparser.quantitative)
quantitative_attributes = dparser.get_quantitative_attributes()
start_time = time.time()
tree = DecisionTree(dparser.uniqueValues, quantitative_attributes, sampling=False).createDecisionTree(dataset)
print_decision_tree(tree)
elapsed_time = time.time() - start_time
print('Done. Elapsed time: ', elapsed_time)
isBenchmark=False
if __name__ == "__main__":
main()
| python |
from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class NegativeKeywords(Client):
@sp_endpoint('/v2/sp/negativeKeywords/{}', method='GET')
def get_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:
r"""
get_negative_keyword(self, keywordId, \*\*kwargs) -> ApiResponse
Gets a campaign negative keyword specified by identifier.
path **keywordId**:*number* | Required. The identifier of an existing keyword.
Returns:
ApiResponse
"""
return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
@sp_endpoint('/v2/sp/negativeKeywords/{}', method='DELETE')
def delete_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:
r"""
delete_negative_keyword(self, keywordId, \*\*kwargs) -> ApiResponse
Archives a campaign negative keyword.
path **keywordId**:*number* | Required. The identifier of an existing keyword.
Returns:
ApiResponse
"""
return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
@sp_endpoint('/v2/sp/negativeKeywords/extended/{}', method='GET')
def get_negative_keyword_extended(self, keywordId, **kwargs) -> ApiResponse:
r"""
get_negative_keyword_extended(self, keywordId, \*\*kwargs) -> ApiResponse
Gets a campaign negative keyword that has extended data fields.
path **keywordId**:*number* | Required. The identifier of an existing keyword.
Returns:
ApiResponse
"""
return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
@sp_endpoint('/v2/sp/negativeKeywords/extended', method='GET')
def list_negative_keywords_extended(self, **kwargs) -> ApiResponse:
r"""
list_negative_keywords_extended(self, \*\*kwargs) -> ApiResponse
Gets a list of negative keywords that have extended data fields.
query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0
query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.
query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.
query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.
        query **stateFilter**:*string* | Optional. The returned array is filtered to include only keywords with state set to one of the values in the specified comma-delimited list. Available values : enabled, archived.
query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.
query **adGroupIdFilter**:*string* | Optional. Restricts results to keywords associated with ad groups specified by identifier in the comma-delimited list.
        query **keywordIdFilter**:*string* | Optional. Restricts results to keywords specified by identifier in the comma-delimited list.
Returns:
ApiResponse
"""
return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/v2/sp/negativeKeywords', method='GET')
def list_negative_keywords(self, **kwargs) -> ApiResponse:
r"""
list_negative_keywords(self, \*\*kwargs) -> ApiResponse
Gets a list of negative keyword objects.
query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0
query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.
query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.
query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.
        query **stateFilter**:*string* | Optional. The returned array is filtered to include only keywords with state set to one of the values in the specified comma-delimited list. Available values : enabled, archived.
query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.
query **adGroupIdFilter**:*string* | Optional. Restricts results to keywords associated with ad groups specified by identifier in the comma-delimited list.
        query **keywordIdFilter**:*string* | Optional. Restricts results to keywords specified by identifier in the comma-delimited list.
Returns:
ApiResponse
"""
return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/v2/sp/negativeKeywords', method='POST')
def create_negative_keywords(self, **kwargs) -> ApiResponse:
r"""
create_negative_keywords(self, \*\*kwargs) -> ApiResponse:
Creates one or more campaign negative keywords.
        body: | REQUIRED {'description': 'An array of keyword objects.'}
        | '**campaignId**': *number*, {'description': 'The identifier of the campaign to which the keyword is associated.'}
| '**adGroupId**': *number*, {'description': 'The identifier of the ad group to which this keyword is associated.'}
| '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled ]'}
| '**keywordText**': *string*, {'description': 'The text of the expression to match against a search query.'}
| '**matchType**': *string*, {'description': 'The type of match.' , 'Enum': '[ negativeExact, negativePhrase ]'}
Returns:
ApiResponse
"""
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
@sp_endpoint('/v2/sp/negativeKeywords', method='PUT')
def edit_negative_keywords(self, **kwargs) -> ApiResponse:
r"""
edit_negative_keywords(self, \*\*kwargs) -> ApiResponse:
Updates one or more campaign negative keywords.
body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}
        | '**keywordId**': *number*, {'description': 'The identifier of the keyword.'}
| '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled, paused, archived ]'}
Returns:
ApiResponse
"""
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
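# Usage sketch (illustrative only, not part of the library): how the endpoints above might be
# called once a client is configured. The constructor/credential handling is an assumption and
# depends on how ad_api.base.Client is set up in your environment.
#
#   client = NegativeKeywords()  # assumes credentials are resolved by the base Client
#   listing = client.list_negative_keywords(stateFilter="enabled", count=10)
#   created = client.create_negative_keywords(body=[{
#       "campaignId": 123456789, "state": "enabled",
#       "keywordText": "free", "matchType": "negativeExact"}])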
| python |
# -*- encoding: utf-8 -*-
"""Initialization of Flask REST-API Environment"""
from flask import Flask
from flask_bcrypt import Bcrypt # Bcrypt hashing for Flask
from flask_sqlalchemy import SQLAlchemy
from .config import config_by_name
db = SQLAlchemy() # database object
flask_bcrypt = Bcrypt() # bcrypt hashing utilities
def create_app(config_name : str = 'dev'):
"""Initializes the Flask API, by Creating an APP
with the necessary configurations and parameters which are taken from
    `config`. By default, the environment is initialized; however, a
template `.env` file is present in the `template` branch.
:type config_name: str
:param config_name: Configuration for Setting up the Environment, can be
any of the following: ['dev', 'test', 'prod'].
                         Defaults to 'dev' (see the function signature); separate
                         configurations help keep the production and development
                         environments apart.
"""
app = Flask(__name__)
app.config.from_object(config_by_name[config_name])
db.init_app(app)
flask_bcrypt.init_app(app)
    return app
| python
from context import DBVendor, DBConnection, DBContext
from converters import *
from datasource import *
| python
from collections import OrderedDict
from typing import List
from typing import Union, Dict, Callable, Any
from tequila.ml.utils_ml import preamble, TequilaMLException
from tequila.objective import Objective, Variable, vectorize, QTensor
from tequila.tools import list_assignment
from tequila.simulators.simulator_api import simulate
import numpy as np
import tensorflow as tf
class TFLayer(tf.keras.layers.Layer):
"""
Tensorflow Layer
DISCLAIMER:
This is very much a WIP, since we are not exactly sure how users intend to use it. Please feel free to raise issues
and give feedback without hesitation.
"""
def __init__(self, objective: Union[Objective, QTensor], compile_args: Dict[str, Any] = None,
input_vars: Dict[str, Any] = None, **kwargs):
"""
Tensorflow layer that compiles the Objective (or QTensor) with the given compile arguments and/or
input variables if there are any when initialized. When called, it will forward the input variables into the
compiled objective (if there are any inputs needed) alongside the parameters and will return the output.
The gradient values can also be returned.
Parameters
----------
objective
Objective or QTensor to compile and run.
compile_args
dict of all the necessary information to compile the objective
input_vars
List of variables that will be inputs
"""
super(TFLayer, self).__init__(**kwargs)
# Currently, the optimizers in tf.keras.optimizers don't support float64. For now, all values will be cast to
# float32 to accommodate this, but in the future, whenever it is supported, this can be changed with
# set_cast_type()
self._cast_type = tf.float32
self.objective = objective
# Store the objective and vectorize it if necessary
if isinstance(objective, tuple) or isinstance(objective, list):
for i, elem in enumerate(objective):
if not isinstance(elem, Objective):
raise TequilaMLException("Element {} in {} is not a Tequila Objective: {}"
"".format(i, type(objective), elem))
objective = vectorize(list_assignment(objective))
elif isinstance(objective, Objective):
objective = vectorize(list_assignment(objective))
elif not isinstance(objective, QTensor):
raise TequilaMLException("Objective must be a Tequila Objective, QTensor "
"or list/tuple of Objectives. Received a {}".format(type(objective)))
self.objective = objective
# Compile the objective, prepare the gradients and whatever else that may be necessary
self.comped_objective, self.compile_args, self.input_vars, self.weight_vars, self.i_grads, self.w_grads, \
self.first, self.second = preamble(objective, compile_args, input_vars)
# VARIABLES
# These variables will hold 1D tensors which each will store the values in the order found by self.input_vars
# for the variable in self.input_variable, and in the order found by self.weight_vars for the variable in
# self.weight_variable
# If there are inputs, prepare an input tensor as a trainable variable
# NOTE: if the user specifies values for the inputs, they will be assigned in the set_input_values()
if self.input_vars:
initializer = tf.constant_initializer(np.random.uniform(low=0., high=2 * np.pi, size=len(self.input_vars)))
self.input_variable = self.add_weight(name="input_tensor_variable",
shape=(len(self.input_vars)),
dtype=self._cast_type,
initializer=initializer,
trainable=True)
else:
self.input_variable = None
# If there are weight variables, prepare a params tensor as a trainable variable
if self.weight_vars:
# Initialize the variable tensor that will hold the weights/parameters/angles
initializer = tf.constant_initializer(np.random.uniform(low=0., high=2 * np.pi, size=len(self.weight_vars)))
self.weight_variable = self.add_weight(name="params_tensor_variable",
shape=(len(self.weight_vars)),
dtype=self._cast_type,
initializer=initializer,
trainable=True)
# If the user specified initial values for the parameters, use them
if compile_args is not None and compile_args["initial_values"] is not None:
# Assign them in the order given by self.second
toVariable = [self.second[i] for i in self.second] # Variable names in the correct order
self.weight_variable.assign([compile_args["initial_values"][val]
for val in toVariable])
else:
self.weight_variable = None
# Store extra useful information
self._input_len = 0
if input_vars:
self._input_len = len(self.input_vars)
self._params_len = len(list(self.weight_vars))
self.samples = None
if self.compile_args is not None:
self.samples = self.compile_args["samples"]
def __call__(self, input_tensor: tf.Tensor = None) -> tf.Tensor:
"""
Calls the Objective on a TF tensor object and returns the results.
There are three cases which we could have:
1) We have just input variables
2) We have just parameter variables
3) We have both input and parameter variables
We must determine which situation we are in and execute the corresponding _do() function to also get the
correct gradients.
Returns
-------
tf.Tensor:
a TF tensor, the result of calling the underlying objective on the input combined with the parameters.
"""
# This is for the situation where various different inputs are being introduced
if input_tensor is not None:
self.set_input_values(input_tensor)
# Case of both inputs and parameters
if self.input_vars and self.weight_vars:
return self._do(self.get_inputs_variable(), self.get_params_variable())
# Case of just inputs
elif self.input_vars:
return self._do_just_input(self.get_inputs_variable())
# Case of just parameters
return self._do_just_params(self.get_params_variable())
@tf.custom_gradient
def _do_just_input(self, input_tensor_variable: tf.Variable) -> (tf.Tensor, Callable):
"""
Forward pass with just the inputs.
This in-between function is necessary in order to have the custom gradient work in Tensorflow. That is the
reason for returning the grad() function as well.
Parameters
----------
input_tensor_variable
the tf.Variable which holds the values of the input
Returns
-------
result
The result of the forwarding
"""
if input_tensor_variable.shape != self._input_len:
raise TequilaMLException(
'Received input of len {} when Objective takes {} inputs.'.format(len(input_tensor_variable.numpy()),
self._input_len))
input_tensor_variable = tf.stack(input_tensor_variable)
def grad(upstream):
# Get the gradient values
input_gradient_values = self.get_grads_values(only="inputs")
# Convert to tensor
in_Tensor = tf.convert_to_tensor(input_gradient_values, dtype=self._cast_type)
# Right-multiply the upstream
in_Upstream = tf.dtypes.cast(upstream, self._cast_type) * in_Tensor
# Transpose and reduce sum
return tf.reduce_sum(tf.transpose(in_Upstream), axis=0)
return self.realForward(inputs=input_tensor_variable, params=None), grad
@tf.custom_gradient
def _do_just_params(self, params_tensor_variable: tf.Variable) -> (tf.Tensor, Callable):
"""
Forward pass with just the parameters
This in-between function is necessary in order to have the custom gradient work in Tensorflow. That is the
reason for returning the grad() function as well.
Parameters
----------
params_tensor_variable
the tf.Variable which holds the values of the parameters
Returns
-------
result
The result of the forwarding
"""
if params_tensor_variable.shape != self._params_len:
raise TequilaMLException(
                'Received parameters of len {} when Objective takes {} parameters.'.format(
                    len(params_tensor_variable.numpy()), self._params_len))
params_tensor_variable = tf.stack(params_tensor_variable)
def grad(upstream):
# Get the gradient values
parameter_gradient_values = self.get_grads_values(only="params")
# Convert to tensor
par_Tensor = tf.convert_to_tensor(parameter_gradient_values, dtype=self._cast_type)
# Right-multiply the upstream
par_Upstream = tf.dtypes.cast(upstream, self._cast_type) * par_Tensor
# Transpose and reduce sum
return tf.reduce_sum(tf.transpose(par_Upstream), axis=0)
return self.realForward(inputs=None, params=params_tensor_variable), grad
@tf.custom_gradient
def _do(self, input_tensor_variable: tf.Variable, params_tensor_variable: tf.Variable) -> (tf.Tensor, Callable):
"""
Forward pass with both input and parameter variables
This in-between function is necessary in order to have the custom gradient work in Tensorflow. That is the
reason for returning the grad() function as well.
Parameters
----------
input_tensor_variable
the tf.Variable which holds the values of the input
params_tensor_variable
the tf.Variable which holds the values of the parameters
Returns
-------
result
The result of the forwarding
"""
if params_tensor_variable.shape != self._params_len:
raise TequilaMLException(
                'Received parameters of len {} when Objective takes {} parameters.'.format(
                    len(params_tensor_variable.numpy()), self._params_len))
params_tensor_variable = tf.stack(params_tensor_variable)
if input_tensor_variable.shape != self._input_len:
raise TequilaMLException(
'Received input of len {} when Objective takes {} inputs.'.format(len(input_tensor_variable.numpy()),
self._input_len))
input_tensor_variable = tf.stack(input_tensor_variable)
def grad(upstream):
input_gradient_values, parameter_gradient_values = self.get_grads_values()
# Convert to tensor
in_Tensor = tf.convert_to_tensor(input_gradient_values, dtype=self._cast_type)
par_Tensor = tf.convert_to_tensor(parameter_gradient_values, dtype=self._cast_type)
# Multiply with the upstream
in_Upstream = tf.dtypes.cast(upstream, self._cast_type) * in_Tensor
par_Upstream = tf.dtypes.cast(upstream, self._cast_type) * par_Tensor
# Transpose and sum
return tf.reduce_sum(tf.transpose(in_Upstream), axis=0), tf.reduce_sum(tf.transpose(par_Upstream), axis=0)
return self.realForward(inputs=input_tensor_variable,
params=params_tensor_variable), grad
def realForward(self, inputs: Union[tf.Variable, None], params: Union[tf.Variable, None]) -> tf.Tensor:
"""
This is where we really execute the forward pass.
Parameters
----------
inputs
tf.Variable of the inputs
params
tf.Variable of the parameters
Returns
-------
The result of the forwarding
"""
def tensor_fix(inputs_tensor: Union[tf.Tensor, None], params_tensor: Union[tf.Tensor, None],
first: Dict[int, Variable], second: Dict[int, Variable]):
"""
Prepare a dict with the right information about the involved variables (whether input or parameter) and
their corresponding values.
Note: if "inputs_tensor" and "angles_tensor" are None or "first" and "second" are empty dicts, something
went wrong, since the objective should have either inputs or parameters to tweak.
Parameters
----------
inputs_tensor
Tensor holding the values of the inputs
params_tensor
Tensor holding the values of the parameters
first
Dict mapping numbers to input variable names
second
Dict mapping numbers to parameter variable names
Returns
-------
variables
Dict mapping all variable names to values
"""
variables = {}
if inputs_tensor is not None:
for i, val in enumerate(inputs_tensor):
variables[first[i]] = val.numpy()
if params_tensor is not None:
for i, val in enumerate(params_tensor):
variables[second[i]] = val.numpy()
return variables
variables = tensor_fix(inputs, params, self.first, self.second)
result = self.comped_objective(variables=variables, samples=self.samples)
if not isinstance(result, np.ndarray):
# this happens if the Objective is a scalar since that's usually more convenient for pure quantum stuff.
result = np.array(result)
if hasattr(inputs, 'device'):
if inputs.device == 'cuda':
return tf.convert_to_tensor(result).to(inputs.device)
else:
return tf.convert_to_tensor(result)
return tf.convert_to_tensor(result)
def get_grads_values(self, only: str = None):
"""
Gets the values of the gradients with respect to the inputs and the parameters.
You can specify whether you want just the input or parameter gradients for the sake of efficiency.
Returns
-------
grad_values
If "only" is None, a tuple of two elements, the first one being a list of gradients to apply to the input
variables, and the second element being a list of gradients to apply to the parameter variables.
If only == inputs, just the list of gradient values w.r.t. the input variables.
If only == params, just the list of gradient values w.r.t. the parameter variables.
"""
get_input_grads = True
get_param_grads = True
# Determine which gradients to calculate
if only is not None:
if only == "inputs":
get_input_grads = True
get_param_grads = False
elif only == "params":
get_input_grads = False
get_param_grads = True
else:
raise TequilaMLException("Valid values for \"only\" are \"inputs\" and \"params\".")
# Get the current values of the inputs and parameters in a dict called "variables"
variables = {}
# Inputs
list_inputs = self.get_inputs_list()
if list_inputs:
for i in self.first:
variables[self.first[i]] = list_inputs[i]
# Parameters
list_angles = self.get_params_list()
if list_angles:
for w in self.second:
variables[self.second[w]] = list_angles[w]
# GETTING THE GRADIENT VALUES
# Get the gradient values with respect to the inputs
inputs_grads_values = []
if get_input_grads and self.first:
for in_var in self.first.values():
self.fill_grads_values(inputs_grads_values, in_var, variables, self.i_grads)
# Get the gradient values with respect to the parameters
param_grads_values = []
if get_param_grads and self.second:
for param_var in self.second.values(): # Iterate through the names of the parameters
self.fill_grads_values(param_grads_values, param_var, variables, self.w_grads)
# Determine what to return
if get_input_grads and get_param_grads:
return inputs_grads_values, param_grads_values
elif get_input_grads and not get_param_grads:
return inputs_grads_values
elif not get_input_grads and get_param_grads:
return param_grads_values
def set_input_values(self, initial_input_values: Union[dict, tf.Tensor]):
"""
Stores the values of the tensor into the self.input_variable. Intended to be used to set the values that the
input variables initially will have before training.
        Parameters
        ----------
        initial_input_values
            dict mapping input variable names to their initial values, or a tf.Tensor holding the values
            in the order given by self.first.
        """
# If the input is a dictionary
if isinstance(initial_input_values, dict):
input_values_tensor = tf.convert_to_tensor([initial_input_values[i] for i in self.first.values()])
# Check that input variables are expected
if self.input_vars is not None:
# Check that the length of the tensor of the variable is the correct one
if input_values_tensor.shape == self._input_len:
self.input_variable.assign(input_values_tensor)
else:
raise TequilaMLException("Input tensor has shape {} which does not match "
"the {} inputs expected".format(input_values_tensor.shape,
self._input_len))
else:
raise TequilaMLException("No input variables were expected.")
# If the input is a tensor
elif isinstance(initial_input_values, tf.Tensor):
if initial_input_values.shape == self._input_len:
# We have no information about which value corresponds to which variable, so we assume that the user
# knows that the order will be the same as in self.first
self.input_variable.assign(initial_input_values)
else:
raise TequilaMLException("Input tensor has shape {} which does not match "
"the {} inputs expected".format(initial_input_values.shape, self._input_len))
def fill_grads_values(self, grads_values, var, variables, objectives_grad):
"""
Inserts into "grads_values" the gradient values per objective in objectives_grad[var], where var is the name
of the variable.
Parameters
----------
grads_values
List in which we insert the gradient values (No returns)
var
Variable over which we are calculating the gradient values
variables
Dict mapping all variables to their current values
objectives_grad
List of ExpectationValueImpls that will be simulated to calculate the gradient value of a given variable
"""
var_results = []
grads_wrt_var = objectives_grad[var]
if not isinstance(grads_wrt_var, List):
grads_wrt_var = [grads_wrt_var]
for obj in grads_wrt_var:
var_results.append(simulate(objective=obj, variables=variables,
backend=self.compile_args["backend"],
samples=self.samples))
grads_values.append(var_results)
def get_params_variable(self):
return self.weight_variable
def get_params_list(self):
if self.get_params_variable() is not None:
return self.get_params_variable().numpy().tolist()
return []
def get_inputs_variable(self):
return self.input_variable
def get_inputs_list(self):
if self.get_inputs_variable() is not None:
return self.get_inputs_variable().numpy().tolist()
return []
def get_input_values(self):
# Tensor values is in the order of self.input_vars
input_values = self.get_inputs_list()
input_values_dict = {}
for i, value in enumerate(self.input_vars):
input_values_dict[value] = input_values[i]
return input_values_dict
def get_params_values(self):
# Tensor values is in the order of self.weight_vars
params_values = self.get_params_list()
params_values_dict = {}
for i, value in enumerate(self.weight_vars):
params_values_dict[value] = params_values[i]
return params_values_dict
def set_cast_type(self, datatype):
"""
The default datatype of this TFLayer is float32, since this is the most precise float supported by TF
optimizers at the time of writing.
This method is intended so that in the future, whenever TF optimizers support float64, the datatype cast to can
be changed to float64. However, if for some reason you'd like to cast it to something else, you may, although it
only really makes sense to cast it to float types since these are the values that the variables will have.
Parameters
----------
datatype
Datatype to cast to. Expecting typing.Union[tf.float64, tf.float32, tf.float16].
"""
self._cast_type = datatype
def __repr__(self) -> str:
string = 'Tequila TFLayer. Represents: \n'
string += '{} \n'.format(str(self.objective))
string += 'Current Weights: {}'.format(list(self.weight_vars))
return string
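# Usage sketch (illustrative only, not part of this module): wrap a small Tequila objective in a
# TFLayer. The circuit/Hamiltonian are minimal assumptions, and the compile_args keys shown are
# only the ones this module reads (backend, samples, initial_values); preamble() may accept more.
#
#   import tequila as tq
#
#   U = tq.gates.Ry(angle="theta", target=0)
#   H = tq.paulis.Z(0)
#   objective = tq.ExpectationValue(U=U, H=H)
#   layer = TFLayer(objective,
#                   compile_args={"backend": None, "samples": None, "initial_values": {"theta": 0.1}})
#   print(layer())                    # forward pass using the stored parameter variable
#   print(layer.get_params_values())  # current parameter values by variable name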
| python |
import setuptools
setuptools.setup(
name="epaper_standalone",
version="4.0",
license="Apache-2.0",
author="Steve Zheng",
description="Show time, weather and calendar.",
packages=setuptools.find_packages(exclude=['test']),
setup_requires=['Pillow>=5.4'],
package_data={
'cwt': ['utils/fonts/*.ttf']
},
entry_points={
'console_scripts': [
'run-standalone=cwt.main:run'
]
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
)
| python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Tests for the virtual file system.'''
from __future__ import unicode_literals
import os
import unittest
from UnifiedLog import virtual_file
from UnifiedLog import virtual_file_system
from tests import test_lib
class VirtualFileSystemTests(test_lib.BaseTestCase):
'''Tests for the virtual file system.'''
def testPathExists(self):
'''Tests the path_exists function.'''
file_system = virtual_file_system.VirtualFileSystem(
virtual_file.VirtualFile)
path = os.path.join(
self._TEST_DATA_PATH, '7EF56328D53A78B59CCCE3E3189F57')
result = file_system.path_exists(path)
self.assertTrue(result)
path = os.path.join(self._TEST_DATA_PATH, 'bogus')
result = file_system.path_exists(path)
self.assertFalse(result)
def testListdir(self):
'''Tests the listdir function.'''
file_system = virtual_file_system.VirtualFileSystem(
virtual_file.VirtualFile)
expected_directory_entries = [
'0000000000000030.tracev3',
'7EF56328D53A78B59CCCE3E3189F57',
'8E21CAB1DCF936B49F85CF860E6F34EC']
directory_entries = file_system.listdir(self._TEST_DATA_PATH)
self.assertEqual(len(directory_entries), 3)
self.assertEqual(sorted(directory_entries), expected_directory_entries)
def testIsDir(self):
'''Tests the is_dir function.'''
file_system = virtual_file_system.VirtualFileSystem(
virtual_file.VirtualFile)
result = file_system.is_dir(self._TEST_DATA_PATH)
self.assertTrue(result)
path = os.path.join(
self._TEST_DATA_PATH, '7EF56328D53A78B59CCCE3E3189F57')
result = file_system.is_dir(path)
self.assertFalse(result)
def testPathJoin(self):
'''Tests the path_join function.'''
file_system = virtual_file_system.VirtualFileSystem(
virtual_file.VirtualFile)
expected_path = os.path.join(
self._TEST_DATA_PATH, '7EF56328D53A78B59CCCE3E3189F57')
path = file_system.path_join(
self._TEST_DATA_PATH, '7EF56328D53A78B59CCCE3E3189F57')
self.assertEqual(path, expected_path)
def testGetVirtualFile(self):
'''Tests the get_virtual_file function.'''
file_system = virtual_file_system.VirtualFileSystem(
virtual_file.VirtualFile)
path = os.path.join(
self._TEST_DATA_PATH, '7EF56328D53A78B59CCCE3E3189F57')
file_entry = file_system.get_virtual_file(path, filetype='uuidtext')
self.assertIsNotNone(file_entry)
self.assertIsInstance(file_entry, virtual_file.VirtualFile)
if __name__ == '__main__':
unittest.main()
| python |
def number_of_equal_elements(list1, list2):
return sum([x == y for x, y in zip(list1, list2)])
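# Example: number_of_equal_elements([1, 2, 3], [1, 9, 3]) == 2
# (positions 0 and 2 hold equal values).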
| python |
# Portions of code used in this file and implementation logic are based
# on lightgbm.dask.
# https://github.com/microsoft/LightGBM/blob/b5502d19b2b462f665e3d1edbaa70c0d6472bca4/python-package/lightgbm/dask.py
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# License:
# https://github.com/microsoft/LightGBM/blob/c3b9363d02564625332583e166e3ab3135f436e3/LICENSE
from typing import (Tuple, Dict, Any, List, Optional, Type, Union, Sequence,
Callable)
from copy import deepcopy
from dataclasses import dataclass
from distutils.version import LooseVersion
import time
import logging
import os
import warnings
import gc
import numpy as np
import pandas as pd
import lightgbm
from lightgbm import LGBMModel, LGBMRanker, Booster
from lightgbm.basic import _choose_param_value, _ConfigAliases, LightGBMError
from lightgbm.callback import CallbackEnv
import ray
from ray.util.annotations import PublicAPI
from xgboost_ray.main import (
_handle_queue, RayXGBoostActor, LEGACY_MATRIX, RayDeviceQuantileDMatrix,
concat_dataframes, _set_omp_num_threads, Queue, Event, DistributedCallback,
ENV, RayActorError, pickle, _PrepareActorTask, RayParams as RayXGBParams,
_TrainingState, _is_client_connected, is_session_enabled,
force_on_current_node, _assert_ray_support, _maybe_print_legacy_warning,
_Checkpoint, _create_communication_processes, TUNE_USING_PG, RayTaskError,
RayXGBoostActorAvailable, RayXGBoostTrainingError, _create_placement_group,
_shutdown, PlacementGroup, ActorHandle, combine_data, _trigger_data_load,
DEFAULT_PG, _autodetect_resources as _autodetect_resources_base)
from xgboost_ray.session import put_queue
from xgboost_ray import RayDMatrix
from lightgbm_ray.util import find_free_port, is_port_free, lgbm_network_free
from lightgbm_ray.tune import _try_add_tune_callback, _TuneLGBMRank0Mixin
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ELASTIC_RESTART_DISABLED = True
LIGHTGBM_VERSION = LooseVersion(lightgbm.__version__)
class StopException(Exception):
pass
def _check_cpus_per_actor_at_least_2(cpus_per_actor: int,
suppress_exception: bool):
"""Raise an exception or a warning if cpus_per_actor < 2"""
if cpus_per_actor < 2:
if suppress_exception:
warnings.warn("cpus_per_actor is set to less than 2. Distributed"
" LightGBM needs at least 2 CPUs per actor to "
"train efficiently. This may lead to a "
"degradation of performance during training.")
else:
raise ValueError(
"cpus_per_actor is set to less than 2. Distributed"
" LightGBM needs at least 2 CPUs per actor to "
"train efficiently. You can suppress this "
"exception by setting allow_less_than_two_cpus "
"to True.")
def _get_data_dict(data: RayDMatrix, param: Dict) -> Dict:
if not LEGACY_MATRIX and isinstance(data, RayDeviceQuantileDMatrix):
# If we only got a single data shard, create a list so we can
# iterate over it
if not isinstance(param["data"], list):
param["data"] = [param["data"]]
if not isinstance(param["label"], list):
param["label"] = [param["label"]]
if not isinstance(param["weight"], list):
param["weight"] = [param["weight"]]
        if not isinstance(param["base_margin"], list):
            param["base_margin"] = [param["base_margin"]]
param["label_lower_bound"] = [None]
param["label_upper_bound"] = [None]
dm_param = {
"feature_names": data.feature_names,
"feature_types": data.feature_types,
"missing": data.missing,
}
param.update(dm_param)
else:
if isinstance(param["data"], list):
dm_param = {
"data": concat_dataframes(param["data"]),
"label": concat_dataframes(param["label"]),
"weight": concat_dataframes(param["weight"]),
"base_margin": concat_dataframes(param["base_margin"]),
"label_lower_bound": concat_dataframes(
param["label_lower_bound"]),
"label_upper_bound": concat_dataframes(
param["label_upper_bound"]),
}
param.update(dm_param)
return param
# data.update_matrix_properties(matrix)
# return matrix
@dataclass
class RayParams(RayXGBParams):
# The RayParams from XGBoost-Ray can also be used, in which
# case allow_less_than_two_cpus will just default to False
allow_less_than_two_cpus: bool = False
__doc__ = RayXGBParams.__doc__.replace(
""" elastic_training (bool): If True, training will continue with
fewer actors if an actor fails. Default False.""",
""" allow_less_than_two_cpus (bool): If True, an exception will not
            be raised if `cpus_per_actor` is set to a value smaller than 2.
            Default False."""
).replace(
"""cpus_per_actor (int): Number of CPUs to be used per Ray actor.""",
"""cpus_per_actor (int): Number of CPUs to be used per Ray actor.
If smaller than 2, training might be substantially slower
because communication work and training work will block
each other. This will raise an exception unless
`allow_less_than_two_cpus` is True.""")
def get_tune_resources(self):
_check_cpus_per_actor_at_least_2(
self.cpus_per_actor,
getattr(self, "allow_less_than_two_cpus", False))
return super().get_tune_resources()
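# A minimal, illustrative sketch of constructing RayParams for this module
# (the actor and CPU counts below are assumptions, not recommended defaults):
#
#     ray_params = RayParams(num_actors=4, cpus_per_actor=2,
#                            allow_less_than_two_cpus=False)
#
# With allow_less_than_two_cpus=False, requesting fewer than 2 CPUs per actor
# raises a ValueError (see get_tune_resources above).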
def _validate_ray_params(ray_params: Union[None, RayParams, dict]) \
-> RayParams:
if ray_params is None:
ray_params = RayParams()
elif isinstance(ray_params, dict):
ray_params = RayParams(**ray_params)
elif not isinstance(ray_params, RayParams):
raise ValueError(
f"`ray_params` must be a `RayParams` instance, a dict, or None, "
f"but it was {type(ray_params)}."
f"\nFIX THIS preferably by passing a `RayParams` instance as "
f"the `ray_params` parameter.")
if ray_params.num_actors <= 0:
        raise ValueError(
            "The `num_actors` parameter is set to 0 or below. Please specify "
"the number of distributed actors you want to use."
"\nFIX THIS by passing a `RayParams(num_actors=X)` argument "
"to your call to lightgbm_ray.")
elif ray_params.num_actors < 2:
warnings.warn(
f"`num_actors` in `ray_params` is smaller than 2 "
f"({ray_params.num_actors}). LightGBM will NOT be distributed!")
return ray_params
class RayLightGBMActor(RayXGBoostActor):
def __init__(
self,
rank: int,
num_actors: int,
model_factory: Optional[Type[LGBMModel]] = None,
queue: Optional[Queue] = None,
stop_event: Optional[Event] = None,
checkpoint_frequency: int = 5,
distributed_callbacks: Optional[List[DistributedCallback]] = None,
network_params: Optional[dict] = None,
):
self.network_params = {} if not network_params else \
network_params.copy()
self.fixed_port = "local_listen_port" in self.network_params
if "time_out" not in self.network_params:
self.network_params["time_out"] = 120
self.model_factory = model_factory
super().__init__(
rank=rank,
num_actors=num_actors,
queue=queue,
stop_event=stop_event,
checkpoint_frequency=checkpoint_frequency,
distributed_callbacks=distributed_callbacks)
def _save_checkpoint_callback(self, is_rank_0: bool) -> Callable:
this = self
def _save_internal_checkpoint_callback() -> Callable:
def _callback(env: CallbackEnv) -> None:
if not is_rank_0:
return
if (env.iteration == env.end_iteration - 1
or env.iteration % this.checkpoint_frequency == 0):
if env.iteration == env.end_iteration - 1:
iter = -1
else:
# LightGBM starts iterations from 0
iter = env.iteration + 1
put_queue(
_Checkpoint(
iter,
pickle.dumps(
env.model.model_to_string(num_iteration=-1))))
_callback.order = 1 # type: ignore
return _callback
return _save_internal_checkpoint_callback()
def _stop_callback(self, is_rank_0: bool) -> Callable:
this = self
# Keep track of initial stop event. Since we're training in a thread,
        # the stop event might be overwritten, which should be handled
# as if the previous stop event was set.
initial_stop_event = self._stop_event
def _stop_callback() -> Callable:
def _callback(env: CallbackEnv) -> None:
try:
if this._stop_event.is_set() or \
this._get_stop_event() is not initial_stop_event:
raise StopException()
except RayActorError:
raise StopException()
_callback.order = 2 # type: ignore
_callback.before_iteration = True # type: ignore
return _callback
return _stop_callback()
def find_free_address(self) -> Tuple[str, int]:
port = self.port()
ip = self.ip()
if not port:
port = find_free_port()
elif not self.is_port_free(port):
if not self.fixed_port:
port = find_free_port()
else:
raise RuntimeError(f"Port {port} on {ip} is not free!")
return (ip, port)
def port(self) -> Optional[int]:
return self.network_params.get("local_listen_port", None)
def is_port_free(self, port: int) -> bool:
return is_port_free(port)
def set_network_params(
self,
machines: str,
local_listen_port: int,
num_machines: int,
time_out: Optional[int] = None,
):
"""Set LightGBM params responsible for networking"""
self.network_params["machines"] = machines
self.network_params["local_listen_port"] = local_listen_port
self.network_params["num_machines"] = num_machines
if time_out is not None:
self.network_params["time_out"] = time_out
def load_data(self, data: RayDMatrix):
# LightGBM specific - Main difference between this and XGBoost:
# XGBoost needs a local DMatrix, while this runs off Pandas
# objects returned by the RayDMatrix directly.
if data in self._data:
return
self._distributed_callbacks.before_data_loading(self, data)
param = data.get_data(self.rank, self.num_actors)
if isinstance(param["data"], list):
self._local_n[data] = sum(len(a) for a in param["data"])
else:
self._local_n[data] = len(param["data"])
data.unload_data() # Free object store
d = _get_data_dict(data, param).copy()
self._data[data] = d
self._distributed_callbacks.after_data_loading(self, data)
def train(self, return_bst: bool, params: Dict[str, Any],
dtrain: RayDMatrix, evals: Tuple[RayDMatrix, str],
boost_rounds_left: int, *args, **kwargs) -> Dict[str, Any]:
if self.model_factory is None:
raise ValueError("model_factory cannot be None for training")
self._distributed_callbacks.before_train(self)
num_threads = _set_omp_num_threads()
local_params = _choose_param_value(
main_param_name="num_threads",
params=params,
default_value=num_threads if num_threads > 0 else
sum(num
for _, num in ray.worker.get_resource_ids().get("CPU", [])))
if "init_model" in kwargs:
if isinstance(kwargs["init_model"], bytes):
# bytearray type gets lost in remote actor call
kwargs["init_model"] = bytearray(kwargs["init_model"])
if dtrain not in self._data:
self.load_data(dtrain)
local_dtrain = self._data[dtrain]
# if not local_dtrain.get_label().size:
# raise RuntimeError(
# "Training data has no label set. Please make sure to set "
# "the `label` argument when initializing `RayDMatrix()` "
# "for data you would like to train on.")
local_evals = []
local_eval_names = []
local_eval_sample_weights = []
local_eval_init_scores = []
for deval, name in evals:
if deval not in self._data:
self.load_data(deval)
local_evals.append((self._data[deval]["data"],
self._data[deval]["label"]))
local_eval_names.append(name)
local_eval_sample_weights.append(self._data[deval]["weight"])
local_eval_init_scores.append(self._data[deval]["base_margin"])
if "callbacks" in kwargs:
callbacks = kwargs["callbacks"] or []
else:
callbacks = []
callbacks.append(self._save_checkpoint_callback(is_rank_0=return_bst))
callbacks.append(self._stop_callback(is_rank_0=return_bst))
for callback in callbacks:
if isinstance(callback, _TuneLGBMRank0Mixin):
callback.is_rank_0 = return_bst
kwargs["callbacks"] = callbacks
if LIGHTGBM_VERSION < LooseVersion("3.3.0"):
# In lightgbm<3.3.0, verbosity doesn't always work as a parameter
# but passing it as kwarg to fit does
local_params = _choose_param_value(
main_param_name="verbosity",
params=local_params,
default_value=1)
kwargs["verbose"] = local_params.pop("verbosity")
result_dict = {}
error_dict = {}
network_params = self.network_params
local_params.update(network_params)
local_params["n_estimators"] = boost_rounds_left
is_ranker = issubclass(self.model_factory, LGBMRanker)
def _train():
logger.debug(f"starting LightGBM training, rank {self.rank}, "
f"{self.network_params}, {local_params}, {kwargs}")
try:
model = self.model_factory(**local_params)
# LightGBM specific - this context calls
# _LIB.LGBM_NetworkFree(), which is
# supposed to clean up the network and
# free up ports should the training fail
# this is also called separately for good measure
with lgbm_network_free(model):
if is_ranker:
# missing group arg, update later
model.fit(
local_dtrain["data"],
local_dtrain["label"],
sample_weight=local_dtrain["weight"],
init_score=local_dtrain["base_margin"],
eval_set=local_evals,
eval_names=local_eval_names,
eval_sample_weight=local_eval_sample_weights,
eval_init_score=local_eval_init_scores,
**kwargs)
else:
model.fit(
local_dtrain["data"],
local_dtrain["label"],
sample_weight=local_dtrain["weight"],
init_score=local_dtrain["base_margin"],
eval_set=local_evals,
eval_names=local_eval_names,
eval_sample_weight=local_eval_sample_weights,
eval_init_score=local_eval_init_scores,
**kwargs)
result_dict.update({
"bst": model,
"evals_result": model.evals_result_,
"train_n": self._local_n[dtrain]
})
except StopException:
# Usually this should be caught by XGBoost core.
# Silent fail, will be raised as RayXGBoostTrainingStopped.
return
except LightGBMError as e:
error_dict.update({"exception": e})
return
_train()
if not result_dict:
raise_from = error_dict.get("exception", None)
raise RayXGBoostTrainingError("Training failed.") from raise_from
self._distributed_callbacks.after_train(self, result_dict)
if not return_bst:
result_dict.pop("bst", None)
return result_dict
def predict(self,
model: Union[LGBMModel, Booster],
data: RayDMatrix,
method="predict",
**kwargs):
self._distributed_callbacks.before_predict(self)
_set_omp_num_threads()
if data not in self._data:
self.load_data(data)
local_data = self._data[data]["data"]
predictions = getattr(model, method)(local_data, **kwargs)
if predictions.ndim == 1:
callback_predictions = pd.Series(predictions)
else:
callback_predictions = pd.DataFrame(predictions)
self._distributed_callbacks.after_predict(self, callback_predictions)
return predictions
@ray.remote
class _RemoteRayLightGBMActor(RayLightGBMActor):
pass
def _autodetect_resources(ray_params: RayParams,
use_tree_method: bool = False) -> Tuple[int, int]:
cpus_per_actor, gpus_per_actor = _autodetect_resources_base(
ray_params, use_tree_method)
if ray_params.cpus_per_actor <= 0:
cpus_per_actor = max(2, cpus_per_actor)
return cpus_per_actor, gpus_per_actor
def _create_actor(
rank: int,
num_actors: int,
model_factory: Type[LGBMModel],
num_cpus_per_actor: int,
num_gpus_per_actor: int,
resources_per_actor: Optional[Dict] = None,
placement_group: Optional[PlacementGroup] = None,
queue: Optional[Queue] = None,
checkpoint_frequency: int = 5,
distributed_callbacks: Optional[Sequence[DistributedCallback]] = None,
ip: Optional[str] = None,
port: Optional[int] = None,
) -> ActorHandle:
# If we have an IP passed, force the actor to be spawned on a node
# with that IP
if ip:
if resources_per_actor is not None:
resources_per_actor[f"node:{ip}"] = 0.01
else:
resources_per_actor = {f"node:{ip}": 0.01}
# Send DEFAULT_PG here, which changed in Ray > 1.4.0
# If we send `None`, this will ignore the parent placement group and
# lead to errors e.g. when used within Ray Tune
return _RemoteRayLightGBMActor.options(
num_cpus=num_cpus_per_actor,
num_gpus=num_gpus_per_actor,
resources=resources_per_actor,
placement_group_capture_child_tasks=True,
placement_group=placement_group or DEFAULT_PG).remote(
rank=rank,
num_actors=num_actors,
model_factory=model_factory,
queue=queue,
checkpoint_frequency=checkpoint_frequency,
distributed_callbacks=distributed_callbacks,
network_params={"local_listen_port": port} if port else None)
def _train(params: Dict,
dtrain: RayDMatrix,
model_factory: Type[LGBMModel],
boost_rounds_left: int,
*args,
evals=(),
ray_params: RayParams,
cpus_per_actor: int,
gpus_per_actor: int,
_training_state: _TrainingState,
machine_addresses: Optional[List[Tuple[str, str]]] = None,
listen_port: Optional[int] = None,
**kwargs) -> Tuple[LGBMModel, Dict, Dict]:
"""This is the local train function wrapped by :func:`train() <train>`.
    This function can be thought of as one invocation of a multi-actor LightGBM
training run. It starts the required number of actors, triggers data
loading, collects the results, and handles (i.e. registers) actor failures
- but it does not handle fault tolerance or general training setup.
Generally, this function is called one or multiple times by the
:func:`train() <train>` function. It is called exactly once if no
errors occur. It is called more than once if errors occurred (e.g. an
actor died) and failure handling is enabled.
"""
from xgboost_ray.elastic import _maybe_schedule_new_actors, \
_update_scheduled_actor_states, _get_actor_alive_status
# Un-schedule possible scheduled restarts
_training_state.restart_training_at = None
params = deepcopy(params)
if "n_jobs" in params:
if params["n_jobs"] > cpus_per_actor:
            raise ValueError(
                "Specified number of threads is greater than the number of "
                "CPUs per actor. "
"\nFIX THIS by passing a lower value for the `n_jobs` "
"parameter or a higher number for `cpus_per_actor`.")
else:
params["n_jobs"] = cpus_per_actor
_check_cpus_per_actor_at_least_2(
params["n_jobs"], getattr(ray_params, "allow_less_than_two_cpus",
False))
# This is a callback that handles actor failures.
# We identify the rank of the failed actor, add this to a set of
# failed actors (which we might want to restart later), and set its
# entry in the actor list to None.
def handle_actor_failure(actor_id):
rank = _training_state.actors.index(actor_id)
_training_state.failed_actor_ranks.add(rank)
_training_state.actors[rank] = None
# Here we create new actors. In the first invocation of _train(), this
# will be all actors. In future invocations, this may be less than
# the num_actors setting, depending on the failure mode.
newly_created = 0
for i in list(_training_state.failed_actor_ranks):
if _training_state.actors[i] is not None:
raise RuntimeError(
f"Trying to create actor with rank {i}, but it already "
f"exists.")
ip = None
port = None
if machine_addresses:
ip = machine_addresses[i][0]
port = machine_addresses[i][1]
elif listen_port:
port = listen_port
actor = _create_actor(
rank=i,
num_actors=ray_params.num_actors,
model_factory=model_factory,
num_cpus_per_actor=cpus_per_actor,
num_gpus_per_actor=gpus_per_actor,
resources_per_actor=ray_params.resources_per_actor,
placement_group=_training_state.placement_group,
queue=_training_state.queue,
checkpoint_frequency=ray_params.checkpoint_frequency,
distributed_callbacks=ray_params.distributed_callbacks,
ip=ip,
port=port)
# Set actor entry in our list
_training_state.actors[i] = actor
# Remove from this set so it is not created again
_training_state.failed_actor_ranks.remove(i)
newly_created += 1
alive_actors = sum(1 for a in _training_state.actors if a is not None)
logger.info(f"[RayLightGBM] Created {newly_created} new actors "
f"({alive_actors} total actors). Waiting until actors "
f"are ready for training.")
# For distributed datasets (e.g. Modin), this will initialize
# (and fix) the assignment of data shards to actor ranks
dtrain.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
dtrain.assign_shards_to_actors(_training_state.actors)
for deval, _ in evals:
deval.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
deval.assign_shards_to_actors(_training_state.actors)
load_data = [dtrain] + [eval[0] for eval in evals]
prepare_actor_tasks = [
_PrepareActorTask(
actor,
# Maybe we got a new Queue actor, so send it to all actors.
queue=_training_state.queue,
# Maybe we got a new Event actor, so send it to all actors.
stop_event=_training_state.stop_event,
# Trigger data loading
load_data=load_data) for actor in _training_state.actors
if actor is not None
]
start_wait = time.time()
last_status = start_wait
try:
        # Construct the list before calling all() to force evaluation
ready_states = [task.is_ready() for task in prepare_actor_tasks]
while not all(ready_states):
if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
wait_time = time.time() - start_wait
logger.info(f"Waiting until actors are ready "
f"({wait_time:.0f} seconds passed).")
last_status = time.time()
time.sleep(0.1)
ready_states = [task.is_ready() for task in prepare_actor_tasks]
except Exception as exc:
_training_state.stop_event.set()
_get_actor_alive_status(_training_state.actors, handle_actor_failure)
raise RayActorError from exc
logger.info("[RayLightGBM] Starting LightGBM training.")
# # Start Rabit tracker for gradient sharing
# rabit_process, env = _start_rabit_tracker(alive_actors)
# rabit_args = [("%s=%s" % item).encode() for item in env.items()]
# Load checkpoint if we have one. In that case we need to adjust the
# number of training rounds.
if _training_state.checkpoint.value:
booster = Booster(
model_str=pickle.loads(_training_state.checkpoint.value))
kwargs["init_model"] = booster
if _training_state.checkpoint.iteration == -1:
# -1 means training already finished.
            logger.error(
                "Trying to continue training from a checkpoint, but the "
                "checkpoint indicates training already finished. Returning "
                "the last checkpointed model instead.")
return kwargs["init_model"], {}, _training_state.additional_results
# The callback_returns dict contains actor-rank indexed lists of
# results obtained through the `put_queue` function, usually
# sent via callbacks.
callback_returns = _training_state.additional_results.get(
"callback_returns")
if callback_returns is None:
callback_returns = [list() for _ in range(len(_training_state.actors))]
_training_state.additional_results[
"callback_returns"] = callback_returns
_training_state.training_started_at = time.time()
# Trigger the train function
live_actors = [
actor for actor in _training_state.actors if actor is not None
]
# LightGBM specific: handle actor addresses
    # if neither local_listen_port nor machines is set
# get the ips and a random port from the actors, and then
# assign them back so the lgbm params are updated.
# do this in a loop to ensure that if there is a port
    # conflict, it can try and choose a new one. Most of the time
    # it will complete in one iteration.
machines = None
    for i in range(5):
addresses = ray.get(
[actor.find_free_address.remote() for actor in live_actors])
if addresses:
_, ports = zip(*addresses)
ports = list(ports)
machine_addresses_new = [f"{ip}:{port}" for ip, port in addresses]
if len(machine_addresses_new) == len(set(machine_addresses_new)):
machines = ",".join(machine_addresses_new)
break
if machine_addresses:
                raise ValueError(
                    "Machine addresses contain non-unique entries.")
else:
logger.debug("Couldn't obtain unique addresses, trying again.")
if machines:
        logger.debug(f"Obtained unique addresses in {i + 1} attempt(s).")
else:
        raise ValueError(
            f"Couldn't obtain enough unique addresses for "
            f"{len(live_actors)} actors. "
            "Try reducing the number of actors.")
for i, actor in enumerate(live_actors):
actor.set_network_params.remote(machines, ports[i], len(live_actors),
params.get("time_out", 120))
training_futures = [
actor.train.remote(
i == 0, # return_bst
params,
dtrain,
evals,
boost_rounds_left,
*args,
**kwargs) for i, actor in enumerate(live_actors)
]
# Failure handling loop. Here we wait until all training tasks finished.
# If a training task fails, we stop training on the remaining actors,
# check which ones are still alive, and raise the error.
# The train() wrapper function will then handle the error.
start_wait = time.time()
last_status = start_wait
try:
not_ready = training_futures
while not_ready:
if _training_state.queue:
_handle_queue(
queue=_training_state.queue,
checkpoint=_training_state.checkpoint,
callback_returns=callback_returns)
if ray_params.elastic_training \
and not ELASTIC_RESTART_DISABLED:
_maybe_schedule_new_actors(
training_state=_training_state,
num_cpus_per_actor=cpus_per_actor,
num_gpus_per_actor=gpus_per_actor,
resources_per_actor=ray_params.resources_per_actor,
ray_params=ray_params,
load_data=load_data)
# This may raise RayXGBoostActorAvailable
_update_scheduled_actor_states(_training_state)
if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
wait_time = time.time() - start_wait
logger.info(f"Training in progress "
f"({wait_time:.0f} seconds since last restart).")
last_status = time.time()
ready, not_ready = ray.wait(
not_ready, num_returns=len(not_ready), timeout=1)
ray.get(ready)
# Get items from queue one last time
if _training_state.queue:
_handle_queue(
queue=_training_state.queue,
checkpoint=_training_state.checkpoint,
callback_returns=callback_returns)
# The inner loop should catch all exceptions
except Exception as exc:
logger.debug(f"Caught exception in training loop: {exc}")
# Stop all other actors from training
_training_state.stop_event.set()
# Check which actors are still alive
_get_actor_alive_status(_training_state.actors, handle_actor_failure)
raise RayActorError from exc
# Training is now complete.
# # Stop Rabit tracking process
# _stop_rabit_tracker(rabit_process)
# Get all results from all actors.
all_results: List[Dict[str, Any]] = ray.get(training_futures)
# All results should be the same. But only
# the first one actually returns its bst object.
bst: LGBMModel = all_results[0]["bst"]
evals_result = all_results[0]["evals_result"]
if not listen_port:
for param in _ConfigAliases.get("local_listen_port"):
bst._other_params.pop(param, None)
if not machine_addresses:
for param in _ConfigAliases.get("machines"):
bst._other_params.pop(param, None)
for param in _ConfigAliases.get("num_machines", "time_out"):
bst._other_params.pop(param, None)
if callback_returns:
_training_state.additional_results[
"callback_returns"] = callback_returns
total_n = sum(res["train_n"] or 0 for res in all_results)
_training_state.additional_results["total_n"] = total_n
return bst, evals_result, _training_state.additional_results
@PublicAPI(stability="beta")
def train(
params: Dict,
dtrain: RayDMatrix,
model_factory: Type[LGBMModel] = LGBMModel,
num_boost_round: int = 10,
*args,
valid_sets: Optional[List[RayDMatrix]] = None,
valid_names: Optional[List[str]] = None,
verbose_eval: Union[bool, int] = True,
evals: Union[List[Tuple[RayDMatrix, str]], Tuple[RayDMatrix, str]] = (
),
evals_result: Optional[Dict] = None,
additional_results: Optional[Dict] = None,
ray_params: Union[None, RayParams, Dict] = None,
_remote: Optional[bool] = None,
**kwargs) -> LGBMModel:
"""Distributed LightGBM training via Ray.
This function will connect to a Ray cluster, create ``num_actors``
    remote actors, send data shards to them, and have them train a
LightGBM model using LightGBM's built-in distributed mode.
This method handles setting up the following network parameters:
- ``local_listen_port``: port that each LightGBM worker opens a
listening socket on, to accept connections from other workers.
This can differ from LightGBM worker to LightGBM worker, but
does not have to.
- ``machines``: a comma-delimited list of all workers in the cluster,
in the form ``ip:port,ip:port``. If running multiple workers
on the same Ray Node, use different ports for each worker. For
example, for ``ray_params.num_actors=3``, you might pass
``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
The default behavior of this function is to generate ``machines`` based
on Ray workers, and to search for an open port on each worker to be
used as ``local_listen_port``.
If ``machines`` is provided explicitly in ``params``, this function uses
the hosts and ports in that list directly, and will try to start Ray
workers on the nodes with the given ips. If that is not possible, or any
of those ports are not free when training starts, training will fail.
If ``local_listen_port`` is provided in ``params`` and ``machines`` is not,
this function constructs ``machines`` automatically from auto-assigned Ray
workers, assuming that each one will use the same ``local_listen_port``.
Failure handling:
LightGBM on Ray supports automatic failure handling that can be configured
with the :class:`ray_params <RayParams>` argument. If an actor or local
training task dies, the Ray actor is marked as dead and
the number of restarts is below ``ray_params.max_actor_restarts``,
Ray will try to schedule the dead actor again, load the data shard
on this actor, and then continue training from the latest checkpoint.
Otherwise, training is aborted.
Args:
params (Dict): parameter dict passed to ``LGBMModel``
dtrain (RayDMatrix): Data object containing the training data.
        model_factory (Type[LGBMModel]): Model class to use for training.
valid_sets (Optional[List[RayDMatrix]]):
List of data to be evaluated on during training.
Mutually exclusive with ``evals``.
        valid_names (Optional[List[str]]):
Names of ``valid_sets``.
evals (Union[List[Tuple[RayDMatrix, str]], Tuple[RayDMatrix, str]]):
``evals`` tuple passed to ``LGBMModel.fit()``.
Mutually exclusive with ``valid_sets``.
evals_result (Optional[Dict]): Dict to store evaluation results in.
verbose_eval (Union[bool, int]):
            Requires at least one validation set.
If True, the eval metric on the valid set is printed at each
boosting stage.
If int, the eval metric on the valid set is printed at every
``verbose_eval`` boosting stage.
The last boosting stage or the boosting stage found by using
``early_stopping_rounds`` is also printed.
With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
an evaluation metric is printed every 4 (instead of 1) boosting
stages.
additional_results (Optional[Dict]): Dict to store additional results.
ray_params (Union[None, RayParams, Dict]): Parameters to configure
Ray-specific behavior. See :class:`RayParams` for a list of valid
configuration parameters.
_remote (bool): Whether to run the driver process in a remote
function. This is enabled by default in Ray client mode.
**kwargs: Keyword arguments will be passed to the local
`model_factory.fit()` calls.
Returns: An ``LGBMModel`` object.
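    Example:
        A minimal usage sketch. The feature matrix ``X``, the labels ``y``
        and all parameter values below are illustrative assumptions, not
        defaults of this module:

        .. code-block:: python

            from xgboost_ray import RayDMatrix

            train_set = RayDMatrix(data=X, label=y)
            evals_result = {}
            bst = train(
                {"objective": "regression"},
                train_set,
                evals_result=evals_result,
                valid_sets=[train_set],
                valid_names=["train"],
                ray_params=RayParams(num_actors=2, cpus_per_actor=2))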
"""
os.environ.setdefault("RAY_IGNORE_UNHANDLED_ERRORS", "1")
if _remote is None:
_remote = _is_client_connected() and \
not is_session_enabled()
if not ray.is_initialized():
ray.init()
if _remote:
# Run this function as a remote function to support Ray client mode.
@ray.remote(num_cpus=0)
def _wrapped(*args, **kwargs):
_evals_result = {}
_additional_results = {}
bst = train(
*args,
model_factory=model_factory,
num_boost_round=num_boost_round,
evals_result=_evals_result,
additional_results=_additional_results,
verbose_eval=verbose_eval,
**kwargs)
return bst, _evals_result, _additional_results
# Make sure that train is called on the server node.
_wrapped = force_on_current_node(_wrapped)
bst, train_evals_result, train_additional_results = ray.get(
_wrapped.remote(
params,
dtrain,
*args,
valid_sets=valid_sets,
valid_names=valid_names,
evals=evals,
ray_params=ray_params,
_remote=False,
**kwargs,
))
if isinstance(evals_result, dict):
evals_result.update(train_evals_result)
if isinstance(additional_results, dict):
additional_results.update(train_additional_results)
return bst
start_time = time.time()
ray_params = _validate_ray_params(ray_params)
params = params.copy()
if evals and valid_sets:
raise ValueError(
"Specifying both `evals` and `valid_sets` is ambiguous.")
if kwargs.get("early_stopping_rounds", None) is not None:
raise RuntimeError(
"early_stopping_rounds is not currently supported in "
"lightgbm-ray")
# LightGBM specific - capture whether local_listen_port or its aliases
# were provided
listen_port_in_params = any(
alias in params for alias in _ConfigAliases.get("local_listen_port"))
# LightGBM specific - capture whether machines or its aliases
# were provided
machines_in_params = any(
alias in params for alias in _ConfigAliases.get("machines"))
    # LightGBM specific - validate machines and local_listen_port
machine_addresses = None
listen_port = None
if machines_in_params:
params = _choose_param_value(
main_param_name="machines", params=params, default_value=None)
machines = params["machines"]
machine_addresses = machines.split(",")
if len(set(machine_addresses)) != len(machine_addresses):
raise ValueError(
f"Found duplicates in `machines` ({machines}). Each entry in "
"`machines` must be a unique IP-port combination.")
if len(machine_addresses) != ray_params.num_actors:
raise ValueError(
f"`num_actors` in `ray_params` ({ray_params.num_actors}) must "
"match the number of IP-port combinations in `machines` "
f"({len(machine_addresses)}).")
logger.info(f"Using user passed machines {machine_addresses}")
if listen_port_in_params:
params = _choose_param_value(
main_param_name="local_listen_port",
params=params,
default_value=None)
listen_port = params["local_listen_port"]
logger.info(f"Using user passed local_listen_port {listen_port}")
max_actor_restarts = ray_params.max_actor_restarts \
if ray_params.max_actor_restarts >= 0 else float("inf")
_assert_ray_support()
if not isinstance(dtrain, RayDMatrix):
raise ValueError(
"The `dtrain` argument passed to `train()` is not a RayDMatrix, "
"but of type {}. "
"\nFIX THIS by instantiating a RayDMatrix first: "
"`dtrain = RayDMatrix(data=data, label=label)`.".format(
type(dtrain)))
added_tune_callback = _try_add_tune_callback(kwargs)
# LightGBM currently does not support elastic training.
if ray_params.elastic_training:
raise ValueError("Elastic Training cannot be used with LightGBM. "
"Please disable elastic_training in `ray_params` "
"in order to use LightGBM-Ray.")
params = _choose_param_value(
main_param_name="tree_learner", params=params, default_value="data")
params = _choose_param_value(
main_param_name="device_type", params=params, default_value="cpu")
if added_tune_callback:
# Don't autodetect resources when used with Tune.
cpus_per_actor = ray_params.cpus_per_actor
gpus_per_actor = max(0, ray_params.gpus_per_actor)
else:
cpus_per_actor, gpus_per_actor = _autodetect_resources(
ray_params=ray_params,
use_tree_method="device_type" in params
and params["device_type"] is not None
and params["device_type"] != "cpu")
allowed_tree_learners = {
"data", "data_parallel", "voting", "voting_parallel"
# not yet supported in LightGBM python API
# (as of ver 3.2.1)
# "feature", "feature_parallel",
}
if params["tree_learner"] not in allowed_tree_learners:
warnings.warn(
f"Parameter tree_learner set to {params['tree_learner']},"
" which is not allowed. Using 'data' as default")
params["tree_learner"] = "data"
for param_alias in _ConfigAliases.get("num_machines", "num_threads",
"num_iterations", "n_estimators"):
if param_alias in params:
warnings.warn(f"Parameter {param_alias} will be ignored.")
params.pop(param_alias)
if not verbose_eval and not any(
verbose_alias in params
for verbose_alias in _ConfigAliases.get("verbosity")):
params["verbose"] = -1
if gpus_per_actor > 0 and params["device_type"] == "cpu":
        warnings.warn(
            "GPUs have been assigned to the actors, but the current LightGBM "
            "device type is set to 'cpu'. Thus, GPUs will "
            "currently not be used. To enable GPU usage, please set the "
"`device_type` to a GPU-compatible option, "
"e.g. `gpu`.")
if gpus_per_actor == 0 and cpus_per_actor == 0:
        raise ValueError("cpus_per_actor and gpus_per_actor cannot both be "
"0. Are you sure your cluster has CPUs available?")
if ray_params.elastic_training and ray_params.max_failed_actors == 0:
raise ValueError(
"Elastic training enabled but the maximum number of failed "
"actors is set to 0. This means that elastic training is "
"effectively disabled. Please set `RayParams.max_failed_actors` "
"to something larger than 0 to enable elastic training.")
if ray_params.elastic_training and ray_params.max_actor_restarts == 0:
raise ValueError(
"Elastic training enabled but the maximum number of actor "
"restarts is set to 0. This means that elastic training is "
"effectively disabled. Please set `RayParams.max_actor_restarts` "
"to something larger than 0 to enable elastic training.")
if not dtrain.has_label:
raise ValueError(
"Training data has no label set. Please make sure to set "
"the `label` argument when initializing `RayDMatrix()` "
"for data you would like to train on.")
if not dtrain.loaded and not dtrain.distributed:
dtrain.load_data(ray_params.num_actors)
if valid_sets is not None:
evals = []
if isinstance(valid_sets, RayDMatrix):
valid_sets = [valid_sets]
if isinstance(valid_names, str):
valid_names = [valid_names]
for i, valid_data in enumerate(valid_sets):
if valid_names is not None and len(valid_names) > i:
evals.append((valid_data, valid_names[i]))
else:
evals.append((valid_data, f"valid_{i}"))
if evals:
for (deval, _name) in evals:
if not isinstance(deval, RayDMatrix):
raise ValueError("Evaluation data must be a `RayDMatrix`, got "
f"{type(deval)}.")
if not deval.has_label:
raise ValueError(
"Evaluation data has no label set. Please make sure to set"
" the `label` argument when initializing `RayDMatrix()` "
"for data you would like to evaluate on.")
if not deval.loaded and not deval.distributed:
deval.load_data(ray_params.num_actors)
bst = None
train_evals_result = {}
train_additional_results = {}
tries = 0
checkpoint = _Checkpoint() # Keep track of latest checkpoint
current_results = {} # Keep track of additional results
actors = [None] * ray_params.num_actors # All active actors
pending_actors = {}
# Create the Queue and Event actors.
queue, stop_event = _create_communication_processes(added_tune_callback)
placement_strategy = None
if not ray_params.elastic_training:
if added_tune_callback:
if TUNE_USING_PG:
# If Tune is using placement groups, then strategy has already
# been set. Don't create an additional placement_group here.
placement_strategy = None
else:
placement_strategy = "PACK"
elif ENV.USE_SPREAD_STRATEGY:
placement_strategy = "SPREAD"
if placement_strategy is not None:
pg = _create_placement_group(cpus_per_actor, gpus_per_actor,
ray_params.resources_per_actor,
ray_params.num_actors, placement_strategy)
else:
pg = None
start_actor_ranks = set(range(ray_params.num_actors)) # Start these
total_training_time = 0.
boost_rounds_left = num_boost_round
while tries <= max_actor_restarts:
if checkpoint.iteration >= 0:
# LightGBM specific - different boost_rounds_left calculation
boost_rounds_left = num_boost_round - checkpoint.iteration
logger.debug(f"Boost rounds left: {boost_rounds_left}")
training_state = _TrainingState(
actors=actors,
queue=queue,
stop_event=stop_event,
checkpoint=checkpoint,
additional_results=current_results,
training_started_at=0.,
placement_group=pg,
failed_actor_ranks=start_actor_ranks,
pending_actors=pending_actors)
try:
bst, train_evals_result, train_additional_results = _train(
params,
dtrain,
model_factory,
boost_rounds_left,
*args,
evals=evals,
ray_params=ray_params,
cpus_per_actor=cpus_per_actor,
gpus_per_actor=gpus_per_actor,
_training_state=training_state,
machine_addresses=machine_addresses,
listen_port=listen_port,
**kwargs)
if training_state.training_started_at > 0.:
total_training_time += time.time(
) - training_state.training_started_at
break
except (RayActorError, RayTaskError) as exc:
if training_state.training_started_at > 0.:
total_training_time += time.time(
) - training_state.training_started_at
alive_actors = sum(1 for a in actors if a is not None)
start_again = False
if ray_params.elastic_training:
if alive_actors < ray_params.num_actors - \
ray_params.max_failed_actors:
raise RuntimeError(
"A Ray actor died during training and the maximum "
"number of dead actors in elastic training was "
"reached. Shutting down training.") from exc
# Do not start new actors before resuming training
# (this might still restart actors during training)
start_actor_ranks.clear()
if exc.__cause__ and isinstance(exc.__cause__,
RayXGBoostActorAvailable):
# New actor available, integrate into training loop
logger.info(
f"A new actor became available. Re-starting training "
f"from latest checkpoint with new actor. "
f"This will use {alive_actors} existing actors and "
f"start {len(start_actor_ranks)} new actors. "
f"Sleeping for 10 seconds for cleanup.")
                tries -= 1  # This is deliberate, so it shouldn't count
start_again = True
elif tries + 1 <= max_actor_restarts:
if exc.__cause__ and isinstance(exc.__cause__,
RayXGBoostTrainingError):
logger.warning(f"Caught exception: {exc.__cause__}")
logger.warning(
f"A Ray actor died during training. Trying to "
f"continue training on the remaining actors. "
f"This will use {alive_actors} existing actors and "
f"start {len(start_actor_ranks)} new actors. "
f"Sleeping for 10 seconds for cleanup.")
start_again = True
elif tries + 1 <= max_actor_restarts:
if exc.__cause__ and isinstance(exc.__cause__,
RayXGBoostTrainingError):
logger.warning(f"Caught exception: {exc.__cause__}")
logger.warning(
f"A Ray actor died during training. Trying to restart "
f"and continue training from last checkpoint "
f"(restart {tries + 1} of {max_actor_restarts}). "
f"This will use {alive_actors} existing actors and start "
f"{len(start_actor_ranks)} new actors. "
f"Sleeping for 10 seconds for cleanup.")
start_again = True
if start_again:
time.sleep(5)
queue.shutdown()
stop_event.shutdown()
gc.collect()
time.sleep(5)
queue, stop_event = _create_communication_processes()
else:
raise RuntimeError(
f"A Ray actor died during training and the maximum number "
f"of retries ({max_actor_restarts}) is exhausted."
) from exc
tries += 1
total_time = time.time() - start_time
train_additional_results["training_time_s"] = total_training_time
train_additional_results["total_time_s"] = total_time
logger.info("[RayLightGBM] Finished LightGBM training on training data "
"with total N={total_n:,} in {total_time_s:.2f} seconds "
"({training_time_s:.2f} pure LightGBM training time).".format(
**train_additional_results))
_shutdown(
actors=actors,
pending_actors=pending_actors,
queue=queue,
event=stop_event,
placement_group=pg,
force=False)
if isinstance(evals_result, dict):
evals_result.update(train_evals_result)
if isinstance(additional_results, dict):
additional_results.update(train_additional_results)
return bst
def _predict(model: LGBMModel, data: RayDMatrix, method: str,
ray_params: RayParams, **kwargs):
_assert_ray_support()
if not ray.is_initialized():
ray.init()
# Create remote actors
actors = [
_create_actor(
rank=i,
num_actors=ray_params.num_actors,
model_factory=None,
num_cpus_per_actor=ray_params.cpus_per_actor,
num_gpus_per_actor=ray_params.gpus_per_actor
if ray_params.gpus_per_actor >= 0 else 0,
resources_per_actor=ray_params.resources_per_actor,
distributed_callbacks=ray_params.distributed_callbacks)
for i in range(ray_params.num_actors)
]
logger.info(f"[RayLightGBM] Created {len(actors)} remote actors.")
# Split data across workers
wait_load = []
for actor in actors:
wait_load.extend(_trigger_data_load(actor, data, []))
try:
ray.get(wait_load)
except Exception as exc:
        logger.warning(
            f"Caught an error while loading prediction data: {str(exc)}")
_shutdown(actors, force=True)
raise
# Put model into object store
model_ref = ray.put(model)
logger.info("[RayLightGBM] Starting LightGBM prediction.")
    # Predict
fut = [
actor.predict.remote(model_ref, data, method, **kwargs)
for actor in actors
]
try:
actor_results = ray.get(fut)
except Exception as exc:
logger.warning(f"Caught an error during prediction: {str(exc)}")
_shutdown(actors=actors, force=True)
raise
_shutdown(actors=actors, force=False)
return combine_data(data.sharding, actor_results)
@PublicAPI(stability="beta")
def predict(model: Union[LGBMModel, Booster],
data: RayDMatrix,
method: str = "predict",
ray_params: Union[None, RayParams, Dict] = None,
_remote: Optional[bool] = None,
**kwargs) -> Optional[np.ndarray]:
"""Distributed LightGBM predict via Ray.
This function will connect to a Ray cluster, create ``num_actors``
remote actors, send data shards to them, and have them predict labels
    using a LightGBM model. The results are then combined and
returned.
Args:
model (Union[LGBMModel, Booster]): Model or booster object to
call for prediction.
data (RayDMatrix): Data object containing the prediction data.
method (str): Name of estimator method to use for prediction.
ray_params (Union[None, RayParams, Dict]): Parameters to configure
Ray-specific behavior. See :class:`RayParams` for a list of valid
configuration parameters.
_remote (bool): Whether to run the driver process in a remote
function. This is enabled by default in Ray client mode.
**kwargs: Keyword arguments will be passed to the local
            `model.predict()` calls.
Returns: ``np.ndarray`` containing the predicted labels.
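    Example:
        A minimal usage sketch; ``bst`` is assumed to be a previously
        trained ``LGBMModel`` and ``X`` an illustrative feature matrix:

        .. code-block:: python

            from xgboost_ray import RayDMatrix

            pred_set = RayDMatrix(data=X)
            preds = predict(
                bst, pred_set, ray_params=RayParams(num_actors=2))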
"""
os.environ.setdefault("RAY_IGNORE_UNHANDLED_ERRORS", "1")
if _remote is None:
_remote = _is_client_connected() and \
not is_session_enabled()
if not ray.is_initialized():
ray.init()
if _remote:
return ray.get(
ray.remote(num_cpus=0)(predict).remote(
model, data, method, ray_params, _remote=False, **kwargs))
_maybe_print_legacy_warning()
ray_params = _validate_ray_params(ray_params)
max_actor_restarts = ray_params.max_actor_restarts \
if ray_params.max_actor_restarts >= 0 else float("inf")
_assert_ray_support()
if not isinstance(data, RayDMatrix):
raise ValueError(
"The `data` argument passed to `predict()` is not a RayDMatrix, "
"but of type {}. "
"\nFIX THIS by instantiating a RayDMatrix first: "
"`data = RayDMatrix(data=data)`.".format(type(data)))
tries = 0
while tries <= max_actor_restarts:
try:
return _predict(
model, data, method=method, ray_params=ray_params, **kwargs)
except RayActorError:
if tries + 1 <= max_actor_restarts:
logger.warning(
"A Ray actor died during prediction. Trying to restart "
"prediction from scratch. "
"Sleeping for 10 seconds for cleanup.")
time.sleep(10)
else:
raise RuntimeError(
"A Ray actor died during prediction and the maximum "
"number of retries ({}) is exhausted.".format(
max_actor_restarts))
tries += 1
return None
import xlsxwriter
class Writer:
    """Thin wrapper around an xlsxwriter workbook for writing result rows."""
    def __init__(self, file, name):
        self.excelFile = xlsxwriter.Workbook(file)
        self.worksheet = self.excelFile.add_worksheet(name)
        self.row = 0
        self.col = 0
    def close(self):
        # Finalize and write the workbook to disk.
        self.excelFile.close()
    def write(self, identify, title, score):
        # Write one (identifier, title, score) row and advance the row cursor.
        self.worksheet.write(self.row, self.col, identify)
        self.worksheet.write(self.row, self.col + 1, title)
        self.worksheet.write(self.row, self.col + 2, score)
        self.row += 1
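# A minimal, illustrative usage sketch (the file name and values below are
# assumptions, not taken from this module):
#
#     writer = Writer("scores.xlsx", "results")
#     writer.write("id-1", "Some title", 0.87)
#     writer.close()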
n1 = int(input('Enter a value: '))
n2 = int(input('Enter another value: '))
s = n1 + n2
print('The sum of {} and {} is: {}'.format(n1, n2, s))
| python |
import logging
import time
import alsaaudio
import webrtcvad
from .exceptions import ConfigurationException
logger = logging.getLogger(__name__)
class Capture(object):
MAX_RECORDING_LENGTH = 8
VAD_SAMPLERATE = 16000
VAD_FRAME_MS = 30
VAD_PERIOD = int((VAD_SAMPLERATE / 1000) * VAD_FRAME_MS)
VAD_SILENCE_TIMEOUT = 1000
VAD_THROWAWAY_FRAMES = 10
_vad = None
_config = None
_tmp_path = None
_state_callback = None
def __init__(self, config, tmp_path):
self._config = config
self._tmp_path = tmp_path
self.validate_config()
def validate_config(self):
input_device = self._config['sound']['input_device']
input_devices = alsaaudio.pcms(alsaaudio.PCM_CAPTURE)
if (input_device not in input_devices) and (not self._config['sound']['allow_unlisted_input_device']):
raise ConfigurationException(
"Your input_device '" + input_device + "' is invalid. Use one of the following:\n"
+ '\n'.join(input_devices))
def setup(self, state_callback):
self._vad = webrtcvad.Vad(2)
self._state_callback = state_callback
def silence_listener(self, throwaway_frames=None, force_record=None):
throwaway_frames = throwaway_frames or self.VAD_THROWAWAY_FRAMES
logger.debug("Setting up recording")
# Reenable reading microphone raw data
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, self._config['sound']['input_device'])
inp.setchannels(1)
inp.setrate(self.VAD_SAMPLERATE)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
inp.setperiodsize(self.VAD_PERIOD)
debug = logging.getLogger('alexapi').getEffectiveLevel() == logging.DEBUG
logger.debug("Start recording")
if self._state_callback:
self._state_callback()
def _listen():
start = time.time()
do_VAD = True
if force_record and not force_record[1]:
do_VAD = False
# Buffer as long as we haven't heard enough silence or the total size is within max size
thresholdSilenceMet = False
frames = 0
numSilenceRuns = 0
silenceRun = 0
if debug:
audio = b''
if do_VAD:
# do not count first 10 frames when doing VAD
while frames < throwaway_frames:
length, data = inp.read()
frames += 1
if length:
yield data
if debug:
audio += data
# now do VAD
while (force_record and force_record[0]()) \
or (do_VAD and (thresholdSilenceMet is False) and ((time.time() - start) < self.MAX_RECORDING_LENGTH)):
length, data = inp.read()
if length:
yield data
if debug:
audio += data
if do_VAD and (length == self.VAD_PERIOD):
isSpeech = self._vad.is_speech(data, self.VAD_SAMPLERATE)
if not isSpeech:
silenceRun += 1
else:
silenceRun = 0
numSilenceRuns += 1
if do_VAD:
# only count silence runs after the first one
# (allow user to speak for total of max recording length if they haven't said anything yet)
if (numSilenceRuns != 0) and ((silenceRun * self.VAD_FRAME_MS) > self.VAD_SILENCE_TIMEOUT):
thresholdSilenceMet = True
logger.debug("End recording")
inp.close()
if self._state_callback:
self._state_callback(False)
if debug:
with open(self._tmp_path + 'recording.wav', 'wb') as rf:
rf.write(audio)
return _listen()
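# A minimal, illustrative usage sketch; the config keys mirror the ones read
# above and all values are assumptions:
#
#     config = {'sound': {'input_device': 'default',
#                         'allow_unlisted_input_device': True}}
#     capture = Capture(config, '/tmp/')
#     capture.setup(state_callback=lambda recording=True: None)
#     for frame in capture.silence_listener():
#         pass  # consume raw 16 kHz, 16-bit mono PCM frames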
| python |