content (stringlengths 0 to 894k) | type (stringclasses, 2 values) |
---|---|
class Solution:
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
lenh = len(haystack)
lenn = len(needle)
for i in range(lenh-lenn+1):
if haystack[i:i+lenn] == needle:
return i
return -1
# return haystack.find(needle)
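# Example (illustrative): Solution().strStr("hello", "ll") returns 2; -1 is returned when needle is not found.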
|
python
|
######################################################################
# controller - deals with the UI concerns
# 1. navigation
# 2. preparing data elements in ui way for the screens
#
# It will not be referring to the business domain objects
# - it will use the bl component to deal with the business logic
######################################################################
import flask
import sys
import datetime
import traceback
from flask import send_file
from core.constants import _DATE_STR_DISPLAY_FORMAT_
from factory import XManFactory
from core.timer import Timer
# all app level variables
__version__=1.0
__author__='Ramakrishnan Jayachandran'
__appname__='XMAN (eXpense MANager) v1.0'
# Flask initialisation
app = flask.Flask( __name__ )
#######################################
## This section contains all the code
## related to just navigation to other
## pages in the system
#######################################
# This is the index page or the home page for the App
@app.route( '/', methods = [ 'GET'] )
def index_page() -> str:
with Timer( 'index_page') as stime:
summary = getExpenseSummary()
return flask.render_template( 'index.html', the_title=__appname__, summary=summary )
# redirection to input screen for expense - and build necessary objects for it
@app.route( '/expense_input', methods = [ 'GET' ] )
def expense_input() -> str :
with Timer( 'expense_input' ) as start_time:
summary = getExpenseSummary()
# constants for accessing tuple with some readability
_EXPENSE_TYPES_ : int = 0
_PEOPLE_ : int = 1
_STORES_ : int = 2
_PAYMENT_MODE_ : int = 3
ui_objects : tuple = factory_object.getBusinessLayer().prepareExpenseInput()
## TODO: add code here to navigate to expense_input page
return flask.render_template( 'expense_input.html', the_title=__appname__ , summary=summary, \
short_names=ui_objects[ _PEOPLE_ ], store_names=ui_objects[ _STORES_ ], \
payment_types=ui_objects[ _PAYMENT_MODE_], expense_types=ui_objects[ _EXPENSE_TYPES_ ] )
# expense category redirection to the input screen
@app.route( '/expense_category_input', methods = [ 'GET' ] )
def expense_category_input() -> str :
summary = getExpenseSummary()
return flask.render_template( 'expense_category_input.html', the_title=__appname__, summary=summary )
@app.route( '/store_input', methods = [ 'GET' ] )
def store_input() -> str :
summary = getExpenseSummary()
return flask.render_template( 'store_input.html', the_title=__appname__, summary=summary )
@app.route( '/payment_type_input', methods = [ 'GET' ] )
def payment_type_input() -> str :
summary = getExpenseSummary()
return flask.render_template( 'payment_type_input.html', the_title=__appname__, summary=summary )
@app.route( '/person_input', methods = [ 'GET' ] )
def person_input() -> str :
summary = getExpenseSummary()
return flask.render_template( 'person_input.html', the_title=__appname__, summary=summary )
#######################################
## This section contains all the code
## related to just backend operations
## and then subsequent navigations
#######################################
# All Add flows go here ...
@app.route( '/expense_add', methods=['POST'] )
def add_expense() -> str :
summary = getExpenseSummary()
try:
factory_object.getBusinessLayer().addExpense( flask.request.form[ 'exp_type' ], flask.request.form[ 'exp_detail' ], datetime.datetime.strptime( flask.request.form[ 'exp_date' ], '%Y-%m-%d' ) , float( flask.request.form[ 'exp_amount' ]), flask.request.form[ 'payment_type' ], flask.request.form[ 'store_name' ], flask.request.form[ 'short_name' ])
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed adding expense information', \
error_action = 'Please reenter the expense data and try again',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/expense_category_add', methods=['POST'] )
def add_expense_category() -> str :
print( 'add_expense_category')
summary = getExpenseSummary()
expense_type : str = flask.request.form[ 'expense_type' ]
expense_detail : str = flask.request.form[ 'expense_type_detail' ]
try:
factory_object.getBusinessLayer().addExpenseCategory( expense_type, expense_detail )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed adding expense category', \
error_action = 'Please reenter the Expense category - make sure it is not a duplicate', summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/store_add', methods=['POST'] )
def add_store() -> str :
summary = getExpenseSummary()
try:
factory_object.getBusinessLayer().addStore( flask.request.form[ 'store_name' ], flask.request.form[ 'store_detail' ], flask.request.form[ 'home_delivery' ] )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed adding store data', \
error_action = 'Please reenter the Store data and make sure it is not a duplicate',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/payment_type_add', methods=['POST'] )
def add_payment_type() -> str :
summary = getExpenseSummary()
try:
factory_object.getBusinessLayer().addPaymentType( flask.request.form[ 'payment_mode' ], flask.request.form[ 'payment_mode_detail' ] )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed adding payment type data', \
error_action = 'Please reenter the payment type data and make sure it is not a duplicate',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/person_add', methods=['POST'] )
def add_person() -> str :
summary = getExpenseSummary()
try:
factory_object.getBusinessLayer().addPerson( flask.request.form[ 'person_first_name' ], flask.request.form[ 'person_last_name' ], flask.request.form[ 'person_short_name' ] )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed adding person data', \
error_action = 'Please reenter the person data and make sure the short name is not a duplicate',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
# All list flows go here ...
@app.route( '/expenses_list', methods = [ 'GET'] )
def list_expenses() -> str :
print( 'list_expenses' )
summary = getExpenseSummary()
expenses : list = factory_object.getBusinessLayer().listExpenses()
ui_header = [ 'ID', 'Expense Detail', 'Expense Date', 'Amount', 'Spent by', 'Store', 'Expense Type', 'Payment mode' ]
ui_data : list = [ (e.getId(), e.getExpenseDetail(), e.getExpenseDate().strftime( _DATE_STR_DISPLAY_FORMAT_ ) , e.getExpenseAmount(), \
e.getPerson().getShortName(), e.getStore().getStoreName(), \
e.getExpenseCategory().getExpenseType(), e.getPaymentType().getPaymentMode()) for e in expenses ]
# Generate the csv file for future use
csv_rows = []
csv_rows.append( ui_header )
for row in ui_data:
csv_rows.append( [ c for c in row ])
factory_object.getCSVGenerator().generateFile( 'all_expenses.csv', csv_rows )
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module='expense', download=True )
@app.route( '/expense_categories_list', methods = [ 'GET'] )
def list_expense_categories() -> str :
print( 'list_expense_categories' )
summary = getExpenseSummary()
expense_categories : list = factory_object.getBusinessLayer().listExpenseCategories()
ui_header = ( 'Id', 'Expense Type', 'Expense Detail' )
ui_data : list = [ ( ec.getId(), ec.getExpenseType(), ec.getExpenseDetail()) for ec in expense_categories ]
mode = flask.request.args.get( 'mode' )
if mode == 'popup':
return flask.render_template( 'list_data_popup.html', the_title=__appname__, \
the_header = ui_header, summary=summary, the_data = ui_data, module = None )
else:
return flask.render_template( 'list_data_page.html', the_title=__appname__, summary=summary, \
the_header = ui_header, the_data = ui_data, module = 'expense_category' )
@app.route( '/stores_list', methods = [ 'GET'] )
def list_stores() -> str :
summary = getExpenseSummary()
stores : list = factory_object.getBusinessLayer().listStores()
ui_header = ('ID', 'Store Name', 'Store Detail', 'Home Delivery ?' )
ui_data : list = [ (st.getId(), st.getStoreName(), st.getStoreDetail(), ('Y' if st.getHomeDelivery() else 'N') ) for st in stores ]
mode = flask.request.args.get( 'mode' )
if mode == 'popup':
return flask.render_template( 'list_data_popup.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module=None)
else:
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module='store' )
@app.route( '/payment_type_list', methods = [ 'GET'] )
def list_payment_types() -> str :
summary = getExpenseSummary()
payment_modes : list = factory_object.getBusinessLayer().listPaymentTypes()
ui_header = ('ID', 'Payment Mode', 'Payment Mode Detail' )
ui_data : list = [ (p.getId(), p.getPaymentMode(), p.getPaymentModeDetail() ) for p in payment_modes ]
mode = flask.request.args.get( 'mode' )
if mode == 'popup':
return flask.render_template( 'list_data_popup.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module=None)
else:
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module='payment_type' )
@app.route( '/person_list', methods = [ 'GET'] )
def list_person() -> str :
summary = getExpenseSummary()
people : list = factory_object.getBusinessLayer().listPeople()
ui_header = ('ID', 'First Name', 'Last Name', 'Short Name' )
ui_data : list = [ (p.getId(), p.getFirstName(), p.getLastName(), p.getShortName() ) for p in people ]
mode = flask.request.args.get( 'mode' )
if mode == 'popup':
return flask.render_template( 'list_data_popup.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module=None)
else:
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = ui_header, the_data = ui_data, module='person' )
# All delete flows go here ...
@app.route( '/expense_delete', methods = [ 'GET' ] )
def delete_expense() -> str :
print( 'delete_expense')
summary = getExpenseSummary()
Id = flask.request.args.get( 'Id' )
try:
factory_object.getBusinessLayer().deleteExpense( Id )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed deleting expense', \
error_action = 'Please retry or check the log for details',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/expense_category_delete', methods = [ 'GET' ] )
def delete_expense_category() -> str :
print( 'delete_expense_category')
summary = getExpenseSummary()
Id = flask.request.args.get( 'Id' )
try:
factory_object.getBusinessLayer().deleteExpenseCategory( Id )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed deleting expense category', \
error_action = 'Please retry or check the log for details',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/store_delete', methods=['GET'] )
def delete_store() -> str:
summary = getExpenseSummary()
Id = flask.request.args.get( 'Id' )
try:
factory_object.getBusinessLayer().deleteStore( Id )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed deleting store data', \
error_action = 'Please retry or check the log for details',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/payment_type_delete', methods=['GET'] )
def delete_payment_type() -> str:
summary = getExpenseSummary()
Id = flask.request.args.get( 'Id' )
try:
factory_object.getBusinessLayer().deletePaymentType( Id )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed deleting payment type data', \
error_action = 'Please retry or check the log for details',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
@app.route( '/person_delete', methods=['GET'] )
def delete_person() -> str:
summary = getExpenseSummary()
Id = flask.request.args.get( 'Id' )
try:
factory_object.getBusinessLayer().deletePerson( Id )
except:
traceback.print_exc()
return flask.render_template( 'error_page.html', \
error_cause='Failed deleting person data', \
error_action = 'Please retry or check the log for details',\
summary=summary )
else:
return flask.render_template('data_saved.html', the_title= __appname__, summary=summary )
# All report flows go here ...
@app.route( '/expense_month_summary_list', methods = [ 'GET' ] )
def list_expenses_monthly_summary() -> str:
summary = getExpenseSummary()
report = factory_object.getReportingLayer().listMonthwiseSummary()
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = report[ 0 ], the_data = report[ 1 ], module=None )
@app.route( '/expense_month_category_summary_list', methods = [ 'GET' ] )
def list_expenses_monthly_category_summary() -> str:
summary = getExpenseSummary()
report = factory_object.getReportingLayer().listMonthwiseCategorySummary()
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = report[ 0 ], the_data = report[ 1 ], module=None )
@app.route( '/expense_month_person_summary_list', methods = [ 'GET' ] )
def list_expenses_monthly_person_summary() -> str:
summary = getExpenseSummary()
report = factory_object.getReportingLayer().listMonthwisePersonSummary()
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = report[ 0 ], the_data = report[ 1 ], module=None )
@app.route( '/expense_month_paytype_summary_list', methods = [ 'GET' ] )
def list_expenses_monthly_paytype_summary() -> str:
summary = getExpenseSummary()
report = factory_object.getReportingLayer().listMonthwisePaymentTypeSummary()
return flask.render_template( 'list_data_page.html', the_title=__appname__, \
summary=summary, the_header = report[ 0 ], the_data = report[ 1 ], module=None )
# download links
@app.route( '/download_expenses', methods = [ 'GET' ] )
def download_expense_list() -> str:
return send_file( factory_object.getCSVGenerator().getFilenameWithPath( 'all_expenses.csv'), mimetype='text/csv' )
# other utility methods go here ...
def getExpenseSummary():
current_month_string = datetime.datetime.now().strftime( '%Y/%m' )
result = factory_object.getBusinessLayer().getExpenseSummary(current_month_string )
return result
# Main code ...
if len( sys.argv ) > 5:
factory_object = XManFactory()
dbargs = { 'dbtype' : sys.argv[ 1 ], 'username' : sys.argv[ 2 ], 'password' : sys.argv[ 3 ], 'hostname' : sys.argv[ 4 ] , 'dbname' : sys.argv[ 5 ] }
factory_object.createObjects( dbargs )
app.run(debug=True)
else:
print( 'Invalid usage - expected: python3 controller.py <dbtype> <username> <password> <hostname> <dbname>' )
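# Example launch (hypothetical values): python3 controller.py mysql xman_user secret localhost xman_db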
|
python
|
import requests
from env import QuerybookSettings
from lib.notify.base_notifier import BaseNotifier
class SlackNotifier(BaseNotifier):
def __init__(self, token=None):
self.token = (
token if token is not None else QuerybookSettings.QUERYBOOK_SLACK_TOKEN
)
@property
def notifier_name(self):
return "slack"
@property
def notifier_format(self):
return "plaintext"
def notify(self, user, message):
to = f"@{user.username}"
url = "https://slack.com/api/chat.postMessage"
headers = {"Authorization": "Bearer {}".format(self.token)}
text = self._convert_markdown(message)
data = {
"text": text,
"channel": to,
}
requests.post(url, json=data, headers=headers, timeout=30)
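# Minimal usage sketch (assumes a user object with a `username` attribute and a configured
# QUERYBOOK_SLACK_TOKEN; `current_user` below is hypothetical):
#   notifier = SlackNotifier()
#   notifier.notify(user=current_user, message="Your query has finished")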
|
python
|
#
# Copyright 2012 eNovance <[email protected]>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log
from oslo_utils import timeutils
import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute import util as compute_util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _, _LW
from ceilometer import sample
LOG = log.getLogger(__name__)
class _Base(pollsters.BaseComputePollster):
NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d",
"write-bytes=%d"])
@staticmethod
def make_vnic_sample(instance, name, type, unit, volume, vnic_data):
metadata = copy.copy(vnic_data)
resource_metadata = dict(zip(metadata._fields, metadata))
resource_metadata['instance_id'] = instance.id
resource_metadata['instance_type'] = (instance.flavor['id'] if
instance.flavor else None)
compute_util.add_reserved_user_metadata(instance.metadata,
resource_metadata)
if vnic_data.fref is not None:
rid = vnic_data.fref
else:
instance_name = util.instance_name(instance)
rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name)
return sample.Sample(
name=name,
type=type,
unit=unit,
volume=volume,
user_id=instance.user_id,
project_id=instance.tenant_id,
resource_id=rid,
timestamp=timeutils.isotime(),
resource_metadata=resource_metadata
)
CACHE_KEY_VNIC = 'vnics'
def _get_vnic_info(self, inspector, instance):
return inspector.inspect_vnics(instance)
@staticmethod
def _get_rx_info(info):
return info.rx_bytes
@staticmethod
def _get_tx_info(info):
return info.tx_bytes
def _get_vnics_for_instance(self, cache, inspector, instance):
i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {})
if instance.id not in i_cache:
i_cache[instance.id] = list(
self._get_vnic_info(inspector, instance)
)
return i_cache[instance.id]
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
instance_name = util.instance_name(instance)
LOG.debug(_('checking net info for instance %s'), instance.id)
try:
vnics = self._get_vnics_for_instance(
cache,
self.inspector,
instance,
)
for vnic, info in vnics:
LOG.debug(self.NET_USAGE_MESSAGE, instance_name,
vnic.name, self._get_rx_info(info),
self._get_tx_info(info))
yield self._get_sample(instance, vnic, info)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except virt_inspector.InstanceShutOffException as e:
LOG.warn(_LW('Instance %(instance_id)s was shut off while '
'getting samples of %(pollster)s: %(exc)s'),
{'instance_id': instance.id,
'pollster': self.__class__.__name__, 'exc': e})
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('%(inspector)s does not provide data for '
' %(pollster)s'),
{'inspector': self.inspector.__class__.__name__,
'pollster': self.__class__.__name__})
except Exception as err:
LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
{'name': instance_name, 'error': err})
class _RateBase(_Base):
NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:",
"read-bytes-rate=%d",
"write-bytes-rate=%d"])
CACHE_KEY_VNIC = 'vnic-rates'
def _get_vnic_info(self, inspector, instance):
return inspector.inspect_vnic_rates(instance,
self._inspection_duration)
@staticmethod
def _get_rx_info(info):
return info.rx_bytes_rate
@staticmethod
def _get_tx_info(info):
return info.tx_bytes_rate
class IncomingBytesPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.incoming.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=info.rx_bytes,
vnic_data=vnic,
)
class IncomingPacketsPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.incoming.packets',
type=sample.TYPE_CUMULATIVE,
unit='packet',
volume=info.rx_packets,
vnic_data=vnic,
)
class OutgoingBytesPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.outgoing.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=info.tx_bytes,
vnic_data=vnic,
)
class OutgoingPacketsPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.outgoing.packets',
type=sample.TYPE_CUMULATIVE,
unit='packet',
volume=info.tx_packets,
vnic_data=vnic,
)
class IncomingBytesRatePollster(_RateBase):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.incoming.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=info.rx_bytes_rate,
vnic_data=vnic,
)
class OutgoingBytesRatePollster(_RateBase):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.outgoing.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=info.tx_bytes_rate,
vnic_data=vnic,
)
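# Summary of the pollsters above: the *Bytes/*Packets pollsters emit cumulative samples
# (TYPE_CUMULATIVE, units 'B'/'packet'), while the *BytesRate pollsters emit gauge samples
# (TYPE_GAUGE, unit 'B/s') built from inspect_vnic_rates() over the inspection duration.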
|
python
|
"""
Module docstring
"""
from copy import deepcopy
from uuid import uuid4
from os import mkdir
import numpy as np
from scipy.integrate import solve_ivp
class OmicsGenerator:
"""
Handles all omics generation.
This class is used to specify omics generation parameters and generate synthetic data. Typical workflow is:
Initialize generator -> set interactions -> set interventions -> generate synthetic data
Attributes:
-----------
nodes:
List of nodes.
Args:
-----
time_points:
Integer. How many total time points to generate. Not to be confused with downsampling coefficient (applied
later).
nodes:
List of strings. (Unique) node names for each node.
node_sizes:
List of ints. Node sizes for each node.
discard_first:
Integer. How many initial time points to discard. Setting higher discard_first values generally ensures
samples closer to equilibrium.
init_full:
Boolean. If True, initializes all interactions, growth rates, and initial abundances at random.
silent:
Boolean. If True, suppresses all print statements.
**kwargs:
C, d, sigma, rho for AT-Normal matrix
Returns:
--------
OmicsGenerator object.
Raises:
-------
TODO
"""
def __init__(
self,
node_sizes : list = None,
nodes : list = None,
time_points : int = 100,
discard_first : int = 0,
init_full : bool = False,
silent : bool = False,
**kwargs) -> None:
"""
Initializes generator. See docstring for class.
"""
# Require node sizes
if node_sizes is None:
raise Exception("Must specify at least one node size.")
# Better handling for single-node systems
if isinstance(nodes, str):
nodes = [nodes]
if isinstance(node_sizes, int):
node_sizes = [node_sizes]
# Give default node names
if node_sizes is not None and nodes is None:
nodes = [f"n{i}" for i in range(len(node_sizes))]
elif len(nodes) != len(node_sizes):
raise Exception(f"Node lengths and node sizes do not match: {len(nodes)} != {len(node_sizes)}")
self._interactions = []
self._interventions = []
self._time_points = time_points + discard_first
self._T = np.array(range(self._time_points))
self._namespace = set()
self._discard_first = discard_first
self._silent = silent
# Process nodes
self.nodes = []
for node_name, size in zip(nodes, node_sizes):
self.add_node(node_name, size)
if init_full:
self._init_full(**kwargs)
if not self._silent:
print("Initialized")
class _OmicsNode:
"""
PRIVATE METHOD. Call with self.add_node() instead.
A class for omics nodes. Contains pointers to interactions, interventions.
Attributes:
-----------
inbound:
A dict of (node name, matrix) tuples representing matrix interactions of the type Ax --> y, where y is
another node. Maintained by self.add_interaction().
outbound:
A dict of (node name, matrix) tuples representing matrix interactions of the type Ay --> x, where y is
another node. Maintained by self.add_interaction().
interventions:
A list of interventions which affect this node. Maintained by self.add_intervention().
Args:
-----
name:
String. The node name. Must be unique.
size:
Integer: How many elements does this node have?
initial_value:
A vector of initial abundances for node elements. Length must be equal to size. Generally not called
on initialization - use self.add_initial_value() instead.
growth_rates:
Intrinsic growth/death rates for node elements. Length must be equal to size. Generally not called on
initialization - use self.add_initial_value() with 'growth_rate = True' instead.
names:
List of strings for naming node dimensions.
log_noise:
Boolean. If True, noise will be added to log-relative abundances. True by default.
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
_OmicsNode object.
Raises:
-------
None (fails silently, use add_node() instead.)
"""
def __init__(
self,
name : str,
size : int,
initial_value : np.ndarray,
growth_rates : np.ndarray,
names : list,
log_noise : bool,
verbose : bool = True) -> None:
"""
Initializes node. See docstring for class.
"""
self.name = name
self.size = size
self.initial_value = initial_value
self.growth_rates = growth_rates
self.log_noise = log_noise
self.outbound = {}
self.inbound = {}
self.interventions = []
self.names = names
if verbose:
print(f"Node '{name}' initialized")
def __str__(self):
return f"{self.name}\t{self.size}"
class _OmicsInteraction:
"""
PRIVATE METHOD. Call with self.add_interaction() instead.
A class for omics interactions. This has the general form of an m x n matrix representing interactions between
one set (e.g. taxa) and another set (e.g. other taxa, metabolites, whatever)
Attributes:
-----------
nrows:
Number of rows (e.g. taxa) in matrix.
ncols:
Number of columns (e.g. metabolites) in matrix.
Args:
-----
name:
String. A name for this interaction. Must be unique.
outbound_node:
Node from which the edge originates
inbound_node:
Node at which the edge terminates
matrix:
A matrix-type object with interactions
lag:
Integer. How much delay to put into dependencies. For instance, a lag of 1 on an interaction means we
compute Ax_t = y_(t+1)
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
_OmicsInteraction object.
Raises:
-------
None (fails silently, use add_interaction() instead).
"""
def __init__(
self,
name : str,
outbound_node : None,
inbound_node : None,
matrix : np.ndarray,
lag : int,
verbose : bool = True) -> None:
"""
Initializes interaction. See docstring for class.
"""
self.name = name
self.outbound_node = outbound_node
self.inbound_node = inbound_node
self.matrix = np.array(matrix)
self.lag = lag
self.nrows = matrix.shape[0] # e.g. number of taxa
self.ncols = matrix.shape[1] # e.g. number of metabolites
if verbose:
print(f"Interaction '{name}' added")
def __str__(self):
return f"{self.name}:\t({self.outbound_node.name})-->({self.inbound_node.name})\tLag: {self.lag}"
class _OmicsIntervention:
"""
PRIVATE METHOD. Call with self.add_intervention() instead.
A class for omics interventions. This has the general form of an n-length matrix which describes the reactions
of some set (e.g. taxa) to this particular intervention.
Args:
-----
name:
String. A name for our intervention. Only used for printing and other bookkeeping.
vector:
A vector-type object with reactions to the intervention.
node_name:
String. Name of node affected by this intervention/matrix.
U:
An indicator vector which is 1 for time points when the intervention is active, 0 otherwise.
affects_abundance:
Boolean. If True, intervention vector will be applied directly to the abundance vector rather than
growth rates.
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
_OmicsIntervention object.
Raises:
-------
None (fails silently, use add_intervention() instead).
"""
def __init__(
self,
name : str,
vector : np.ndarray,
node_name : str,
U : np.ndarray,
affects_abundance : bool,
verbose : bool = True) -> None:
"""
Initializes an intervention. See docstring for class.
"""
self.name = name
self.vector = vector
self.node_name = node_name
self.U = np.array(U)
self.affects_abundance = affects_abundance
if verbose:
print(f"Intervention '{name}' added")
return
def __str__(self):
end = ""
if self.affects_abundance:
end = "\taffects abundance"
return f"{self.name}\t{self.node_name}{end}"
def add_node(
self,
name : str,
size : int,
initial_value : np.ndarray = None,
growth_rates : np.ndarray = None,
names : list = None,
log_noise : bool = True,
verbose : bool = True) -> None:
"""
Adds nodes to generator object.
Args:
-----
name:
String. Used to identify node. Must be unique.
size:
Length of vector associated with a time point of this node. For instance, for a metagenomics node, this
would correspond to the number of taxa.
initial_value:
Value of this node at t = 0. Must be same length as node size.
growth_rates:
Element-wise growth/death rates for this node. Must be same length as node size.
names:
Optional. List of names for each node element. Used for printing/saving data.
log_noise:
Boolean. If True, noise will be added to log-relative abundances. If False, noise will be added to relative
abundances.
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
None (modifies generator in place).
Raises:
-------
ValueError:
One or more of [initial_value, growth_rates, names] are the wrong size.
"""
# Check sizes of inputs agree
for param_name in ["initial_value", "growth_rates", "names"]:
param = eval(param_name)
if param is not None and len(param) != size:
raise ValueError(f"{param_name} is wrong size: {len(param)} != {size}")
# Check namespace
if name in self._namespace:
raise Exception(f"Name {name} already in use. Please use a unique name")
# Check verbosity
if self._silent:
verbose = False
# Generate node and append to object
node = self._OmicsNode(
name,
size,
initial_value,
growth_rates,
names,
log_noise,
verbose
)
self.nodes.append(node)
self._namespace.add(name)
def add_interaction(
self,
name : str,
outbound_node_name : str,
inbound_node_name : str,
matrix : np.ndarray,
lag : int = 0,
verbose : bool = True) -> None:
"""
Adds interactions to generator object.
Edges look like this:
Graphical: (OUTBOUND NODE)--->(INBOUND NODE)
Linear algebra: [inbound] = [matrix] @ [outbound] + [...]
Args:
-----
name:
String. A name for this interaction.
outbound_node_name:
String. Name of node from which the edge originates
inbound_node_name:
String. Name of node at which the edge terminates
matrix:
A matrix-type object with interactions
lag:
Integer. How much delay to put into dependencies. For instance, a lag of 1 on an interaction means we
compute Ax_t = y_(t+1)
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
None (modifies generator in place).
Raises:
-------
TODO
"""
# Check namespace
if name in self._namespace:
raise Exception(f"Name {name} already in use. Please use a unique name")
# Check verbosity
if self._silent:
verbose = False
# Get nodes
outbound_node = self.get(outbound_node_name, "node")
if outbound_node is None:
raise Exception("Outbound node is invalid")
inbound_node = self.get(inbound_node_name, "node")
if inbound_node is None:
raise Exception("Inbound node is invalid")
# Check that matrix dimensions match
if matrix.shape[1] != inbound_node.size:
raise ValueError(f"Matrix shape[1] = {matrix.shape[1]} != {inbound_node.size} (size of inbound node '{inbound_node.name}')")
if matrix.shape[0] != outbound_node.size:
raise ValueError(f"Matrix shape[0] = {matrix.shape[0]} != {outbound_node.size} (size of outbound node '{outbound_node.name}')")
interaction = self._OmicsInteraction(
name,
outbound_node,
inbound_node,
matrix,
lag,
verbose
)
self._interactions.append(interaction)
# Append to nodes
outbound_node.inbound[inbound_node_name] = interaction
inbound_node.outbound[outbound_node_name] = interaction
self._namespace.add(name)
def add_intervention(
self,
name : str,
node_name : str,
vector : np.ndarray,
affects_abundance : bool = False,
U : np.ndarray = None,
start : int = None,
end : int = None,
verbose : bool = True) -> None:
"""
Adds an intervention to generator.
Must have either U or (start, end) set to specify timeframe.
Args:
-----
name:
String. A name for our intervention. Only used for printing and other bookkeeping.
node_name:
String. Name of node affected by this intervention/matrix.
vector:
A vector-type object detailing, elementwise, the reactions of each node coordinate to an intervention.
affects_abundance:
Boolean. If True, intervention vector will be applied directly to the abundance vector rather than to growth
rates.
U:
An indicator vector which is 1 for time points when the intervention is active, 0 otherwise.
start:
First time point at which the intervention is active. Use only for interventions of the form 0*1+0*. Otherwise, use the U
variable instead.
end:
Last time point at which the intervention is active. Use only for interventions of the form 0*1+0*. Otherwise, use the U
variable instead.
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
None (modifies generator in place).
Raises:
-------
TODO
"""
# Check namespace
if name in self._namespace:
raise Exception(f"Name {name} already in use. Please use a unique name")
# Check U vector is correct length
if U is not None:
if len(U) != self._time_points:
raise Exception(f"U vector is different size from number of time points: {len(U)} != {self._time_points}")
# Check verbosity
if self._silent:
verbose = False
# Process node
node = self.get(node_name, "node")
if node is None:
raise Exception("Invalid node! Please try again")
# A bunch of control flow to make a boolean vector called U
if U is not None:
pass # explicit U vectors are best
elif start is None or end is None:
raise Exception("Need to supply a (start,end) pair or a U vector")
else:
U = np.array([0] * self._time_points)
U[start:end] = 1
# Make the intervention and add it to self
intervention = self._OmicsIntervention(
name,
vector,
node_name,
U,
affects_abundance,
verbose
)
if len(intervention.U) == self._time_points:
self._interventions.append(intervention)
else:
raise Exception("Intervention vector is not the same length at time vector")
# Modify node accordingly
node.interventions.append(intervention)
self._namespace.add(name)
def set_initial_value(
self,
node_name : str,
values : np.ndarray,
growth_rate : bool = False,
verbose : bool = True) -> None:
"""
Sets a node value or growth rate.
Args:
-----
node_name:
Name of node being altered
values:
Vector. Initial values for node. Must be same length as node size.
growth_rate:
Boolean. If True, affects the growth_rate parameter of the node. Otherwise, affects initial values of node.
verbose:
Boolean. If False, suppresses print statements.
Returns:
--------
None (modifies generator in place).
Raises:
-------
TODO
"""
node = self.get(node_name, "node")
# Check node exists
if node is None:
raise Exception(f"Invalid node name: {node_name} does not exist")
# Check dimensions match
if len(values) != node.size:
raise Exception(f"Size mismatch with node size: {len(values)} != {node.size}")
# Set values
if not growth_rate:
node.initial_value = values
elif growth_rate:
node.growth_rates = values
# Print output
if verbose and not self._silent:
if not growth_rate:
print(f"Added x0 vector to node {node_name}")
elif growth_rate:
print(f"Added growth rates to node {node_name}")
def get(
self,
name : str,
node_type : str = None) -> "generator element":
"""
Gets a (node/interaction/intervention) by name.
Args:
-----
name:
String. Name of node/interaction/intervention.
node_type:
String. One of ["node", "interaction", "intervention"]. Specifies the type of generator element to look for.
Returns:
--------
_OmicsNode, _OmicsInteraction, _OmicsIntervention, or None.
Raises:
-------
None
"""
if node_type in (None, "node"):
for node in self.nodes:
if node.name == name:
return node
if node_type in (None, "interaction"):
for interaction in self._interactions:
if interaction.name == name:
return interaction
if node_type in (None, "intervention"):
for intervention in self._interventions:
if intervention.name == name:
return intervention
return None
def remove(
self,
name : str,
verbose : bool = True) -> None:
"""
Removes a node, intervention, or interaction from the generator by name.
Args:
-----
name:
A string specifying the (unique) name of the element to be removed.
Returns:
--------
None (modifies generator in place).
Raises:
-------
TODO
"""
obj = self.get(name)
if obj is None:
raise Exception(f"Cannot find object named {name} to remove")
if isinstance(obj, self._OmicsNode):
for interaction in reversed(self._interactions): # reversed so we can remove interactions as we go
if obj in (interaction.inbound_node, interaction.outbound_node):
self._interactions.remove(interaction)
for intervention in reversed(self._interventions):
if intervention.node_name == name:
self._interventions.remove(intervention)
for node in self.nodes:
node.inbound.pop(name, None)
node.outbound.pop(name, None)
self.nodes.remove(obj)
if verbose:
print(f"Removed node '{name}'")
elif isinstance(obj, self._OmicsInteraction):
# Remove interaction from inbound node
obj.inbound_node.outbound.pop(obj.outbound_node.name, None)
# Remove interaction from outbound node
obj.outbound_node.inbound.pop(obj.inbound_node.name, None)
# Remove interaction from list
self._interactions.remove(obj)
if verbose:
print(f"Removed interaction '{name}'")
elif isinstance(obj, self._OmicsIntervention):
node = self.get(obj.node_name)
node.interventions.remove(obj)
self._interventions.remove(obj)
if verbose:
print(f"Removed intervention '{name}'")
else:
raise Exception(f"Cannot remove '{name}': unknown type. Is the name correct?")
self._namespace.remove(name)
def generate(
self,
noise_var : float = 1e-2,
n_reads : int = 1e5,
dt : float = 1e-2,
downsample : int = 1) -> (dict, dict, dict):
"""
Generates a single timecourse of synthetic data.
Args:
-----
noise_var:
Float. variance parameter for gaussian noise term.
n_reads:
Integer. Number of reads to draw from the unsampled distribution.
dt:
Float. time step size which gets passed to IVP solver
downsample:
Integer. fraction of outputs to keep (1/n). By default, keeps all samples. downsample=4 means every 4th
sample is kept, etc. Downsample is deprecated. Simply modify "dt" instead.
Returns:
--------
The following three dicts (in order):
//======================================================\\
||Name: Sampling: Normalization: Number of samples:||
||======================================================||
||Z unsampled unnormalized full ||
||X unsampled normalized downsampled ||
||Y sampled normalized downsampled ||
\\======================================================//
Each Z/X/Y dict contains (node, timecourse) pairs. The timecourse is a numpy array with shape (number of time
points, node size).
Raises:
-------
TODO
"""
# Sanity checks
for node in self.nodes:
if node.initial_value is None:
raise ValueError(f"Node '{node.name}' has no x0 vector")
if node.growth_rates is None:
raise ValueError(f"Node '{node.name}' has no growth rate set")
def _grad_fn(
node : None,
X : list,
growth_rates : np.ndarray,
t : int) -> None:
"""
This gets passed to the solver. It's just the vector f used in GLV calculations.
"""
# Interactions:
interaction_coef = np.zeros(node.size)
for node_name in node.outbound:
interaction = node.outbound[node_name]
# Adjust for lag
idx = -1 - interaction.lag
try:
# Get interaction matrix
M = interaction.matrix
# Get last value (modulo lag term) of node abundance
y = X[node_name][idx]
# f += yM (GLV equation)
interaction_coef += y @ M
except IndexError:
# Happens when lag is larger than number of values already generated
pass
# Interventions:
intervention_coef = np.zeros(node.size)
for intervention in node.interventions:
if not intervention.affects_abundance:
intervention_coef += intervention.vector.dot(intervention.U[t])
# Self
xt = X[node.name][-1]
# The function itself:
def fn(t, x):
return xt * (growth_rates + interaction_coef + intervention_coef)
return fn
# Initialization steps
Z = {} # Latent absolute abundances
X = {} # Probability distribution/normalized abundances
Y = {} # Sampled abundances
for node in self.nodes:
Z[node.name] = [node.initial_value]
# Generalized Lotka-Volterra steps, plus bells and whistles
for t in range(self._time_points - 1):
Z_temp = {} # Use this so that all values are updated at once
for node in self.nodes:
# Get values from dicts
z = Z[node.name]
g = node.growth_rates
# Initialize values
Zprev = np.copy(z[-1]) # last time point, X_(t-1)
# Pass to solver
# TODO: possible to do this all in one shot rather than looping?
grad = _grad_fn(node, Z, g, t)
ivp = solve_ivp(grad, (0,dt), Zprev, method="RK45")
Zt = ivp.y[:,-1]
# Tweak abundances on a per-node basis
# TODO: Maybe this would be better if it were size-adjusted?
for intervention in node.interventions:
if intervention.affects_abundance == True:
Zt += intervention.vector * intervention.U[t]
# Add biological noise:
noise = np.random.normal(scale=noise_var, size=node.size)
# No noise for missing taxa
noise = noise * (Zt > 0)
# Equivalent to log->add noise->exp
if node.log_noise == True:
Zt *= np.exp(noise)
else:
Zt += noise
# Push to results
Zt = np.clip(Zt, 0, None)
Z_temp[node.name] = Zt
# Push all values for this time point to X at once
for key in Z_temp:
Z[key] += [Z_temp[key]]
# Simulate sampling noise
for node in self.nodes:
z = np.array(Z[node.name])
# Save latent state
x = z.copy()
# Discard first couple elements (ensure values are near attractor)
x = x[self._discard_first:]
# Take every nth element
# Negative coefficient ensures we sample from the end
x = x[::-downsample]
# Need to un-reverse the data now
x = x[::-1]
# Relative abundances
x = np.apply_along_axis(lambda a: a/sum(a), 1, x)
# y = y / np.sum(y, axis=1).reshape(-1,1)
# Draw samples
y = []
for idx in range(x.shape[0]):
try:
Yt = np.random.multinomial(n_reads, x[idx]) / n_reads
y += [Yt]
except ValueError:
# TODO: circle back and figure out what was breaking this
# print("ERROR: check self._weird for more info")
# self._weird = X[node.name][idx] # debugging variable
y += [np.zeros(node.size)]
# Push to output
X[node.name] = x
Y[node.name] = np.array(y)
Z[node.name] = z
return Z, X, Y
def generate_multiple(
self,
n : int,
extinct_fraction : float = 0,
**generate_args) -> (list, list, list):
"""
Generates several timecourses of synthetic data.
This is essentially a wrapper around a loop of generate() calls, with the added element of reinitializing
individuals. The extinct_fraction parameter gives some degree of control over re-initialization.
Args:
-----
n:
Integer. Number of individuals for whom to generate synthetic data timecourses.
extinct_fraction:
Float in [0, 1) range. Fraction of abundances that should be extinct for each individual.
Additional args (same as generate()):
-------------------------------------
noise_var:
Float. variance parameter for gaussian noise term.
n_reads:
Integer. Number of reads to draw from the unsampled distribution.
dt:
Float. time step size which gets passed to IVP solver
downsample:
Integer. fraction of outputs to keep (1/n). By default, keeps all samples. downsample=4 means every 4th
sample is kept, etc. Downsample is deprecated. Simply modify "dt" instead.
Returns:
--------
The following three arrays (in order):
//======================================================\\
||Name: Sampling: Normalization: Number of samples:||
||======================================================||
||Z unsampled unnormalized full ||
||X unsampled normalized downsampled ||
||Y sampled normalized downsampled ||
\\======================================================//
Each Z/X/Y array contains n dicts, each of which contains (node, timecourse) pairs. The timecourse is a numpy
array with shape (number of time points, node size).
Raises:
-------
TODO
"""
# Initialize:
old_nodes = self.nodes # store old initial values
out_X = []
out_Y = []
out_Z = []
# Generation loop
for i in range(n):
# Set new initial values for each node
for node in self.nodes:
# TODO: allow passing of any function to generate this
abundances = np.random.exponential(size=node.size) * np.random.binomial(1, 1-extinct_fraction, size=node.size)
self.set_initial_value(node.name, abundances, verbose=False)
Z,X,Y = self.generate(**generate_args)
out_X.append(X)
out_Y.append(Y)
out_Z.append(Z)
# return nodes to old values
self.nodes = old_nodes
return out_Z, out_X, out_Y
def _allesina_tang_normal_matrix(
self,
n : int,
C : float,
d : float,
sigma : float,
rho : float) -> np.ndarray:
"""
Generates an Allesina-Tang normal matrix.
Inspired by https://stefanoallesina.github.io/Sao_Paulo_School/intro.html#multi-species-dynamics.
How this works:
---------------
1. Creates covariance matrix has the following form:
1 rho rho ...
rho 1 rho ...
rho rho 1 ...
... (you get the idea)
2. Draws multivariate normal pairs from this covariance matrix
3. Populates non-diagonal entries of matrix with drawn pairs
4. Symmetrically sparsifies matrix, keeping only ~C% of entries
5. Sets diagonals of matrix to -d
Args:
-----
n:
Integer. Number of rows/columns in square matrix.
C:
Float in (0,1]: Sparsity parameter. Higher C = less sparse.
d:
Float. Negative self-interaction size.
sigma:
Float. Variance used to generate multivariate normal covariance matrix.
rho:
Float in [-1, 1]. Correlation term of covariance matrix. Higher rho = positive connectance = mutualism =
harder to stabilize. Lower rho = predator-prey--type relationships = easier to stabilize.
Returns:
--------
A matrix M that can be used as an interaction matrix.
Raises:
-------
None (fails silently).
"""
# sample coefficients
mu = np.zeros(2)
cov = sigma ** 2 * np.array([[1, rho], [rho, 1]])
n_samples = int(n * (n-1) / 2)
pairs = np.random.multivariate_normal(mu, cov, n_samples)
# completely filled matrix
M = np.ndarray((n, n))
M[np.triu_indices(n, 1)] = pairs[:,0]
M = M.transpose()
M[np.triu_indices(n, 1)] = pairs[:,1]
# winnow down
connections = np.random.rand(n, n) <= C
connections = connections * 1 # binarize
connections[np.tril_indices(n,1)] = 0
connections += connections.transpose() # symmetric
M *= connections
# set negative self-interactions
M[np.diag_indices(n)] = -d
return M
def _set_interactions(
self,
C : float = 0.5,
d : float = None,
sigma : float = 1,
rho : float = -0.4) -> None:
"""
Sets all interaction matrices from one big AT-normal matrix
Args:
-----
C:
Float in (0,1]: Sparsity parameter. Higher C = less sparse.
d:
Float. Negative self-interaction size.
sigma:
Float. Variance used to generate multivariate normal covariance matrix.
rho:
Float in [-1, 1]. Correlation term of covariance matrix. Higher rho = positive connectance = mutualism =
harder to stabilize. Lower rho = predator-prey--type relationships = easier to stabilize.
Returns:
--------
None (modifies generator in place).
Raises:
-------
None (fails silently).
"""
# Generate master matrix
sizes = [node.size for node in self.nodes]
n = np.sum(sizes)
# Solve for a stable value of d if d is not provided
if d is None:
d = sigma * np.sqrt(n * C) + 1
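# Worked example of this default (hypothetical sizes): with n = 50 total elements,
# C = 0.25 and sigma = 1, d = 1 * sqrt(50 * 0.25) + 1, i.e. roughly 4.54.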
m0 = self._allesina_tang_normal_matrix(n, C, d, sigma, rho)
# Carve up master matrix
i = 0 # row
for node1 in self.nodes:
j = 0 # col
for node2 in self.nodes:
m_ij = m0[i:i + node1.size, j:j + node2.size]
self.add_interaction(
f"{node1.name}->{node2.name}",
node1.name,
node2.name,
m_ij
)
if not self._silent:
print(f"set m:({node1.name})->({node2.name}): {i}:{i + node1.size} {j}:{j + node2.size}")
j += node2.size
i += node1.size
def _init_full(
self,
dist : None = np.random.exponential,
**kwargs) -> None:
"""
A fully random initialization of all generator parameters.
Args:
-----
dist:
A function to draw initial distributions (e.g. np.random.exponential, np.random.rand, etc)
Returns:
--------
None (modifies generator in place)
Raises:
-------
None
"""
# TODO: make use of dist argument
self._set_interactions(**kwargs)
for node in self.nodes:
self.set_initial_value(
node.name,
np.random.exponential(size=node.size)
)
self.set_initial_value(
node.name,
2 * (0.5 - np.random.rand(node.size)),
growth_rate=True
)
def case_control(
self,
participants : int,
case_frac : float,
node_name: str,
effect_size : float,
**generate_args) -> (list, list, list, list, list, list):
"""
Generates synthetic case and control timecourses.
Args:
-----
participants:
Integer. The total number of participants in the study.
case_frac:
Float in [0,1]. Fraction of total participants belonging to the case group.
node_name:
String. Name of node to which the intervention is applied.
effect_size:
Float. Magnitude of intervention.
**kwargs:
Arguments that get passed to generate_multiple().
Returns:
--------
Z_control:
Z-list like generate_multiple() for control group.
X_control:
X-list like generate_multiple() for control group.
Y_control:
Y-list like generate_multiple() for control group.
Z_case:
Z-list like generate_multiple() for case group.
X_case:
X-list like generate_multiple() for case group.
Y_case:
Y-list like generate_multiple() for case group.
Raises:
-------
TODO
"""
# inferred settings
n_cases = int(participants * case_frac)
n_controls = int(participants * (1-case_frac))
# get control values
z_control, x_control, y_control = self.generate_multiple(n_controls, **generate_args)
# get case values
case_gen = self.copy()
node_size = self.get(node_name).size
case_gen.add_intervention(
name='CASE',
node_name=node_name,
vector=effect_size * (0.5-np.random.rand(node_size)),
start=0,
end=self._time_points
)
z_case, x_case, y_case = case_gen.generate_multiple(n_cases, **generate_args)
return z_control, x_control, y_control, z_case, x_case, y_case
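# Usage sketch (illustrative arguments; "taxa" stands in for an existing node name,
# `gen` for an initialized OmicsGenerator):
#   z_ctrl, x_ctrl, y_ctrl, z_case, x_case, y_case = gen.case_control(40, 0.5, "taxa", effect_size=0.5)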
def copy(self) -> None:
"""
Makes a deep copy of generator.
Args:
-----
None
Returns:
--------
OmicsGenerator copy
Raises:
-------
None
"""
return deepcopy(self)
def _save_single(self,
data : "generator output",
path : str = None,
delim : str = "\t",
ext : str = "tsv") -> None:
"""
Helper function. Saves a single timecourse.
"""
for node in data:
data_t = data[node].transpose()
names = self.get(node).names
if names is None:
names = [f"{node}_{x}" for x in range(data_t.shape[0])]
sample_names = [f"S_{x}" for x in range(data_t.shape[1])]
header = f"{delim}{delim.join(sample_names)}" # blank top-left cell
data_joined = np.column_stack([names, data_t])
np.savetxt(
f"{path}{node}.{ext}",
data_joined,
fmt="%-12s",
delimiter=delim,
header=header,
)
def save(self,
data : "generator output",
output_path : str = ".",
prefix : str = "",
delim : str = "\t",
ext : str = "tsv") -> None:
"""
Saves generator outputs (single or multiple timecourses) as a text file/files.
Args:
-----
data:
An output from the self.generate(), self.generate_multiple(), or self.case_control() method. Expected to be
a dict or a list of dicts.
output_path:
String. Where to save outputs.
prefix:
String. Name to append to beginning of filenames.
delim:
String. Delimiter character.
ext:
String. Filename extension for saved timecourses.
Returns:
--------
None. Saves output to disk (as .tsv files by default)
Raises:
-------
TODO
"""
# Path handling
save_id = uuid4()
if output_path is None:
output_path = f"./{save_id}"
try:
mkdir(output_path)
except FileExistsError as e:
raise FileExistsError(f"{output_path} already exists.") from e # re-raise error
# Multiple outputs
if isinstance(data, list):
for idx, individual in enumerate(data):
if not self._silent:
print(f"\tSaving individual {idx} in directory {output_path}/{idx}/")
# Check correct nested datatypes
if not isinstance(individual, dict):
raise Exception(f"Wrong datatype: submitted list of {type(individual)}, expected list of dicts.")
mkdir(f"{output_path}/{idx}")
self._save_single(individual, f"{output_path}/{idx}/{prefix}{idx}", delim, ext)
# Single output
elif isinstance(data, dict):
self._save_single(data, f"{output_path}/{prefix}", delim, ext)
def __str__(self):
# TODO: Rewrite this more cleanly with f-strings
out = "\n=========================GENERATOR=========================\n\nTime_points:\t"
out += str(self._time_points)
out += "\n\nNodes:\n\t"
out += "\n\t".join([ str(x) for x in self.nodes ] )
out += "\n\nInteractions:\n\t"
out += "\n\t".join([ str(x) for x in self._interactions ] )
out += "\n\nInterventions:\n\t"
out += "\n\t".join([ str(x) for x in self._interventions ] )
return out
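# Minimal usage sketch following the workflow in the class docstring
# (node names, sizes, and parameter values are illustrative):
#   gen = OmicsGenerator(node_sizes=[15, 5], nodes=["taxa", "metabolites"], time_points=200, init_full=True)
#   Z, X, Y = gen.generate(noise_var=1e-2, n_reads=int(1e5))
#   gen.save(Y, output_path="./synthetic_run")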
|
python
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
This script defines the function to do the irq related analysis
"""
import csv
import struct
import sys
from config import TSC_FREQ
TSC_BEGIN = 0
TSC_END = 0
VMEXIT_ENTRY = 0x10000
LIST_EVENTS = {
'VMEXIT_EXTERNAL_INTERRUPT': VMEXIT_ENTRY + 0x00000001,
}
IRQ_EXITS = {}
# 4 * 64bit per trace entry
TRCREC = "QQQQ"
def parse_trace(ifile):
"""parse the trace data file
Args:
ifile: input trace data file
Return:
None
"""
fd = open(ifile, 'rb')
while True:
global TSC_BEGIN, TSC_END
try:
line = fd.read(struct.calcsize(TRCREC))
if not line:
break
(tsc, event, vec, d2) = struct.unpack(TRCREC, line)
event = event & 0xffffffffffff
if TSC_BEGIN == 0:
TSC_BEGIN = tsc
TSC_END = tsc
for key in LIST_EVENTS.keys():
if event == LIST_EVENTS.get(key):
if vec in IRQ_EXITS.keys():
IRQ_EXITS[vec] += 1
else:
IRQ_EXITS[vec] = 1
except struct.error:
sys.exit()
def generate_report(ofile, freq):
""" generate analysis report
Args:
ofile: output report
freq: TSC frequency of the device trace data from
Return:
None
"""
global TSC_BEGIN, TSC_END
csv_name = ofile + '.csv'
try:
with open(csv_name, 'a') as filep:
f_csv = csv.writer(filep)
rt_cycle = TSC_END - TSC_BEGIN
assert rt_cycle != 0, "Total run time in cycle is 0, \
TSC end %d, TSC begin %d" \
% (TSC_END, TSC_BEGIN)
rt_sec = float(rt_cycle) / (float(freq) * 1000 * 1000)
print ("\nVector \t\tCount \tNR_Exit/Sec")
f_csv.writerow(['Vector', 'NR_Exit', 'NR_Exit/Sec'])
for e in IRQ_EXITS.keys():
pct = float(IRQ_EXITS[e]) / rt_sec
print ("0x%08x \t %d \t%.2f" % (e, IRQ_EXITS[e], pct))
f_csv.writerow([e, IRQ_EXITS[e], '%.2f' % pct])
except IOError as err:
print ("Output File Error: " + str(err))
def analyze_irq(ifile, ofile):
"""do the vm exits analysis
Args:
ifile: input trace data file
ofile: output report file
Return:
None
"""
print("IRQ analysis started... \n\tinput file: %s\n"
"\toutput file: %s.csv" % (ifile, ofile))
parse_trace(ifile)
# save report to the output file
generate_report(ofile, TSC_FREQ)
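# Example invocation (hypothetical file names): analyze_irq('vmexit_trace.bin', 'irq_report')
# parses the binary trace and writes the per-vector exit counts to irq_report.csv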
|
python
|
def Widget(self):
return self
|
python
|
import unittest
import torch
from torchdrug import data, layers
class GraphSamplerTest(unittest.TestCase):
def setUp(self):
self.num_node = 10
self.input_dim = 5
self.output_dim = 7
adjacency = torch.rand(self.num_node, self.num_node)
threshold = adjacency.flatten().kthvalue((self.num_node - 3) * self.num_node)[0]
adjacency = adjacency * (adjacency > threshold)
self.graph = data.Graph.from_dense(adjacency).cuda()
self.input = torch.rand(self.num_node, self.input_dim).cuda()
def test_sampler(self):
conv = layers.GraphConv(self.input_dim, self.output_dim, activation=None).cuda()
readout = layers.SumReadout().cuda()
sampler = layers.NodeSampler(ratio=0.8).cuda()
results = []
for i in range(2000):
graph = sampler(self.graph)
node_feature = conv(graph, self.input)
result = readout(graph, node_feature)
results.append(result)
result = torch.stack(results).mean(dim=0)
node_feature = conv(self.graph, self.input)
truth = readout(self.graph, node_feature)
self.assertTrue(torch.allclose(result, truth, rtol=5e-2, atol=5e-2), "Found bias in node sampler")
sampler = layers.EdgeSampler(ratio=0.8).cuda()
results = []
for i in range(2000):
graph = sampler(self.graph)
node_feature = conv(graph, self.input)
result = readout(graph, node_feature)
results.append(result)
result = torch.stack(results).mean(dim=0)
node_feature = conv(self.graph, self.input)
truth = readout(self.graph, node_feature)
self.assertTrue(torch.allclose(result, truth, rtol=5e-2, atol=5e-2), "Found bias in edge sampler")
if __name__ == "__main__":
unittest.main()
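# Note: this test assumes a CUDA-capable GPU is available, since the graph, inputs,
# and layers are all moved to the GPU with .cuda().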
|
python
|
"""
Number
1. Integer
2. Floating point
3. Octal & Hexadecimal
1) Octal
a = 0o177
a = 0O177
2) Hexadecimal
a = 0x828
4. Operators
+, -, *, /
power : **
floor division (quotient) : //
remainder (mod) : %
Contents Source : https://wikidocs.net/12
"""
|
python
|
from sys import argv
script, first, second = argv
print "This script is called: ", script
print "The first variable is: ", first
print "The second variable is: ", second
|
python
|
# Generated by Django 3.2.4 on 2021-06-15 22:49
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("rules", "0001_initial")]
operations = [
migrations.CreateModel(
name="Ordinance",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("modified_at", models.DateTimeField(auto_now=True)),
("ordinance", models.CharField(max_length=25)),
("slug", models.SlugField(unique=True)),
],
options={"abstract": False},
),
migrations.AlterField(
model_name="rule",
name="ordinance",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="ordinance", to="rules.rulegroup"
),
),
migrations.AlterField(
model_name="rule",
name="rule_group",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="rule_group", to="rules.rulegroup"
),
),
]
|
python
|
from Crypto.PublicKey import RSA
from Crypto import Random #This one is important since it has the default function in RSA.generate() to generate random bytes!
from Crypto.Cipher import PKCS1_OAEP
import base64
#This function is kept as a reference for the full flow from encryption to decryption
def rsa_encrypt_decrypt():
#Generating RSA key pair
key = RSA.generate(2048)
#Extracting private_key
private_key = key.export_key('PEM')
#Extracting public_key
public_key = key.publickey().exportKey('PEM')
#Get the message to send
message = input('\nPlease enter your message for RSA encryption and decryption: ')
#Encode the message
message = str.encode(message)
#Import the public key in order to use it for encryption
rsa_public_key = RSA.importKey(public_key)
#PKCS#1 OAEP is an asymmetric cipher based on RSA and the OAEP padding
rsa_public_key = PKCS1_OAEP.new(rsa_public_key)
#Finally encryption
encrypted_message = rsa_public_key.encrypt(message)
#Base64 encoding so that we can store it easily on DB/Server
encrypted_message = base64.b64encode(encrypted_message)
print('\nYour encrypted message is : ', encrypted_message)
#DECRYPTION
#Import private key
rsa_private_key = RSA.importKey(private_key)
    #Wrap the private key with PKCS#1 OAEP, mirroring the encryption side
rsa_private_key = PKCS1_OAEP.new(rsa_private_key)
    #Base64-decode first, since the ciphertext was Base64-encoded after encryption
encrypted_message = base64.b64decode(encrypted_message)
decrypted_message = rsa_private_key.decrypt(encrypted_message)
print('\nYour message after decryption is : ', decrypted_message)
#The two functions below are the ones intended for the final app.
#Open question: how the public/private keys are obtained; they are presumably stored on the server,
#so the server can look up the proper key pair by user id or certificate.
#Encrypt function: the sender calls it, then sends the encrypted message to the server along with the receiver's address
def rsa_encrypt(message, receiver_public_key):
message = str.encode(message)
rsa_public_key = RSA.importKey(receiver_public_key)
rsa_public_key = PKCS1_OAEP.new(rsa_public_key)
encrypted_message = rsa_public_key.encrypt(message)
encrypted_message = base64.b64encode(encrypted_message)
return encrypted_message
#LOGICALLY, the server now has the encrypted message and will distribute it to the receiver
#For the decrypt fct: receiver calls it using his private key to get the initial message
def rsa_decrypt(encrypted_message, receiver_private_key):
rsa_private_key = RSA.importKey(receiver_private_key)
rsa_private_key = PKCS1_OAEP.new(rsa_private_key)
encrypted_message = base64.b64decode(encrypted_message)
decrypted_message = rsa_private_key.decrypt(encrypted_message)
return decrypted_message
#FOR TESTING! SINCE WE DON'T HAVE RSA KEY PAIRS LOCALLY
#rsa_encrypt_decrypt()
# get rsa key from file
def get_rsa_key(filepath):
with open(filepath, mode='rb') as private_file:
priv_key_data = private_file.read()
private_key = RSA.importKey(priv_key_data)
#print(private_key.export_key())
return private_key
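# Minimal usage sketch (not from the original app): generate a throwaway key pair locally
# and round-trip a message through the rsa_encrypt()/rsa_decrypt() helpers defined above.
if __name__ == '__main__':
    demo_key = RSA.generate(2048)
    ciphertext = rsa_encrypt('hello RSA', demo_key.publickey().export_key('PEM'))
    print(rsa_decrypt(ciphertext, demo_key.export_key('PEM')).decode())  # -> hello RSA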
|
python
|
# -*- coding: utf-8 -*-
from model.contact import Contact
from fixture.application import Application
import pytest
def test_add_contact(app):
app.open_home_page()
app.contact.add(Contact(firstname="dsf", dlename="gdfg", lastname="ew", nickname="gdf", title="wer", company="dg",
address="dg", home="dg", mobile="43", work="sdg", fax="213", email="243", email2="234",
email3="245", homepage="fsdf", address2="dsf", phone2="sg", notes="sfghh"))
app.return_home_page()
def tearDown(self):
self.app.destroy()
|
python
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyCookiecutter(PythonPackage):
"""A command-line utility that creates projects from cookiecutters (project templates).
E.g. Python package projects, jQuery plugin projects."""
homepage = "https://cookiecutter.readthedocs.io/en/latest/"
url = "https://github.com/audreyr/cookiecutter/archive/1.6.0.tar.gz"
version('1.6.0', sha256='0c9018699b556b83d7c37b27fe0cc17485b90b6e1f47365b3cdddf77f6ca9d36')
depends_on('py-setuptools', type='build')
depends_on('py-future')
depends_on('py-binaryornot')
depends_on('py-jinja2')
depends_on('py-click')
depends_on('py-whichcraft')
depends_on('py-poyo')
depends_on('py-jinja2-time')
depends_on('py-requests')
|
python
|
from django.db import models
from cloudinary.models import CloudinaryField
class Image(models.Model):
short_title = models.CharField(max_length=20)
file = CloudinaryField('image',
default="https://cdn.pixabay.com/photo/2016/06/16/03/49/befall-the-earth-quote-1460570_960_720.jpg")
timeStamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.short_title
|
python
|
def zigzag(n):
'''zigzag rows'''
def compare(xy):
x, y = xy
return (x + y, -y if (x + y) % 2 else y)
xs = range(n)
return {index: n for n, index in enumerate(sorted(
((x, y) for x in xs for y in xs),
key=compare
))}
def printzz(myarray):
'''show zigzag rows as lines'''
n = int(len(myarray) ** 0.5 + 0.5)
xs = range(n)
print('\n'.join(
[''.join("%3i" % myarray[(x, y)] for x in xs) for y in xs]
))
printzz(zigzag(6))
|
python
|
import unittest
import requests
import time
from vaurienclient import Client
from vaurien.util import start_proxy, stop_proxy
from vaurien.tests.support import start_simplehttp_server
_PROXY = 'http://localhost:8000'
# we should provide a way to set an option
# for all behaviors at once
#
_OPTIONS = ['--behavior-delay-sleep', '1']
class TestHttpProxy(unittest.TestCase):
def setUp(self):
self._proxy_pid = start_proxy(options=_OPTIONS, log_level='error',
log_output='/dev/null',
protocol='http')
self._web = start_simplehttp_server()
time.sleep(.3)
try:
if self._web.poll():
raise ValueError("Could not start the proxy")
self.client = Client()
assert self.client.get_behavior() == 'dummy'
except Exception:
self.tearDown()
raise
def tearDown(self):
stop_proxy(self._proxy_pid)
self._web.terminate()
def test_proxy(self):
# let's do a few simple request first to make sure the proxy works
self.assertEqual(self.client.get_behavior(), 'dummy')
times = []
for i in range(10):
start = time.time()
try:
res = requests.get(_PROXY)
finally:
times.append(time.time() - start)
self.assertEqual(res.status_code, 200)
fastest = min(times)
# now let's try the various behaviors
with self.client.with_behavior('blackout'):
# oh look we broke it
self.assertRaises(requests.ConnectionError, requests.get, _PROXY)
self.assertEqual(self.client.get_behavior(), 'blackout')
with self.client.with_behavior('delay'):
# should work but be slower
start = time.time()
try:
res = requests.get(_PROXY)
finally:
duration = time.time() - start
self.assertEqual(res.status_code, 200)
self.assertTrue(duration > fastest + 1)
# we should be back to normal
self.assertEqual(self.client.get_behavior(), 'dummy')
res = requests.get(_PROXY)
self.assertEqual(res.status_code, 200)
|
python
|
import os
import unittest2 as unittest
import json
import sys
from sendgrid import SendGridClient, Mail
class TestSendGrid(unittest.TestCase):
def setUp(self):
self.sg = SendGridClient(os.getenv('SG_USER'), os.getenv('SG_PWD'))
@unittest.skipUnless(sys.version_info < (3, 0), 'only for python2')
def test_unicode_recipients(self):
recipients = [unicode('[email protected]'), unicode('[email protected]')]
m = Mail(to=recipients,
subject='testing',
html='awesome',
from_email='[email protected]')
mock = {'to[]': ['[email protected]', '[email protected]']}
result = self.sg._build_body(m)
self.assertEqual(result['to[]'], mock['to[]'])
def test_send(self):
m = Mail()
m.add_to('John, Doe <[email protected]>')
m.set_subject('test')
m.set_html('WIN')
m.set_text('WIN')
m.set_from('[email protected]')
m.add_substitution('subKey', 'subValue')
m.add_section('testSection', 'sectionValue')
m.add_category('testCategory')
m.add_unique_arg('testUnique', 'uniqueValue')
m.add_filter('testFilter', 'filter', 'filterValue')
m.add_attachment_stream('testFile', 'fileValue')
url = self.sg._build_body(m)
url.pop('api_key', None)
url.pop('api_user', None)
url.pop('date', None)
test_url = json.loads('''
{
"to[]": ["[email protected]"],
"toname[]": ["John Doe"],
"html": "WIN",
"text": "WIN",
"subject": "test",
"files[testFile]": "fileValue",
"from": "[email protected]"
}
''')
test_url['x-smtpapi'] = json.dumps(json.loads('''
{
"sub": {
"subKey": ["subValue"]
},
"section": {
"testSection":"sectionValue"
},
"category": ["testCategory"],
"unique_args": {
"testUnique":"uniqueValue"
},
"filters": {
"testFilter": {
"settings": {
"filter": "filterValue"
}
}
}
}
'''))
self.assertEqual(url, test_url)
if __name__ == '__main__':
unittest.main()
|
python
|
import os
import torch
from typing import Dict
from catalyst.dl.fp16 import Fp16Wrap, copy_params, copy_grads
from catalyst.dl.state import RunnerState
from catalyst.dl.utils import UtilsFactory
from catalyst.rl.registry import GRAD_CLIPPERS
from .core import Callback
from .utils import get_optimizer_momentum, scheduler_step
class CheckpointCallback(Callback):
"""
Checkpoint callback to save/restore your model/criterion/optimizer/metrics.
"""
def __init__(
self, save_n_best: int = 3, resume: str = None
):
"""
:param save_n_best: number of best checkpoint to keep
:param resume: path to checkpoint to load and initialize runner state
"""
self.save_n_best = save_n_best
self.resume = resume
self.top_best_metrics = []
self._keys_from_state = ["resume"]
@staticmethod
def load_checkpoint(*, filename, state):
if os.path.isfile(filename):
print("=> loading checkpoint \"{}\"".format(filename))
checkpoint = UtilsFactory.load_checkpoint(filename)
state.epoch = checkpoint["epoch"]
UtilsFactory.unpack_checkpoint(
checkpoint,
model=state.model,
criterion=state.criterion,
optimizer=state.optimizer,
scheduler=state.scheduler
)
print(
"loaded checkpoint \"{}\" (epoch {})".format(
filename, checkpoint["epoch"]
)
)
else:
raise Exception("no checkpoint found at \"{}\"".format(filename))
def save_checkpoint(
self,
logdir,
checkpoint,
is_best,
save_n_best=5,
main_metric="loss",
minimize_metric=True
):
suffix = f"{checkpoint['stage']}.{checkpoint['epoch']}"
filepath = UtilsFactory.save_checkpoint(
logdir=f"{logdir}/checkpoints/",
checkpoint=checkpoint,
suffix=suffix,
is_best=is_best,
is_last=True
)
checkpoint_metric = checkpoint["valid_metrics"][main_metric]
self.top_best_metrics.append((filepath, checkpoint_metric))
self.top_best_metrics = sorted(
self.top_best_metrics,
key=lambda x: x[1],
reverse=not minimize_metric
)
if len(self.top_best_metrics) > save_n_best:
last_item = self.top_best_metrics.pop(-1)
last_filepath = last_item[0]
os.remove(last_filepath)
def pack_checkpoint(self, **kwargs):
return UtilsFactory.pack_checkpoint(**kwargs)
def on_stage_start(self, state):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
if self.resume is not None:
self.load_checkpoint(filename=self.resume, state=state)
def on_epoch_end(self, state: RunnerState):
if state.stage.startswith("infer"):
return
checkpoint = self.pack_checkpoint(
model=state.model,
criterion=state.criterion,
optimizer=state.optimizer,
scheduler=state.scheduler,
epoch_metrics=dict(state.metrics.epoch_values),
valid_metrics=dict(state.metrics.valid_values),
stage=state.stage,
epoch=state.epoch
)
self.save_checkpoint(
logdir=state.logdir,
checkpoint=checkpoint,
is_best=state.metrics.is_best,
save_n_best=self.save_n_best,
main_metric=state.main_metric,
minimize_metric=state.minimize_metric
)
def on_stage_end(self, state):
print("Top best models:")
top_best_metrics_str = "\n".join(
[
"{filepath}\t{metric:3.4f}".format(
filepath=filepath, metric=metric
) for filepath, metric in self.top_best_metrics
]
)
print(top_best_metrics_str)
class OptimizerCallback(Callback):
"""
Optimizer callback, abstraction over optimizer step.
"""
def __init__(
self,
grad_clip_params: Dict = None,
fp16_grad_scale: float = 128.0,
accumulation_steps: int = 1,
optimizer_key: str = None,
loss_key: str = None
):
"""
@TODO: docs
"""
grad_clip_params = grad_clip_params or {}
self.grad_clip_fn = GRAD_CLIPPERS.get_from_params(**grad_clip_params)
self.fp16 = False
self.fp16_grad_scale = fp16_grad_scale
self.accumulation_steps = accumulation_steps
self.optimizer_key = optimizer_key
self.loss_key = loss_key
self._optimizer_wd = 0
self._accumulation_counter = 0
def on_stage_start(self, state: RunnerState):
self.fp16 = isinstance(state.model, Fp16Wrap)
optimizer = state.get_key(
key="optimizer", inner_key=self.optimizer_key
)
assert optimizer is not None
lr = optimizer.defaults["lr"]
momentum = get_optimizer_momentum(optimizer)
state.set_key(lr, "lr", inner_key=self.optimizer_key)
state.set_key(momentum, "momentum", inner_key=self.optimizer_key)
def on_epoch_start(self, state):
optimizer = state.get_key(
key="optimizer", inner_key=self.optimizer_key
)
self._optimizer_wd = optimizer.param_groups[0].get("weight_decay", 0.0)
optimizer.param_groups[0]["weight_decay"] = 0.0
@staticmethod
def grad_step(*, optimizer, optimizer_wd=0, grad_clip_fn=None):
for group in optimizer.param_groups:
if optimizer_wd > 0:
for param in group["params"]:
param.data = param.data.add(
-optimizer_wd * group["lr"], param.data
)
if grad_clip_fn is not None:
grad_clip_fn(group["params"])
optimizer.step()
def on_batch_end(self, state):
if not state.need_backward:
return
self._accumulation_counter += 1
if not self.fp16:
model = state.model
optimizer = state.get_key(
key="optimizer", inner_key=self.optimizer_key
)
loss = state.get_key(key="loss", inner_key=self.loss_key)
loss.backward()
if (self._accumulation_counter + 1) % self.accumulation_steps == 0:
self.grad_step(
optimizer=optimizer,
optimizer_wd=self._optimizer_wd,
grad_clip_fn=self.grad_clip_fn
)
model.zero_grad()
self._accumulation_counter = 0
else:
model = state.model
model.zero_grad()
optimizer = state.get_key(
key="optimizer", inner_key=self.optimizer_key
)
            loss = state.get_key(key="loss", inner_key=self.loss_key)
scaled_loss = self.fp16_grad_scale * loss.float()
scaled_loss.backward()
master_params = list(optimizer.param_groups[0]["params"])
model_params = list(
filter(lambda p: p.requires_grad, model.parameters())
)
copy_grads(source=model_params, target=master_params)
for param in master_params:
param.grad.data.mul_(1. / self.fp16_grad_scale)
self.grad_step(
optimizer=optimizer,
optimizer_wd=self._optimizer_wd,
grad_clip_fn=self.grad_clip_fn
)
copy_params(source=master_params, target=model_params)
torch.cuda.synchronize()
def on_epoch_end(self, state):
optimizer = state.get_key(
key="optimizer", inner_key=self.optimizer_key
)
optimizer.param_groups[0]["weight_decay"] = self._optimizer_wd
class SchedulerCallback(Callback):
def __init__(
self,
scheduler_key: str = None,
mode: str = "epoch",
reduce_metric: str = "loss"
):
self.scheduler_key = scheduler_key
self.mode = mode
self.reduce_metric = reduce_metric
def step(self, state):
scheduler = state.get_key(
key="scheduler", inner_key=self.scheduler_key
)
lr, momentum = scheduler_step(
scheduler=scheduler,
valid_metric=state.metrics.valid_values.get(
self.reduce_metric, None)
)
state.set_key(lr, key="lr", inner_key=self.scheduler_key)
state.set_key(momentum, key="momentum", inner_key=self.scheduler_key)
def on_stage_start(self, state):
scheduler = state.get_key(
key="scheduler", inner_key=self.scheduler_key
)
assert scheduler is not None
def on_batch_end(self, state):
if self.mode == "batch":
self.step(state=state)
def on_epoch_end(self, state):
if self.mode == "epoch":
self.step(state=state)
class LossCallback(Callback):
def __init__(self, input_key: str = "targets", output_key: str = "logits"):
self.input_key = input_key
self.output_key = output_key
def on_stage_start(self, state):
assert state.criterion is not None
def on_batch_end(self, state):
state.loss = state.criterion(
state.output[self.output_key], state.input[self.input_key]
)
class EarlyStoppingCallback(Callback):
def __init__(
self,
patience: int,
metric: str = "loss",
minimize: bool = True,
min_delta: float = 1e-6
):
self.best_score = None
self.metric = metric
self.patience = patience
self.num_bad_epochs = 0
self.is_better = None
if minimize:
self.is_better = lambda score, best: score <= (best - min_delta)
else:
            self.is_better = lambda score, best: score >= (best + min_delta)
def on_epoch_end(self, state: RunnerState) -> None:
if state.stage.startswith("infer"):
return
score = state.metrics.valid_values[self.metric]
if self.best_score is None:
self.best_score = score
if self.is_better(score, self.best_score):
self.num_bad_epochs = 0
self.best_score = score
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
print(f"Early stop at {state.epoch} epoch")
state.early_stop = True
|
python
|
# O(N + M) time and space
def sum_swap(a, b):
a_sum = 0
a_s = {}
b_sum = 0
b_s = {}
for i, n in enumerate(a):
a_sum += n
a_s[n] = i
for i, n in enumerate(b):
b_sum += n
b_s[n] = i
    # a valid swap exists only if the total difference is even
    diff = a_sum - b_sum
    if diff % 2 != 0:
        return None
    diff //= 2
for i, n in enumerate(a):
if n - diff in b_s:
return i, b_s[n - diff]
return None
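# Quick illustration with made-up arrays: sum([4, 1, 2, 1, 1, 2]) == 11 and
# sum([3, 6, 3, 3]) == 15, so swapping a's 4 (index 0) with b's 6 (index 1) makes both 13.
print(sum_swap([4, 1, 2, 1, 1, 2], [3, 6, 3, 3]))  # -> (0, 1)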
|
python
|
from django import template
register = template.Library()
@register.inclusion_tag('registration/error_messages.html')
def error_messages(errors):
return {'errors': errors}
|
python
|
if __name__ == "__main__":
    _ = int(input())  # number of elements in the set (value not needed)
user_list = list(map(int, input().split()))
user_list = set(user_list)
n = int(input())
for _ in range(n):
user_input = input().split()
if user_input[0] == 'intersection_update':
new_list = list(map(int, input().split()))
user_list.intersection_update(new_list)
elif user_input[0] == 'symmetric_difference_update':
new_list2 = list(map(int, input().split()))
user_list.symmetric_difference_update(new_list2)
elif user_input[0] == 'difference_update':
new_list3 = list(map(int, input().split()))
user_list.difference_update(new_list3)
elif user_input[0] == 'update':
new_list4 = list(map(int, input().split()))
user_list.update(new_list4)
else:
print('Something gone wrong!')
a = sum(user_list)
print(a)
|
python
|
import serial, struct, traceback, sys
from rhum.rhumlogging import get_logger
from rhum.drivers.driver import Driver
from rhum.drivers.enocean.messages.message import EnOceanMessage
from rhum.drivers.enocean.messages.response.VersionMessage import VersionMessage
from rhum.drivers.enocean.constants import PacketType, CommonCommandType, ResponseType
from rhum.utils.crc8 import CRC8Utils
import logging
from rhum.drivers.enocean.messages.typingmessage import TypingMessage
class EnOceanDriver(Driver):
_logger = get_logger('rhum.driver.enocean.EnOceanDriver')
def __init__(self, port='/dev/ttyAMA0', callback=None):
super(EnOceanDriver, self).__init__(callback)
# Initialize serial port
self.__buffer = []
self.__port = port
        self._logger.debug('initialize connection to {0}'.format(port))
self.__connection = serial.Serial(self.__port, 57600, timeout=0)
def stop(self):
Driver.stop(self)
self.__connection.close()
self._logger.info('EnOcean Driver on {0} stopped'.format(self.__port))
def run(self):
self._logger.info('EnOcean Driver started on {0}'.format(self.__port))
while not self._stop.is_set():
# Read chars from serial port as hex numbers
try:
msg = self.parse()
__type, __datas, __opts = msg._get()
msg = TypingMessage.transform(__type, __datas, __opts)
self._logger.info(msg)
except serial.SerialException:
self._logger.error('Serial port exception! (device disconnected or multiple access on port?)')
break
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
for line in lines:
self._logger.error(line)
def test(self):
msg = EnOceanMessage(PacketType.COMMON_COMMAND.value, [CommonCommandType.CD_R_VERSION.value])
buffer = msg.build()
self._logger.debug('EnOcean Driver message {0}'.format(buffer))
self._logger.debug(self.__connection.isOpen())
#for index in range(len(buffer)):
#byte by byte tx
buffer = bytes(buffer)
self._logger.debug('writing byte {0}'.format(buffer))
self.__connection.write(buffer)
try:
self._logger.debug('ask for parsing data')
msg = self.parse()
msg = VersionMessage(msg._get()[0], msg._get()[1], msg._get()[2])
self._logger.info('EnOcean Test Message (Version)')
self._logger.info(msg)
if msg.isResponse() and msg.getReturnCode() == ResponseType.RET_OK:
return True
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
for line in lines:
self._logger.error(line)
self.__connection.close()
return False
def parse(self):
Driver.parse(self)
self._logger.debug('parsing data')
msg = self._getSerialData()
if isinstance(msg, EnOceanMessage):
return msg
raise Exception('No message parsed')
def _getSerialData(self):
self._logger.debug('searching for sync byte')
s = 0
while s != b'\x55':
if self.__connection.inWaiting() != 0:
s = self.__connection.read(1)
self._logger.debug('sync byte found')
while self.__connection.inWaiting() < 5:
            pass  # busy-wait until the 4-byte header and its CRC are available
header = self.__connection.read(4) #read header fields
headerCRC = self.__connection.read(1)[0] #read header crc field
self._logger.debug('header reading : {0} and crc : {1}'.format(header, headerCRC))
if (CRC8Utils.calc(header) == headerCRC):
self._logger.debug('header CRC OK')
data_length, opt_length, msgType = struct.unpack("!HBB", header)
self._logger.debug('data_length {0}; opt_length {1}; msg_type {2}'.format( data_length, opt_length, msgType ))
totalDataLength = data_length + opt_length
while self.__connection.inWaiting() < totalDataLength+1:
                pass  # busy-wait until data, optional data and CRC are available
datas = self.__connection.read(data_length)
opts = self.__connection.read(opt_length)
dataCRC = self.__connection.read(1)
self._logger.debug('datas {0}; opts {1}; dataCRC {2}'.format( datas, opts, dataCRC ))
if(self._logger.isEnabledFor(logging.DEBUG)):
msg = header
                msg += bytes([headerCRC])
msg += datas
msg += opts
msg += dataCRC
self._logger.debug(msg)
if (CRC8Utils.calc(datas+opts) == dataCRC[0]):
return EnOceanMessage(msgType, datas, opts)
return "Data CRC Failed"
return "Header CRC Failed"
|
python
|
from tkinter import Frame, Label, Button, messagebox, filedialog as fd
from tkinter.constants import DISABLED, E, NORMAL, RAISED, SUNKEN, X
import pandas
import requests
from threading import Thread
import json
from messages import messages
from utils import config
from ibuki import Ibuki
class TopFrame(Frame):
def __init__(self, parent):
super().__init__(parent, highlightcolor='black',
highlightthickness=2, padx=10, pady=10)
self.btn_select_input = Button(self, text='Select input file and upload', width=22, bg='yellow',
fg='blue', font=10, cursor='hand2', command=self.select_file)
self.btn_select_input.grid(row=0, column=0)
btn_view = Button(self, text='Extended warranty view',
width=18, bg='yellow', fg='blue', font=10, padx=10, cursor='hand2', command=self.view_extended_warranty_customers)
btn_view.grid(row=0, column=1)
btn_send_sms = Button(self, text='Send SMS', width=10,
bg='yellow', fg='red', font=10, padx=10, cursor='hand2', command=self.send_sms)
btn_send_sms.grid(row=0, column=2, sticky=E)
self.columnconfigure(2, weight=4)
self.columnconfigure(1, weight=2)
def select_file(self):
filetypes = (
('excel files', '*.xlsx'),
('All files', '*.*')
)
try:
select_folder = config.selectFolder or './'
filename = fd.askopenfilename(
title='Open customer data',
initialdir=select_folder,
filetypes=filetypes
)
data = self.get_json(filename)
self.enable_disable_button(self.btn_select_input, False)
s = Thread(target=self.upload_data, args=(data,))
s.start()
except(Exception) as error:
messagebox.showerror(
'Error', error or messages.get('errSelectingFile'))
self.enable_disable_button(self.btn_select_input, True)
def get_json(self, filename):
df = pandas.read_excel(filename, converters={'Purchased Date': str, 'Serial No': str}, header=1, usecols=['ASC Code', 'Customer Group', 'Job ID', 'Warranty Type', 'Warranty Category', 'Service Type', 'Product category name',
'Product sub category name', 'Set Model', 'Model Name', 'Serial No', 'Purchased Date', 'Customer Name', 'Mobile No', 'Postal Code', 'Address'
])
json_str = df.to_json(orient='index')
js = json_str.encode('ascii', "ignore").decode()
js = js.replace(u'\\ufeff', '').replace('\\/', '').replace("\'", '')
jsn = json.loads(js)
temp_data = [value for key, value in jsn.items()]
filtered = filter(
lambda value: ('TV' in value.get(
'Product category name', '').upper())
and (value.get('Purchased Date', None) is not None)
and (value.get('Purchased Date', '').strip() != ''), temp_data)
data = [item for item in filtered]
return(data)
def upload_data(self, data):
try:
upload_endpoint = config.uploadEndPoint
requests.post(upload_endpoint, json=data)
messagebox.showinfo("Success", messages['infoUploadSuccess'])
self.enable_disable_button(self.btn_select_input, True)
except(Exception) as error:
messagebox.showerror('Error', error or 'Upload error')
self.enable_disable_button(self.btn_select_input, True)
def enable_disable_button(self, btn, isEnabled):
btn.configure(relief=RAISED if isEnabled else SUNKEN)
btn.configure(state=NORMAL if isEnabled else DISABLED)
def view_extended_warranty_customers(self):
Ibuki.emit('VIEW-EXTENDED-WARRANTY-CUSTOMERS', None)
def send_sms(self):
Ibuki.emit('SEND-SMS', None)
def init_top_frame(root):
try:
frame_top = TopFrame(root)
frame_top.pack(fill=X, padx=10, pady=10)
except(Exception) as error:
messagebox.showerror('Error', error or messages.get('errGeneric'))
|
python
|
import os
import torch, pickle
from torch import nn
import torch.nn.functional as F
from dataloader import get_transform, get_dataset
from model import get_model
from utils import get_dirname_from_args
# how are we going to name our checkpoint file
def get_ckpt_path(args, epoch, loss):
ckpt_name = get_dirname_from_args(args)
# inside the ckpt path
ckpt_path = args.ckpt_path / ckpt_name
# if you are creating checkpoint file for the first time
args.ckpt_path.mkdir(exist_ok=True)
ckpt_path.mkdir(exist_ok=True)
# checkpoint name is named after the loss and epoch
loss = '{:.4f}'.format(loss)
ckpt_path = ckpt_path / 'loss_{}_epoch_{}.pickle'.format(loss, epoch)
# return the path name/address
return ckpt_path
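# Illustration (hypothetical values): for loss 0.1234 at epoch 3 the checkpoint ends up at
# <args.ckpt_path>/<dirname_from_args>/loss_0.1234_epoch_3.pickle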
# saving checkpoint file based on current status
def save_ckpt(args, epoch, loss, model):
# since checkpoint file is named based on epoch and loss, we state which epoch is being saved
print('saving epoch {}'.format(epoch))
dt = {
'args': args,
'epoch': epoch,
'loss': loss,
        'model': model.state_dict(),
}
ckpt_path = get_ckpt_path(args, epoch, loss)
# name checkpoint file based on epoch and loss
print("Saving checkpoint {}".format(ckpt_path))
# what checkpoint in what epoch
torch.save(dt, str(ckpt_path))
# get a model from checkpoint file
def get_model_ckpt(args):
# if there is a model specified to be fetched
ckpt_available = args.ckpt_name is not None
if ckpt_available:
name = '{}'.format(args.ckpt_name)
# add * behind the name
name = '{}*'.format(name) if not name.endswith('*') else name
# now every name has * behind it
ckpt_paths = sorted(args.ckpt_path.glob(name), reverse=False)
        assert len(ckpt_paths) > 0, "no ckpt candidate for {}".format(args.ckpt_path / args.ckpt_name)
# full address is ckpt_path / ckpt_name
        ckpt_path = ckpt_paths[0]
        print("loading from {}".format(ckpt_path))
        # load the checkpoint dict saved by save_ckpt from ckpt_path
        dt = torch.load(str(ckpt_path))
        # 1. first update the arguments
        args.update(dt['args'])
# 2. get model based on the arguments
model = get_model(args)
if ckpt_available:
model.load_state_dict(dt['model'])
# load other state in the model
return args, model, ckpt_available
|
python
|
import smtplib
import datetime
from email.mime.text import MIMEText
from flask import current_app
def notify(notifyType, message, all=True):
# Only notify if less than 3 notifications in the past 24 hours
sendNotification = True
now = datetime.datetime.now()
if current_app.config.get(notifyType) is None:
# Create and track this notify type
current_app.config[notifyType] = (now, 1)
else:
oneDayAgo = now - datetime.timedelta(days=1)
previousNotification = current_app.config.get(notifyType)
if previousNotification[0] > oneDayAgo and previousNotification[1] >= 3:
# If last notify was newer than 1 day ago and there have been 3 notifications
sendNotification = False
elif previousNotification[0] > oneDayAgo and previousNotification[1] < 3:
# If last notify was newer than 1 day ago and there less than 3 notifications
current_app.config[notifyType] = (
now, previousNotification[1] + 1)
else:
# Last notification was more than 1 day ago start over
current_app.config[notifyType] = (now, 1)
if sendNotification:
sender = current_app.config.get('SMTP_EMAIL')
recipients = current_app.config.get('ALL_NOTIFY') if all else current_app.config.get('PRIMARY_NOTIFY')
# Build email header
msg = MIMEText(message)
msg['Subject'] = 'Arduino Water Control Temperature Alert'
msg['From'] = sender
msg['To'] = ', '.join(recipients)
server = smtplib.SMTP_SSL(
current_app.config.get('SMTP_DOMAIN'),
port=current_app.config.get('SMTP_PORT'))
server.login(sender, current_app.config.get('SMTP_PASSWORD'))
server.sendmail(sender, recipients, msg.as_string())
server.quit()
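# Hypothetical usage sketch (names made up), assuming the SMTP_* and *_NOTIFY keys are
# present in the Flask config; notify() relies on current_app, so it must run inside an
# application (or request) context:
#
#     with app.app_context():
#         notify('HIGH_TEMP', 'Tank temperature above threshold', all=False)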
|
python
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning"]
__all__ = ["test_gscv_fit", "test_rscv_fit"]
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import ParameterGrid, ParameterSampler
from sktime.datasets import load_airline
from sktime.forecasting.compose import ReducedForecaster
from sktime.forecasting.compose import TransformedTargetForecaster
from sktime.forecasting.model_selection import ForecastingGridSearchCV
from sktime.forecasting.model_selection import ForecastingRandomizedSearchCV
from sktime.forecasting.model_selection import SingleWindowSplitter
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.tests._config import TEST_OOS_FHS
from sktime.forecasting.tests._config import TEST_STEP_LENGTHS
from sktime.forecasting.tests._config import TEST_WINDOW_LENGTHS
from sktime.forecasting.tests._config import TEST_RANDOM_SEEDS
from sktime.forecasting.tests._config import TEST_N_ITERS
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.performance_metrics.forecasting import make_forecasting_scorer
from sktime.performance_metrics.forecasting import sMAPE
from sktime.transformations.series.detrend import Detrender
def compute_expected_gscv_scores(forecaster, cv, param_grid, y, scoring):
training_window, test_window = cv.split_initial(y)
y_train, y_test = y.iloc[training_window], y.iloc[test_window]
scores = np.zeros(len(param_grid))
for i, params in enumerate(param_grid):
f = clone(forecaster)
f.set_params(**params)
f.fit(y_train, fh=cv.fh)
y_pred = f.update_predict(y_test, cv)
y_test_subset = y_test.loc[
y_pred.index
] # select only time points which we predicted
scores[i] = scoring(y_test_subset, y_pred)
return scores
@pytest.mark.parametrize(
"forecaster, param_dict",
[
(NaiveForecaster(strategy="mean"), {"window_length": TEST_WINDOW_LENGTHS}),
# atomic estimator
(
TransformedTargetForecaster(
[ # composite estimator
("t", Detrender(PolynomialTrendForecaster())),
("f", ReducedForecaster(LinearRegression(), scitype="regressor")),
]
),
{
"f__window_length": TEST_WINDOW_LENGTHS,
"f__step_length": TEST_STEP_LENGTHS,
},
), # multiple params
],
)
@pytest.mark.parametrize(
"scoring",
[sMAPE(), make_forecasting_scorer(mean_squared_error, greater_is_better=False)],
)
@pytest.mark.parametrize(
"cv",
[
*[SingleWindowSplitter(fh=fh) for fh in TEST_OOS_FHS],
# single split with multi-step fh
SlidingWindowSplitter(fh=1, initial_window=50)
# multiple splits with single-step fh
],
)
def test_gscv_fit(forecaster, param_dict, cv, scoring):
param_grid = ParameterGrid(param_dict)
y = load_airline()
gscv = ForecastingGridSearchCV(
forecaster, param_grid=param_dict, cv=cv, scoring=scoring
)
gscv.fit(y)
# check scores
gscv_scores = gscv.cv_results_[f"mean_test_{scoring.name}"]
expected_scores = compute_expected_gscv_scores(
forecaster, cv, param_grid, y, scoring
)
np.testing.assert_array_equal(gscv_scores, expected_scores)
# check best parameters
assert gscv.best_params_ == param_grid[gscv_scores.argmin()]
# check best forecaster is the one with best parameters
assert {
key: value
for key, value in gscv.best_forecaster_.get_params().items()
if key in gscv.best_params_.keys()
} == gscv.best_params_
@pytest.mark.parametrize(
"forecaster, param_dict",
[
(NaiveForecaster(strategy="mean"), {"window_length": TEST_WINDOW_LENGTHS}),
# atomic estimator
(
TransformedTargetForecaster(
[ # composite estimator
("t", Detrender(PolynomialTrendForecaster())),
("f", ReducedForecaster(LinearRegression(), "regressor")),
]
),
{
"f__window_length": TEST_WINDOW_LENGTHS,
"f__step_length": TEST_STEP_LENGTHS,
},
), # multiple params
],
)
@pytest.mark.parametrize(
"scoring",
[sMAPE(), make_forecasting_scorer(mean_squared_error, greater_is_better=False)],
)
@pytest.mark.parametrize(
"cv",
[
*[SingleWindowSplitter(fh=fh) for fh in TEST_OOS_FHS],
# single split with multi-step fh
SlidingWindowSplitter(fh=1, initial_window=50)
# multiple splits with single-step fh
],
)
@pytest.mark.parametrize(
"n_iter",
TEST_N_ITERS,
)
@pytest.mark.parametrize(
"random_state",
TEST_RANDOM_SEEDS,
)
def test_rscv_fit(forecaster, param_dict, cv, scoring, n_iter, random_state):
"""Tests that ForecastingRandomizedSearchCV successfully searches the
parameter distributions to identify the best parameter set
"""
# samples uniformly from param dict values
param_distributions = ParameterSampler(
param_dict, n_iter, random_state=random_state
)
y = load_airline()
rscv = ForecastingRandomizedSearchCV(
forecaster,
param_distributions=param_dict,
cv=cv,
scoring=scoring,
n_iter=n_iter,
random_state=random_state,
)
rscv.fit(y)
# check scores
rscv_scores = rscv.cv_results_[f"mean_test_{scoring.name}"]
# convert ParameterSampler to list to ensure consistent # of scores
expected_scores = compute_expected_gscv_scores(
forecaster, cv, list(param_distributions), y, scoring
)
np.testing.assert_array_equal(rscv_scores, expected_scores)
# check best parameters
assert rscv.best_params_ == list(param_distributions)[rscv_scores.argmin()]
# check best forecaster is the one with best parameters
assert {
key: value
for key, value in rscv.best_forecaster_.get_params().items()
if key in rscv.best_params_.keys()
} == rscv.best_params_
|
python
|
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for any plugin- or framework-specific behaviour of the plugin devices"""
import pytest
import numpy as np
from plugin_name.qiskit_device import z_eigs
from plugin_name import Device1
Z = np.diag([1, -1])
class TestZEigs:
r"""Test that eigenvalues of Z^{\otimes n} are correctly generated"""
def test_one(self):
"""Test that eigs(Z) = [1, -1]"""
assert np.all(z_eigs(1) == np.array([1, -1]))
@pytest.mark.parametrize("n", [2, 3, 6])
def test_multiple(self, n):
r"""Test that eigs(Z^{\otimes n}) is correct"""
res = z_eigs(n)
Zn = np.kron(Z, Z)
for _ in range(n - 2):
Zn = np.kron(Zn, Z)
expected = np.diag(Zn)
assert np.all(res == expected)
class TestProbabilities:
"""Tests for the probability function"""
def test_probability_no_results(self):
"""Test that the probabilities function returns
None if no job has yet been run."""
dev = Device1(backend="statevector_simulator", wires=1, shots=0)
assert dev.probabilities() is None
|
python
|
## @file test_var_dict.py
# Unit test suite for the VarDict class.
#
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import unittest
from edk2toolext.environment import var_dict
class TestVarDict(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def test_var_dict_basic_set_get(self):
v = var_dict.VarDict()
v.SetValue("test1", "value1", "test 1 comment")
## confirm basic get
vv = v.GetValue("test1")
self.assertEqual("value1", vv)
def test_var_dict_get_key_is_none(self):
v = var_dict.VarDict()
self.assertIsNone(v.GetValue(None))
def test_var_dict_get_key_unknown_return_value(self):
v = var_dict.VarDict()
self.assertIsNone(v.GetValue("invalidkey"))
self.assertEqual("test1", v.GetValue("invalidkey", "test1"))
def test_var_dict_cant_override(self):
v = var_dict.VarDict()
v.SetValue("test1", "value1", "test 1 comment")
## confirm override == false
v.SetValue("test1", "value2", "test for override")
vv = v.GetValue("test1")
self.assertEqual("value1", vv)
v.SetValue("test1", "value1", "set same") # to get coverage
vv = v.GetValue("test1")
self.assertEqual("value1", vv)
def test_var_dict_can_override(self):
v = var_dict.VarDict()
v.SetValue("test1", "value1", "test 1 comment", True)
## confirm override == true
v.SetValue("test1", "value2", "test for override")
vv = v.GetValue("test1")
self.assertEqual("value2", vv)
def test_var_dict_key_not_case_sensitive(self):
v = var_dict.VarDict()
v.SetValue("test1", "value1", "test 1 comment")
## confirm case sensitivity
vv = v.GetValue("TEST1")
self.assertEqual("value1", vv)
def test_var_dict_key_not_case_sensitive2(self):
v = var_dict.VarDict()
v.SetValue("TEST1", "value1", "test 1 comment")
## confirm case sensitivity
vv = v.GetValue("test1")
self.assertEqual("value1", vv)
def test_var_dict_key_not_case_sensitive3(self):
v = var_dict.VarDict()
v.SetValue("TeSt1", "value1", "test 1 comment")
## confirm case sensitivity
vv = v.GetValue("tEsT1")
self.assertEqual("value1", vv)
def test_var_dict_build_value_when_type_para_used(self):
v = var_dict.VarDict()
v.SetValue("bld_debug_test1", "builddvalue1", "build dtest 1 comment")
v.SetValue("bld_release_test1", "buildrvalue1", "build rtest 1 comment")
## confirm with correct build type debug
vv = v.GetBuildValue("TEST1", "DEBUG")
self.assertEqual("builddvalue1", vv)
## confirm with correct build type release
vv = v.GetBuildValue("TEST1", "release")
self.assertEqual("buildrvalue1", vv)
def test_var_dict_build_value_none_for_key(self):
v = var_dict.VarDict()
v.SetValue("bld_debug_test1", "builddvalue1", "build test 1 comment")
self.assertIsNone(v.GetBuildValue(None, "DEBUG"))
def test_var_dict_build_value_when_type_para_used_wc(self):
v = var_dict.VarDict()
v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
## confirm wildcard support build type fail back to *
vv = v.GetBuildValue("TEST1", "DEBUG")
self.assertEqual("buildvalue1", vv)
vv = v.GetBuildValue("TEST1", "RELEASE")
self.assertEqual("buildvalue1", vv)
## confirm match has higher priority
v.SetValue("bld_debug_test1", "builddvalue1", "build test 1 comment")
vv = v.GetBuildValue("TEST1", "DEBUG")
self.assertEqual("builddvalue1", vv)
v.SetValue("bld_release_test1", "buildrvalue1", "build test 1 comment")
vv = v.GetBuildValue("TEST1", "release")
self.assertEqual("buildrvalue1", vv)
vv = v.GetBuildValue("TEST1", "NOOPT")
self.assertEqual("buildvalue1", vv)
def test_var_dict_build_value_when_target_set(self):
v = var_dict.VarDict()
v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
v.SetValue("TARGET", "DEBUG", "Set to Debug")
## confirm can get it with target set
vv = v.GetBuildValue("TEST1")
self.assertEqual("buildvalue1", vv)
def test_var_dict_build_value_when_no_build_type(self):
v = var_dict.VarDict()
v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
## confirm can't get it without build type or target set
vv = v.GetBuildValue("TEST1")
self.assertEqual(None, vv)
    def test_var_dict_get_all_with_no_entries(self):
v = var_dict.VarDict()
v.SetValue("test1", "buildvalue1", "build test 1 comment")
v.SetValue("test2", "test", "non build value")
        ## confirm result has no build values
vlist = v.GetAllBuildKeyValues("DEBUG")
self.assertEqual(len(vlist), 0)
def test_var_dict_get_all_with_no_target(self):
v = var_dict.VarDict()
v.SetValue("test1", "buildvalue1", "build test 1 comment")
v.SetValue("test2", "test", "non build value")
        ## confirm result has no build values when no target is set
vlist = v.GetAllBuildKeyValues()
self.assertEqual(len(vlist), 0)
def test_var_dict_get_all_build_key_values_and_not_other_values(self):
v = var_dict.VarDict()
v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
v.SetValue("test2", "test", "non build value")
## confirm result only has 1 value
vlist = v.GetAllBuildKeyValues("DEBUG")
self.assertEqual(len(vlist), 1)
## confirm override behavior
v.SetValue("Target", "DEBUG", "Set target to debug")
v.SetValue("bld_release_test1", "buildvalue1", "build test 1 comment")
vlist = v.GetAllBuildKeyValues()
self.assertEqual(len(vlist), 1)
## override using parameter for build type
vlist = v.GetAllBuildKeyValues("RELEASE")
self.assertEqual(len(vlist), 1)
def test_var_dict_print_all(self):
v = var_dict.VarDict()
v.SetValue("bld_*_test1", "buildvalue1", "build test 1 comment")
v.SetValue("test2", "value1", "test 1 comment overrideable", True)
v.PrintAll()
if __name__ == '__main__':
unittest.main()
|
python
|
import os
from os import listdir
from os.path import isfile, join
import cv2
import numpy as np
number = 2
mypath = "pillPictures/" + str(number)
savepath = "pillPictures/saved"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
img_count = 0
for file in onlyfiles:
img_count = img_count + 1
image_path = mypath + "/" + file
img = cv2.imread(image_path)
#print(np.shape(img))
img = img[500:2500,1000:3000]
#print(np.shape(img))
print(img_count)
    cv2.imwrite(os.path.join(savepath, str(number) + "_pill_" + str(img_count) + '.jpg'), img)
|
python
|
import numpy as np
import cv2
from mss import mss
from PIL import Image
# There's no native way of handling the feature of getting the window "always on top"
# It's OS dependent forcing it to not be cross platform
# -> this is a windows way of handling things. Marked with TODOs
#import os
# signals and signal handlers for garbage collection -> obsolete as there's an easier solution with a shared variable
# import signal
# shared_flag shared by multiple threads
#shared_flag = 0
class SCR():
# class var
arr = [0] * 4
bounding_box = {'top': 0, 'left': 0, 'width': 1000, 'height': 1000}
# To keep up with the active monitors, array elements are used as placeholders for each active screen
def __init__(self):
self.sct = mss()
def setVar(self,top,left,width,height):
self.bounding_box={'top':top,'left':left,'width':width,'height':height}
def run(self, name):
if (self.arr[int(name[6])] == 0):
#print(name[6] + "\'th bucket got filled up !")
self.arr[int(name[6])] = 1
while (self.arr[int(name[6])] == 1):
sct_img = self.sct.grab(self.bounding_box)
cv2.namedWindow(name, cv2.WINDOW_NORMAL|cv2.WINDOW_KEEPRATIO)
cv2.setMouseCallback(name, self.callback_func, param=name[6])
cv2.imshow(name, np.array(sct_img))
if (cv2.waitKey(1) & 0xFF) == ord('p'):
self.arr[int(name[6])] = 0
cv2.destroyWindow(name)
def callback_func(self, event, x,y,flags,param):
if event == cv2.EVENT_RBUTTONDOWN:
self.arr[int(param)]=0
cv2.destroyWindow('screen'+param)
#print("destroyed screen" + param)
|
python
|
from overrides import overrides
from typing import Dict, Iterator, List, Tuple
import json
from functools import reduce
from operator import mul
import os
def compute_alignment_differences(align_str: str):
aligns = align_str.split(" ")
align_diff = 0.
for align in aligns:
i, j = align.split("-")
align_diff += abs(int(i) - int(j))
align_diff = align_diff/len(aligns)
return align_diff
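# Worked example (made-up alignment string): "0-0 1-2 3-1" gives (|0-0| + |1-2| + |3-1|) / 3 = 1.0,
# i.e. compute_alignment_differences("0-0 1-2 3-1") == 1.0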
class Prediction():
def __init__(
self,
rawdata_file: str, labeleddata_file: str, leftdata_file: str,
align_file: str, leftalign_file: str,
conf_threshold: float, aligndiff_threshold: float,
test_lang: str, train_lang: str,
) -> None:
super().__init__()
self.rawdata_file = rawdata_file
self.labeleddata_file = labeleddata_file
self.leftdata_file = leftdata_file
self.align_file = align_file
self.leftalign_file = leftalign_file
self.test_lang = test_lang
self.train_lang = train_lang
self.conf_threshold = conf_threshold
self.aligndiff_threshold = aligndiff_threshold
def filtered_snts(self, snts: List[Dict]):
filtered_snts = []
aligns = self.get_aligns()
if len(aligns) != len(snts):
raise ValueError(
f"the num of alignment differences:{len(aligns)}\
and sentences:{len(snts)} are not equal."
)
data_writer = open(self.leftdata_file, "w", encoding="utf-8")
align_writer = open(self.leftalign_file, "w", encoding="utf-8")
for snt, align in zip(snts, aligns):
confidence_score = reduce(mul, snt["confidences"])
align_diff = compute_alignment_differences(align)
if (confidence_score > self.conf_threshold) and (align_diff <= self.aligndiff_threshold):
filtered_snts.append(snt)
else:
data_writer.write(json.dumps({
"tokens": snt["tokens"],
"postags": snt["postags"]
}, ensure_ascii=False)+"\n")
align_writer.write(align+"\n")
data_writer.close()
align_writer.close()
print(f"the num of the filtered sentences is {len(filtered_snts)}")
return filtered_snts
def get_aligns(self) -> List[str]:
aligns = []
with open(self.align_file, "r", encoding="utf-8") as reader:
for line in reader:
aligns.append(line.strip())
return aligns
def writing_snts(self, snts: List[Dict]) -> None:
with open(self.labeleddata_file, 'a', encoding='utf-8') as writer:
print(f'append sentences to {self.labeleddata_file}')
print(f"please check that language will be overrided to {self.train_lang}.")
for snt in snts:
writer.write(json.dumps({
"tokens": snt['tokens'],
"postags": snt['postags'],
"heads": snt['heads'],
"deprels": snt['deprels'],
"confidences": snt['confidences'],
"language": self.train_lang,
}, ensure_ascii=False)+'\n')
print(f'{len(snts)} sentences were written to {self.labeleddata_file}')
def jsonl_reader(
self,
inputfile: str,
override_lang: str = None,
) -> Iterator[Dict]:
print(f"reading data from {inputfile}")
if override_lang is not None:
            print(f'please check that language will be overridden to {override_lang}')
with open(inputfile, 'r', encoding='utf-8') as reader:
for line in reader:
data = json.loads(line.strip())
if override_lang:
data['language'] = override_lang
yield data
def rawdata_processing(self):
raise NotImplementedError()
def processing(self):
raise NotImplementedError()
class PipelinePrediction(Prediction):
def __init__(
self,
model_inputfile: str, model_outputfile: str,
rawdata_file: str, labeleddata_file: str, leftdata_file: str,
align_file: str, leftalign_file: str,
conf_threshold: float, aligndiff_threshold: float,
test_lang: str, train_lang: str,
) -> None:
super().__init__(
rawdata_file, labeleddata_file, leftdata_file,
align_file, leftalign_file,
conf_threshold, aligndiff_threshold,
test_lang, train_lang
)
self.model_inputfile = model_inputfile
self.model_outputfile = model_outputfile
@overrides
def rawdata_processing(self):
num = 0
with open(self.model_inputfile, 'w', encoding='utf-8') as writer:
for snt in self.jsonl_reader(self.rawdata_file, override_lang=self.test_lang):
writer.write(json.dumps(snt, ensure_ascii=False)+'\n')
num += 1
print(f"{num} sentences were writted to {self.model_inputfile}")
@overrides
def processing(self):
snts_p = list(self.jsonl_reader(self.model_outputfile))
snts_p = self.filtered_snts(snts_p)
self.writing_snts(snts_p)
print('finish')
def jsonl_reader(inputfile: str, override_lang: str = None) -> List[Dict]:
if override_lang is not None:
        print(f'please check that language will be overridden to {override_lang}')
snts = []
with open(inputfile, 'r', encoding='utf-8') as reader:
for line in reader:
snt = json.loads(line.strip())
if override_lang is not None:
snt["language"] = override_lang
snts.append(snt)
print(f"reading {len(snts)} sentences from {inputfile}")
return snts
def prepare_predict_input(
rawcorpus: str,
outputfile: str,
lang: str,
snt_start: int = None,
snt_end: int = None
) -> None:
snts = jsonl_reader(rawcorpus, override_lang=lang)
if snt_start is not None:
snts = snts[snt_start: snt_end]
print(f"filtering sentences from {snt_start} to {snt_end}")
writing_jsonl(snts, "w", outputfile)
def filtering(
snts: List[Dict],
snts_num: int,
) -> Tuple[List[Dict], List[Dict]]:
snts = sorted(snts, key=lambda inst: reduce(mul, inst['confidences']), reverse=True)
return snts[:snts_num], snts[snts_num:]
def writing_jsonl(snts: List[Dict], mode: str, file: str) -> None:
if mode == "w":
assert not os.path.exists(file), f"{file} exists"
with open(file, mode, encoding="utf-8") as writer:
for snt in snts:
writer.write(json.dumps(snt, ensure_ascii=False)+"\n")
print(f"writing {len(snts)} sentences to {file} with mode {mode}")
def filter_and_append_pseudo_sentences(
predictfile: str,
left_rawcorpus: str,
labeled_datafile: str,
lang: str,
snts_num: int
) -> None:
print(f"filter sentences from {predictfile} and append them to {labeled_datafile}")
snts = jsonl_reader(predictfile, override_lang=lang)
filtered_snts, left_snts = filtering(snts, snts_num)
left_snts = [{"tokens": snt["tokens"], "postags": snt["postags"]} for snt in left_snts]
writing_jsonl(filtered_snts, "a", labeled_datafile)
writing_jsonl(left_snts, "w", left_rawcorpus)
if __name__ == '__main__':
# prepare_predict_input(
# rawcorpus="./data/data2/origin/gd/gd.sorted.jsonl",
# outputfile="./results/base0/gd_input.jsonl",
# lang="en0",
# snt_start=0,
# snt_end=16000
# )
# filter_and_append_pseudo_sentences(
# predictfile="./results/base/roberta0/eva/sv_output.sub.jsonl",
# left_rawcorpus="./results/base/roberta0/eva/im_ex/sv.jsonl",
# labeled_datafile="./data/data2/train/base/im_ex/sv.jsonl",
# lang="sv1",
# snts_num=2000
# )
|
python
|
from rockstar import RockStar
css_code = """body:before {
content: "Hello, world!";
}"""
rock_it_bro = RockStar(days=400, file_name='helloworld.css', code=css_code)
rock_it_bro.make_me_a_rockstar()
|
python
|
"""Read command line argument.
Assign to _x the string value of the first command line parameter, after the program name.
Source: programming-idioms.org
"""
# Implementation author: nickname
# Created on 2016-02-18T16:58:00.600634Z
# Last modified on 2016-02-18T16:58:00.600634Z
# Version 1
# argv[0] is the program name
import sys
x = sys.argv[1]
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 11:24:29 2018
@author: mayank
"""
import numpy as np
#import pandas as pd
#from time import time
from sklearn.model_selection import StratifiedKFold
#import os
#from sklearn.cluster import KMeans
from sklearn.utils import resample
from scipy.stats import mode
#from sklearn.metrics import f1_score
from sklearn.neighbors import NearestNeighbors
from numpy.matlib import repmat
from sklearn.metrics.pairwise import linear_kernel,rbf_kernel,manhattan_distances,polynomial_kernel,sigmoid_kernel,cosine_similarity,laplacian_kernel,paired_euclidean_distances,pairwise_distances
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.decomposition import IncrementalPCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from numpy.linalg import eigh
#%%
#from scipy.io import loadmat
#from sklearn.decomposition import IncrementalPCA
#from sklearn import mixture
class MCM:
def __init__(self, C1 = 1.0, C2 = 1e-05, C3 =1.0, C4 =1.0, problem_type ='classification', algo_type ='MCM' ,kernel_type = 'rbf', gamma = 1e-05, epsilon = 0.1,
feature_ratio = 1.0, sample_ratio = 1.0, feature_sel = 'random', n_ensembles = 1,
batch_sz = 128, iterMax1 = 1000, iterMax2 = 1, eta = 0.01, tol = 1e-08, update_type = 'adam',
reg_type = 'l1', combine_type = 'concat', class_weighting = 'balanced', upsample1 = False,
PV_scheme = 'kmeans', n_components = 100, do_pca_in_selection = False ):
self.C1 = C1 #hyperparameter 1 #loss function parameter
self.C2 = C2 #hyperparameter 2 #when using L1 or L2 or ISTA penalty
self.C3 = C3 #hyperparameter 2 #when using elastic net penalty (this parameter should be between 0 and 1) or margin penalty value need not be between 0 and 1
self.C4 = C4 #hyperparameter for final regressor or classifier used to ensemble when concatenating
# the outputs of previos layer of classifier or regressors
self.problem_type = problem_type #{0:'classification', 1:'regression'}
self.algo_type = algo_type #{0:MCM,1:'LSMCM'}
self.kernel_type = kernel_type #{0:'linear', 1:'rbf', 2:'sin', 3:'tanh', 4:'TL1', 5:'linear_primal', 6:'rff_primal', 7:'nystrom_primal'}
self.gamma = gamma #hyperparameter3 (kernel parameter for non-linear classification or regression)
self.epsilon = epsilon #hyperparameter4 ( It specifies the epsilon-tube within which
#no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value.)
self.n_ensembles = n_ensembles #number of ensembles to be learnt, if setting n_ensembles > 1 then keep the sample ratio to be around 0.7
self.feature_ratio = feature_ratio #percentage of features to select for each PLM
self.sample_ratio = sample_ratio #percentage of data to be selected for each PLM
self.batch_sz = batch_sz #batch_size
self.iterMax1 = iterMax1 #max number of iterations for inner SGD loop
self.iterMax2 = iterMax2 #max number of iterations for outer SGD loop
self.eta = eta #initial learning rate
self.tol = tol #tolerance to cut off SGD
self.update_type = update_type #{0:'sgd',1:'momentum',3:'nesterov',4:'rmsprop',5:'adagrad',6:'adam'}
self.reg_type = reg_type #{0:'l1', 1:'l2', 2:'en', 4:'ISTA', 5:'M'}#ISTA: iterative soft thresholding (proximal gradient), M: margin + l1
self.feature_sel = feature_sel #{0:'sliding', 1:'random'}
self.class_weighting = class_weighting #{0:'average', 1:'balanced'}
self.combine_type = combine_type #{0:'concat',1:'average',2:'mode'}
self.upsample1 = upsample1 #{0:False, 1:True}
self.PV_scheme = PV_scheme # {0:'kmeans',1:'renyi'}
self.n_components = n_components #number of components to choose as Prototype Vector set, or the number of features to form for kernel_approximation as in RFF and Nystroem
self.do_pca_in_selection = do_pca_in_selection #{0:False, 1:True}
def add_bias(self,xTrain):
N = xTrain.shape[0]
if(xTrain.size!=0):
xTrain=np.hstack((xTrain,np.ones((N,1))))
return xTrain
def standardize(self,xTrain):
me=np.mean(xTrain,axis=0)
std_dev=np.std(xTrain,axis=0)
#remove columns with zero std
idx=(std_dev!=0.0)
# print(idx.shape)
xTrain[:,idx]=(xTrain[:,idx]-me[idx])/std_dev[idx]
return xTrain,me,std_dev
def generate_samples(self,X_orig,old_imbalance_ratio,new_imbalance_ratio):
N=X_orig.shape[0]
M=X_orig.shape[1]
neighbors_thresh=10
new_samples=int(new_imbalance_ratio/old_imbalance_ratio*N - N)
#each point must generate these many samples
new_samples_per_point_orig=new_imbalance_ratio/old_imbalance_ratio - 1
new_samples_per_point=int(new_imbalance_ratio/old_imbalance_ratio - 1)
#check if the number of samples each point has to generate is > 1
X1=np.zeros((0,M))
if(new_samples_per_point_orig>0 and new_samples_per_point_orig<=1):
idx_samples=resample(np.arange(0,N), n_samples=int(N*new_samples_per_point_orig), random_state=1,replace=False)
X=X_orig[idx_samples,]
new_samples_per_point=1
N=X.shape[0]
else:
X=X_orig
if(N==1):
X1=repmat(X,new_samples,1)
elif(N>1):
if(N<=neighbors_thresh):
n_neighbors=int(N/2)
else:
n_neighbors=neighbors_thresh
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
for i in range(N):
#for each point find its n_neighbors nearest neighbors
inds=nbrs.kneighbors(X[i,:].reshape(1,-1), n_neighbors, return_distance=False)
temp_data=X[inds[0],:]
std=np.std(temp_data,axis=0)
me=np.mean(temp_data,axis=0)
np.random.seed(i)
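                # Draw new_samples_per_point synthetic points from a Gaussian fitted to the
                # neighbourhood mean and standard deviation (a SMOTE-like oversampling step).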
x_temp=me + std*np.random.randn(new_samples_per_point,M)
X1=np.append(X1,x_temp,axis=0)
return X_orig, X1
def upsample(self,X,Y,new_imbalance_ratio,upsample_type):
#xTrain: samples X features
#yTrain : samples,
#for classification only
numClasses=np.unique(Y).size
class_samples=np.zeros((numClasses,))
X3=np.zeros((0,X.shape[1]))
Y3=np.zeros((0,))
#first find the samples per class per class
for i in range(numClasses):
idx1=(Y==i)
class_samples[i]=np.sum(idx1)
max_samples=np.max(class_samples)
# new_imbalance_ratio=0.5
if(upsample_type==1):
old_imbalance_ratio_thresh=0.5
else:
old_imbalance_ratio_thresh=1
for i in range(numClasses):
idx1=(Y==i)
old_imbalance_ratio=class_samples[i]/max_samples
X1=X[idx1,:]
Y1=Y[idx1,]
if(idx1.size==1):
X1=np.reshape(X1,(1,X.shape[1]))
if(old_imbalance_ratio<=old_imbalance_ratio_thresh and class_samples[i]!=0):
X1,X2=self.generate_samples(X1,old_imbalance_ratio,new_imbalance_ratio)
new_samples=X2.shape[0]
Y2=np.ones((new_samples,))
Y2=Y2*Y1[0,]
#append original and generated samples
X3=np.append(X3,X1,axis=0)
X3=np.append(X3,X2,axis=0)
Y3=np.append(Y3,Y1,axis=0)
Y3=np.append(Y3,Y2,axis=0)
else:
#append original samples only
X3=np.append(X3,X1,axis=0)
Y3=np.append(Y3,Y1,axis=0)
Y3=np.array(Y3,dtype=np.int32)
return X3,Y3
def kmeans_select(self,X,represent_points):
"""
Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
The prototype vectors are selected based on the farthest distance from the kmeans centers
Parameters
----------
X: np.ndarray
shape = n_samples, n_features
represent_points: int
number of prototype vectors to return
do_pca: boolean
whether to perform incremental pca for dimensionality reduction before selecting prototype vectors
Returns
-------
sv: list
list of the prototype vector indices from the data array given by X
"""
do_pca = self.do_pca_in_selection
N = X.shape[0]
if(do_pca == True):
if(X.shape[1]>50):
n_components = 50
ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128,X.shape[0]]))
X = ipca.fit_transform(X)
kmeans = MiniBatchKMeans(n_clusters=represent_points, batch_size=np.min([128,X.shape[0]]),random_state=0).fit(X)
centers = kmeans.cluster_centers_
labels = kmeans.labels_
sv= []
unique_labels = np.unique(labels).size
all_ind = np.arange(N)
for j in range(unique_labels):
X1 = X[labels == j,:]
all_ind_temp = all_ind[labels==j]
tempK = pairwise_distances(X1,np.reshape(centers[j,:],(1,X1.shape[1])))**2
inds = np.argmax(tempK,axis=0)
sv.append(all_ind_temp[inds[0]])
return sv
def renyi_select(self,X,represent_points):
"""
Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
The prototype vectors are selected based on maximization of quadratic renyi entropy, which can be
written in terms of log sum exp which is a tightly bounded by max operator. Now for rbf kernel,
the max_{ij}(-\|x_i-x_j\|^2) is equivalent to min_{ij}(\|x_i-x_j\|^2).
Parameters
----------
X: np.ndarray
shape = n_samples, n_features
represent_points: int
number of prototype vectors to return
do_pca: boolean
whether to perform incremental pca for dimensionality reduction before selecting prototype vectors
Returns
-------
sv: list
list of the prototype vector indices from the data array given by X
"""
do_pca = self.do_pca_in_selection
N= X.shape[0]
capacity=represent_points
selectionset=set([])
set_full=set(list(range(N)))
np.random.seed(1)
if(len(selectionset)==0):
selectionset = np.random.permutation(N)
sv = list(selectionset)[0:capacity]
else:
extrainputs = represent_points - len(selectionset)
leftindices =list(set_full.difference(selectionset))
info = np.random.permutation(len(leftindices))
info = info[1:extrainputs]
sv = selectionset.append(leftindices[info])
if(do_pca == True):
if(X.shape[1]>50): #takes more time
n_components = 50
ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128,X.shape[0]]))
X = ipca.fit_transform(X)
svX = X[sv,:]
min_info = np.zeros((capacity,2))
KsV = pairwise_distances(svX,svX)**2 #this is fast
KsV[KsV==0] = np.inf
min_info[:,1] = np.min(KsV,axis=1)
min_info[:,0] = np.arange(capacity)
minimum = np.min(min_info[:,1])
counter = 0
for i in range(N):
# find for which data the value is minimum
replace = np.argmin(min_info[:,1])
ids = int(min_info[min_info[:,0]==replace,0])
#Subtract from totalcrit once for row
tempminimum = minimum - min_info[ids,1]
#Try to evaluate kernel function
tempsvX = np.zeros(svX.shape)
tempsvX[:] = svX[:]
inputX = X[i,:]
tempsvX[replace,:] = inputX
tempK = pairwise_distances(tempsvX,np.reshape(inputX,(1,X.shape[1])))**2 #this is fast
tempK[tempK==0] = np.inf
distance_eval = np.min(tempK)
tempminimum = tempminimum + distance_eval
if (minimum < tempminimum):
minimum = tempminimum
min_info[ids,1] = distance_eval
svX[:] = tempsvX[:]
sv[ids] = i
counter +=1
return sv
def subset_selection(self,X,Y):
n_components = self.n_components
PV_scheme = self.PV_scheme
problem_type = self.problem_type
N = X.shape[0]
# M = X.shape[1]
numClasses = np.unique(Y).size
use_global_sig = False
use_global_sig1 = False
if(use_global_sig ==True or problem_type == 'regression'):
if(PV_scheme == 'renyi'):
# sig_global = np.power((np.std(X)*(np.power(N,(-1/(M+4))))),2)
subset = self.renyi_select(X,n_components)
elif(PV_scheme == 'kmeans'):
subset = self.kmeans_select(X,n_components)
else:
print('No PV_scheme provided... using all the samples!')
subset = list(np.arange(N))
else:
all_samples = np.arange(N)
subset=[]
subset_per_class = np.zeros((numClasses,))
class_dist = np.zeros((numClasses,))
for i in range(numClasses):
class_dist[i] = np.sum(Y == i)
subset_per_class[i] = int(np.ceil((class_dist[i]/N)*n_components))
for i in range(numClasses):
xTrain = X[Y == i,]
samples_in_class = all_samples[Y == i]
N1 = xTrain.shape[0]
# sig = np.power((np.std(xTrain)*(np.power(N1,(-1/(M+4))))),2)
if(PV_scheme == 'renyi'):
if(use_global_sig1 == False):
subset1 = self.renyi_select(xTrain,int(subset_per_class[i]))
else:
# sig_global = np.power((np.std(X)*(np.power(N,(-1/(M+4))))),2)
subset1 = self.renyi_select(xTrain,int(subset_per_class[i]))
elif(PV_scheme == 'kmeans'):
subset1 = self.kmeans_select(xTrain,int(subset_per_class[i]))
else:
print('No PV_scheme provided... using all the samples!')
subset1 = list(np.arange(N1))
temp=list(samples_in_class[subset1])
subset.extend(temp)
return subset
def divide_into_batches_stratified(self,yTrain):
batch_sz=self.batch_sz
#data should be of the form samples X features
N=yTrain.shape[0]
num_batches=int(np.ceil(N/batch_sz))
sample_weights=list()
numClasses=np.unique(yTrain).size
idx_batches=list()
skf=StratifiedKFold(n_splits=num_batches, random_state=1, shuffle=True)
j=0
for train_index, test_index in skf.split(np.zeros(N), yTrain):
idx_batches.append(test_index)
class_weights=np.zeros((numClasses,))
sample_weights1=np.zeros((test_index.shape[0],))
temp=yTrain[test_index,]
for i in range(numClasses):
idx1=(temp==i)
class_weights[i]=1.0/(np.sum(idx1)+1e-09)#/idx.shape[0]
sample_weights1[idx1]=class_weights[i]
sample_weights.append(sample_weights1)
j+=1
return idx_batches,sample_weights,num_batches
def kernel_transform(self, X1, X2 = None, kernel_type = 'linear_primal', n_components = 100, gamma = 1.0):
"""
X1: n_samples1 X M
X2: n_samples2 X M
X: n_samples1 X n_samples2 : if kernel_type is non primal
X: n_samples1 X n_components : if kernel_type is primal
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X2)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X2,1/(2*gamma))
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X2,-gamma)
elif(kernel_type == 'sin'):
X = np.sin(gamma*manhattan_distances(X1,X2))
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X2))
elif(kernel_type == 'rff_primal'):
rbf_feature = RBFSampler(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'nystrom_primal'):
#cannot have n_components more than n_samples1
if(n_components > X1.shape[0]):
n_components = X1.shape[0]
self.n_components = n_components
rbf_feature = Nystroem(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'linear_primal'):
X = X1
else:
print('No kernel_type passed: using linear primal solver')
X = X1
return X
def margin_kernel(self, X1, kernel_type = 'linear', gamma =1.0):
"""
X1: n_samples1 X M
X: n_samples1 X n_samples1 : if kernel_type is non primal
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X1)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X1,1/(2*gamma))
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X1,-gamma)
elif(kernel_type == 'sin'):
X = np.sin(gamma*manhattan_distances(X1,X1))
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X1))
else:
print('no kernel_type, returning None')
return None
return X
def matrix_decomposition(self, X):
"""
Finds the matrices consisting of positive and negative parts of kernel matrix X
Parameters:
----------
X: n_samples X n_samples
Returns:
--------
K_plus: kernel corresponding to +ve part
K_minus: kernel corresponding to -ve part
"""
[D,U]=eigh(X)
U_plus = U[:,D>0.0]
U_minus = U[:,D<=0.0]
D_plus = np.diag(D[D>0.0])
D_minus = np.diag(D[D<=0.0])
K_plus = np.dot(np.dot(U_plus,D_plus),U_plus.T)
K_minus = -np.dot(np.dot(U_minus,D_minus),U_minus.T)
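        # Both parts are positive semi-definite by construction, and the input matrix is
        # recovered as X = K_plus - K_minus.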
return K_plus, K_minus
def inner_opt(self, X, Y, data1, level):
gamma = self.gamma
kernel_type = self.kernel_type
iterMax2 = self.iterMax2
iterMax1 = self.iterMax1
tol = self.tol
algo_type = self.algo_type
#if data1 = None implies there is no kernel computation, i.e., there is only primal solvers applicable
if(data1 is not None):
if(self.reg_type == 'M'):
K = self.margin_kernel( X1 = data1, kernel_type = kernel_type, gamma = gamma)
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
K_plus, K_minus = self.matrix_decomposition(K)
if(algo_type == 'MCM'):
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W_prev,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
if(kernel_type == 'linear' or kernel_type == 'rbf'):
#for mercer kernels no need to train for outer loop
print('Returning for mercer kernels')
return W_prev,f,iters,fvals
else:
print('Solving for non - mercer kernels')
#for non mercer kernels, train for outer loop with initial point as W_prev
W_best = np.zeros(W_prev.shape)
W_best[:] = W_prev[:]
f_best = np.inf
iter_best = 0
fvals = np.zeros((iterMax1+1,))
iters = 0
fvals[iters] = f
rel_error = 1.0
print('iters =%d, f_outer = %0.9f'%(iters,f))
while(iters < iterMax2 and rel_error > tol):
iters = iters + 1
if(algo_type == 'MCM'):
W,f,iters1,fvals1 = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
elif(algo_type == 'LSMCM'):
W,f,iters1,fvals1 = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters1,fvals1 = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = W_prev)
rel_error = np.abs((np.linalg.norm(W,'fro')-np.linalg.norm(W_prev,'fro'))/(np.linalg.norm(W_prev,'fro') + 1e-08))
W_prev[:] = W[:]
print('iters =%d, f_outer = %0.9f'%(iters,f))
if(f < f_best):
W_best[:] = W[:]
f_best = f
iter_best = iters
else:
break
fvals[iters] = -1
return W_best,f_best,iter_best,fvals
else:
print('Please choose a kernel_type from linear, rbf, sin, tanh or TL1 for reg_type = M to work ')
print('Using a linear kernel')
self.kernel_type = 'linear'
K_plus, K_minus = self.matrix_decomposition(K)
if(algo_type == 'MCM'):
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W_prev,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W_prev,f,iters,fvals = self.train(X, Y, level, K_plus = K_plus, K_minus = None, W = None)
return W_prev,f,iters,fvals
else:
#i.e., reg_type is not M, then train accordingly using either l1, l2, ISTA or elastic net penalty
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X, Y, level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X, Y, level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X, Y, level, K_plus = None, K_minus = None, W = None)
return W, f, iters, fvals
else:
#i.e., data1 is None -> we are using primal solvers with either l1, l2, ISTA or elastic net penalty
if(self.reg_type == 'M'):
print('Please choose a kernel_type from linear, rbf, sin, tanh or TL1 for reg_type = M to work')
print('doing linear classifier with l1 norm on weights')
self.reg_type = 'l1'
self.C3 = 0.0
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X,Y,level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
return W,f,iters,fvals
else:
if(algo_type == 'MCM'):
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
elif(algo_type == 'LSMCM'):
W,f,iters,fvals = self.train_LSMCM(X,Y,level, K_plus = None, K_minus = None, W = None)
else:
print('Wrong algo selected! Using MCM instead!')
W,f,iters,fvals = self.train(X,Y,level, K_plus = None, K_minus = None, W = None)
return W,f,iters,fvals
return W,f,iters,fvals
def select_(self, xTest, xTrain, kernel_type, subset, idx_features, idx_samples):
#xTest corresponds to X1
#xTrain corresponds to X2
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
X2 = xTrain[idx_samples,:]
X2 = X2[:,idx_features]
X2 = X2[subset,]
X1 = xTest[:,idx_features]
else:
X1 = xTest[:,idx_features]
X2 = None
return X1, X2
def normalize_(self,xTrain, me, std):
idx = (std!=0.0)
xTrain[:,idx] = (xTrain[:,idx]-me[idx])/std[idx]
return xTrain
def fit(self,xTrain,yTrain):
#xTrain: samples Xfeatures
#yTrain: samples
#for classification: entries of yTrain should be between {0 to numClasses-1}
#for regresison : entries of yTrain should be real values
N = xTrain.shape[0]
M = xTrain.shape[1]
if(self.problem_type =='classification'):
numClasses=np.unique(yTrain).size
if(self.problem_type =='regression'):
if(yTrain.size == yTrain.shape[0]):
yTrain = np.reshape(yTrain,(yTrain.shape[0],1))
numClasses = yTrain.shape[1] #for multi target SVM, assuming all targets are independent to each other
feature_indices=np.zeros((self.n_ensembles,int(M*self.feature_ratio)),dtype=np.int32)
sample_indices=np.zeros((self.n_ensembles,int(N*self.sample_ratio)),dtype=np.int32)
W_all={}
me_all= {}
std_all = {}
subset_all = {}
if(self.combine_type=='concat'):
P_all=np.zeros((N,self.n_ensembles*numClasses)) #to concatenate the classes
level=0
gamma = self.gamma
kernel_type = self.kernel_type
n_components = self.n_components
for i in range(self.n_ensembles):
print('training PLM %d'%i)
if(self.sample_ratio!=1.0):
idx_samples=resample(np.arange(0,N), n_samples=int(N*self.sample_ratio), random_state=i,replace=False)
else:
idx_samples = np.arange(N)
if(self.feature_ratio!=1.0):
idx_features=resample(np.arange(0,M), n_samples=int(M*self.feature_ratio), random_state=i,replace=False)
else:
idx_features = np.arange(0,M)
feature_indices[i,:] = idx_features
sample_indices[i,:] = idx_samples
xTrain_temp = xTrain[idx_samples,:]
xTrain_temp = xTrain_temp[:,idx_features]
yTrain1 = yTrain[idx_samples,]
if(kernel_type == 'linear' or kernel_type =='rbf' or kernel_type =='sin' or kernel_type =='tanh' or kernel_type =='TL1'):
subset = self.subset_selection(xTrain_temp,yTrain1)
data1 = xTrain_temp[subset,]
subset_all[i] = subset
else:
subset_all[i] = []
data1 = None
xTrain1 = self.kernel_transform( X1 = xTrain_temp, X2 = data1, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
#standardize the dataset
xTrain1, me, std = self.standardize(xTrain1)
me_all[i] = me
std_all[i] = std
if(self.problem_type == 'regression'):
epsilon = self.epsilon
N1 = yTrain1.shape[0]
W = np.zeros((xTrain1.shape[1]+2,numClasses*2)) #2 is added to incorporate the yTrain2 and bias term appended to xTrain1
for j in range(numClasses):
yTrain3 = np.append(np.ones((N1,)), np.zeros((N1,)))
yTrain2 = np.append(yTrain1[:,j] + epsilon, yTrain1[:,j] - epsilon, axis = 0)
xTrain2 = np.append(xTrain1, xTrain1, axis = 0)
xTrain2 = np.append(xTrain2, np.reshape(yTrain2,(2*N1,1)), axis =1)
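                    # Regression is recast as a two-class MCM problem: each sample is duplicated
                    # with its target shifted by +epsilon (label 1) and -epsilon (label 0), and the
                    # shifted target is appended to the inputs as an extra column.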
# Wa,f,iters,fvals=self.train(xTrain2,yTrain3,level)
Wa,f,iters,fvals = self.inner_opt(xTrain2, yTrain3, data1, level)
W[:,j:j+2] = Wa
W_all[i]=W # W will be of the shape (M+2,), here numClasses = 1
if(self.problem_type == 'classification'):
# W,f,iters,fvals=self.train(xTrain1,yTrain1,level)
W,f,iters,fvals = self.inner_opt(xTrain1, yTrain1, data1, level)
W_all[i]=W # W will be of the shape (M+2,numClasses)
if(self.n_ensembles == 1 or self.combine_type != 'concat'):
return W_all, sample_indices, feature_indices, me_all, std_all, subset_all
else:
if(self.combine_type=='concat'):
level=1
for i in range(self.n_ensembles):
X1, X2 = self.select_(xTrain, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
xTrain1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
xTrain1 = self.normalize_(xTrain1,me_all[i],std_all[i])
M = xTrain1.shape[1]
xTrain1=self.add_bias(xTrain1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((xTrain1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
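                            # The regressor was trained with the target appended as an extra input
                            # column, so prediction solves w_x.x + w_y*y + b = 0 for y, i.e.
                            # y = -(w_x.x + b)/w_y (the 1e-08 below guards against w_y == 0).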
scores1 = xTrain1[:,0:M].dot(W1[0:M,]) + np.dot(xTrain1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
if(self.problem_type == 'classification'):
scores = xTrain1.dot(W)
P_all[:,i*numClasses:numClasses+i*numClasses] = scores
#train another regressor or classifier on top
if(self.problem_type == 'regression'):
epsilon = self.epsilon
P_all_1 = np.zeros((P_all.shape[0],self.n_ensembles))
W1 = np.zeros((P_all_1.shape[1]+2,numClasses*2))
for j in range(numClasses):
for k in range(self.n_ensembles):
P_all_1[:,k] = P_all[:,numClasses*k+j]
yTrain3 = np.append(np.ones((N,)), np.zeros((N,)))
yTrain2 = np.append(yTrain[:,j] + epsilon, yTrain[:,j] - epsilon, axis = 0)
P_all_2 = np.append(P_all_1, P_all_1, axis = 0)
P_all_2 = np.append(P_all_2, np.reshape(yTrain2,(2*N,1)), axis =1)
# Wa,f,iters,fvals = self.train(P_all_2,yTrain3,level)
Wa,f,iters,fvals = self.inner_opt(P_all_2, yTrain3, None, level)
W1[:,j:j+2] = Wa
if(self.problem_type == 'classification'):
# W1,f1,iters1,fvals1 = self.train(P_all,yTrain,level)
W1,f,iters,fvals = self.inner_opt(P_all, yTrain, None, level)
W_all[self.n_ensembles] = W1
return W_all, sample_indices, feature_indices, me_all, std_all, subset_all
def train(self, xTrain, yTrain, level, K_plus = None, K_minus = None, W = None):
#min D(E|w|_1 + (1-E)*0.5*|W|_2^2) + C*\sum_i\sum_(j)|f_j(i)| + \sum_i\sum_(j_\neq y_i)max(0,(1-f_y_i(i) + f_j(i)))
#setting C = 0 gives us SVM
# or when using margin term i.e., reg_type = 'M'
#min D(E|w|_1) + (E)*0.5*\sum_j=1 to numClasses (w_j^T(K+ - K-)w_j) + C*\sum_i\sum_(j)|f_j(i)| + \sum_i\sum_(j_\neq y_i)max(0,(1-f_y_i(i) + f_j(i)))
#setting C = 0 gives us SVM with margin term
if(self.upsample1==True):
xTrain,yTrain=self.upsample(xTrain,yTrain,new_imbalance_ratio=0.5,upsample_type=1)
xTrain=self.add_bias(xTrain)
M=xTrain.shape[1]
N=xTrain.shape[0]
numClasses=np.unique(yTrain).size
verbose = False
if(level==0):
C = self.C1 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty or margin term
else:
C = self.C4 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty since in combining the classifiers we use a linear primal classifier
iterMax1 = self.iterMax1
eta_zero = self.eta
class_weighting = self.class_weighting
reg_type = self.reg_type
update_type = self.update_type
tol = self.tol
np.random.seed(1)
if(W is None):
W=0.001*np.random.randn(M,numClasses)
W=W/np.max(np.abs(W))
else:
W_orig = np.zeros(W.shape)
W_orig[:] = W[:]
class_weights=np.zeros((numClasses,))
sample_weights=np.zeros((N,))
#divide the data into K clusters
for i in range(numClasses):
idx=(yTrain==i)
class_weights[i]=1.0/np.sum(idx)
sample_weights[idx]=class_weights[i]
G_clip_threshold = 100
W_clip_threshold = 500
eta=eta_zero
scores = xTrain.dot(W) #samples X numClasses
N = scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
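        # mat holds the margins f_j(i) - f_{y_i}(i) + 1 with the true-class entries zeroed,
        # so thresh1 below collects exactly the active multi-class hinge terms.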
thresh1 = np.zeros(mat.shape)
thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*np.sum(np.abs(scores)) + np.sum(thresh1)
f += (1.0/N)*f1
else:
f1 = C*np.sum(np.abs(scores)*sample_weights[:,None]) + np.sum(thresh1*sample_weights[:,None])
f+= (1.0/numClasses)*f1
if(K_minus is not None):
temp_mat = np.dot(K_minus,W_orig[0:(M-1),])
for i in range(numClasses):
#add the term (E/2*numclasses)*lambda^T*K_plus*lambda for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f+= ((0.5*E)/(numClasses))*f2
#the second term in the objective function
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f+= -((0.5*E)/(numClasses))*f3
iter1=0
print('iter1=%d, f=%0.3f'%(iter1,f))
f_best=f
fvals=np.zeros((iterMax1+1,))
fvals[iter1]=f_best
W_best=np.zeros(W.shape)
iter_best=iter1
f_prev=f_best
rel_error=1.0
# f_prev_10iter=f
if(reg_type=='l1' or reg_type =='en' or reg_type == 'M'):
# from paper: Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty
if(update_type == 'adam' or update_type == 'adagrad' or update_type == 'rmsprop'):
u = np.zeros(W.shape)
else:
u = 0.0
q=np.zeros(W.shape)
z=np.zeros(W.shape)
all_zeros=np.zeros(W.shape)
eta1=eta_zero
v=np.zeros(W.shape)
v_prev=np.zeros(W.shape)
vt=np.zeros(W.shape)
m=np.zeros(W.shape)
vt=np.zeros(W.shape)
cache=np.zeros(W.shape)
eps=1e-08
decay_rate=0.99
mu1=0.9
mu=mu1
beta1 = 0.9
beta2 = 0.999
iter_eval=10 #evaluate after every 10 iterations
idx_batches, sample_weights_batch, num_batches = self.divide_into_batches_stratified(yTrain)
while(iter1<iterMax1 and rel_error>tol):
iter1=iter1+1
for batch_num in range(0,num_batches):
# batch_size=batch_sizes[j]
test_idx=idx_batches[batch_num]
data=xTrain[test_idx,]
labels=yTrain[test_idx,]
N=labels.shape[0]
scores=data.dot(W)
correct_scores=scores[range(N),np.array(labels,dtype='int32')]#label_batches[j] for this line should be in the range [0,numClasses-1]
mat=(scores.transpose()-correct_scores.transpose()).transpose()
mat=mat+1.0
mat[range(N),np.array(labels,dtype='int32')]=0.0
thresh1=np.zeros(mat.shape)
thresh1[mat>0.0]=mat[mat>0.0]
binary1 = np.zeros(thresh1.shape)
binary1[thresh1>0.0] = 1.0
row_sum=np.sum(binary1,axis=1)
binary1[range(N),np.array(labels,dtype='int32')]=-row_sum
if(C !=0.0):
binary2 = np.zeros(scores.shape)
binary2[scores>0.0] = 1.0
binary2[scores<0.0] = -1.0
else:
binary2 = 0
dscores1 = binary1
dscores2 = binary2
if(class_weighting=='average'):
gradW = np.dot((dscores1 + C*dscores2).transpose(),data)
gradW=gradW.transpose()
gradW = (1.0/N)*gradW
# gradW += gradW1 - gradW2
else:
sample_weights_b=sample_weights_batch[batch_num]
gradW=np.dot((dscores1 + C*dscores2).transpose(),data*sample_weights_b[:,None])
gradW=gradW.transpose()
gradW=(1.0/numClasses)*gradW
# gradW += gradW1 - gradW2
if(np.sum(gradW**2)>G_clip_threshold):#gradient clipping
gradW = G_clip_threshold*gradW/np.sum(gradW**2)
if(update_type=='sgd'):
W = W - eta*gradW
elif(update_type=='momentum'):
v = mu * v - eta * gradW # integrate velocity
W += v # integrate position
elif(update_type=='nesterov'):
v_prev[:] = v[:] # back this up
v = mu * v - eta * gradW # velocity update stays the same
W += -mu * v_prev + (1 + mu) * v # position update changes form
elif(update_type=='adagrad'):
cache += gradW**2
W += - eta1* gradW / (np.sqrt(cache) + eps)
elif(update_type=='rmsprop'):
cache = decay_rate * cache + (1 - decay_rate) * gradW**2
W += - eta1 * gradW / (np.sqrt(cache) + eps)
elif(update_type=='adam'):
m = beta1*m + (1-beta1)*gradW
mt = m / (1-beta1**(iter1+1))
v = beta2*v + (1-beta2)*(gradW**2)
vt = v / (1-beta2**(iter1+1))
W += - eta1 * mt / (np.sqrt(vt) + eps)
else:
W = W - eta*gradW
if(reg_type == 'M'):
gradW1= np.zeros(W.shape)
gradW2= np.zeros(W.shape)
for i in range(numClasses):
w=W[0:(M-1),i]
if(K_plus is not None):
gradW1[0:(M-1),i]=((E*0.5)/(numClasses))*2*np.dot(K_plus,w)
if(K_minus is not None):
gradW2[0:(M-1),i]=((E*0.5)/(numClasses))*temp_mat[:,i]
if(update_type == 'adam'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(cache) + eps))
else:
W += -(gradW1-gradW2)*(eta)
if(reg_type == 'ISTA'):
if(update_type == 'adam'):
idx_plus = W > D*(eta1/(np.sqrt(vt) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(vt) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(vt) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(vt[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(vt[idx_minus]) + eps))
W[idx_zero] = 0.0
elif(update_type == 'adagrad' or update_type =='rmsprop'):
idx_plus = W > D*(eta1/(np.sqrt(cache) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(cache) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(cache) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(cache[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(cache[idx_minus]) + eps))
W[idx_zero] = 0.0
else:
idx_plus = W > D*(eta)
idx_minus = W < -D*(eta)
idx_zero = np.abs(W) < D*(eta)
W[idx_plus] = W[idx_plus] - D*(eta)
W[idx_minus] = W[idx_minus] + D*(eta)
W[idx_zero] = 0.0
if(reg_type=='l2'):
if(update_type == 'adam'):
W += -D*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*W*(eta)
if(reg_type=='en'):
if(update_type == 'adam'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*W*(eta)
if(reg_type=='l1' or reg_type == 'M'):
if(update_type=='adam'):
u = u + D*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(reg_type=='en'):
if(update_type=='adam'):
u = u + D*E*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*E*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*E*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(np.sum(W**2)>W_clip_threshold):#gradient clipping
W = W_clip_threshold*W/np.sum(W**2)
if(iter1%iter_eval==0):
#once the W are calculated for each epoch we calculate the scores
scores=xTrain.dot(W)
# scores=scores-np.max(scores)
N=scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
thresh1 = np.zeros(mat.shape)
thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*np.sum(np.abs(scores)) + np.sum(thresh1)
f += (1.0/N)*f1
else:
f1 = C*np.sum(np.abs(scores)*sample_weights[:,None]) + np.sum(thresh1*sample_weights[:,None])
f+= (1.0/numClasses)*f1
for i in range(numClasses):
#first term in objective function for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f += ((0.5*E)/(numClasses))*f2
#the second term in the objective function for margin
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f += -((0.5*E)/(numClasses))*f3
if(verbose == True):
print('iter1=%d, f=%0.3f'%(iter1,f))
fvals[iter1]=f
rel_error=np.abs(f_prev-f)/np.abs(f_prev)
max_W = np.max(np.abs(W))
W[np.abs(W)<1e-03*max_W]=0.0
if(f<f_best):
f_best=f
W_best[:]=W[:]
max_W = np.max(np.abs(W))
W_best[np.abs(W_best)<1e-03*max_W]=0.0
iter_best=iter1
else:
break
f_prev=f
eta=eta_zero/np.power((iter1+1),1)
fvals[iter1]=-1
return W_best,f_best,iter_best,fvals
def predict(self,data, xTrain, W_all, sample_indices, feature_indices, me_all, std_all, subset_all):
#type=2 -> mode of all labels
#type=1 -> average of all labels
#type=3 -> concat of all labels
types = self.combine_type
kernel_type = self.kernel_type
gamma = self.gamma
n_components = self.n_components
n_ensembles = feature_indices.shape[0]
N = data.shape[0]
M = data.shape[1]
if(self.problem_type == 'classification'):
numClasses = W_all[0].shape[1]
label = np.zeros((N,))
if(self.problem_type == 'regression'):
numClasses = int(W_all[0].shape[1]/2)
print('numClasses=%d'%numClasses)
label = np.zeros((N,numClasses))
# print('numClasses =%d'%numClasses)
if(types=='mode'):
label_all_1 = np.zeros((N,n_ensembles))
label_all_2 = np.zeros((N,n_ensembles*numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform(X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label_all_2[:,i*numClasses:i*numClasses+numClasses] = scores
if(self.problem_type == 'classification'):
scores = data1.dot(W)
label_all_1[:,i] = np.argmax(scores,axis=1)
if(self.problem_type == 'classification'):
label = mode(label_all_1,axis=1)[0]
label = np.int32(np.reshape(label,(N,)))
return label
if(self.problem_type == 'regression'):
label = np.zeros((N,numClasses))
for j in range(numClasses):
label_temp = np.zeros((N,n_ensembles))
for k in range(n_ensembles):
label_temp[:,k] = label_all_2[:,k*numClasses+j]
label[:,j] = np.reshape(mode(label_temp,axis=1)[0],(label.shape[0],))
return label
elif(types=='average'):
label_all_2=np.zeros((N,numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
# W1 = (W[:,0]-W[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
                    label += scores/n_ensembles #accumulate the average of the per-ensemble predictions
if(self.problem_type == 'classification'):
scores = data1.dot(W)
                    label_all_2 += scores #accumulate scores across the ensemble before taking the argmax
if(self.problem_type == 'classification'):
label=np.argmax(label_all_2,axis=1)
return label
if(self.problem_type == 'regression'):
return label
elif(types =='concat'):
# if(self.problem_type == 'regression'):
# P_all=np.zeros((N,n_ensembles))
# if(self.problem_type == 'classification'):
N = data.shape[0]
P_all=np.zeros((N,n_ensembles*numClasses))
for i in range(n_ensembles):
# print('testing PLM %d'%i)
X1, X2 = self.select_(data, xTrain, kernel_type, subset_all[i], feature_indices[i,:], sample_indices[i,:])
data1 = self.kernel_transform( X1 = X1, X2 = X2, kernel_type = kernel_type, n_components = n_components, gamma = gamma)
data1 = self.normalize_(data1,me_all[i],std_all[i])
M = data1.shape[1]
data1 = self.add_bias(data1)
W = W_all[i]
if(self.problem_type == 'regression'):
scores = np.zeros((data1.shape[0],numClasses))
for j in range(numClasses):
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
# if(self.problem_type == 'regression'):
# W1 = (W[:,0]-W[:,1])/2
# scores=data1[:,0:M].dot(W1[0:M,]) + np.dot(data1[:,M], W1[M+1,])
# scores = -1.0/(W1[M,] + 1e-08)*scores
# P_all[:,i] = scores
if(self.problem_type == 'classification'):
scores = data1.dot(W)
P_all[:,i*numClasses:numClasses+i*numClasses] = scores
if(n_ensembles == 1):
if(self.problem_type == 'regression'):
if(numClasses == 1):
label = np.reshape(P_all,(P_all.shape[0],))
else:
label = P_all
if(self.problem_type == 'classification'):
label=np.argmax(P_all,axis=1)
return label
W = W_all[n_ensembles]
M = P_all.shape[1]
# P_all = self.add_bias(P_all)
if(self.problem_type == 'regression'):
scores = np.zeros((P_all.shape[0],numClasses))
P_all_1 = np.zeros((P_all.shape[0],n_ensembles))
# W = np.zeros((P_all_1.shape[1]+2,numClasses*2))
for j in range(numClasses):
P_all_1 = np.zeros((P_all.shape[0],n_ensembles))
for k in range(n_ensembles):
P_all_1[:,k] = P_all[:,numClasses*k+j]
M = P_all_1.shape[1]
P_all_1 = self.add_bias(P_all_1)
W2 = W[:,j:j+2]
W1 = (W2[:,0] - W2[:,1])/2
scores1 = P_all_1[:,0:M].dot(W1[0:M,]) + np.dot(P_all_1[:,M], W1[M+1,])
scores1 = -1.0/(W1[M,] + 1e-08)*scores1
scores[:,j] = scores1
label = scores
return label
# W1 = (W[:,0]-W[:,1])/2
# scores=P_all[:,0:M].dot(W1[0:M,]) + np.dot(P_all[:,M], W1[M+1,])
# scores = -1.0/(W1[M,] + 1e-08)*scores
# label = scores
if(self.problem_type == 'classification'):
P_all = self.add_bias(P_all)
scores = P_all.dot(W)
label = np.argmax(scores,axis=1)
return label
def accuracy_classifier(self,actual_label,found_labels):
acc=np.divide(np.sum(actual_label==found_labels)*100.0 , actual_label.shape[0],dtype='float64')
return acc
def accuracy_regressor(self,actual_label,found_labels):
acc=np.divide(np.linalg.norm(actual_label - found_labels)**2 , actual_label.shape[0],dtype='float64')
return acc
def train_LSMCM(self, xTrain, yTrain, level, K_plus = None, K_minus = None, W = None):
#min D(E|w|_1 + (1-E)*0.5*|W|_2^2) + C*\sum_i\sum_(j)|f_j(i)**2| + \sum_i\sum_(j_\neq y_i)(1-f_y_i(i) + f_j(i))**2
#setting C = 0 gives us SVM
# or when using margin term i.e., reg_type = 'M'
#min D(E|w|_1) + (E)*0.5*\sum_j=1 to numClasses (w_j^T(K+ - K-)w_j) + C*\sum_i\sum_(j)|f_j(i)**2| + \sum_i\sum_(j_\neq y_i)(1-f_y_i(i) + f_j(i))**2
#setting C = 0 gives us SVM with margin term
# print('LSMCM Training')
# print('reg_type=%s, algo_type=%s, problem_type=%s,kernel_type=%s'%(self.reg_type,self.algo_type,self.problem_type,self.kernel_type))
# print('C1=%0.4f, C2=%0.4f, C3=%0.4f'%(self.C1,self.C2,self.C3))
if(self.upsample1==True):
xTrain,yTrain=self.upsample(xTrain,yTrain,new_imbalance_ratio=0.5,upsample_type=1)
xTrain=self.add_bias(xTrain)
M=xTrain.shape[1]
N=xTrain.shape[0]
numClasses=np.unique(yTrain).size
verbose = False
if(level==0):
C = self.C1 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty or margin term
else:
C = self.C4 #for loss function of MCM
D = self.C2 #for L1 or L2 penalty
E = self.C3 #for elastic net penalty since in combining the classifiers we use a linear primal classifier
iterMax1 = self.iterMax1
eta_zero = self.eta
class_weighting = self.class_weighting
reg_type = self.reg_type
update_type = self.update_type
tol = self.tol
np.random.seed(1)
if(W is None):
W=0.001*np.random.randn(M,numClasses)
W=W/np.max(np.abs(W))
else:
W_orig = np.zeros(W.shape)
W_orig[:] = W[:]
class_weights=np.zeros((numClasses,))
sample_weights=np.zeros((N,))
#divide the data into K clusters
for i in range(numClasses):
idx=(yTrain==i)
class_weights[i]=1.0/np.sum(idx)
sample_weights[idx]=class_weights[i]
G_clip_threshold = 100
W_clip_threshold = 500
eta=eta_zero
scores = xTrain.dot(W) #samples X numClasses
N = scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(yTrain,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
mat1 = 1 - correct_scores + max_scores
# thresh1 = np.zeros(mat.shape)
# thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
#(1- f_yi + max_j neq yi f_j)^2
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*0.5*np.sum(scores**2) + 0.5*np.sum((mat1)**2)
f += (1.0/N)*f1
else:
f1 = C*0.5*np.sum((scores**2)*sample_weights[:,None]) + 0.5*np.sum((mat1**2)*sample_weights[:,None])
f+= (1.0/numClasses)*f1
if(K_minus is not None):
temp_mat = np.dot(K_minus,W_orig[0:(M-1),])
for i in range(numClasses):
#add the term (E/2*numclasses)*lambda^T*K_plus*lambda for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f+= ((0.5*E)/(numClasses))*f2
#the second term in the objective function
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f+= -((0.5*E)/(numClasses))*f3
iter1=0
print('iter1=%d, f=%0.3f'%(iter1,f))
f_best=f
fvals=np.zeros((iterMax1+1,))
fvals[iter1]=f_best
W_best=np.zeros(W.shape)
iter_best=iter1
f_prev=f_best
rel_error=1.0
# f_prev_10iter=f
if(reg_type=='l1' or reg_type =='en' or reg_type == 'M'):
# from paper: Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty
if(update_type == 'adam' or update_type == 'adagrad' or update_type == 'rmsprop'):
u = np.zeros(W.shape)
else:
u = 0.0
q=np.zeros(W.shape)
z=np.zeros(W.shape)
all_zeros=np.zeros(W.shape)
eta1=eta_zero
v=np.zeros(W.shape)
v_prev=np.zeros(W.shape)
vt=np.zeros(W.shape)
m=np.zeros(W.shape)
vt=np.zeros(W.shape)
cache=np.zeros(W.shape)
eps=1e-08
decay_rate=0.99
mu1=0.9
mu=mu1
beta1 = 0.9
beta2 = 0.999
iter_eval=10 #evaluate after every 10 iterations
idx_batches, sample_weights_batch, num_batches = self.divide_into_batches_stratified(yTrain)
while(iter1<iterMax1 and rel_error>tol):
iter1=iter1+1
for batch_num in range(0,num_batches):
# batch_size=batch_sizes[j]
test_idx=idx_batches[batch_num]
data=xTrain[test_idx,]
labels=yTrain[test_idx,]
N=labels.shape[0]
scores=data.dot(W)
correct_scores=scores[range(N),np.array(labels,dtype='int32')]#label_batches[j] for this line should be in the range [0,numClasses-1]
mat=(scores.transpose()-correct_scores.transpose()).transpose()
mat=mat+1.0
mat[range(N),np.array(labels,dtype='int32')]=0.0
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(labels,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
max_scores_idx = np.argmax(scores1, axis = 1)
mat1 = 1 - correct_scores + max_scores
dscores1 = np.zeros(mat.shape)
dscores1[range(N),np.array(max_scores_idx,dtype='int32')] = mat1
row_sum = np.sum(dscores1,axis=1)
dscores1[range(N),np.array(labels,dtype='int32')] = -row_sum
if(C !=0.0):
dscores2 = np.zeros(scores.shape)
dscores2[:] = scores[:]
else:
dscores2 = 0
dscores1 = 2*dscores1
dscores2 = 2*dscores2
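                # The factor of 2 comes from differentiating the squared terms; it is cancelled
                # by the 0.5/N (or 0.5/numClasses) scaling applied to gradW below.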
if(class_weighting=='average'):
gradW = np.dot((dscores1 + C*dscores2).transpose(),data)
gradW = gradW.transpose()
gradW = (0.5/N)*gradW
# gradW += gradW1 - gradW2
else:
sample_weights_b = sample_weights_batch[batch_num]
gradW = np.dot((dscores1 + C*dscores2).transpose(),data*sample_weights_b[:,None])
gradW = gradW.transpose()
gradW = (0.5/numClasses)*gradW
# gradW += gradW1 - gradW2
if(np.sum(gradW**2)>G_clip_threshold):#gradient clipping
# print('clipping gradients')
gradW = G_clip_threshold*gradW/np.sum(gradW**2)
if(update_type=='sgd'):
W = W - eta*gradW
elif(update_type=='momentum'):
v = mu * v - eta * gradW # integrate velocity
W += v # integrate position
elif(update_type=='nesterov'):
v_prev[:] = v[:] # back this up
v = mu * v - eta * gradW # velocity update stays the same
W += -mu * v_prev + (1 + mu) * v # position update changes form
elif(update_type=='adagrad'):
cache += gradW**2
W += - eta1* gradW / (np.sqrt(cache) + eps)
elif(update_type=='rmsprop'):
cache = decay_rate * cache + (1 - decay_rate) * gradW**2
W += - eta1 * gradW / (np.sqrt(cache) + eps)
elif(update_type=='adam'):
m = beta1*m + (1-beta1)*gradW
mt = m / (1-beta1**(iter1+1))
v = beta2*v + (1-beta2)*(gradW**2)
vt = v / (1-beta2**(iter1+1))
W += - eta1 * mt / (np.sqrt(vt) + eps)
else:
W = W - eta*gradW
if(reg_type == 'M'):
gradW1= np.zeros(W.shape)
gradW2= np.zeros(W.shape)
for i in range(numClasses):
w=W[0:(M-1),i]
if(K_plus is not None):
gradW1[0:(M-1),i]=((E*0.5)/(numClasses))*2*np.dot(K_plus,w)
if(K_minus is not None):
gradW2[0:(M-1),i]=((E*0.5)/(numClasses))*temp_mat[:,i]
if(update_type == 'adam'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -(gradW1-gradW2)*(eta1/(np.sqrt(cache) + eps))
else:
W += -(gradW1-gradW2)*(eta)
if(reg_type == 'ISTA'):
if(update_type == 'adam'):
idx_plus = W > D*(eta1/(np.sqrt(vt) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(vt) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(vt) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(vt[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(vt[idx_minus]) + eps))
W[idx_zero] = 0.0
elif(update_type == 'adagrad' or update_type =='rmsprop'):
idx_plus = W > D*(eta1/(np.sqrt(cache) + eps))
idx_minus = W < -D*(eta1/(np.sqrt(cache) + eps))
idx_zero = np.abs(W) < D*(eta1/(np.sqrt(cache) + eps))
W[idx_plus] = W[idx_plus] - D*(eta1/(np.sqrt(cache[idx_plus]) + eps))
W[idx_minus] = W[idx_minus] + D*(eta1/(np.sqrt(cache[idx_minus]) + eps))
W[idx_zero] = 0.0
else:
idx_plus = W > D*(eta)
idx_minus = W < -D*(eta)
idx_zero = np.abs(W) < D*(eta)
W[idx_plus] = W[idx_plus] - D*(eta)
W[idx_minus] = W[idx_minus] + D*(eta)
W[idx_zero] = 0.0
if(reg_type=='l2'):
if(update_type == 'adam'):
W += -D*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*W*(eta)
if(reg_type=='en'):
if(update_type == 'adam'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
W += -D*(1.0-E)*W*(eta1/(np.sqrt(cache) + eps))
else:
W += -D*W*(eta)
if(reg_type=='l1' or reg_type == 'M'):
if(update_type=='adam'):
u = u + D*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(reg_type=='en'):
if(update_type=='adam'):
u = u + D*E*(eta1/(np.sqrt(vt) + eps))
elif(update_type == 'adagrad' or update_type =='rmsprop'):
u = u + D*E*(eta1/(np.sqrt(cache) + eps))
else:
u = u + D*E*eta
z[:] = W[:]
idx_plus = W>0
idx_minus = W<0
W_temp = np.zeros(W.shape)
if(update_type=='adam' or update_type == 'adagrad' or update_type =='rmsprop'):
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u[idx_plus]+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u[idx_minus]-q[idx_minus]))
else:
W_temp[idx_plus]=np.maximum(all_zeros[idx_plus],W[idx_plus]-(u+q[idx_plus]))
W_temp[idx_minus]=np.minimum(all_zeros[idx_minus],W[idx_minus]+(u-q[idx_minus]))
W[idx_plus]=W_temp[idx_plus]
W[idx_minus]=W_temp[idx_minus]
q=q+(W-z)
if(np.sum(W**2)>W_clip_threshold):#gradient clipping
# print('clipping normW')
W = W_clip_threshold*W/np.sum(W**2)
if(iter1%iter_eval==0):
#once the W are calculated for each epoch we calculate the scores
scores=xTrain.dot(W)
# scores=scores-np.max(scores)
N=scores.shape[0]
correct_scores = scores[range(N),np.array(yTrain,dtype='int32')]
mat = (scores.transpose()-correct_scores.transpose()).transpose()
mat = mat+1.0
mat[range(N),np.array(yTrain,dtype='int32')] = 0.0
# thresh1 = np.zeros(mat.shape)
# thresh1[mat>0.0] = mat[mat>0.0] #for the SVM loss
scores1 = np.zeros(scores.shape)
scores1[:] = scores[:]
scores1[range(N),np.array(yTrain,dtype='int32')] = -np.inf
max_scores = np.max(scores1,axis =1)
mat1 = 1 - correct_scores + max_scores
f=0.0
if(reg_type=='l2'):
f += D*0.5*np.sum(W**2)
if(reg_type=='l1'):
f += D*np.sum(np.abs(W))
if(reg_type=='en'):
f += D*0.5*(1-E)*np.sum(W**2) + D*E*np.sum(np.abs(W))
if(class_weighting=='average'):
f1 = C*0.5*np.sum(scores**2) + 0.5*np.sum(mat1**2)
f += (1.0/N)*f1
else:
f1 = C*0.5*np.sum((scores**2)*sample_weights[:,None]) + 0.5*np.sum((mat1**2)*sample_weights[:,None])
f+= (1.0/numClasses)*f1
for i in range(numClasses):
#first term in objective function for margin
if(K_plus is not None):
w = W[0:(M-1),i]
f2 = np.dot(np.dot(K_plus,w),w)
f += ((0.5*E)/(numClasses))*f2
#the second term in the objective function for margin
if(K_minus is not None):
f3 = np.dot(temp_mat[:,i],w)
f += -((0.5*E)/(numClasses))*f3
if(verbose == True):
print('iter1=%d, f=%0.3f'%(iter1,f))
fvals[iter1]=f
rel_error=np.abs(f_prev-f)/np.abs(f_prev)
max_W = np.max(np.abs(W))
W[np.abs(W)<1e-03*max_W]=0.0
if(f<f_best):
f_best=f
W_best[:]=W[:]
max_W = np.max(np.abs(W))
W_best[np.abs(W_best)<1e-03*max_W]=0.0
iter_best=iter1
else:
break
f_prev=f
eta=eta_zero/np.power((iter1+1),1)
fvals[iter1]=-1
return W_best,f_best,iter_best,fvals
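# ---------------------------------------------------------------------------
# Usage sketch (not part of the original code): how the MCM class above is
# typically driven for classification, assuming numpy arrays xTrain/yTrain
# with labels in {0, ..., numClasses-1} and a test matrix xTest. The
# hyperparameter values are illustrative only.
# ---------------------------------------------------------------------------
def mcm_usage_example(xTrain, yTrain, xTest):
    clf = MCM(C1=1.0, C2=1e-05, kernel_type='rbf', gamma=1e-02,
              problem_type='classification', algo_type='MCM', n_components=100)
    # fit() returns everything predict() needs to rebuild the per-ensemble transforms
    W_all, sample_idx, feature_idx, me_all, std_all, subset_all = clf.fit(xTrain, yTrain)
    yPred = clf.predict(xTest, xTrain, W_all, sample_idx, feature_idx, me_all, std_all, subset_all)
    return yPred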
|
python
|
#!/usr/local/Cellar/python/2.7.6/bin/python
# -*- coding: utf-8 -*-
import sys
import scipy.misc, scipy.io, scipy.optimize
from sklearn import svm, grid_search
from numpy import *
import pylab
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.mlab as mlab
from util import Util
def plot(data):
positives = data[data[:, 2] == 1]
negatives = data[data[:, 2] == 0]
pyplot.plot( positives[:, 0], positives[:, 1], 'b+' )
pyplot.plot( negatives[:, 0], negatives[:, 1], 'yo' )
def gaussianKernel(x1, x2, sigma):
return exp( -sum((x1 - x2) **2.0) / (2 * sigma**2.0) )
def visualizeBoundary( X, trained_svm ):
kernel = trained_svm.get_params()['kernel']
if kernel == 'linear':
w = trained_svm.dual_coef_.dot( trained_svm.support_vectors_ ).flatten()
xp = linspace( min(X[:, 0]), max(X[:, 0]), 100 )
yp = (-w[0] * xp + trained_svm.intercept_) / w[1]
pyplot.plot( xp, yp, 'b-')
elif kernel == 'rbf':
x1plot = linspace( min(X[:, 0]), max(X[:, 0]), 100 )
x2plot = linspace( min(X[:, 1]), max(X[:, 1]), 100 )
X1, X2 = meshgrid( x1plot, x2plot )
vals = zeros(shape(X1))
for i in range(0, shape(X1)[1]):
this_X = c_[ X1[:, i], X2[:, i] ]
vals[:, i] = trained_svm.predict( this_X )
pyplot.contour( X1, X2, vals, colors='blue' )
def dataset3ParamsVer3( X, y, X_val, y_val ):
C_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
sigma_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
gammas = map( lambda x: 1.0 / x, sigma_values )
raveled_y = y.ravel()
rbf_svm = svm.SVC()
parameters = {'kernel':('rbf', ), 'C':[0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30], 'gamma':map( lambda x: 1.0 / x, sigma_values ) }
grid = grid_search.GridSearchCV( rbf_svm, parameters )
best = grid.fit( X, raveled_y ).best_params_
return best
def dataset3ParamsVer2( X, y, X_val, y_val ):
C_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
sigma_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
raveled_y = y.ravel() # Else the SVM will give you annoying warning
m_val = shape( X_val )[0] # number of entries in validation data
rbf_svm = svm.SVC(kernel='rbf')
best = {'score': -999, 'C': 0.0, 'sigma': 0.0 }
for C in C_values:
for sigma in sigma_values:
# train the SVM first
rbf_svm.set_params( C=C )
rbf_svm.set_params( gamma = 1.0 / sigma )
rbf_svm.fit( X, raveled_y )
score = rbf_svm.score( X_val, y_val )
# get the lowest error
if score > best['score']:
best['score'] = score
best['C'] = C
best['sigma'] = sigma
best['gamma'] = 1.0 / best['sigma']
return best
def dataset3ParamsVer1( X, y, X_val, y_val ):
C_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
sigma_values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
raveled_y = y.ravel() # Else the SVM will give you annoying warning
m_val = shape( X_val )[0] # number of entries in validation data
rbf_svm = svm.SVC(kernel='rbf')
best = {'error': 999, 'C': 0.0, 'sigma': 0.0 }
for C in C_values:
for sigma in sigma_values:
# train the SVM first
rbf_svm.set_params( C=C )
rbf_svm.set_params( gamma = 1.0 / sigma )
rbf_svm.fit( X, raveled_y )
# test it out on validation data
predictions = []
for i in range( 0, m_val ):
prediction_result = rbf_svm.predict( X_val[i] )
predictions.append( prediction_result[0] )
# sadly if you don't reshape it, numpy doesn't know if it's row or column vector
predictions = array(predictions).reshape( m_val, 1)
error = (predictions != y_val.reshape(m_val, 1)).mean()
# get the lowest error
if error < best['error']:
best['error'] = error
best['C'] = C
best['sigma'] = sigma
best['gamma'] = 1.0 / best['sigma']
return best
def part1_1():
mat = scipy.io.loadmat( "/Users/saburookita/Downloads/mlclass-ex6-004/mlclass-ex6/ex6data1.mat" )
X, y = mat['X'], mat['y']
plot( c_[X, y] )
pyplot.show( block=True )
# linear SVM with C = 1
linear_svm = svm.SVC(C=1, kernel='linear')
linear_svm.fit( X, y.ravel() )
plot( c_[X, y] )
visualizeBoundary( X, linear_svm )
pyplot.show( block=True )
# try with C = 100
linear_svm.set_params( C=100 )
linear_svm.fit( X, y.ravel() )
plot( c_[X, y] )
visualizeBoundary( X, linear_svm )
pyplot.show( block=True )
def part1_2():
x1 = array([1, 2, 1])
x2 = array([0, 4, -1])
sigma = 2
print "Gaussian kernel: %f" % gaussianKernel( x1, x2, sigma )
mat = scipy.io.loadmat( "/Users/saburookita/Downloads/mlclass-ex6-004/mlclass-ex6/ex6data2.mat" )
X, y = mat['X'], mat['y']
plot( c_[X, y] )
pyplot.show( block=True )
sigma = 0.01
rbf_svm = svm.SVC(C=1, kernel='rbf', gamma = 1.0 / sigma ) # gamma is actually inverse of sigma
rbf_svm.fit( X, y.ravel() )
plot( c_[X, y] )
visualizeBoundary( X, rbf_svm )
pyplot.show( block=True )
def part1_3():
mat = scipy.io.loadmat( "/Users/saburookita/Downloads/mlclass-ex6-004/mlclass-ex6/ex6data3.mat" )
X, y = mat['X'], mat['y']
X_val, y_val = mat['Xval'], mat['yval']
rbf_svm = svm.SVC(kernel='rbf')
best = dataset3ParamsVer1( X, y, X_val, y_val )
rbf_svm.set_params( C=best['C'] )
rbf_svm.set_params( gamma=best['gamma'] )
rbf_svm.fit( X, y )
plot( c_[X, y] )
visualizeBoundary( X, rbf_svm )
pyplot.show( block=True)
best = dataset3ParamsVer2( X, y, X_val, y_val )
rbf_svm.set_params( C=best['C'] )
rbf_svm.set_params( gamma=best['gamma'] )
plot( c_[X, y] )
visualizeBoundary( X, rbf_svm )
pyplot.show( block=True)
best = dataset3ParamsVer3( X, y, X_val, y_val )
rbf_svm.set_params( C=best['C'] )
rbf_svm.set_params( gamma=best['gamma'] )
plot( c_[X, y] )
visualizeBoundary( X, rbf_svm )
pyplot.show( block=True)
def main():
set_printoptions(precision=6, linewidth=200)
part1_1()
part1_2()
part1_3()
if __name__ == '__main__':
main()
|
python
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the OCPReportProcessor."""
import datetime
from unittest.mock import patch
from api.utils import DateHelper
from masu.database import OCP_REPORT_TABLE_MAP
from masu.database.ocp_report_db_accessor import OCPReportDBAccessor
from masu.database.report_manifest_db_accessor import ReportManifestDBAccessor
from masu.processor.ocp.ocp_report_parquet_summary_updater import OCPReportParquetSummaryUpdater
from masu.test import MasuTestCase
from masu.test.database.helpers import ReportObjectCreator
from reporting_common.models import CostUsageReportManifest
class OCPReportSummaryUpdaterTest(MasuTestCase):
"""Test cases for the OCPReportSummaryUpdater class."""
@classmethod
def setUpClass(cls):
"""Set up the test class with required objects."""
super().setUpClass()
cls.accessor = OCPReportDBAccessor(cls.schema)
cls.report_schema = cls.accessor.report_schema
cls.all_tables = list(OCP_REPORT_TABLE_MAP.values())
cls.creator = ReportObjectCreator(cls.schema)
cls.date_accessor = DateHelper()
cls.manifest_accessor = ReportManifestDBAccessor()
cls.dh = DateHelper()
def setUp(self):
"""Set up each test."""
super().setUp()
self.provider = self.ocp_provider
self.today = self.dh.today
billing_start = datetime.datetime(year=self.today.year, month=self.today.month, day=self.today.day).replace(
day=1
)
self.manifest_dict = {
"assembly_id": "1234",
"billing_period_start_datetime": billing_start,
"num_total_files": 2,
"num_processed_files": 1,
"provider_uuid": self.ocp_provider_uuid,
}
self.cluster_id = self.ocp_cluster_id
self.manifest = CostUsageReportManifest.objects.filter(
provider_id=self.ocp_provider_uuid, billing_period_start_datetime=self.dh.this_month_start
).first()
self.manifest.num_total_files = 2
self.manifest.save()
self.updater = OCPReportParquetSummaryUpdater(self.schema, self.provider, self.manifest)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportParquetSummaryUpdater._check_parquet_date_range"
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportDBAccessor.populate_openshift_cluster_information_tables" # noqa: E501
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportDBAccessor.delete_line_item_daily_summary_entries_for_date_range" # noqa: E501
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater."
"OCPReportDBAccessor.populate_volume_label_summary_table"
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater." "OCPReportDBAccessor.populate_pod_label_summary_table"
)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater."
"OCPReportDBAccessor.populate_line_item_daily_summary_table_presto"
)
def test_update_summary_tables(
self, mock_sum, mock_tag_sum, mock_vol_tag_sum, mock_delete, mock_cluster_populate, mock_date_check
):
"""Test that summary tables are run for a full month when no report period is found."""
start_date = self.dh.today
end_date = start_date
start_date_str = start_date.strftime("%Y-%m-%d")
end_date_str = end_date.strftime("%Y-%m-%d")
mock_date_check.return_value = (start_date, end_date)
self.updater.update_summary_tables(start_date_str, end_date_str)
mock_delete.assert_called_with(self.ocp_provider.uuid, start_date.date(), end_date.date())
mock_sum.assert_called()
mock_tag_sum.assert_called()
mock_vol_tag_sum.assert_called()
mock_date_check.assert_called()
def test_update_daily_tables(self):
start_date = self.dh.today
end_date = start_date
start_date_str = start_date.strftime("%Y-%m-%d")
end_date_str = end_date.strftime("%Y-%m-%d")
expected = (
"INFO:masu.processor.ocp.ocp_report_parquet_summary_updater:"
"NO-OP update_daily_tables for: %s-%s" % (start_date_str, end_date_str)
)
with self.assertLogs("masu.processor.ocp.ocp_report_parquet_summary_updater", level="INFO") as _logger:
self.updater.update_daily_tables(start_date_str, end_date_str)
self.assertIn(expected, _logger.output)
@patch(
"masu.processor.ocp.ocp_report_parquet_summary_updater.OCPReportDBAccessor."
"get_max_min_timestamp_from_parquet" # noqa: E501
)
def test_check_parquet_date_range(self, mock_get_timestamps):
"""Check that we modify start date when needed."""
start_date = self.dh.this_month_start.date()
end_date = self.dh.this_month_end.date()
parquet_start_date = self.dh.today.replace(tzinfo=None)
parquet_end_date = self.dh.today.replace(tzinfo=None)
mock_get_timestamps.return_value = (parquet_start_date, parquet_end_date)
result_start, result_end = self.updater._check_parquet_date_range(start_date, end_date)
self.assertNotEqual(start_date, result_start)
self.assertEqual(parquet_start_date.date(), result_start)
|
python
|
# MQTT / shock sensor publisher
import random
import time

import sensor
# Shock sensor
import RPi.GPIO as GPIO
# paho's helper module is imported under an alias so it does not clash with
# the publish() function defined below.
import paho.mqtt.publish as mqtt_publish

# Number of samples read per polling window (the original referenced an
# undefined `windowsize`; 10 is an assumed value).
windowsize = 10

class ShockSensor(sensor.Sensor):
    def __init__(self):
        super(ShockSensor, self).__init__()
        GPIO.setmode(GPIO.BCM)
        self.SHOCK_PIN = 17
        GPIO.setup(self.SHOCK_PIN, GPIO.IN)

    def get_value(self):
        # The vibration sensor is 1 when no vibration is detected, and 0 when there is vibration
        shock = 1
        for i in range(0, windowsize):
            shock = GPIO.input(self.SHOCK_PIN)
            if not shock:
                return 1
        return not shock

def get_shock2():
    # Test helper: returns a random value instead of a real sensor reading.
    v = 1
    for i in range(0, windowsize):
        v = random.randint(1, 10)
    return v

def publish():
    #s = get_shock()
    s = "testing shock"
    mqtt_publish.single('sensors/newpipe', payload=s, qos=1, hostname='brix.d.cs.uoregon.edu', port=8100)

if __name__ == '__main__':
    # Publish loop. The original referenced an undefined `mqttc` client; a
    # connected paho-mqtt client is assumed here.
    import paho.mqtt.client as mqtt
    mqttc = mqtt.Client()
    mqttc.connect('brix.d.cs.uoregon.edu', 8100)
    mqttc.loop_start()
    try:
        while True:
            s = get_shock2()
            mqttc.publish("sensors/newpipe", s, 2)
            time.sleep(1)
    finally:
        mqttc.loop_stop()
        mqttc.disconnect()
|
python
|
from comm.ntlmrelayx.servers.httprelayserver import HTTPRelayServer
from impacket.examples.ntlmrelayx.servers.smbrelayserver import SMBRelayServer
|
python
|
# Generated by Django 3.1.4 on 2021-01-10 00:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resume', '0003_auto_20210109_1855'),
]
operations = [
migrations.AlterField(
model_name='resumesubsection',
name='subtext',
field=models.CharField(max_length=500, null=True),
),
]
|
python
|
#coding=utf-8
from django import forms
from common.models import PersonTelephoneNumber, TelephoneNumber
from django.core import validators
from django.forms.models import ModelForm
from personal.models import Firefighter
class PersonPhoneForm(forms.Form):
id = forms.CharField(widget=forms.HiddenInput, required=False)
type = forms.ChoiceField(label=u'Tipo', choices=PersonTelephoneNumber.TELEPHONE_TYPE_CHOICES)
code = forms.CharField(label=u'Código', validators=[validators.MaxLengthValidator(4), validators.RegexValidator(regex="\d\d\d\d")])
number = forms.CharField(label=u'Número', validators=[validators.MaxLengthValidator(7), validators.RegexValidator(regex="\d\d\d\d\d\d\d")])
def save(self, instance):
if self.cleaned_data.get("id", ""):
phone = instance.persontelephonenumber_set.get(id=self.cleaned_data["id"])
            phone.type = self.cleaned_data["type"]
phone.telephone_number.code = self.cleaned_data["code"]
phone.telephone_number.number = self.cleaned_data["number"]
phone.telephone_number.save()
phone.save()
else:
tphone = TelephoneNumber(code=self.cleaned_data["code"], number=self.cleaned_data["number"])
tphone.save()
phone = PersonTelephoneNumber(person=instance, type=self.cleaned_data["type"], telephone_number=tphone)
phone.save()
class PartialFirefighterForm(ModelForm):
class Meta:
model = Firefighter
fields = ('profile_picture',)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
from threading import Thread
import socket
import pickle as pickle
import time
import os
from collections import deque
import shutil
import re
import sys
import hashlib
from rpyc import Service, connect, async_
from rpyc.utils.server import ThreadPoolServer
from tgen.futil import file_stream
from tgen.logf import log_info, set_debug_stream, log_debug
from tgen.logf import log_warn, is_debug_stream
from tgen.rnd import rnd
from tgen.parallel_percrank_train import ServiceConn
from tgen.seq2seq import Seq2SeqGen
from tgen.seq2seq_ensemble import Seq2SeqEnsemble
from tgen.cluster import Job
def get_worker_registrar_for(head):
"""Return a class that will handle worker registration for the given head."""
class WorkerRegistrarService(Service):
"""An RPyC service to register workers with a head."""
def exposed_register_worker(self, host, port):
"""Register a worker with my head, initialize it."""
# initiate connection in the other direction
log_info('Worker %s:%d connected, initializing training.' % (host, port))
conn = connect(host, port, config={'allow_pickle': True})
# initialize the remote server (with training data etc.)
init_func = async_(conn.root.init_training)
# add unique 'scope suffix' so that the models don't clash in ensembles
            head.cfg['scope_suffix'] = hashlib.md5(("%s:%d" % (host, port)).encode('utf-8')).hexdigest()
req = init_func(pickle.dumps(head.cfg, pickle.HIGHEST_PROTOCOL))
# add it to the list of running services
sc = ServiceConn(host, port, conn)
head.services.add(sc)
head.pending_requests.add((sc, None, req))
log_info('Worker %s:%d initialized.' % (host, port))
return WorkerRegistrarService
class ParallelSeq2SeqTraining(object):
"""Main (head) that handles parallel Seq2Seq generator training, submitting training jobs and
collecting their results"""
DEFAULT_PORT = 25125
TEMPFILE_NAME = 'seq2seq_temp_dump.pickle.gz'
def __init__(self, cfg, work_dir, experiment_id=None):
# initialize base class
super(ParallelSeq2SeqTraining, self).__init__()
# store config
self.cfg = cfg
# initialize myself
self.work_dir = work_dir
self.jobs_number = cfg.get('jobs_number', 10)
self.job_memory = cfg.get('job_memory', 8)
self.port = cfg.get('port', self.DEFAULT_PORT)
self.queue_settings = cfg.get('queue_settings')
self.host = socket.getfqdn()
self.poll_interval = cfg.get('poll_interval', 1)
self.average_models = cfg.get('average_models', False)
self.average_models_top_k = cfg.get('average_models_top_k', 0)
self.experiment_id = experiment_id if experiment_id is not None else ''
# this will be needed when running
self.server = None
self.server_thread = None
self.jobs = None
self.pending_requests = None
self.services = None
self.free_services = None
self.results = None
# this is needed for saving the model
self.model_temp_path = None
def train(self, das_file, ttree_file, data_portion=1.0, context_file=None, validation_files=None):
"""Run parallel perceptron training, start and manage workers."""
# initialize the ranker instance
log_info('Initializing...')
# run server to process registering clients
self._init_server()
# spawn training jobs
log_info('Spawning jobs...')
host_short, _ = self.host.split('.', 1) # short host name for job names
for j in range(self.jobs_number):
# set up debugging logfile only if we have it on the head
debug_logfile = ('"PRT%02d.debug-out.txt.gz"' % j) if is_debug_stream() else 'None'
job = Job(header='from tgen.parallel_seq2seq_train import run_training',
code=('run_training("%s", %d, %s)' %
(self.host, self.port, debug_logfile)),
name=self.experiment_id + ("PRT%02d-%s-%d" % (j, host_short, self.port)),
work_dir=self.work_dir)
job.submit(memory=self.job_memory, queue=self.queue_settings)
self.jobs.append(job)
# run the training passes
try:
cur_assign = 0
results = [None] * self.jobs_number
rnd_seeds = [rnd.random() for _ in range(self.jobs_number)]
# assign training and wait for it to finish
while cur_assign < self.jobs_number or self.pending_requests:
log_debug('Starting loop over services.')
# check if some of the pending computations have finished
for sc, job_no, req in list(self.pending_requests):
res = self._check_pending_request(sc, job_no, req)
if res is not None:
results[job_no] = res, sc
# check for free services and assign new computation
while cur_assign < self.jobs_number and self.free_services:
log_debug('Assigning request %d' % cur_assign)
sc = self.free_services.popleft()
log_info('Assigning request %d to %s:%d' % (cur_assign, sc.host, sc.port))
if validation_files is not None:
validation_files = ','.join([os.path.relpath(f, self.work_dir)
for f in validation_files.split(',')])
train_func = async_(sc.conn.root.train)
req = train_func(rnd_seeds[cur_assign],
os.path.relpath(das_file, self.work_dir),
os.path.relpath(ttree_file, self.work_dir),
data_portion,
os.path.relpath(context_file, self.work_dir)
if context_file else None,
validation_files)
self.pending_requests.add((sc, cur_assign, req))
cur_assign += 1
log_debug('Assigned %d' % cur_assign)
# sleep for a while
log_debug('Sleeping.')
time.sleep(self.poll_interval)
log_info("Results:\n" + "\n".join("%.5f %s:%d" % (cost, sc.host, sc.port)
for cost, sc in results))
self.model_temp_path = os.path.join(self.work_dir, self.TEMPFILE_NAME)
results.sort(key=lambda res: res[0])
# average the computed models
if self.average_models:
log_info('Creating ensemble models...')
# use only top k if required
results_for_ensemble = (results[:self.average_models_top_k]
if self.average_models_top_k > 0
else results)
ensemble_model = self.build_ensemble_model(results_for_ensemble)
log_info('Saving the ensemble model temporarily to %s...' % self.model_temp_path)
ensemble_model.save_to_file(self.model_temp_path)
# select the best result on devel data + save it
else:
best_cost, best_sc = results[0]
log_info('Best cost: %f (computed at %s:%d).' % (best_cost, best_sc.host, best_sc.port))
log_info('Saving best generator temporarily to %s...' % self.model_temp_path)
# use relative path (working directory of worker jobs is different)
best_sc.conn.root.save_model(os.path.relpath(self.model_temp_path, self.work_dir))
# kill all jobs
finally:
for job in self.jobs:
job.delete()
def _check_pending_request(self, sc, job_no, req):
"""Check whether the given request has finished (i.e., job is loaded or job has
processed the given data portion.
If the request is finished, the worker that processed it is moved to the pool
of free services.
@param iter_no: current iteration number (for logging)
@param sc: a ServiceConn object that stores the worker connection parameters
@param job_no: current job number (is None for jobs loading)
@param req: the request itself
@return: the value returned by the finished data processing request, or None \
(for loading requests or unfinished requests)
"""
result = None
if job_no is not None:
log_debug('Checking %d' % job_no)
# checking if the request has finished
if req.ready:
if job_no is not None:
log_debug('Ready %d' % job_no)
log_info('Retrieved finished request %d' % job_no)
if req.error:
log_info('Error found on request: job #%d, worker %s:%d' %
(job_no if job_no is not None else -1, sc.host, sc.port))
result = req.value
# remove from list of pending requests
# TODO return to pool of free requests (but needs to store the results somewhere)
self.pending_requests.remove((sc, job_no, req))
if job_no is None:
self.free_services.append(sc)
return result
def _init_server(self):
"""Initializes a server that registers new workers."""
registrar_class = get_worker_registrar_for(self)
n_tries = 0
self.server = None
last_error = None
while self.server is None and n_tries < 10:
try:
n_tries += 1
self.server = ThreadPoolServer(service=registrar_class, nbThreads=1, port=self.port)
except socket.error as e:
log_warn('Port %d in use, trying to use a higher port...' % self.port)
self.port += 1
last_error = e
if self.server is None:
if last_error is not None:
raise last_error
raise Exception('Could not initialize server')
self.services = set()
self.free_services = deque()
self.pending_requests = set()
self.jobs = []
self.server_thread = Thread(target=self.server.start)
self.server_thread.setDaemon(True)
self.server_thread.start()
def save_to_file(self, model_fname):
"""This will actually just move the best generator (which is saved in a temporary file)
to the final location."""
log_info('Moving generator to %s...' % model_fname)
orig_model_fname = self.model_temp_path
shutil.move(orig_model_fname, model_fname)
orig_tf_session_fname = re.sub(r'(.pickle)?(.gz)?$', '.tfsess', orig_model_fname)
tf_session_fname = re.sub(r'(.pickle)?(.gz)?$', '.tfsess', model_fname)
if os.path.isfile(orig_tf_session_fname):
shutil.move(orig_tf_session_fname, tf_session_fname)
# move the reranking classifier model files as well, if they exist
orig_clfilter_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tftreecl\1', orig_model_fname)
orig_clfilter_tf_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tfsess', orig_clfilter_fname)
if os.path.isfile(orig_clfilter_fname) and os.path.isfile(orig_clfilter_tf_fname):
clfilter_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tftreecl\1', model_fname)
clfilter_tf_fname = re.sub(r'((.pickle)?(.gz)?)$', r'.tfsess', clfilter_fname)
shutil.move(orig_clfilter_fname, clfilter_fname)
shutil.move(orig_clfilter_tf_fname, clfilter_tf_fname)
def build_ensemble_model(self, results):
"""Load the models computed by the individual jobs and compose them into a single
ensemble model.
@param results: list of tuples (cost, ServiceConn object), where cost is not used"""
ensemble = Seq2SeqEnsemble(self.cfg)
models = []
for _, sc in results:
models.append((pickle.loads(sc.conn.root.get_all_settings()),
pickle.loads(sc.conn.root.get_model_params())))
rerank_settings = results[0][1].conn.root.get_rerank_settings()
if rerank_settings is not None:
rerank_settings = pickle.loads(rerank_settings)
rerank_params = results[0][1].conn.root.get_rerank_params()
if rerank_params is not None:
rerank_params = pickle.loads(rerank_params)
ensemble.build_ensemble(models, rerank_settings, rerank_params)
return ensemble
class Seq2SeqTrainingService(Service):
"""RPyC Worker class for a job training a Seq2Seq generator."""
def __init__(self, conn_ref):
super(Seq2SeqTrainingService, self).__init__(conn_ref)
self.seq2seq = None
def exposed_init_training(self, cfg):
"""Create the Seq2SeqGen object."""
cfg = pickle.loads(cfg)
tstart = time.time()
log_info('Initializing training...')
self.seq2seq = Seq2SeqGen(cfg)
log_info('Training initialized. Time taken: %f secs.' % (time.time() - tstart))
def exposed_train(self, rnd_seed, das_file, ttree_file, data_portion, context_file, validation_files):
"""Run the whole training.
"""
rnd.seed(rnd_seed)
log_info('Random seed: %f' % rnd_seed)
tstart = time.time()
log_info('Starting training...')
self.seq2seq.train(das_file, ttree_file, data_portion, context_file, validation_files)
log_info('Training finished -- time taken: %f secs.' % (time.time() - tstart))
top_cost = self.seq2seq.top_k_costs[0]
log_info('Best cost: %f' % top_cost)
return top_cost
def exposed_save_model(self, model_fname):
"""Save the model to the given file (must be given relative to the worker's working
directory!).
@param model_fname: target path where to save the model (relative to worker's \
working directory)
"""
self.seq2seq.save_to_file(model_fname)
def exposed_get_model_params(self):
"""Retrieve all parameters of the worker's local model (as a dictionary)
@return: model parameters in a pickled dictionary -- keys are names, values are numpy arrays
"""
p_dump = pickle.dumps(self.seq2seq.get_model_params(), protocol=pickle.HIGHEST_PROTOCOL)
return p_dump
def exposed_get_all_settings(self):
"""Call `get_all_settings` on the worker and return the result as a pickle."""
settings = pickle.dumps(self.seq2seq.get_all_settings(), protocol=pickle.HIGHEST_PROTOCOL)
return settings
def exposed_get_rerank_params(self):
"""Call `get_model_params` on the worker's reranker and return the result as a pickle."""
if not self.seq2seq.classif_filter:
return None
p_dump = pickle.dumps(self.seq2seq.classif_filter.get_model_params(),
protocol=pickle.HIGHEST_PROTOCOL)
return p_dump
def exposed_get_rerank_settings(self):
"""Call `get_all_settings` on the worker's reranker and return the result as a pickle."""
if not self.seq2seq.classif_filter:
return None
settings = pickle.dumps(self.seq2seq.classif_filter.get_all_settings(),
protocol=pickle.HIGHEST_PROTOCOL)
return settings
def run_training(head_host, head_port, debug_out=None):
"""Main worker training routine (creates the Seq2SeqTrainingService and connects it to the
head.
@param head_host: hostname of the head
@param head_port: head port number
@param debug_out: path to the debugging output file (debug output discarded if None)
"""
# setup debugging output, if applicable
if debug_out is not None:
set_debug_stream(file_stream(debug_out, mode='w'))
# start the server (in the background)
log_info('Creating training server...')
server = ThreadPoolServer(service=Seq2SeqTrainingService, nbThreads=1)
server_thread = Thread(target=server.start)
server_thread.start()
my_host = socket.getfqdn()
log_info('Worker server created at %s:%d. Connecting to head at %s:%d...' %
(my_host, server.port, head_host, head_port))
# notify main about this server
conn = connect(head_host, head_port, config={'allow_pickle': True})
conn.root.register_worker(my_host, server.port)
conn.close()
log_info('Worker is registered with the head.')
# now serve until we're killed (the server thread will continue to run)
server_thread.join()
if __name__ == '__main__':
try:
host = sys.argv[1]
port = int(sys.argv[2])
except:
sys.exit('Usage: ' + sys.argv[0] + ' <head-address> <head-port>')
run_training(host, port)
|
python
|
from django.core.management.base import BaseCommand
from flatblocks.models import FlatBlock
from camper.pages.models import Chunk
class Command(BaseCommand):
    help = 'Copies FlatBlock content into new Chunk objects'
def handle(self, *args, **options):
for fb in FlatBlock.objects.all():
try:
c = Chunk.objects.get(slug=fb.slug)
print("%s already exists" % fb.slug)
except Chunk.DoesNotExist:
c = Chunk()
c.slug = fb.slug
c.content = fb.content
c.content.markup_type = 'markdown'
c.save()
print("saved %s" % fb.slug)
|
python
|
__all__ = ["configreader"]
|
python
|
import discord
from discord.ext import commands
class Hater(commands.Cog):
def __init__(self, client):
self.client = client
self.client.hated_list = []
@commands.command()
async def hate(self, ctx, hated):
hated_id = int(hated[3:-1])
hated_member = ctx.guild.get_member(hated_id)
self.client.hated_list.append(hated_member)
await ctx.send(f'Added **{hated_member.name}** ({hated_member.mention}) to the naughties list.')
@commands.command()
async def show_hated(self, ctx):
message = []
message.append('**--- The naughties list ---**')
[message.append(f'{member.name} ({member.mention})') for member in self.client.hated_list]
await ctx.send('\n'.join(message))
def setup(client):
client.add_cog(Hater(client))
|
python
|
# Copyright 2018 NTRlab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import json
import logging
from bgx_pbft.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
#from bgx_pbft.consensus.wait_certificate import WaitCertificate
LOGGER = logging.getLogger(__name__)
def block_id_is_genesis(block_id):
"""Determines if the block ID represents the genesis block.
Args:
block_id (str): The block ID to check
Returns:
        True if this ID represents the genesis block, or False otherwise.
"""
return block_id == NULL_BLOCK_IDENTIFIER
def deserialize_wait_certificate(block, pbft_enclave_module):
"""Deserializes the wait certificate associated with the block.
Args:
block (Block or BlockWrapper): The block that has the wait certificate
pbft_enclave_module (module): The PBFT enclave module
Returns:
WaitCertificate: The reconstituted wait certificate associated
with the block or None if cannot deserialize
"""
# The wait certificate is a JSON string placed in the consensus
# field/property of the block header. Parse the JSON and then use the
# serialized wait certificate and signature to create a
# WaitCertificate object.
wait_certificate = None
"""
if block is not None:
try:
wait_certificate_dict = \
json.loads(block.header.consensus.decode())
wait_certificate = \
WaitCertificate.wait_certificate_from_serialized(
pbft_enclave_module=None,#pbft_enclave_module=pbft_enclave_module,
serialized=wait_certificate_dict['SerializedCertificate'],
signature=wait_certificate_dict['Signature'])
except (json.decoder.JSONDecodeError, KeyError):
pass
"""
return wait_certificate
def get_previous_certificate_id(block_header,
block_cache,
pbft_enclave_module):
"""Returns the wait certificate ID for the block immediately preceding the
block represented by block_header.
Args:
block_header (BlockHeader): The header for the block
block_cache (BlockCache): The cache of blocks that are predecessors
to the block represented by block_header
pbft_enclave_module (module): The PBFT enclave module
Returns:
str: The ID of the wait certificate for the block immediately
preceding the block represented by block_header
"""
wait_certificate = None
if not block_id_is_genesis(block_header.previous_block_id):
wait_certificate = deserialize_wait_certificate(
block=block_cache[block_header.previous_block_id],pbft_enclave_module=None) #pbft_enclave_module)
return \
NULL_BLOCK_IDENTIFIER if wait_certificate is None \
else wait_certificate.identifier
|
python
|
#!/usr/bin/env python
"""This module provides functionality to create a custom preoptimization
sequence from a directed acyclic graph (DAG) using topological sorting.
In the current version the DAG have to be specified manually via constants.
"""
import multiprocessing
import random
import logging
import polyjit.experiments.sequences.polly_stats as polly_stats
import pprof_utilities
__author__ = "Christoph Woller"
__credits__ = ["Christoph Woller"]
__maintainer__ = "Christoph Woller"
__email__ = "[email protected]"
SEQUENCE_FILE_PATH = '.../pprof-study/results/'
SEQUENCE_FILE = 'best_sequences.raw'
SEQUENCE_PREFIX = 'Best: '
def calculate_fitness_value(sequence, seq_to_fitness, key, program):
"""Calculates the fitness value of the provided sequence.
This method calculates the fitness of the sequence by using the number
of regions that are no valid SCoPs if this sequence is used for
preoptimization before Polly's SCoP detection.
Args:
sequence (list[string]): the sequence for that the fitness value should
be calculated.
seq_to_fitness (dict): dictionary that stores calculated fitness
values.
key (string): the key of the provided sequence for the dictionary.
program (string): the name of the application this sequence
should be used for.
"""
if key not in seq_to_fitness:
seq_to_fitness[key] = polly_stats.get_regions_without_scops(sequence,
program)
def evaluate_best_sequence(program):
""""Generates optimization sequences from a dependency graph and calculates
the best of these sequences for the specified program."""
log = logging.getLogger(__name__)
# Get different topological sorting arrangements.
sequences = pprof_utilities.read_sequences(SEQUENCE_FILE_PATH,
SEQUENCE_FILE, SEQUENCE_PREFIX)
possible_sequences = len(sequences)
seq_to_fitness = multiprocessing.Manager().dict()
pool = multiprocessing.Pool()
# Calculate the fitness value of the topological sorting arrangements.
for sequence in sequences:
pool.apply_async(calculate_fitness_value, args=(
sequence, seq_to_fitness, str(sequence), program))
pool.close()
pool.join()
# Get the best sequences.
sequences.sort(key=lambda s: seq_to_fitness[str(s)])
sequences = sequences[::-1]
fittest = sequences.pop()
fittest_fitness_value = seq_to_fitness[str(fittest)]
fittest_sequences = [fittest]
equal = True
while sequences and equal:
other = sequences.pop()
if seq_to_fitness[str(other)] == fittest_fitness_value:
fittest_sequences.append(other)
else:
equal = False
log.info("Best sequences %d of %s", len(fittest_sequences),
str(possible_sequences))
for sequence in fittest_sequences:
log.info("Best: %s", str(sequence))
log.info("----------------------------------------------------------------")
return random.choice(fittest_sequences)
|
python
|
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class AnimatorOverrideController:
def __new__(cls, arg1=None):
'''
:returns: AnimatorOverrideController
:rtype: UnityEngine.AnimatorOverrideController
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_runtimeAnimatorController():
'''
:returns: RuntimeAnimatorController
:rtype: UnityEngine.RuntimeAnimatorController
'''
pass
@staticmethod
def set_runtimeAnimatorController(arg1):
'''
:param arg1: RuntimeAnimatorController
:type arg1: UnityEngine.RuntimeAnimatorController
'''
pass
@staticmethod
@overload
def get_Item(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: AnimationClip
:rtype: UnityEngine.AnimationClip
'''
pass
@staticmethod
@overload
def get_Item(arg1):
'''
:param arg1: AnimationClip
:type arg1: UnityEngine.AnimationClip
:returns: AnimationClip
:rtype: UnityEngine.AnimationClip
'''
pass
@staticmethod
def get_Item(arg1=None):
pass
@staticmethod
@overload
def set_Item(arg1, arg2):
'''
:param arg1: String
:type arg1: System.String or str
:param arg2: AnimationClip
:type arg2: UnityEngine.AnimationClip
'''
pass
@staticmethod
@overload
def set_Item(arg1, arg2):
'''
:param arg1: AnimationClip
:type arg1: UnityEngine.AnimationClip
:param arg2: AnimationClip
:type arg2: UnityEngine.AnimationClip
'''
pass
@staticmethod
def set_Item(arg1=None, arg2=None):
pass
@staticmethod
def get_overridesCount():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetOverrides(arg1):
'''
:param arg1: Undefined variable
:type arg1: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
def ApplyOverrides(arg1):
'''
:param arg1: Undefined variable
:type arg1: SystemCollectionsGenericIList.SystemCollectionsGenericIList
'''
pass
@staticmethod
def get_animationClips():
'''
:returns: AnimationClipArray
:rtype: UnityEngine.AnimationClipArray
'''
pass
@staticmethod
def GetInstanceID():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetHashCode():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def Equals(arg1):
'''
:param arg1: Object
:type arg1: System.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_name():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def set_name(arg1):
'''
:param arg1: String
:type arg1: System.String or str
'''
pass
@staticmethod
def ToString():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def GetType():
'''
:returns: Type
:rtype: System.Type
'''
pass
|
python
|
from pydantic import BaseModel, Field
class DOIDoc(BaseModel):
"""
DOIs to reference specific materials on Materials Project.
"""
doi: str = Field(
None, description="DOI of the material.",
)
bibtex: str = Field(
None, description="Bibtex reference of the material.",
)
task_id: str = Field(
None,
description="The Materials Project ID of the material. This comes in the form: mp-******",
)
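
# Illustrative usage sketch (not part of the original module); the DOI,
# bibtex and task_id values below are hypothetical placeholders.
if __name__ == "__main__":
    doc = DOIDoc(doi="10.17188/1234567", bibtex="@article{example2021}", task_id="mp-149")
    print(doc.dict())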
|
python
|
from flask import g, jsonify, request
from app import auth
from app.services.base.models import User, LoginLog
from app.services.base.views import bp
@bp.route('/login_logs')
@auth.login_required
def list_login_logs():
query = LoginLog.query \
.join(User, LoginLog.userIntID == User.id) \
.with_entities(LoginLog, User.username)
username = request.args.get('username_like')
if username:
query = query.filter(User.username.like(u'%{0}%'.format(username)))
if g.role_id not in [1, 2, 3]:
query = query.filter(User.id == g.user_id)
records = query.pagination(code_list=['isLogged'])
return jsonify(records)
|
python
|
import json
import os
from typing import List
from stonehenge.db.operations import Operation
from stonehenge.db.migrations.exceptions import UnappliedMigrationException
class Migration:
def __init__(
self,
operations: List[Operation],
migrations_dir: str,
):
self.operations = operations
self.migrations_dir = migrations_dir
def save_to_file(self) -> str:
next_migration_index = self.get_next_migration_index()
filename = f"Migration_{next_migration_index}.json"
filepath = os.path.join(self.migrations_dir, filename)
if os.path.isfile(filepath):
raise UnappliedMigrationException(filename)
with open(filepath, "w+") as f:
content = self.to_json()
content_str = json.dumps(content, indent=4)
f.write(content_str)
return filename
    def get_next_migration_index(self) -> int:
        highest = 1
        for filename in os.listdir(self.migrations_dir):
            try:
                # filenames look like "Migration_<n>.json"; parse the whole
                # number, not just the first digit
                index = int(filename[len("Migration_"):-len(".json")])
            except ValueError:
                continue
            if index >= highest:
                highest = index + 1
        return highest
def to_json(self):
return {
"operations": [o.to_json() for o in self.operations],
}
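
# Illustrative sketch (not part of the original module): shows how migration
# files are numbered. A temporary directory is used so the example is
# self-contained; no Operation instances are needed for the numbering itself.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        migration = Migration(operations=[], migrations_dir=tmp)
        print(migration.save_to_file())  # Migration_1.json
        print(migration.save_to_file())  # Migration_2.json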
|
python
|
"""
web server
为使用者提供一个类,
使用这可以快速的搭建web服务,
展示自己的网页
"""
from socket import *
from select import select
# 主体功能
class HTTPServer:
def __init__(self,host='0.0.0.0',port=8080,dir=None):
self.host = host
self.port = port
self.dir = dir
def start(self):
pass
if __name__ == '__main__':
    # The user decides: the network address and the data to serve
    host = '0.0.0.0'
    port = 8000
dir = "./static" # 数据位置
# 实例化对象,调用方法启动服务
httpd = HTTPServer(host=host,port=port,dir=dir)
httpd.start() # 启动服务
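
# Illustrative sketch (not part of the original skeleton, which leaves start()
# as `pass`): a minimal blocking implementation in a subclass that serves a
# fixed HTML page to every client. This is purely an assumption about how the
# skeleton might be filled in.
class DemoHTTPServer(HTTPServer):
    def start(self):
        sockfd = socket()
        sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        sockfd.bind((self.host, self.port))
        sockfd.listen(5)
        while True:
            conn, addr = sockfd.accept()
            conn.recv(4096)  # read and discard the request
            body = "<h1>Hello from the demo server</h1>"
            resp = "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n" + body
            conn.send(resp.encode())
            conn.close()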
|
python
|
## HOST and PORT info
HOST = "127.0.0.1"
PORT = 8000
## Server name
SERVER = "Lemon Server"
## folder config
STATIC = "static"
RENDER = "render"
## Token info for sessions
token = "SessionToken"
token_length = 100
#blacklist
blacklist = []
#Temp Folder
TEMP = "Temp"
#File extension for files that can have variables in them
FILE_EXTENSION_VAR = ".html"
errorHtmlFile = "config/error.html"
DEFAULT_MIME_TYPE = "text/plain"
LOG_LOCATION = "app/log/log.txt"
ALLOWED_HOSTS = ["localhost","127.0.0.1"]
EXTENSIONS_CONFIG = "app/extensions/config.json"
# These are for the dev server
SOCKET_BUFFER = 65536
NORMAL_SERVER = True
DEBUG = False
ASYNCIO_MAX_WORKERS = 1000
#These are for ssl in the dev server
SSL_CERT = "config/ssl/ssl.crt"
SSL_KEY = "config/ssl/ssl.key"
SSL = False
SSL_PORT = 4433
# This should be changed to True when using gunicorn. If you're using something
# else and it's not working, try setting this to False
RETURN_BYTES = True
# These configurations are for gunicorn
bind = HOST+":"+str(PORT)
workers = 1
worker_connections = 1000
keepalive = 2
|
python
|
#
# Copyright 2018 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Defines basic SDK constants and classes.
All public names here are also re-exported from :mod:`oneagent.sdk` and should
preferably be used from there.
'''
import os
_DEBUG_LEAKS = False
if _DEBUG_LEAKS:
import traceback
#: The Dynatrace Tag request header name which is used to transport the tag between agents
#: (as a string tag).
DYNATRACE_HTTP_HEADER_NAME = 'X-dynaTrace'
#: The Dynatrace Tag messaging property name which is used to transport the tag between agents
#: (as a byte tag).
#:
#: .. versionadded:: 1.3
DYNATRACE_MESSAGE_PROPERTY_NAME = "dtdTraceTagInfo"
#: DEPRECATED alias for :data:`DYNATRACE_MESSAGE_PROPERTY_NAME`
#:
#: .. deprecated:: 1.3
DYNATRACE_MESSAGE_PROPERTYNAME = DYNATRACE_MESSAGE_PROPERTY_NAME
#: Allow SDK to be used in forked child processes.
_ONESDK_INIT_FLAG_FORKABLE = 1
class _Uninstantiable(object):
'''Classes deriving from this class cannot be instantiated.'''
def __new__(cls):
raise ValueError('Attempt to instantiate')
def _add_enum_helpers(decorated_cls):
# pylint:disable=protected-access
decorated_cls._enum_name_by_val = dict()
for key in dir(decorated_cls):
val = getattr(decorated_cls, key)
if isinstance(val, int):
decorated_cls._enum_name_by_val.setdefault(val, key)
@classmethod
def _value_name(cls, val):
result = cls._enum_name_by_val.get(val) # pylint:disable=no-member
if result is None:
return "<Unknown " + cls.__name__ + " value " + repr(val) + ">"
return cls.__name__ + "." + result
decorated_cls._value_name = _value_name
return decorated_cls
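
# Illustrative sketch (not part of the original module): applying the helper
# to a small enum-like class; the class and its values are hypothetical.
# _ExampleState._value_name(1) returns '_ExampleState.BUSY', while an unknown
# value yields '<Unknown _ExampleState value ...>'.
@_add_enum_helpers
class _ExampleState(_Uninstantiable):
    READY = 0
    BUSY = 1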
class AgentState(_Uninstantiable):
'''Constants for the agent's state. See
:attr:`oneagent.sdk.SDK.agent_state`.'''
#: The SDK stub is connected to the agent, which is currently active.
ACTIVE = 0
#: The SDK stub is connected to the agent, which is temporarily inactive.
TEMPORARILY_INACTIVE = 1
#: The SDK stub is connected to the agent, which is permanently inactive.
PERMANENTLY_INACTIVE = 2
#: The agent has not been initialized.
NOT_INITIALIZED = 3
#: Some unexpected error occurred while trying to determine the agent state.
ERROR = -1
class ErrorCode(_Uninstantiable):
'''Constants for error codes of the native agent, as may be contained in
:attr:`.SDKError.code`.'''
# Same bit pattern if interpreted in 32 bit unsigned / two's complement
_ERROR_BASE = 0xAFFE0000 if os.name == 'nt' else -0x50020000
#: The operation completed successfully. You usually won't get any object
#: with error code at all in that case.
SUCCESS = 0
#: The operation failed, but no more specific error code fits the failure.
GENERIC = _ERROR_BASE + 1
#: A function was called with an invalid argument.
INVALID_ARGUMENT = _ERROR_BASE + 2
NOT_IMPLEMENTED = _ERROR_BASE + 3 #: The called function is not implemented.
NOT_INITIALIZED = _ERROR_BASE + 4 #: The SDK has not been initialized.
#: There is not enough available memory to complete the operation.
OUT_OF_MEMORY = _ERROR_BASE + 5
#: The native SDK stub was configured to _not_ try to load the actual agent
#: module.
AGENT_NOT_ACTIVE = _ERROR_BASE + 6
#: Either the OneAgent SDK for C/C++ or the OneAgent binary could not be loaded.
LOAD_AGENT = _ERROR_BASE + 7
#: The expected exports could not be found either in the OneAgent SDK for C/C++
#: or the OneAgent binary.
INVALID_AGENT_BINARY = _ERROR_BASE + 8
#: The operation failed because of an unexpected error.
UNEXPECTED = _ERROR_BASE + 9
#: The command line argument / stub variable definition was ignored because
#: an entry with the same key was already present.
ENTRY_ALREADY_EXISTS = _ERROR_BASE + 10
#: The SDK agent module doesn't support the feature level required by this
#: version of the SDK stub.
FEATURE_LEVEL_NOT_SUPPORTED = _ERROR_BASE + 11
#: The SDK agent module doesn't support the SDK interface required by this
#: version of the SDK stub
INTERFACE_NOT_SUPPORTED = _ERROR_BASE + 12
#: The operation failed because this is the child process of a fork that
#: occurred while the SDK was initialized.
FORK_CHILD = _ERROR_BASE + 13
class AgentForkState(_Uninstantiable):
'''Constants for the agent's fork state. See
:attr:`oneagent.sdk.SDK.agent_fork_state`.'''
#: SDK cannot be used in this process, but forked processes may use the SDK.
#: This is the state of the process
#: that called :func:`oneagent.initialize` with :code:`forkable=True`
PARENT_INITIALIZED = 1
#: Forked processes can use the SDK.
#: Using the SDK in this process is allowed but
#: changes the state to :attr:`.FULLY_INITIALIZED`
#: This is the state of all child processes
#: of a process that is :attr:`.PARENT_INITIALIZED`.
PRE_INITIALIZED = 2
#: SDK can be used, forked processes may not use the SDK.
#: This is the state of a process that was previously :attr:`.PRE_INITIALIZED`
#: and then called an SDK function.
FULLY_INITIALIZED = 3
#: SDK can be used, forked processes may not use the SDK,
#: :func:`oneagent.initialize` was called without :code:`forkable=True`.
NOT_FORKABLE = 4
#: Some error occurred while trying to determine the agent fork state.
ERROR = -1
class MessageSeverity(_Uninstantiable): # Private
'''Constants for the severity of log messages.
The levels with the lower numerical values include all messages of the ones
with the higher values. Note that :attr:`.DEBUG` is the highest severity,
contrary to usual conventions.'''
    FINEST = 0 #: Most verbose logging (highly detailed tracing).
FINER = 1 #: Slightly less verbose logging (fairly detailed tracing).
FINE = 2 #: Still verbose logging (informational tracing messages).
CONFIG = 3 #: Log configuration messages.
INFO = 4 #: Log informational messages.
WARNING = 5 #: Log conditions that indicate a potential problem.
SEVERE = 6 #: Log messages indicating a serious failure.
#: Debug message. None should be logged by default, unless they are
#: specifically enabled with special debug options. Note that contrary to
#: usual conventions, this is the highest severity.
DEBUG = 7
#: No messages of this level exist, so using this level disables all log
#: messages.
NONE = 8
class MessagingDestinationType(_Uninstantiable):
'''Messaging Destination Type Constants
'''
QUEUE = 1 #: A message queue: a message sent to this destination will be (successfully)
#: received by only one consumer.
TOPIC = 2 #: A message topic: a message sent to this destination will be received by all
#: subscribed consumers.
class MessagingVendor(_Uninstantiable):
'''Messaging System Vendor Strings
'''
HORNETQ = "HornetQ" #: vendor string for HornetQ
ACTIVE_MQ = "ActiveMQ" #: vendor string for ActiveMQ
RABBIT_MQ = "RabbitMQ" #: vendor string for RabbitMQ
ARTEMIS = "Artemis" #: vendor string for Artemis
WEBSPHERE = "WebSphere" #: vendor string for WebSphere
MQSERIES_JMS = "MQSeries JMS" #: vendor string for MQSeries JMS
MQSERIES = "MQSeries" #: vendor string for MQSeries
TIBCO = "Tibco" #: vendor string for Tibco
class DatabaseVendor(_Uninstantiable):
'''String constants for well-known database vendors. Use for the
:code:`vendor` parameter of
:meth:`oneagent.sdk.SDK.create_database_info`.'''
APACHE_HIVE = "ApacheHive" #: Database vendor string for Apache Hive.
#: Database vendor string for Apache Derby (aka. IBM Cloudscape).
CLOUDSCAPE = "Cloudscape"
HSQLDB = "HSQLDB" #: Database vendor string for HyperSQL DB.
#: Database vendor string for OpenEdge Database (aka. Progress).
PROGRESS = "Progress"
MAXDB = "MaxDB" #: Database vendor string for SAP MaxDB.
HANADB = "HanaDB" #: Database vendor string for SAP HANA DB.
INGRES = "Ingres" #: Database vendor string for Ingres Database.
FIRST_SQL = "FirstSQL" #: Database vendor string for FirstSQL.
ENTERPRISE_DB = "EnterpriseDB" #: Database vendor string for EnterpriseDB.
CACHE = "Cache" #: Database vendor string for InterSystems Cache.
ADABAS = "Adabas" #: Database vendor string for ADABAS.
FIREBIRD = "Firebird" #: Database vendor string for Firebird Database.
DB2 = "DB2" #: Database vendor string for IBM Db2.
#: Database vendor string for JDBC connections to Apache Derby
#: (aka. IBM Cloudscape).
DERBY_CLIENT = "Derby Client"
#: Database vendor string for Derby Embedded.
DERBY_EMBEDDED = "Derby Embedded"
FILEMAKER = "Filemaker" #: Database vendor string for FileMaker Pro.
INFORMIX = "Informix" #: Database vendor string for IBM Informix.
INSTANT_DB = "InstantDb" #: Database vendor string for InstantDB.
INTERBASE = "Interbase" #: Database vendor string for Embarcadero InterBase.
MYSQL = "MySQL" #: Database vendor string for MySQL.
MARIADB = "MariaDB" #: Database vendor string for MariaDB.
NETEZZA = "Netezza" #: Database vendor string for IBM Netezza.
ORACLE = "Oracle" #: Database vendor string for Oracle Database.
PERVASIVE = "Pervasive" #: Database vendor string for Pervasive PSQL.
POINTBASE = "Pointbase" #: Database vendor string for PointBase.
POSTGRESQL = "PostgreSQL" #: Database vendor string for PostgreSQL.
SQLSERVER = "SQL Server" #: Database vendor string for Microsoft SQL Server.
SQLITE = "sqlite" #: Database vendor string for SQLite.
#: Database vendor string for SAP ASE
#: (aka. Sybase SQL Server, Sybase DB, Sybase ASE).
SYBASE = "Sybase"
TERADATA = "Teradata" #: Database vendor string for Teradata Database.
VERTICA = "Vertica" #: Database vendor string for Vertica.
CASSANDRA = "Cassandra" #: Database vendor string for Cassandra.
H2 = "H2" #: Database vendor string for H2 Database Engine.
#: Database vendor string for ColdFusion In-Memory Query
#: (aka. Query of Queries).
COLDFUSION_IMQ = "ColdFusion IMQ"
REDSHIFT = "Amazon Redshift" #: Database vendor string for Amazon Redshift.
class ChannelType(_Uninstantiable):
'''Constants for communication channel types, for use as
:attr:`oneagent.sdk.Channel.type_`'''
OTHER = 0 #: Some other channel type or unknown channel type.
#: The channel is a TCP/IP connection.
#:
#: The channel endpoint string should be the host name, followed by a colon,
#: followed by the port number (in decimal). E.g. :code:`localhost:1234` or
#: :code:`example.com:80`.
TCP_IP = 1
#: The channel is a connection via Unix domain sockets.
#:
#: The channel endpoint string should be the path of the Unix domain
#: sockets.
UNIX_DOMAIN_SOCKET = 2
#: The channel is a named pipe.
#:
#: The channel endpoint string should be the pipe name.
NAMED_PIPE = 3
#: The channel is some in-process means of communication.
IN_PROCESS = 4
class SDKError(Exception):
'''Exception for SDK errors (mostly during initialization, see
:func:`oneagent.initialize`).'''
def __init__(self, code, msg):
super(SDKError, self).__init__(code, msg)
#: An :class:`int` error code. Can be one of the :class:`.ErrorCode`
#: constants. If not, it is a Windows error code on Windows and an errno
#: number on other systems.
self.code = code
#: The :class:`str` error message associated with :attr:`code`
#: (potentially contains more information than could be deduced from
#: :attr:`code` alone).
self.message = msg
class SDKInitializationError(SDKError):
'''Exception for initialization errors.'''
def __init__(self, code, msg, agent_version='-/-'):
super(SDKInitializationError, self).__init__(code, msg)
#: The :class:`str` agent version associated with this error.
self.agent_version = agent_version
class SDKHandleBase(object):
'''Base class for SDK handles that must be closed explicitly.
You can use this class as a context manager (i.e. with a :code:`with`-block)
to automatically close the handle.'''
def __init__(self, nsdk, handle):
self.handle = handle
self.nsdk = nsdk
if _DEBUG_LEAKS:
self.alloc_at = ''.join(traceback.format_stack())
def close_handle(self, nsdk, handle):
raise NotImplementedError(
'Must implement close_handle in derived class')
def __del__(self):
if self.handle is None:
return
try:
warn = self.nsdk.agent_get_logging_callback()
if not warn:
return
if _DEBUG_LEAKS:
                warn(
                    'Unclosed SDK handle '
                    + repr(self)
                    + ' from '
                    + self.alloc_at)
else:
warn('Unclosed SDK handle ' + repr(self))
finally:
self.close()
def __str__(self):
return '{}({})'.format(type(self), self.handle)
def close(self):
'''Closes the handle, if it is still open.
Usually, you should prefer using the handle as a context manager to
calling :meth:`close` manually.'''
if self.handle is not None:
self.close_handle(self.nsdk, self.handle)
self.handle = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __bool__(self):
return bool(self.handle)
__nonzero__ = __bool__
class DbInfoHandle(SDKHandleBase):
'''Opaque handle to database information. See
:meth:`oneagent.sdk.SDK.create_database_info`.'''
def close_handle(self, nsdk, handle):
nsdk.databaseinfo_delete(handle)
class WebapplicationInfoHandle(SDKHandleBase):
'''Opaque handle to web application information. See
:meth:`oneagent.sdk.SDK.create_web_application_info`.'''
def close_handle(self, nsdk, handle):
nsdk.webapplicationinfo_delete(handle)
class MessagingSystemInfoHandle(SDKHandleBase):
'''Opaque handle for messaging system info object. See
:meth:`oneagent.sdk.SDK.create_messaging_system_info`.'''
def close_handle(self, nsdk, handle):
nsdk.messagingsysteminfo_delete(handle)
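
# Illustrative sketch (not part of the original module): the handle classes
# above are context managers, so they can be closed deterministically with a
# `with` block instead of relying on __del__. The dummy classes below are
# hypothetical stand-ins for the native SDK interface.
class _DemoNativeSdk(object):
    def agent_get_logging_callback(self):
        return None
    def demo_delete(self, handle):
        print('closed handle ' + repr(handle))

class _DemoHandle(SDKHandleBase):
    def close_handle(self, nsdk, handle):
        nsdk.demo_delete(handle)

# Usage:
#     with _DemoHandle(_DemoNativeSdk(), 42):
#         pass
# prints "closed handle 42" when the with-block exits.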
|
python
|
#!/usr/bin/python
import sys
import re
import os
import errno
fasta_file = sys.argv[1]
fasta_file_AT_only = sys.argv[2]
if not os.path.exists(os.path.dirname(fasta_file_AT_only)):
try:
os.makedirs(os.path.dirname(fasta_file_AT_only))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(fasta_file,'r') as fasta:
with open(fasta_file_AT_only,'w') as fasta_out:
for line in fasta:
if line[0] == '>':
fasta_out.write(line)
if line[0] != '>':
line = str(line).upper()
line = line.replace('G','A')
line = line.replace('C','T')
fasta_out.write(line)
|
python
|
"""
* Vehicle Routing Problem *
Steps of the algorithm:
1. Creation of a given number of clusters
2. Creation of an optimal path (loop) for each cluster
Graph Optimisation : basic 2-opt algorithm
Clustering : centroid-based method
"""
from random import *
from math import sqrt
import matplotlib.pyplot as plt
import networkx as nx
import time
def dist(x1, y1, x2, y2):
return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
# cluster's functions
def create_clusters(reference_elements, elements_to_organise):
global target_index
new_node_color = []
new_clusters = [[] for _ in range(NUMBER_CLUSTERS)] # initialisation of the clusters list
for k in range(len(elements_to_organise)):
record = dist(0, 0, WIDTH, HEIGHT)
for j in range(len(reference_elements)):
d = dist(elements_to_organise[k][0], elements_to_organise[k][1],
reference_elements[j][0], reference_elements[j][1])
if d < record:
record = d
target_index = j
new_clusters[target_index].append(elements_to_organise[k])
new_node_color.append(COLORS[target_index])
return new_clusters, new_node_color
def centroid_of(lst):
xG = yG = 0
for a in range(len(lst)):
xG += lst[a][0] / len(lst)
yG += lst[a][1] / len(lst)
new_centroid = (xG, yG)
return new_centroid
# graph's functions
def total_distance(lst):
d = 0
for j in range(len(lst) - 1):
d += dist(vertices[lst[j]][0], vertices[lst[j]][1], vertices[lst[j + 1]][0], vertices[lst[j + 1]][1])
return d
def reverse_sublist(lst, start, end):
lst[start:end + 1] = lst[start:end + 1][::-1]
return lst
# Code from https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain#Python
def convex_hull(points):
points = sorted(set(points))
if len(points) <= 1:
return points
def cross(o, a, b):
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
# Build lower hull
lower = []
for p in points:
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
lower.pop()
lower.append(p)
# Build upper hull
upper = []
for p in reversed(points):
while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
upper.pop()
upper.append(p)
return lower[:-1] + upper[:-1]
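
# Example (illustrative, not in the original): for a square with one interior
# point, the hull is returned in counter-clockwise order starting from the
# lowest, left-most point:
#   convex_hull([(0, 0), (2, 0), (1, 1), (2, 2), (0, 2)])
#   -> [(0, 0), (2, 0), (2, 2), (0, 2)]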
NUMBER_VERTICES = 20
NUMBER_CLUSTERS = 2 # up to 6
NUMBER_ITERATIONS = 10 ** 4
NUMBER_ITERATIONS2 = 10 ** 3
WIDTH = HEIGHT = 100 # dimension of the canvas
VERTEX_SIZE = 150
COLORS = ['orange', 'red', 'cyan', 'green', 'pink', 'purple']
vertices = []
G = nx.Graph()
print("* Vehicle Routing Problem *")
print("Number of vertices :", NUMBER_VERTICES,
"| Number of clusters :", NUMBER_CLUSTERS,
"| Dimensions of the canvas : (" + str(WIDTH), ";", str(HEIGHT) + ")\n")
start_time = time.time()
# creation of the vertices
for i in range(NUMBER_VERTICES):
new_vertex = (randint(1, WIDTH), randint(1, HEIGHT))
vertices.append(new_vertex)
G.add_node(i, pos=(new_vertex[0], new_vertex[1]))
# initialisation
initial_vertices = sample(vertices, NUMBER_CLUSTERS)
clusters, node_color = create_clusters(initial_vertices, vertices)
# clusters
# --------------------------------------------------------------
previous_state = clusters
current_state = []
iteration = 0
while previous_state != current_state:
previous_state = clusters
current_state = []
centroids = []
for cluster in clusters:
centroids.append(centroid_of(cluster))
clusters, node_color = create_clusters(centroids, vertices)
current_state = clusters
iteration += 1
print("Clusters : ✓")
print("--- %s seconds ---" % (time.time() - start_time))
# --------------------------------------------------------------
# graphs
# --------------------------------------------------------------
platform = (WIDTH / 2, HEIGHT / 2)
vertices.append(platform)
G.add_node(NUMBER_VERTICES, pos=(platform[0], platform[1]))
node_color.append('silver')
pos = nx.get_node_attributes(G, 'pos')
for cluster in clusters:
current_color = COLORS[clusters.index(cluster)]
if len(cluster) > 2:
path = [vertices.index(vertex) for vertex in cluster] # initial path
# adding "platform" at the beginning and the end of the path
path.insert(0, NUMBER_VERTICES)
path.append(path[0])
record_distance = dist(0, 0, WIDTH, HEIGHT) * NUMBER_VERTICES
for i in range(NUMBER_ITERATIONS):
selected_vertices = sample(range(1, len(cluster) + 1), 2)
test = path.copy()
test = reverse_sublist(test, selected_vertices[0], selected_vertices[1])
test_distance = total_distance(test)
if test_distance < record_distance:
record_distance = test_distance
path = test
for i in range(len(cluster) + 1):
G.add_edge(path[i], path[i + 1], color=current_color)
if len(cluster) == 2:
G.add_edge(vertices.index(cluster[0]), vertices.index(cluster[1]), color=current_color)
print("Graphs : ✓")
print("--- %s seconds ---" % (time.time() - start_time))
plt.figure(str(NUMBER_CLUSTERS) + "-means | Iteration " + str(iteration) + " (before exchange between clusters)")
# --------------------------------------------------------------
# exchange vertices between clusters
# --------------------------------------------------------------
# determine the convex hull of each cluster
hulls = []
for cluster in clusters:
hulls.append([vertex for vertex in convex_hull(cluster)])
# 1. select two clusters:
# one from which we will select vertex ([0]) and one in which we will try to insert it at a random location ([1])
# for i in range(len(NUMBER_ITERATIONS2)):
selected_clusters = sample(clusters, 2)
selected_hull = hulls[clusters.index(selected_clusters[0])]
selected_vertex = choice(selected_hull)
selected_location = choice(range(len(selected_clusters[1])))
print(vertices.index(selected_vertex), vertices.index(selected_clusters[1][selected_location]))
# --------------------------------------------------------------
edge_colors = [G[u][v]['color'] for u,v in G.edges()]
plt.figure(str(NUMBER_CLUSTERS) + "-means | Iteration " + str(iteration))
nx.draw(G,
pos,
node_size=VERTEX_SIZE,
node_color=node_color,
edge_color=edge_colors,
width=4,
with_labels=True,
font_size=12)
plt.show()
|
python
|
def append_new_line(file_name, text_to_append):
"""Append given text as a new line at the end of file"""
# Open the file in append & read mode ('a+')
with open(file_name, "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
# If file is not empty then append '\n'
data = file_object.read(100)
if len(data) > 0:
file_object.write("\n")
# Append text at the end of file
file_object.write(text_to_append)
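
# Illustrative usage sketch (not part of the original snippet); the file name
# and lines below are hypothetical.
if __name__ == "__main__":
    append_new_line("example_notes.txt", "first line")
    append_new_line("example_notes.txt", "second line")
    # example_notes.txt now holds the two lines; a leading newline is only
    # written when the file already has content, so no blank first line.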
|
python
|
import discord
import os
import requests
import json
import random
from replit import db
from keepmealive import keep_alive
client = discord.Client()
sad_words=["sad","depressed","unhappy","lost","angry","miserable","depressing"]
starter_encouragements=[
"cheer Up! ",
"You are a great Guy!"
]
def get_quotes():
    response = requests.get("https://zenquotes.io/api/random")
    json_data = json.loads(response.text)
    quote = json_data[0]['q'] + "-" + json_data[0]['a']
    return quote
def update_encouragements(encouraging_message):
if "encouragements" in db.keys():
encouragements = db["encouragements"]
encouragements.append(encouraging_message)
db["encouragements"] = encouragements
else:
db["encouragements"] = [encouraging_message]
def delete_encouragements(index):
encouragements=db["encouragements"]
if len(encouragements)> index:
del encouragements[index]
db["encouragements"]=encouragements
@client.event
async def on_ready():
print('We have Logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
msg = message.content
if message.content.startswith('$inspire'):
quote=get_quotes()
await message.channel.send(quote)
    options = starter_encouragements
    if "encouragements" in db.keys():
        options = options + list(db["encouragements"])
    if any(word in msg for word in sad_words):
        await message.channel.send(random.choice(options))
if msg.startswith("$new"):
encouraging_message = msg.split("$new",1)[1]
update_encouragements(encouraging_message)
await message.channel.send("New Encourage message added!")
if msg.startswith("$del"):
encouragement=[]
if "encouragements" in db.keys():
index= int(msg.split("$del",1)[1])
delete_encouragements(index)
encouragements = db["encouragements"]
await message.channel.send(encouragements)
if msg.startswith("$list"):
encouragements = []
if "encouragements" in db.keys():
encouragements = db["encouragements"]
await message.channel.send(encouragements)
if msg.startswith("$responding"):
value = msg.split("$responding ",1)[1]
if value.lower() == "true":
db["responding"] = True
await message.channel.send("Responding is on.")
else:
db["responding"] = False
await message.channel.send("Responding is off.")
keep_alive()
client.run(os.getenv('TOKEN'))
|
python
|
from copy import deepcopy
import numpy
from theano.gof.op import PureOp
from theano.gof import Apply, generic, Container
from theano.gof.link import LocalLinker, map_storage, add_clear_storage
from theano import function, Mode
from theano.ifelse import ifelse
import theano.tensor as T
class IfElseIfElseIf(PureOp):
def __init__(self, inplace=False):
self.inplace=inplace # check destroyhandler and others to ensure that a view_map with
#multiple inputs can work
assert not self.inplace
def make_node(self, c1, t1, c2,t2,c3,t3,f3):
assert t1.type == f3.type
assert t2.type == t3.type
assert t3.type == f3.type
return Apply(self, [c1,t1,c2,t2,c3,t3,f3], [t1.type()])
def make_thunk(self, node, storage_map, compute_map, no_recycling):
input_computed = [compute_map[v] for v in node.inputs]
output_computed = [compute_map[v] for v in node.outputs]
input_registers = [storage_map[v] for v in node.inputs]
output_registers = [storage_map[v] for v in node.outputs]
outtype = node.outputs[0].type
def thunk():
if not input_computed[0][0]:
return [0]
else:
truthval = input_registers[0][0]
if truthval:
if not input_computed[1][0]:
return [1]
else:
output_computed[0][0]=1
output_registers[0][0]=outtype.filter(deepcopy(input_registers[1][0]))
return []
else:
if not input_computed[2][0]:
return [2]
else:
truthval = input_registers[2][0]
if truthval:
if not input_computed[3][0]:
return [3]
else:
output_computed[0][0] = 1
output_registers[0][0] = outtype.filter(deepcopy(input_registers[3][0]))
return []
else:
if not input_computed[4][0]:
return [4]
else:
truthval = input_registers[4][0]
if truthval:
if not input_computed[5][0]:
return [5]
else:
output_computed[0][0] = 1
output_registers[0][0] = outtype.filter(deepcopy(input_registers[5][0]))
return []
else:
if not input_computed[6][0]:
return [6]
else:
output_computed[0][0] = 1
output_registers[0][0] = outtype.filter(deepcopy(input_registers[6][0]))
return []
thunk.lazy = True
return thunk
class NotImplementedOp(PureOp):
class E(Exception): pass
def make_node(self, x):
return Apply(self, [x], [x.type()])
def make_thunk(self, node, storage_map, compute_map, no_recycling):
def thunk():
raise self.E()
thunk.lazy=False
return thunk
def test_ifelse():
a = T.scalar()
b = generic()
c = generic()
notimpl = NotImplementedOp()
f = function([a,b,c], ifelse(a, notimpl(b), c),
mode=Mode(linker='vm', optimizer='fast_run'))
    try:
        print("case 1")
        f(1, 'a', 'b')
        assert False
    except NotImplementedOp.E:
        pass
    print("... passed")
    print("case 2")
    print(f(0, 'a', 'b'))
    assert f(0, 'a', 'b') == 'b'
    print("... passed")
def more_complex_test():
notimpl = NotImplementedOp()
ifelseifelseif = IfElseIfElseIf()
x1 = T.scalar('x1')
x2 = T.scalar('x2')
c1 = T.scalar('c1')
c2 = T.scalar('c2')
t1 = ifelse(c1,x1,notimpl(x2))
t1.name = 't1'
t2 = t1*10
t2.name = 't2'
t3 = ifelse(c2,t2, x1+t1)
t3.name = 't3'
t4 = ifelseifelseif(T.eq(x1,x2), x1, T.eq(x1,5), x2, c2, t3, t3+0.5)
t4.name = 't4'
f = function([c1,c2,x1,x2], t4, mode=Mode(linker='vm', optimizer='fast_run'))
    print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0))
    assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5
    print('... passed')
if __name__ == '__main__':
more_complex_test()
|
python
|
import sqlite3
def connectTab(db_name: str = 'dados.db') -> sqlite3.Connection:
conexao = sqlite3.connect(f'../{db_name}')
conexao.row_factory = sqlite3.Row
return conexao
def createTab(tab_name: str = 'pessoas'):
conexao = connectTab()
print(type(conexao))
with conexao:
cursor = conexao.cursor()
sql = f'CREATE TABLE IF NOT EXISTS {tab_name}(' \
f'id INTEGER NOT NULL PRIMARY KEY,' \
f'nome TEXT NOT NULL' \
f');'
cursor.execute(sql)
conexao.commit()
def insert(tab_name: str = 'pessoas', *args: str):
conexao = connectTab()
with conexao:
cursor = conexao.cursor()
sql = f'INSERT INTO {tab_name} VALUES \n'
c, ids = len(args), list()
for arg in args:
sql += f"(?, '{arg}')"
if c > 1:
sql += ', \n'
ids.append(None)
c -= 1
sql += ';'
cursor.execute(sql, ids)
conexao.commit()
def remove(ident: int, tab_name: str = 'pessoas'):
conexao = connectTab()
with conexao:
cursor = conexao.cursor()
sql = f'DELETE FROM {tab_name} WHERE id={ident};'
cursor.execute(sql)
conexao.commit()
def showData(tab_name: str = 'pessoas', only_keys: bool = False):
conexao = connectTab()
with conexao:
cursor = conexao.cursor()
sql = f'SELECT * FROM {tab_name};'
cursor.execute(sql)
result = cursor.fetchall()
pessoas = list()
for data in result:
data = dict(data)
if only_keys:
data = data.keys()
pessoas = list(data)
else:
pessoas.append(data)
return pessoas
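# Illustrative usage sketch (table and names below are examples only):
#   createTab()                          # creates table 'pessoas' in ../dados.db
#   insert('pessoas', 'Ana', 'Bruno')    # inserts two rows with auto-assigned ids
#   remove(1)                            # deletes the row whose id is 1
#   print(showData())                    # list of row dicts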
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Attributes
from ._models_py3 import BackupSecretResult
from ._models_py3 import DeletedSecretBundle
from ._models_py3 import DeletedSecretItem
from ._models_py3 import DeletedSecretListResult
from ._models_py3 import Error
from ._models_py3 import KeyVaultError
from ._models_py3 import SecretAttributes
from ._models_py3 import SecretBundle
from ._models_py3 import SecretItem
from ._models_py3 import SecretListResult
from ._models_py3 import SecretProperties
from ._models_py3 import SecretRestoreParameters
from ._models_py3 import SecretSetParameters
from ._models_py3 import SecretUpdateParameters
except (SyntaxError, ImportError):
from ._models import Attributes # type: ignore
from ._models import BackupSecretResult # type: ignore
from ._models import DeletedSecretBundle # type: ignore
from ._models import DeletedSecretItem # type: ignore
from ._models import DeletedSecretListResult # type: ignore
from ._models import Error # type: ignore
from ._models import KeyVaultError # type: ignore
from ._models import SecretAttributes # type: ignore
from ._models import SecretBundle # type: ignore
from ._models import SecretItem # type: ignore
from ._models import SecretListResult # type: ignore
from ._models import SecretProperties # type: ignore
from ._models import SecretRestoreParameters # type: ignore
from ._models import SecretSetParameters # type: ignore
from ._models import SecretUpdateParameters # type: ignore
from ._key_vault_client_enums import (
DeletionRecoveryLevel,
)
__all__ = [
'Attributes',
'BackupSecretResult',
'DeletedSecretBundle',
'DeletedSecretItem',
'DeletedSecretListResult',
'Error',
'KeyVaultError',
'SecretAttributes',
'SecretBundle',
'SecretItem',
'SecretListResult',
'SecretProperties',
'SecretRestoreParameters',
'SecretSetParameters',
'SecretUpdateParameters',
'DeletionRecoveryLevel',
]
|
python
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.exceptions import TaskError
from pants.task.lint_task_mixin import LintTaskMixin
from pants.contrib.go.tasks.go_fmt_task_base import GoFmtTaskBase
class GoCheckstyle(LintTaskMixin, GoFmtTaskBase):
"""Checks Go code matches gofmt style."""
def execute(self):
with self.go_fmt_invalid_targets(['-d']) as output:
if output:
self.context.log.error(output)
raise TaskError('Found style errors. Use `./pants fmt` to fix.')
|
python
|
import yaml
import torch
from torch import package
import sys
sys.path.append('../../')
import config
class Punctuation(object):
def __init__(self,
model_path=config.model_path_punctuation,
step=config.step_punctuation):
self.model_path = model_path
self.imp = package.PackageImporter(self.model_path)
self.model = self.imp.load_pickle("te_model", "model")
        self.step = step
def apply_te(self, text_val):
self.lan = "ru"
len_text = len(text_val.split())
if len_text > self.step:
temp_pred = ''
for i in range(0, len_text, self.step):
temp_text = self.model.enhance_text(' '.join(text_val.split()[i:i+self.step]), self.lan)[:-1] + ' '
temp_pred += temp_text[0].lower() + temp_text[1:]
self.text_with_punctuation = temp_pred
else:
self.text_with_punctuation = self.model.enhance_text(text_val, self.lan)
return self.text_with_punctuation
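# Illustrative usage sketch (assumes config.model_path_punctuation points at a packaged
# model exposing enhance_text(), as loaded above; the input string is an example only):
#   punct = Punctuation()
#   print(punct.apply_te("this is an unpunctuated transcript"))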
|
python
|
#! /usr/bin/env python
from __future__ import print_function
from FWCore.ParameterSet.pfnInPath import pfnInPath
import FWCore.ParameterSet.Config as cms
import sys
import os
import re
if os.getenv('LOCAL_TOP_DIR') is None:
print("The environment variable LOCAL_TOP_DIR must be set to run this script")
print("Usually setting it equal to the value of CMSSW_BASE will do what you want")
print("In the context of a unit test this variable is always set automatically")
sys.exit(1)
# get the list of XML files from the cfi file
process = cms.Process("TEST")
cfiFile = 'Geometry/CMSCommonData/cmsIdealGeometryXML_cfi'
if len(sys.argv) > 1:
cfiFile = sys.argv[1]
process.load(cfiFile)
xmlFiles = process.es_sources['XMLIdealGeometryESSource'].geomXMLFiles.value()
def callDOMCount(schemaPath, xmlPath):
xmlFilename = os.path.basename(xmlPath)
xmlFile = open(xmlPath, 'r')
tmpXMLFile = open(xmlFilename, 'w')
# Inside each XML file, there is a path to the schema file.
# We modify this path in a copy of the XML file for two reasons.
# The XML file might be in a package checked out in a working release
# area and the schema file might not be checked out or vice versa.
# This allows DOMCount to run in spite of that. The second reason
# is that the relative path is erroneous in many of the XML files
# and has to be fixed.
for line in xmlFile.readlines():
line = line.replace("../../../../../DetectorDescription/Schema/DDLSchema.xsd",schemaPath)
line = line.replace("../../../../DetectorDescription/Schema/DDLSchema.xsd",schemaPath)
line = line.replace("../../../DetectorDescription/Schema/DDLSchema.xsd",schemaPath)
line = line.replace("../../DetectorDescription/Schema/DDLSchema.xsd",schemaPath)
line = line.replace("../DetectorDescription/Schema/DDLSchema.xsd",schemaPath)
tmpXMLFile.write(line)
tmpXMLFile.close()
xmlFile.close()
# Run DOMCount
command = 'DOMCount -v=always -n -s -f %s' % (xmlFilename)
os.system ( command )
# Cleanup
os.system ("rm %s" % (xmlFilename))
# Find the schema file
schema = pfnInPath("DetectorDescription/Schema/DDLSchema.xsd").replace('file:','')
print("schema file is:")
print(schema)
sys.stdout.flush()
# Loop over the XML files listed in the cfi file and find them
# NOTE: Now that the files are in an external package, they will
# not be in a 'LOCAL_TOP_DIR'. Checking them for each IB may not
# be needed.
#
## for name in xmlFiles:
## fullpath = '%s/src/%s' % (os.environ['LOCAL_TOP_DIR'], name)
## if os.path.isfile(fullpath):
## callDOMCount(schema, fullpath)
## else:
## # It is an error if the file is not there but the package is
## packageDirectory = os.environ['LOCAL_TOP_DIR'] + '/src/' + re.split('/', name)[0] + '/' + re.split('/', name)[1]
## if os.path.isdir(packageDirectory):
## print 'Error, xml file not found:'
## print fullpath
## print 'Package is there but the xml file is not'
## sys.stdout.flush()
## continue
## # if there is a base release then try to find the file there
## fullpath = '%s/src/%s' % (os.getenv('CMSSW_RELEASE_BASE'), name)
## if os.path.isfile(fullpath):
## callDOMCount(schema, fullpath)
## else:
## print 'Error, xml file not found'
## print name
## sys.stdout.flush()
|
python
|
# Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from flask import g
from werkzeug.exceptions import Forbidden, Unauthorized
from warehouse import jwt
def test_required_decorator(app):
wrapper = jwt.jwt_required(lambda: None)
# Valid JWT raises no exception
g.jwt_valid = True
wrapper()
# Invalid JWT raises exception
g.jwt_valid = False
with pytest.raises(Unauthorized):
wrapper()
def test_invalid_access_level(app):
with pytest.raises(ValueError):
jwt.jwt_require_claim(1, "bogus")
def test_no_write_public_project(app):
g.jwt_claims = {"prj": {}}
with pytest.raises(Forbidden):
jwt.jwt_require_claim(None, "admin")
def test_insufficient_access_level(app):
g.jwt_claims = {"prj": {1: "read"}}
with pytest.raises(Forbidden):
jwt.jwt_require_claim(1, "write")
with pytest.raises(Forbidden):
jwt.jwt_require_claim(1, "admin")
g.jwt_claims = {"prj": {1: "write"}}
with pytest.raises(Forbidden):
jwt.jwt_require_claim(1, "admin")
def test_sufficient_access_level(app):
g.jwt_claims = {"prj": {1: "read"}}
jwt.jwt_require_claim(1, "read")
g.jwt_claims = {"prj": {1: "write"}}
jwt.jwt_require_claim(1, "read")
jwt.jwt_require_claim(1, "write")
g.jwt_claims = {"prj": {1: "admin"}}
jwt.jwt_require_claim(1, "read")
jwt.jwt_require_claim(1, "write")
jwt.jwt_require_claim(1, "admin")
def test_missing_access_level(app):
g.jwt_claims = {"prj": {1: "admin"}}
with pytest.raises(Forbidden):
jwt.jwt_require_claim(2, "admin")
|
python
|
#!/usr/bin/env python3
import unittest
import subprocess as sub
from astropy.time import Time
from bin import epics_fetch
class TestEPICSFetch(unittest.TestCase):
def test_known_date(self):
t = Time('2020-06-07T00:00', format='isot')
data = epics_fetch.get_data(['25m:mcp:cwPositions'], t.datetime,
(t-1).datetime)
epics_fetch._print_data(data, ["25m:mcp:cwPositions"])
def test_archive(self):
"""Checks to see if the directory for new data is available to this
computer"""
# This serves no purpose because simply importing the library is a pass
print(epics_fetch.telemetry)
return
def test_help(self):
""""Prints the help if -h is provided"""
sub.call('{} -h'.format(epics_fetch.__file__), shell=True)
if __name__ == '__main__':
unittest.main()
|
python
|
""" Customfield.
Do not edit this file by hand.
This is generated by parsing api.html service doc.
"""
from ambra_sdk.exceptions.service import AccountNotFound
from ambra_sdk.exceptions.service import FilterNotFound
from ambra_sdk.exceptions.service import InvalidCondition
from ambra_sdk.exceptions.service import InvalidDicomTag
from ambra_sdk.exceptions.service import InvalidDicomTagObject
from ambra_sdk.exceptions.service import InvalidField
from ambra_sdk.exceptions.service import InvalidHl7Field
from ambra_sdk.exceptions.service import InvalidHl7Object
from ambra_sdk.exceptions.service import InvalidHl7Segment
from ambra_sdk.exceptions.service import InvalidJson
from ambra_sdk.exceptions.service import InvalidObject
from ambra_sdk.exceptions.service import InvalidOptions
from ambra_sdk.exceptions.service import InvalidSearchSource
from ambra_sdk.exceptions.service import InvalidSortField
from ambra_sdk.exceptions.service import InvalidSortOrder
from ambra_sdk.exceptions.service import InvalidType
from ambra_sdk.exceptions.service import MissingFields
from ambra_sdk.exceptions.service import NoDicomTagDefined
from ambra_sdk.exceptions.service import NotASearch
from ambra_sdk.exceptions.service import NotFound
from ambra_sdk.exceptions.service import NotPermitted
from ambra_sdk.service.query import QueryO
from ambra_sdk.service.query import AsyncQueryO
from ambra_sdk.service.query import QueryOPSF
from ambra_sdk.service.query import AsyncQueryOPSF
class Customfield:
"""Customfield."""
def __init__(self, api):
self._api = api
def list(
self,
account_id,
):
"""List.
:param account_id: uuid of the account
"""
request_data = {
'account_id': account_id,
}
errors_mapping = {}
errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
query_data = {
'api': self._api,
'url': '/customfield/list',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
query_data['paginated_field'] = 'customfields'
return QueryOPSF(**query_data)
def add(
self,
account_id,
name,
object,
capture_on_destination_search=None,
capture_on_share_code=None,
dicom_only=None,
dicom_tag=None,
dicom_tag_ignore_empty=None,
display_order=None,
field_flag=None,
hl7_component=None,
hl7_field=None,
hl7_segment=None,
load_dicom_tag=None,
load_from_sr=None,
load_hl7=None,
load_hl7_filter=None,
load_order=None,
options=None,
other_customfield_id=None,
other_dicom_tags=None,
required=None,
type=None,
wrapped_dicom_only=None,
):
"""Add.
:param account_id: uuid of the account
:param name: Name of the customfield
:param object: The object to associate the customfield with (Study|User_account|Group|Location|Account|Patient|Case|Order|Appointment|Dicomdata|Scanner|Query)
:param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (only applicable to study fields) (optional)
:param capture_on_share_code: Flag if the field should be captured during a share code exchange (only applicable to study fields) (optional)
:param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
:param dicom_tag: DICOM tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
:param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
:param display_order: Integer to order how the fields should be displayed (optional)
:param field_flag: Default customfield flag (optional)
:param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
:param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
:param load_from_sr: Load the value from the structured reports in the study (only applicable to study fields) .(optional)
:param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
:param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
:param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
:param options: Additional options in JSON format (optional)
:param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
:param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
:param required: Flag if the field is required (optional)
:param type: Type of the custom field (text|number|date|memo|select|multiselect|radio|checkbox|search|bool) (optional)
:param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
"""
request_data = {
'account_id': account_id,
'capture_on_destination_search': capture_on_destination_search,
'capture_on_share_code': capture_on_share_code,
'dicom_only': dicom_only,
'dicom_tag': dicom_tag,
'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
'display_order': display_order,
'field_flag': field_flag,
'hl7_component': hl7_component,
'hl7_field': hl7_field,
'hl7_segment': hl7_segment,
'load_dicom_tag': load_dicom_tag,
'load_from_sr': load_from_sr,
'load_hl7': load_hl7,
'load_hl7_filter': load_hl7_filter,
'load_order': load_order,
'name': name,
'object': object,
'options': options,
'other_customfield_id': other_customfield_id,
'other_dicom_tags': other_dicom_tags,
'required': required,
'type': type,
'wrapped_dicom_only': wrapped_dicom_only,
}
errors_mapping = {}
errors_mapping[('ACCOUNT_NOT_FOUND', None)] = AccountNotFound('The account can not be found')
errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('INVALID_OBJECT', None)] = InvalidObject('An invalid object was passed.')
errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
errors_mapping[('INVALID_SEARCH_SOURCE', None)] = InvalidSearchSource('An invalid search source was passed.')
errors_mapping[('INVALID_TYPE', None)] = InvalidType('An invalid type was passed.')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The Customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a customfield to this account')
errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
query_data = {
'api': self._api,
'url': '/customfield/add',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def set(
self,
uuid,
capture_on_destination_search=None,
capture_on_share_code=None,
dicom_only=None,
dicom_tag=None,
dicom_tag_ignore_empty=None,
display_order=None,
field_flag=None,
hl7_component=None,
hl7_field=None,
hl7_segment=None,
load_dicom_tag=None,
load_from_sr=None,
load_hl7=None,
load_hl7_filter=None,
load_order=None,
name=None,
options=None,
other_customfield_id=None,
other_dicom_tags=None,
required=None,
wrapped_dicom_only=None,
):
"""Set.
:param uuid: uuid of the customfield
:param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (optional)
:param capture_on_share_code: Flag if the study type field should be captured during a share code exchange (optional)
:param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
:param dicom_tag: Dicom tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
:param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
:param display_order: Integer to order how the fields should be displayed (optional)
:param field_flag: Default customfield flag (optional)
:param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
:param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
:param load_from_sr: Load the value from the structured reports in the study. (only applicable to study fields) .(optional)
:param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
:param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
:param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
:param name: Name of the customfield (optional)
:param options: Additional options in JSON format (optional)
:param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
:param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
:param required: Flag if the field is required (optional)
:param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
"""
request_data = {
'capture_on_destination_search': capture_on_destination_search,
'capture_on_share_code': capture_on_share_code,
'dicom_only': dicom_only,
'dicom_tag': dicom_tag,
'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
'display_order': display_order,
'field_flag': field_flag,
'hl7_component': hl7_component,
'hl7_field': hl7_field,
'hl7_segment': hl7_segment,
'load_dicom_tag': load_dicom_tag,
'load_from_sr': load_from_sr,
'load_hl7': load_hl7,
'load_hl7_filter': load_hl7_filter,
'load_order': load_order,
'name': name,
'options': options,
'other_customfield_id': other_customfield_id,
'other_dicom_tags': other_dicom_tags,
'required': required,
'uuid': uuid,
'wrapped_dicom_only': wrapped_dicom_only,
}
errors_mapping = {}
errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
errors_mapping[('INVALID_HL7_FIELD', None)] = InvalidHl7Field('Invalid field number')
errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The object was not found. The error_subtype holds the name of the key for the object that can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit the customfield')
errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
query_data = {
'api': self._api,
'url': '/customfield/set',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def get(
self,
uuid,
):
"""Get.
:param uuid: uuid of the customfield
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view the customfield')
query_data = {
'api': self._api,
'url': '/customfield/get',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def delete(
self,
uuid,
):
"""Delete.
:param uuid: uuid of the customfield
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to delete the customfield')
query_data = {
'api': self._api,
'url': '/customfield/delete',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def lookup(
self,
account_id,
name,
):
"""Lookup.
:param account_id: uuid of the account
:param name: Name of the customfield
"""
request_data = {
'account_id': account_id,
'name': name,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/customfield/lookup',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def search(
self,
uuid,
search=None,
):
"""Search.
:param uuid: uuid of the customfield
:param search: The value to search for (optional)
"""
request_data = {
'search': search,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_A_SEARCH', None)] = NotASearch('This is not a search type of customfield')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/customfield/search',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
class AsyncCustomfield:
"""AsyncCustomfield."""
def __init__(self, api):
self._api = api
def list(
self,
account_id,
):
"""List.
:param account_id: uuid of the account
"""
request_data = {
'account_id': account_id,
}
errors_mapping = {}
errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
query_data = {
'api': self._api,
'url': '/customfield/list',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
query_data['paginated_field'] = 'customfields'
return AsyncQueryOPSF(**query_data)
def add(
self,
account_id,
name,
object,
capture_on_destination_search=None,
capture_on_share_code=None,
dicom_only=None,
dicom_tag=None,
dicom_tag_ignore_empty=None,
display_order=None,
field_flag=None,
hl7_component=None,
hl7_field=None,
hl7_segment=None,
load_dicom_tag=None,
load_from_sr=None,
load_hl7=None,
load_hl7_filter=None,
load_order=None,
options=None,
other_customfield_id=None,
other_dicom_tags=None,
required=None,
type=None,
wrapped_dicom_only=None,
):
"""Add.
:param account_id: uuid of the account
:param name: Name of the customfield
:param object: The object to associate the customfield with (Study|User_account|Group|Location|Account|Patient|Case|Order|Appointment|Dicomdata|Scanner|Query)
:param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (only applicable to study fields) (optional)
:param capture_on_share_code: Flag if the field should be captured during a share code exchange (only applicable to study fields) (optional)
:param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
:param dicom_tag: DICOM tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
:param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
:param display_order: Integer to order how the fields should be displayed (optional)
:param field_flag: Default customfield flag (optional)
:param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
:param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
:param load_from_sr: Load the value from the structured reports in the study (only applicable to study fields) .(optional)
:param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
:param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
:param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
:param options: Additional options in JSON format (optional)
:param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
:param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
:param required: Flag if the field is required (optional)
:param type: Type of the custom field (text|number|date|memo|select|multiselect|radio|checkbox|search|bool) (optional)
:param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
"""
request_data = {
'account_id': account_id,
'capture_on_destination_search': capture_on_destination_search,
'capture_on_share_code': capture_on_share_code,
'dicom_only': dicom_only,
'dicom_tag': dicom_tag,
'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
'display_order': display_order,
'field_flag': field_flag,
'hl7_component': hl7_component,
'hl7_field': hl7_field,
'hl7_segment': hl7_segment,
'load_dicom_tag': load_dicom_tag,
'load_from_sr': load_from_sr,
'load_hl7': load_hl7,
'load_hl7_filter': load_hl7_filter,
'load_order': load_order,
'name': name,
'object': object,
'options': options,
'other_customfield_id': other_customfield_id,
'other_dicom_tags': other_dicom_tags,
'required': required,
'type': type,
'wrapped_dicom_only': wrapped_dicom_only,
}
errors_mapping = {}
errors_mapping[('ACCOUNT_NOT_FOUND', None)] = AccountNotFound('The account can not be found')
errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('INVALID_OBJECT', None)] = InvalidObject('An invalid object was passed.')
errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
errors_mapping[('INVALID_SEARCH_SOURCE', None)] = InvalidSearchSource('An invalid search source was passed.')
errors_mapping[('INVALID_TYPE', None)] = InvalidType('An invalid type was passed.')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The Customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a customfield to this account')
errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
query_data = {
'api': self._api,
'url': '/customfield/add',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def set(
self,
uuid,
capture_on_destination_search=None,
capture_on_share_code=None,
dicom_only=None,
dicom_tag=None,
dicom_tag_ignore_empty=None,
display_order=None,
field_flag=None,
hl7_component=None,
hl7_field=None,
hl7_segment=None,
load_dicom_tag=None,
load_from_sr=None,
load_hl7=None,
load_hl7_filter=None,
load_order=None,
name=None,
options=None,
other_customfield_id=None,
other_dicom_tags=None,
required=None,
wrapped_dicom_only=None,
):
"""Set.
:param uuid: uuid of the customfield
:param capture_on_destination_search: Flag if the field should be captured during query retrieve on /destination/search call (optional)
:param capture_on_share_code: Flag if the study type field should be captured during a share code exchange (optional)
:param dicom_only: Only capture for non-wrapped DICOM uploads during a share code exchange (optional)
:param dicom_tag: Dicom tag to map this field to. Format should be of form (1234,1234). (only applicable to study fields) (optional)
:param dicom_tag_ignore_empty: Flag to not map an empty custom field to the DICOM tag. (only applicable if a dicom_tag is specified) (optional)
:param display_order: Integer to order how the fields should be displayed (optional)
:param field_flag: Default customfield flag (optional)
:param hl7_component: Component number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_field: Segment field number to map this field to in HL7 ORM messages. Valid values are 1 to 64. (only applicable to study fields) (optional)
:param hl7_segment: Segment to map this field to in HL7 ORM messages. Valid values are (NTE|PID|PID1|PV1|PV2|OBR|DG1|OBX|CTI|BLG|ORC) (only applicable to study fields) (optional)
:param load_dicom_tag: Flag to load the current value from the study into this field. (only applicable if a dicom_tag is specified) (optional)
:param load_from_sr: Load the value from the structured reports in the study. (only applicable to study fields) .(optional)
:param load_hl7: If this is set to a HL7 message type the value of this field will be updated from the hl7_segment, hl7_field and hl7_component from incoming HL7 messages of the matching message type (only applicable to study fields) (optional)
:param load_hl7_filter: Filter token for the load_hl7 option (only applicable to study fields) (optional)
:param load_order: If this flag is on the value of this field will be loaded from a customfield of the matching Order. The customfield is defined by the other_customfield_id parameter (optional)
:param name: Name of the customfield (optional)
:param options: Additional options in JSON format (optional)
:param other_customfield_id: Id of a customfield to map its value to this customfield's value (optional)
:param other_dicom_tags: JSON array of other DICOM tags to map this field to. (only applicable to study fields) (optional)
:param required: Flag if the field is required (optional)
:param wrapped_dicom_only: Only capture for wrapped DICOM uploads during a share code exchange (optional)
"""
request_data = {
'capture_on_destination_search': capture_on_destination_search,
'capture_on_share_code': capture_on_share_code,
'dicom_only': dicom_only,
'dicom_tag': dicom_tag,
'dicom_tag_ignore_empty': dicom_tag_ignore_empty,
'display_order': display_order,
'field_flag': field_flag,
'hl7_component': hl7_component,
'hl7_field': hl7_field,
'hl7_segment': hl7_segment,
'load_dicom_tag': load_dicom_tag,
'load_from_sr': load_from_sr,
'load_hl7': load_hl7,
'load_hl7_filter': load_hl7_filter,
'load_order': load_order,
'name': name,
'options': options,
'other_customfield_id': other_customfield_id,
'other_dicom_tags': other_dicom_tags,
'required': required,
'uuid': uuid,
'wrapped_dicom_only': wrapped_dicom_only,
}
errors_mapping = {}
errors_mapping[('INVALID_DICOM_TAG', None)] = InvalidDicomTag('The DICOM tag is invalid')
errors_mapping[('INVALID_DICOM_TAG_OBJECT', None)] = InvalidDicomTagObject('DICOM tags can only be applied to study fields')
errors_mapping[('INVALID_HL7_FIELD', None)] = InvalidHl7Field('Invalid field number')
errors_mapping[('INVALID_HL7_OBJECT', None)] = InvalidHl7Object('HL7 fields can only be applied to study fields')
errors_mapping[('INVALID_HL7_SEGMENT', None)] = InvalidHl7Segment('Invalid segment name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('INVALID_OPTIONS', None)] = InvalidOptions('An option is invalid. The error_subtype holds the specific error message')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The object was not found. The error_subtype holds the name of the key for the object that can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit the customfield')
errors_mapping[('NO_DICOM_TAG_DEFINED', None)] = NoDicomTagDefined('The load_dicom_tag flag is set but the dicom_tag field is not defined')
query_data = {
'api': self._api,
'url': '/customfield/set',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def get(
self,
uuid,
):
"""Get.
:param uuid: uuid of the customfield
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view the customfield')
query_data = {
'api': self._api,
'url': '/customfield/get',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def delete(
self,
uuid,
):
"""Delete.
:param uuid: uuid of the customfield
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to delete the customfield')
query_data = {
'api': self._api,
'url': '/customfield/delete',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def lookup(
self,
account_id,
name,
):
"""Lookup.
:param account_id: uuid of the account
:param name: Name of the customfield
"""
request_data = {
'account_id': account_id,
'name': name,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/customfield/lookup',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def search(
self,
uuid,
search=None,
):
"""Search.
:param uuid: uuid of the customfield
:param search: The value to search for (optional)
"""
request_data = {
'search': search,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_A_SEARCH', None)] = NotASearch('This is not a search type of customfield')
errors_mapping[('NOT_FOUND', None)] = NotFound('The customfield can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/customfield/search',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
|
python
|
from adafruit_servokit import ServoKit
from dcservo import DogCamServoBase
# Don't export ServoLib
__all__ = ("DogCamServoAda")
# Bring in global instance
ServoLib = ServoKit(channels=16)
class DogCamServoAda(DogCamServoBase):
def __init__(self, InName, InPin, ZeroAngle=0.0, Steps=1.0, LowerBounds=0.0, UpperBounds=180.0, PulseWidthMin=1000, PulseWidthMax=2000):
ServoLib.servo[InPin].actuation_range = UpperBounds
ServoLib.servo[InPin].set_pulse_width_range(PulseWidthMin, PulseWidthMax)
super().__init__(InName, InPin, InZeroAngle=ZeroAngle, InSteps=Steps, InLowerBounds=LowerBounds, InUpperBounds=UpperBounds)
def _MoveToPosition(self, angle):
print(f"{self.Name}: Moving to position {angle}")
try:
ServoLib.servo[self.Pin].angle = angle
except Exception as ex:
print(f"{self.Name}: Could not move position to {angle}!\n{ex}")
|
python
|
'''
code by Tae Hwan Jung(Jeff Jung) @graykode
'''
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.reset_default_graph()
# 3 Words Sentence
sentences = [ "i like dog", "i like cat", "i like animal",
"dog cat animal", "apple cat dog like", "dog fish milk like",
"dog cat eyes like", "i like apple", "apple i hate",
"apple i movie book music like", "cat dog hate", "cat dog like"]
word_sequence = " ".join(sentences).split() #string
word_list = " ".join(sentences).split()
word_list = list(set(word_list))#去重的list
word_dict = {w: i for i, w in enumerate(word_list)}#字典
# Word2Vec Parameter
batch_size = 20
embedding_size = 2 # To show 2 dim embedding graph
voc_size = len(word_list)
def random_batch(data, size):
random_inputs = []
random_labels = []
random_index = np.random.choice(range(len(data)), size, replace=False)
for i in random_index:
random_inputs.append(np.eye(voc_size)[data[i][0]]) # target
random_labels.append(np.eye(voc_size)[data[i][1]]) # context word
return random_inputs, random_labels
# Make skip gram of one size window
skip_grams = []
for i in range(1, len(word_sequence) - 1):
    target = word_dict[word_sequence[i]]  # index of the center word
    context = [word_dict[word_sequence[i - 1]], word_dict[word_sequence[i + 1]]]  # indices of the left/right context words
    for w in context:
        skip_grams.append([target, w])  # pair the center word with each context word
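# For example, with word_sequence starting "i like dog ...", the first window (i=1) takes
# "like" as the target and adds [word_dict["like"], word_dict["i"]] and
# [word_dict["like"], word_dict["dog"]] to skip_grams.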
# Model
inputs = tf.placeholder(tf.float32, shape=[None, voc_size])  # [batch_size, voc_size] one-hot target words
labels = tf.placeholder(tf.float32, shape=[None, voc_size])  # [batch_size, voc_size] one-hot context words
# W and WT is not Traspose relationship
W = tf.Variable(tf.random_uniform([voc_size, embedding_size], -1.0, 1.0))  # [voc_size, embedding_size]
WT = tf.Variable(tf.random_uniform([embedding_size, voc_size], -1.0, 1.0))
hidden_layer = tf.matmul(inputs, W)  # [batch_size, embedding_size]
output_layer = tf.matmul(hidden_layer, WT)  # [batch_size, voc_size]
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output_layer, labels=labels))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)  # 0.001 is the learning rate
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(5000):
batch_inputs, batch_labels = random_batch(skip_grams, batch_size)
_, loss = sess.run([optimizer, cost], feed_dict={inputs: batch_inputs, labels: batch_labels})
if (epoch + 1)%1000 == 0:
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
trained_embeddings = W.eval()
for i, label in enumerate(word_list):
x, y = trained_embeddings[i]
plt.scatter(x, y)
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.show()
|
python
|
import ply.lex as lex
import ply.yacc as yacc
KEYWORDS = ("run", "load", "save", "insert", "clear", "quit", "exit")
PARAMS = ("topology", "width", "height")
DOMAINS = ("'KleinBottle'", "'MoebiusBand'", "'Torus'", "'Cylinder'", "'Plane'")
class Parser:
"""
Base class for a lexer/parser that has the rules defined as methods
"""
tokens = ()
precedence = ()
def __init__(self, game_instance, **kw):
self.names = {}
self.game_instance = game_instance
# Build the lexer and parser
lex.lex(module=self)
yacc.yacc(module=self)
def parse(self, s):
yacc.parse(s)
class GameParser(Parser):
"""
    This class is a parser for the game's control/config language. It is an
    adaptation of David Beazley's classcalc example contained in PLY, hence an
    elementary calculator is also included :)
"""
tokens = (
'NAME', 'NUMBER',
'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
'LPAREN', 'RPAREN', 'PARAM', 'KEY', 'STRING'
)
# Reserved words
reserved = dict(((k, 'PARAM') for k in PARAMS), **{k: 'KEY' for k in KEYWORDS})
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_EXP = r'\*\*'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_STRING = r'\'[a-zA-Z_]*\''
def t_NAME(self, t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
t.type = GameParser.reserved.get(t.value, 'NAME')
return t
def t_NUMBER(self, t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Parsing rules
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('left', 'EXP'),
('right', 'UMINUS'),
)
def p_statement_setparam(self, p):
"statement : PARAM expression"
try:
setattr(self.game_instance, p[1], p[2])
except Exception as e:
print(e)
def p_statement_keyword_arg(self, p):
"statement : KEY expression"
try:
getattr(self.game_instance, p[1])(p[2])
except Exception as e:
print(e)
def p_statement_keyword_noarg(self, p):
"statement : KEY"
try:
getattr(self.game_instance, p[1])()
except Exception as e:
print(e)
def p_statement_assign(self, p):
'statement : NAME EQUALS expression'
self.names[p[1]] = p[3]
def p_statement_expr(self, p):
'statement : expression'
print(p[1])
def p_expression_binop(self, p):
"""
expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression
| expression EXP expression
"""
if p[2] == '+':
p[0] = p[1] + p[3]
elif p[2] == '-':
p[0] = p[1] - p[3]
elif p[2] == '*':
p[0] = p[1] * p[3]
elif p[2] == '/':
p[0] = p[1] / p[3]
elif p[2] == '**':
p[0] = p[1] ** p[3]
def p_expression_uminus(self, p):
'expression : MINUS expression %prec UMINUS'
p[0] = -p[2]
def p_expression_group(self, p):
'expression : LPAREN expression RPAREN'
p[0] = p[2]
def p_expression_number(self, p):
'expression : NUMBER'
p[0] = p[1]
def p_expression_name(self, p):
'expression : NAME'
try:
p[0] = self.names[p[1]]
except LookupError:
print("Undefined name '%s'" % p[1])
p[0] = 0
def p_expression_string(self, p):
'expression : STRING'
p[0] = p[1].strip("'")
def p_error(self, p):
if p:
print("Syntax error at '%s'" % p.value)
else:
print("Syntax error at EOF")
if __name__ == '__main__':
    # No game instance is attached here; exercise the built-in calculator rules instead.
    p = GameParser(None)
    p.parse("2 + 3 * 4")
|
python
|
"""
Images should have the shape b x c x h x w.
Masks attach an alpha channel with masking values in the range [0, 1], which can
be consumed by other augmentation layers. Masks themselves consume alpha
channels by multiplying the old with the new.
"""
import math
import torch
import torch.fft
from torch import Tensor
def to_tensor(x):
return torch.tensor(x) if not isinstance(x, Tensor) else x
def _attach(image, mask):
b, c, h, w = image.shape
mask = mask.expand(b,1,h,w)
mask = mask.to(image.device)
if c == 3:
mask = mask.to(image.dtype)
return torch.cat([image, mask],1)
elif c == 4:
image[:,3,...] *= mask
return image
def detach(image):
return image[:,:3,:,:], image[:,3:,:,:]
def cutout(image, size):
b, c, h, w = image.shape
size_h, size_w = size
size_h = to_tensor(size_h).to(torch.int64).to(image.device).view(-1,1,1,1)
size_w = to_tensor(size_w).to(torch.int64).to(image.device).view(-1,1,1,1)
center_h = torch.randint(h, (b,1,1,1), device=image.device)
center_w = torch.randint(w, (b,1,1,1), device=image.device)
mask_h = torch.arange(h, device=image.device).view(1,1,-1,1)
mask_w = torch.arange(w, device=image.device).view(1,1,1,-1)
mask = (center_h - size_h <= mask_h) & (mask_h < center_h + size_h) \
& (center_w - size_w <= mask_w) & (mask_w < center_w + size_w)
return _attach(image, mask)
def random_pixel(image, lam=0.5, kernel=1):
b, c, h, w = image.shape
h_ = h // kernel + (h % kernel != 0)
w_ = w // kernel + (w % kernel != 0)
rand = torch.rand([b,1,h_,w_], device=image.device)
rand = rand.repeat_interleave(kernel, dim=2)
rand = rand.repeat_interleave(kernel, dim=3)
rand = rand[:,:,:h,:w]
lam = to_tensor(lam).view(-1,1,1,1)
return _attach(image, rand <= lam)
def random_row(image, lam=0.5, kernel=1):
b, c, h, w = image.shape
h_ = h // kernel + (h % kernel != 0)
rand = torch.rand([b,1,h_,1], device=image.device)
rand = rand.repeat_interleave(kernel, dim=2)
rand = rand.expand(-1,-1,-1,w)[:,:,:h,:]
lam = to_tensor(lam).view(-1,1,1,1)
return _attach(image, rand <= lam)
def random_col(image, lam=0.5, kernel=1):
    b, c, h, w = image.shape
    w_ = w // kernel + (w % kernel != 0)
    rand = torch.rand([b,1,1,w_], device=image.device)
    # mirror random_row: repeat each random value across the kernel width before cropping
    rand = rand.repeat_interleave(kernel, dim=3)
    rand = rand.expand(-1,-1,h,-1)[:,:,:,:w]
    lam = to_tensor(lam).view(-1,1,1,1)
    return _attach(image, rand <= lam)
def random_block(image, size=[50,50], lam=None):
b, c, h, w = image.shape
device = image.device
if lam is not None:
sqrt_lam = torch.sqrt(lam)
size = (h * sqrt_lam, w * sqrt_lam)
if size == [h,w] or all(s == [h,w] for s in size):
return _attach(image, torch.ones(b,1,h,w))
size_h, size_w = size
size_h = to_tensor(size_h).to(torch.int64).to(device).view(-1,1,1,1)
size_w = to_tensor(size_w).to(torch.int64).to(device).view(-1,1,1,1)
rand_h = torch.floor(torch.rand([b,1,1,1], device=device) * (h - size_h + 1))
rand_w = torch.floor(torch.rand([b,1,1,1], device=device) * (w - size_w + 1))
mask_h = torch.arange(h, device=device).view(1,1,-1,1).expand(b,-1,-1,-1)
mask_w = torch.arange(w, device=device).view(1,1,1,-1).expand(b,-1,-1,-1)
mask = (rand_h <= mask_h) & (mask_h < rand_h + size_h) \
& (rand_w <= mask_w) & (mask_w < rand_w + size_w)
return _attach(image, mask)
def random_row_strip(image, **kwargs):
return random_strip(image, 2, **kwargs)
def random_col_strip(image, **kwargs):
return random_strip(image, 3, **kwargs)
def random_strip(image, dim, size=50, lam=None):
b, c = image.shape[:2]
d = image.shape[dim]
device = image.device
if lam is not None:
size = d * lam
size = to_tensor(size).to(device).view(-1,1,1,1)
start = torch.rand([b,1,1,1], device=device) * (d - size)
index = torch.arange(d, device=device).view(1,1,1,d)
mask = (start <= index) & (index < start + size)
mask = mask.transpose(-1,dim)
return _attach(image, mask)
def time(image, lam=1.0):
size = lam * image.shape[-1]
return specaugment(image, size, -1)
def frequency(image, lam=1.0):
size = lam * image.shape[-2]
return specaugment(image, size, -2)
def specaugment(image, size, dim):
b = image.shape[0]
d = image.shape[dim]
size = to_tensor(size).view(-1,1,1,1)
width = torch.rand([b,1,1,1]) * size
start = torch.rand([b,1,1,1]) * (d - width)
mask = torch.arange(0,d).view([1,1,1,-1])
mask = (start <= mask) & (mask < start + width)
mask = mask.transpose(-1,dim)
return _attach(image, mask)
def fmix(image, lam=None, decay=3.0):
b, c, h, w = image.shape
mask = low_freq_mask([b,1,h,w], decay)
mask = binarise_mask(mask, lam)
return _attach(image, mask)
def fftfreq(n, d=1.0, device='cpu'):
"""DFT sample frequency
"""
s = (n - 1) // 2 + 1
results = torch.empty(n, device=device)
results[:s] = torch.arange(0, s, device=device)
results[s:] = torch.arange(-(n // 2), 0, device=device)
return results * (1.0 / (n * d))
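# For example, fftfreq(4) -> tensor([0.0000, 0.2500, -0.5000, -0.2500]),
# matching numpy.fft.fftfreq(4).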
def fftfreq2(h, w, device='cpu'):
"""Magnitude of 2d sample frequency
"""
fy = fftfreq(h, device=device)
fy = fy.unsqueeze(-1)
if w % 2 == 1:
fx = fftfreq(w, device=device)
fx = fx[: w // 2 + 2]
else:
fx = fftfreq(w, device=device)
fx = fx[: w // 2 + 1]
return torch.sqrt(fx * fx + fy * fy)
def get_spectrum(shape, decay, device='cpu'):
b, c, h, w = shape
cap = torch.tensor(1.0 / max(h,w), device=device)
freqs = fftfreq2(h, w, device=device)
freqs = torch.maximum(freqs, cap)
h, w = freqs.shape
scale = 1.0 / (freqs ** decay).view(1,1,h,w,1)
    spec = scale * torch.randn([b,c,h,w,2], device=device)
return spec[...,0] + spec[...,1] * 1j
def low_freq_mask(shape, decay):
h, w = shape[-2:]
spec = get_spectrum(shape, decay)
mask = torch.fft.ifftn(spec, s=(h,w)).real
lo = mask.flatten(2).min(-1)[0]
hi = mask.flatten(2).max(-1)[0]
lo = lo.view(shape[0],1,1,1)
hi = hi.view(shape[0],1,1,1)
return (mask - lo) / (hi - lo)
def binarise_mask(mask, lam):
shape = mask.shape
mask = mask.flatten(1)
index = mask.argsort(-1, descending=True)
if torch.rand(1) < 0.5:
cutoff = torch.ceil(lam * mask.shape[-1])
else:
cutoff = torch.floor(lam * mask.shape[-1])
cutoff = cutoff.to(torch.int64)
for msk, idx, cut in zip(mask, index, cutoff):
msk[idx[:cut]] = 1
msk[idx[cut:]] = 0
return mask.view(shape)
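# A minimal usage sketch of the FMix-style helpers above (hedged: `image` is a
# hypothetical batch tensor and, as the per-sample loop in binarise_mask requires,
# `lam` must hold one value per batch element):
#
#   image = torch.rand(4, 3, 32, 32)
#   soft = low_freq_mask([4, 1, 32, 32], decay=3.0)        # soft mask in [0, 1]
#   hard = binarise_mask(soft, torch.full((4,), 0.5))      # keep ~50% of pixels per image
#   mixed = image * hard + image.flip(0) * (1 - hard)      # one possible mixing step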
|
python
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
PAD_WORD_ID = 0
UNK_WORD_ID = 1
END_WORD_ID = 2
PAD_CHAR = 261
BOW_CHAR = 259
EOW_CHAR = 260
|
python
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
file_1 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_1 = np.load(open(file_1, 'rb'))
file_2 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_and_inat/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_2 = np.load(open(file_2, 'rb'))
file_3 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_no_deer/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_3 = np.load(open(file_3, 'rb'))
file_4 = '/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_no_deer_and_inat/predictions/eccv_train_per_cat_prec_recall_data.npz'
data_4 = np.load(open(file_4, 'rb'))
ap = data_1['ap'].tolist()
cat_id_to_cat = data_1['cat_id_to_cat'].tolist()
cat_ids = [i for i in ap if not np.isnan(ap[i])]
print(cat_ids)
N = len(cat_ids)
ind = np.arange(N)
width = 0.15
fig = plt.figure()
ax = fig.add_subplot(111)
aps = [ap[i] for i in cat_ids]
print(aps)
print(len(ind),len(aps))
rects1 = ax.bar(ind, aps, width, color='royalblue')
ap = data_2['ap'].tolist()
rects2 = ax.bar(ind+width, [ap[i] for i in cat_ids], width, color='seagreen')
ap = data_3['ap'].tolist()
rects3 = ax.bar(ind+width*2, [ap[i] for i in cat_ids], width, color='red')
ap = data_4['ap'].tolist()
rects4 = ax.bar(ind+width*3, [ap[i] for i in cat_ids], width, color='orange')
ax.set_ylabel('mAP per class')
ax.set_title('mAP per class with and without iNat and deer-like animals')
ax.set_xticks(ind + 3*width / 2)
ax.set_xticklabels([cat_id_to_cat[i] for i in cat_ids])
plt.xticks(rotation=90)
ax.legend((rects1[0],rects2[0], rects3[0], rects4[0]),('w/deer, w/o iNat','w/ deer, w/ iNat','w/o deer, w/o iNat','w/o deer, w/iNat'), loc='lower center')
plt.tight_layout()
plt.savefig('/ai4efs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/train_on_ss_no_deer_and_inat/predictions/compare_per_seq_mAP_w_deer_and_no_deer.jpg')
|
python
|
from pathlib import Path
import pytest
import git
import json
import os
from conftest import TEST_DIR
from masonry import main
from cookiecutter.exceptions import FailedHookException, UndefinedVariableInTemplate
@pytest.fixture(scope='module')
def init_simple_project(tmpdir_factory):
# Setup a basic project
temp_output_path = Path(tmpdir_factory.mktemp('simple_project').strpath)
template_path = TEST_DIR / 'example_templates' / 'breaking_project'
# Set arguments
args = f"init -o {temp_output_path} {template_path}"
from masonry import main
# Run from entry point
main.main(args=args)
cookiecutter_vars_path = os.path.join(template_path, "first_layer", "cookiecutter.json")
with open(cookiecutter_vars_path, 'r') as f:
cookiecutter_vars = json.load(f)
project_name = cookiecutter_vars['project_name']
project_dir = temp_output_path / project_name
return project_dir
def test_rollback_when_error_in_pre_hook(init_simple_project):
# GIVEN an initialised project
project_dir = init_simple_project
# WHEN a template is added that causes an error
args = f"add -o {project_dir} breaking_pre_hook"
with pytest.raises(FailedHookException):
main.main(args=args)
# THEN only the original files should be present
target = set([
project_dir / 'file_from_layer_1.txt',
project_dir / '.mason',
project_dir / '.git',
])
result = set(project_dir.iterdir())
assert result == target
# THEN original file should be unchanged
target = '123456'
result_file = project_dir / 'file_from_layer_1.txt'
result = result_file.read_text()
assert result == target
def test_rollback_when_error_in_post_hook(init_simple_project):
# GIVEN an initialised project
project_dir = init_simple_project
# WHEN a template is added that causes an error
args = f"add -o {project_dir} breaking_post_hook"
with pytest.raises(FailedHookException):
main.main(args=args)
# THEN only the original files should be present
target = set([
project_dir / 'file_from_layer_1.txt',
project_dir / '.mason',
project_dir / '.git',
])
result = set(project_dir.iterdir())
assert result == target
# THEN original file should be unchanged
target = '123456'
result_file = project_dir / 'file_from_layer_1.txt'
result = result_file.read_text()
assert result == target
def test_rollback_when_error_in_variable_name(init_simple_project):
# GIVEN an initialised project
project_dir = init_simple_project
# WHEN a template is added that causes an error
args = f"add -o {project_dir} breaking_variable_name"
with pytest.raises(UndefinedVariableInTemplate):
main.main(args=args)
# THEN only the original files should be present
target = set([
project_dir / 'file_from_layer_1.txt',
project_dir / '.mason',
project_dir / '.git',
])
result = set(project_dir.iterdir())
assert result == target
# THEN original file should be unchanged
target = '123456'
result_file = project_dir / 'file_from_layer_1.txt'
result = result_file.read_text()
assert result == target
def test_rollback_when_init_project(tmpdir_factory):
# GIVEN a temp directory and template to initialise
temp_output_path = Path(tmpdir_factory.mktemp('empty_project').strpath)
template_path = TEST_DIR / 'example_templates' / 'breaking_project'
# WHEN a new project is initialised that causes an error
args = f"init -o {temp_output_path} {template_path}/breaking_variable_name"
with pytest.raises(UndefinedVariableInTemplate):
main.main(args=args)
# THEN the directory should be empty
target = set([])
result = set(temp_output_path.iterdir())
assert result == target
|
python
|
# standard
import os
# BASE DIRECTORY
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# HEARTBEAT
HEARTBEAT = 10 * 1000
# INTERNET
INTERNET = {
'address': '1.1.1.1',
'port': 53,
'timeout': 3,
'interval': 5 * 1000
}
# MODULES
MODULES = ('fb', 'synker')
MODULES_DIR = 'src.modules'
MODULES_CONVENTION = 'title'
MODULES_SETTINGS = {
'fb': {
'interval': 60,
'instance': 'localhost',
'user': 'root',
'password': 's3cret',
'temp': '/tmp',
'dest': ''
},
'synker': {
'interval': 30,
'localdir': '',
'pattern': '*',
'clouddir': '/backup',
'limit': 0,
'token': ''
}
}
# CONFIG
CONFIG_FILENAME = 'settings.ini'
CONFIG_FILEPATH = os.path.join(BASE_DIR, CONFIG_FILENAME)
CONFIG_DEFAULT = {**MODULES_SETTINGS}
# LOG
LOG_LEVEL = 'DEBUG'
LOG_FILENAME = 'log/logs.log'
LOG_FILEPATH = os.path.join(BASE_DIR, LOG_FILENAME)
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# TRANSLATION
LANGUAGES = (
('en', 'English'),
('fa', 'Persian')
)
LANG_CODE = 'fa'
TRANSLATION_DOMAIN = 'mb'
LOCALE_DIRNAME = 'locale'
LOCALE_DIRPATH = os.path.join(BASE_DIR, LOCALE_DIRNAME)
|
python
|
from django.db import models
# Create your models here.
class douban_top250(models.Model):
serial_number=models.IntegerField()
movie_name=models.CharField(max_length=255)
introduce=models.CharField(max_length=255)
    star=models.FloatField()
evaluate=models.CharField(max_length=255)
describe=models.CharField(max_length=255)
datetime=models.DateTimeField(auto_now=True)
def __str__(self):
return self.movie_name
|
python
|
#!/usr/bin/env python3
import os
import math
import sys
from abc import abstractmethod
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils
import aardvark
import cv2
from tf_utils import *
import cpp
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('rpn_priors', 'rpn_priors', 'param prior config file')
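# A hypothetical 'rpn_priors' file, as parsed by BasicRPN3D.__init__ below
# (one prior per line, FLAGS.rpn_params space-separated floats, '#' lines ignored):
#
#   # z y x
#   1.0 1.0 1.0
#   2.5 2.5 2.5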
flags.DEFINE_integer('rpn_params', 3, 'number of parameters per shape')
flags.DEFINE_integer('rpn_stride', 1, 'downsize factor of rpn output')
flags.DEFINE_float('rpn_logits_weight', 1.0, 'loss weight')
flags.DEFINE_float('rpn_params_weight', 1.0, 'loss weight')
class BasicRPN3D:
def __init__ (self):
priors = []
# read in priors
# what RPN estimates is the delta between priors and the real
# regression target.
if os.path.exists(FLAGS.rpn_priors):
with open(FLAGS.rpn_priors, 'r') as f:
for l in f:
if l[0] == '#':
continue
vs = [float(v) for v in l.strip().split(' ')]
assert len(vs) == FLAGS.rpn_params
priors.append(vs)
pass
pass
pass
if len(priors) == 0:
priors.append([1.0] * FLAGS.rpn_params)
pass
aardvark.print_red("PRIORS %s" % str(priors))
self.priors = np.array(priors, dtype=np.float32)
pass
def rpn_backbone (self, volume, is_training, stride):
assert False
def rpn_logits (self, net, is_training, channels):
assert False
def rpn_params (self, net, is_training, channels):
assert False
def rpn_generate_shapes (self, shape, anchor_params, priors, n_priors):
assert False
def build_rpn (self, volume, is_training, shape=None):
# volume: input volume tensor
Z,Y,X = shape
assert max(Z % FLAGS.rpn_stride, Y % FLAGS.rpn_stride, X % FLAGS.rpn_stride) == 0
oZ = Z // FLAGS.rpn_stride
oY = Y // FLAGS.rpn_stride
oX = X // FLAGS.rpn_stride
n_priors = self.priors.shape[0]
n_params = self.priors.shape[1]
self.gt_anchors = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
self.gt_anchors_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
# parameter of that location
self.gt_params = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors, n_params))
self.gt_params_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
self.backbone = self.rpn_backbone(volume, is_training, FLAGS.rpn_stride)
logits = self.rpn_logits(self.backbone, is_training, n_priors)
logits = tf.identity(logits, name='logits')
self.logits = logits
self.probs = tf.sigmoid(logits, name='probs')
params = self.rpn_params(self.backbone, is_training, n_priors * n_params)
params = tf.identity(params, name='params')
self.params = params
# setup losses
# 1. losses for logits
logits1 = tf.reshape(logits, (-1,))
gt_anchors = tf.reshape(self.gt_anchors, (-1,))
gt_anchors_weight = tf.reshape(self.gt_anchors_weight, (-1,))
xe = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits1, labels=tf.cast(gt_anchors, tf.float32))
xe = tf.reduce_sum(xe * gt_anchors_weight) / (tf.reduce_sum(gt_anchors_weight) + 0.00001)
xe = tf.identity(xe, name='xe')
getattr(self, 'metrics', []).append(xe)
tf.losses.add_loss(xe * FLAGS.rpn_logits_weight)
# 2. losses for parameters
priors = tf.constant(self.priors[np.newaxis, :, :], dtype=tf.float32)
params = tf.reshape(params, (-1, n_priors, n_params))
gt_params = tf.reshape(self.gt_params, (-1, n_priors, n_params))
l1 = tf.losses.huber_loss(params, gt_params / priors, reduction=tf.losses.Reduction.NONE, loss_collection=None)
l1 = tf.reduce_sum(l1, axis=2)
# l1: ? * n_priors
l1 = tf.reshape(l1, (-1,))
gt_params_weight = tf.reshape(self.gt_params_weight, (-1,))
l1 = tf.reduce_sum(l1 * gt_params_weight) / (tf.reduce_sum(gt_params_weight) + 0.00001)
l1 = tf.identity(l1, name='l1')
getattr(self, 'metrics', []).append(l1)
tf.losses.add_loss(l1 * FLAGS.rpn_params_weight)
pass
|
python
|
import os
import sys
import yaml
import json
import pprint
import pathlib
import logging
import inspect
import argparse
import itertools
import importlib
from genie.metaparser import MetaParser
IGNORE_DIR = ['.git', '__pycache__', 'template', 'tests']
IGNORE_FILE = ['__init__.py', 'base.py', 'utils.py']
AVAILABLE_FUNC = ['cli', 'xml', 'yang', 'rest']
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
def format(d, tab=0):
s = ['{\n']
if d is None:
return d
for k,v in d.items():
if isinstance(v, dict):
v = format(v, tab+1)
else:
v = repr(v)
s.append('%s%r: %s,\n' % (' '*tab, k, v))
s.append('%s}' % (' '*tab))
return ''.join(s)
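# e.g. format({'a': {'b': 1}}) yields a brace-delimited, dict-like string in which
# every nesting level is indented by one extra space; it is used below to embed
# parser schemas in the generated documentation.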
class CreateApiDoc(object):
def __init__(self, datafile):
assert 'VIRTUAL_ENV' in os.environ
with open(datafile, 'r') as f:
self.datafile = yaml.safe_load(f)
self.output = {}
self.output['tokens'] = []
def _expand(self, name):
if '$env(VIRTUAL_ENV)' in name:
# Replace '$env(VIRTUAL_ENV)' with the actual value
return name.replace('$env(VIRTUAL_ENV)', os.environ['VIRTUAL_ENV'])
return name
def _find_parsers(self, mod):
parsers = []
for name, obj in inspect.getmembers(mod):
# starts with _ are ignored
if name.startswith('_'):
continue
# skip if not class
if not inspect.isclass(obj):
continue
# skip anything not defined in this module
try:
if inspect.getsourcefile(obj) != mod.__file__:
continue
except:
# getsourcefile fails for builtin objects
# we aren't interested in those anyway
continue
            # Inherits from MetaParser and defines a function from the
            # available functions
if issubclass(obj, MetaParser) and hasattr(obj, 'cli_command'):
parsers.append(obj)
return parsers
def _add_parser(self, parser, cli, tokens, mod):
if cli not in self.output:
self.output[cli] = {}
output = self.output[cli]
for token in tokens:
if token not in output:
output[token] = {}
output = output[token]
if token not in self.output['tokens']:
self.output['tokens'].append(token)
output['module_name'] = mod.__name__.rsplit('.', 1)[-1]
output['package'] = self.package
output['class'] = parser.__name__
output['doc'] = parser.__doc__
output['schema'] = format(parser.schema)
output['uid'] = cli.replace(' ','_').replace('{', '').replace('}', '').replace('|', '_')
line = inspect.getsourcelines(parser)[-1]
temp_url = mod.__file__.replace(os.path.join(
os.environ['VIRTUAL_ENV'], 'pypi', 'genieparser') + '/', '')
style = self.root['url']['style']
if style == 'bitbucket':
url = '{p}{t}#{l}'.format(p=self.root['url']['link'], t=temp_url, l=line)
elif style == 'github':
            url = self.root['url']['link'].format(branch=self.root['url']['branch'])
url = '{p}{t}#L{l}'.format(p=url, t=temp_url, l=line)
output['url'] = url
def _add_parsers(self, item, tokens):
# Find all classes which has a function named parse
# Will give module path
module_path = self.root['root'] + str(item).rsplit('.', 1)[0].\
replace(self.module_loc, '').replace('/', '.')
mod = importlib.import_module(module_path)
parsers = self._find_parsers(mod)
for parser in parsers:
if isinstance(parser.cli_command, list):
for cli in parser.cli_command:
self._add_parser(parser, cli, tokens, mod)
else:
self._add_parser(parser, parser.cli_command, tokens, mod)
def _recursive_find(self, item, token):
for item in item.iterdir():
if item.is_dir():
if item.name in IGNORE_DIR:
# Ignore
continue
else:
self._recursive_find(item, token + [item.name])
elif item.is_file():
if item.name in IGNORE_FILE or item.suffix != '.py':
continue
# Then add it to the self.datafile
self._add_parsers(item, token)
def find_all_apis(self):
if 'root_directories' not in self.datafile:
return {}
for name, values in self.datafile['root_directories'].items():
log.info("Learning '{name}'".format(name=name))
# Figure out location of package so you can walk it
self.root = values
self.package = self.root['root']
self.module_loc = importlib.import_module(self.root['root']).__path__[0]
# Walk all file in there and go through the parsers
self._recursive_find(pathlib.Path(self.module_loc), [])
def find_diff(l1, l2):
'''Difference between list1 and list2'''
diff = []
for list1, list2 in itertools.zip_longest(l1, l2):
if list2 != list1:
diff.append(list2)
return diff
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-datafile',
metavar='FILE',
type=str,
default=None,
help='File containing directory information')
parser.add_argument('-save_location',
metavar='FILE',
type=str,
default=None,
help='Location to save the output file')
custom_args = parser.parse_known_args()[0]
apiDoc = CreateApiDoc(custom_args.datafile)
apiDoc.find_all_apis()
output = json.dumps(apiDoc.output)
os.makedirs(os.path.dirname(custom_args.save_location), exist_ok=True)
with open(custom_args.save_location, 'w+') as f:
f.write(output)
|
python
|
from nltk import tokenize
from operator import itemgetter
import math
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
stop_words = set(stopwords.words('english'))
#nltk.download('stopwords')
## 2 Declare Variables
doc = '''I am from speak english with vanessa da com.You are so lovely.So i get emails from students telling me when i am so glad i canunderstand everything you say.Putra night charan in english tv show and i can understand anything.Does this mean that your speak in floor.Devika question.I want to make sure the you know exactly the truth.What's the next step when we explain in something like today in the show videos.I want to make sure that you can understand everything.Is.Unnatural.I am not talking.Best.Where is mauli.Children.I am not talking mike.But i am talking to really.Aloe vera flower because i want to make sure that you can understand.Everything.Turn off the talking to.Hamara i know the you are watching but on my side i see so it's difficult to help.Natural conversation.When someone is there so the reason why i want it all you get is because i have a lot of videos on my youtube channel with other english speakers.Jesus videos with people skype does videos with people in my house around my city.And i think it's a really good way.English listening to the next level.What is videos.Mossbauer
explanation.What videos with my voice to overy understand my voice.One other person.How make sure that in the description and at the end of
this video i will
'''
## 3. Remove stopwords (handled by the stop_words checks in the loops below)
## 4. Find total words in the document
total_words = doc.split()
total_word_length = len(total_words)
#print(total_word_length)
## 5. Find the total number of sentences
total_sentences = tokenize.sent_tokenize(doc)
total_sent_len = len(total_sentences)
#print(total_sent_len)
##6. Calculate TF for each word
tf_score = {}
for each_word in total_words:
each_word = each_word.replace('.','')
if each_word not in stop_words:
if each_word in tf_score:
tf_score[each_word] += 1
else:
tf_score[each_word] = 1
# Dividing by total_word_length for each dictionary element
tf_score.update((x, y/int(total_word_length)) for x, y in tf_score.items())
#print(tf_score)
## 7. Function to count how many sentences contain the word
def check_sent(word, sentences):
    final = [word in x for x in sentences]
    sent_len = [sentences[i] for i in range(0, len(final)) if final[i]]
    return int(len(sent_len))
## 8. Calculate IDF for each word
idf_score = {}
for each_word in total_words:
each_word = each_word.replace('.','')
if each_word not in stop_words:
if each_word in idf_score:
idf_score[each_word] = check_sent(each_word, total_sentences)
else:
idf_score[each_word] = 1
# Performing a log and divide
idf_score.update((x, math.log(int(total_sent_len)/y)) for x, y in idf_score.items())
#print(idf_score)
##9. Calculate TF * IDF
tf_idf_score = {key: tf_score[key] * idf_score.get(key, 0) for key in tf_score.keys()}
#print(tf_idf_score)
#10. Create a function to get N important words in the document
print('..........................important word................')
def get_top_n(dict_elem, n):
sorted_result = dict(sorted(dict_elem.items(), key = itemgetter(1), reverse = True)[:n])
    # sorted_result contains the top-n words and their corresponding TF-IDF scores
keywords=[key for key in sorted_result.keys()]
return keywords
# 11. Get the top 20 words of significance
if __name__ == '__main__':
get_top_n(tf_idf_score, 20)
print(get_top_n(tf_idf_score, 20))
|
python
|
#!/usr/bin/env python
"""Provides Generic Classes to make an image analysis.
"""
from abc import ABC, abstractmethod
import pandas as pd
class InputData(ABC):
def __init__(self, data):
self._content = data
@abstractmethod
def read(self):
pass
class Cohort(InputData):
def __init__(self, dataframe, workdir=None):
super().__init__(dataframe)
self.workdir = workdir
def read(self):
for _, row in self._content.iterrows():
filepath = row.path
name = row.id
if row.todo == 1 and filepath != 0:
if self.workdir:
filepath = str(self.workdir / filepath)
print(type(filepath))
yield (name, filepath)
class AnalysisCV(object):
'''
'''
def __init__(self, procedure):
self.procedure = procedure
def run(self, input_data):
print('running analysis !!')
all_results = {}
for (name, filepath) in input_data.read():
result = self.procedure.run(filepath, name)
results_df = pd.DataFrame(result, columns=result[0].keys())
all_results[name] = results_df
results_df.to_csv(name + '.csv')
return all_results
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
# defaults come from the original Transformer-XL code
cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
d_model: int = 500
n_head: int = 10
d_head: int = 50
d_inner: int = 1000
div_val: int = 1
n_layer: int = 12
mem_len: int = 0
clamp_len: int = -1
same_length: bool = False
dropout: float = 0.0
dropatt: float = 0.0
checkpoint_activations: bool = False
offload_activations: bool = False
max_target_positions: int = II("task.max_target_positions")
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerXLConfig, task):
return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
try:
from transformers.models.transfo_xl import (
TransfoXLConfig,
TransfoXLLMHeadModel,
)
except ImportError:
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
super().__init__(task.target_dictionary)
self.cfg = cfg
# remove any cutoffs larger than the vocab size
cutoffs = [
cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
]
config = TransfoXLConfig(
vocab_size=len(task.target_dictionary),
cutoffs=cutoffs,
d_model=cfg.d_model,
d_embed=cfg.d_model,
n_head=cfg.n_head,
d_head=cfg.d_head,
d_inner=cfg.d_inner,
div_val=cfg.div_val,
n_layer=cfg.n_layer,
mem_len=cfg.mem_len,
clamp_len=cfg.clamp_len,
same_length=cfg.same_length,
dropout=cfg.dropout,
dropatt=cfg.dropatt,
)
logger.info(config)
self.model = TransfoXLLMHeadModel(config)
# import pdb; pdb.set_trace()
if cfg.checkpoint_activations or cfg.offload_activations:
for i in range(len(self.model.transformer.layers)):
self.model.transformer.layers[i] = checkpoint_wrapper(
self.model.transformer.layers[i],
offload_to_cpu=cfg.offload_activations,
)
# TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
self._mems = None
def forward(
self,
src_tokens,
src_lengths=None, # unused
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
if incremental_state is not None: # used during inference
mems = self.get_incremental_state(incremental_state, "mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
output = self.model(
input_ids=src_tokens,
mems=mems,
return_dict=False,
)
if len(output) >= 2:
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.cfg.max_target_positions
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
mems = self.get_incremental_state(incremental_state, "mems")
if mems is not None:
new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
self.set_incremental_state(incremental_state, "mems", new_mems)
|
python
|
def cbrt(a):
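    # In Python 3, raising a negative base to a fractional power yields a complex
    # number, so take the cube root of |a| and reapply the sign instead.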
s = -1 if a < 0 else 1
return s * (a*s) ** (1/3)
print(cbrt(-8)) # -2.0
print(cbrt(8)) # 2.0
print(cbrt(0)) # 0.0
|
python
|
import pytest
from eth_account import Account
from eth_keys import KeyAPI
from eth_utils import is_same_address
@pytest.fixture
def c(w3, get_contract):
a0, a1, a2, a3, a4, a5, a6 = w3.eth.accounts[:7]
with open("examples/wallet/wallet.vy") as f:
code = f.read()
# Sends wei to the contract for future transactions gas costs
c = get_contract(code, *[[a1, a2, a3, a4, a5], 3])
w3.eth.sendTransaction({"to": c.address, "value": 10 ** 17})
return c
@pytest.fixture
def sign(keccak):
def _sign(seq, to, value, data, key):
keys = KeyAPI()
comb = seq.to_bytes(32, "big") + b"\x00" * 12 + to + value.to_bytes(32, "big") + data
h1 = keccak(comb)
h2 = keccak(b"\x19Ethereum Signed Message:\n32" + h1)
sig = keys.ecdsa_sign(h2, key)
return [28 if sig.v == 1 else 27, sig.r, sig.s]
return _sign
def test_approve(w3, c, tester, assert_tx_failed, sign):
a0, a1, a2, a3, a4, a5, a6 = w3.eth.accounts[:7]
k0, k1, k2, k3, k4, k5, k6, k7 = tester.backend.account_keys[:8]
to, value, data = b"\x35" * 20, 10 ** 16, b""
to_address = w3.toChecksumAddress(to)
def pack_and_sign(seq, *args):
sigs = [sign(seq, to, value, data, k) if k else [0, 0, 0] for k in args]
return sigs
# Legitimate approval
sigs = pack_and_sign(0, k1, 0, k3, 0, k5)
c.approve(0, "0x" + to.hex(), value, data, sigs, transact={"value": value, "from": a1})
# Approve fails if only 2 signatures are given
sigs = pack_and_sign(1, k1, 0, k3, 0, 0)
assert_tx_failed(
lambda: c.approve(1, to_address, value, data, sigs, transact={"value": value, "from": a1})
) # noqa: E501
# Approve fails if an invalid signature is given
sigs = pack_and_sign(1, k1, 0, k7, 0, k5)
assert_tx_failed(
lambda: c.approve(1, to_address, value, data, sigs, transact={"value": value, "from": a1})
) # noqa: E501
# Approve fails if transaction number is incorrect (the first argument should be 1)
sigs = pack_and_sign(0, k1, 0, k3, 0, k5)
assert_tx_failed(
lambda: c.approve(0, to_address, value, data, sigs, transact={"value": value, "from": a1})
) # noqa: E501
# Approve fails if not enough value is sent
sigs = pack_and_sign(1, k1, 0, k3, 0, k5)
assert_tx_failed(
lambda: c.approve(1, to_address, value, data, sigs, transact={"value": 0, "from": a1})
) # noqa: E501
sigs = pack_and_sign(1, k1, 0, k3, 0, k5)
# this call should succeed
c.approve(1, to_address, value, data, sigs, call={"value": value, "from": a1})
print("Basic tests passed")
def test_javascript_signatures(w3, get_contract):
a3 = w3.eth.accounts[2]
# The zero address will cause `approve` to default to valid signatures
zero_address = "0x0000000000000000000000000000000000000000"
accounts = [
"0x776ba14735ff84789320718cf0aa43e91f7a8ce1",
"0x095ce4e4240fa66ff90282c26847456e3f3b5002",
]
# The address that will receive the transaction
recipient = "0x776Ba14735FF84789320718cf0aa43e91F7A8Ce1"
# These are the matching sigs to the accounts
raw_sigs = [
"0x4a89507bf71749fb338ed13fba623a683d9ecab0fb9c389a4298525c043e38281a00ab65628bb18a382eb8c8b4fb4dae95ccc993cf49f617c60d8051180778601c", # noqa: E501
"0xc84fe5d2a600e033930e0cf73f26e78f4c65b134f9c9992f60f08ce0863abdbe0548a6e8aa2d952659f29c67106b59fdfcd64d67df03c1df620c70c85578ae701b", # noqa: E501
]
# Turns the raw sigs into sigs
sigs = [
(w3.toInt(x[64:]), w3.toInt(x[:32]), w3.toInt(x[32:64])) # v # r # s
for x in map(lambda z: w3.toBytes(hexstr=z[2:]), raw_sigs)
]
h = w3.keccak(
(0).to_bytes(32, "big")
+ b"\x00" * 12
+ w3.toBytes(hexstr=recipient[2:])
+ (25).to_bytes(32, "big")
+ b""
) # noqa: E501
h2 = w3.keccak(b"\x19Ethereum Signed Message:\n32" + h)
# Check to make sure the signatures are valid
assert is_same_address(Account.recoverHash(h2, sigs[0]), accounts[0])
assert is_same_address(Account.recoverHash(h2, sigs[1]), accounts[1])
# Set the owners to zero addresses
with open("examples/wallet/wallet.vy") as f:
owners = [w3.toChecksumAddress(x) for x in accounts + [a3, zero_address, zero_address]]
x2 = get_contract(f.read(), *[owners, 2])
w3.eth.sendTransaction({"to": x2.address, "value": 10 ** 17})
# There's no need to pass in signatures because the owners are 0 addresses
# causing them to default to valid signatures
x2.approve(
0,
recipient,
25,
b"",
sigs + [[0, 0, 0]] * 3,
call={"to": x2.address, "value": 10 ** 17},
)
print("Javascript signature tests passed")
|
python
|
from django.db import models
# Create your models here.
class Course(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255, null=False)
class Slot(models.Model):
MON = 1
TUE = 2
WED = 3
THU = 4
FRI = 5
DAY_CHOICES = [
(MON, 'Mon'),
(TUE, 'Tue'),
(WED, 'Wed'),
(THU, 'Thu'),
(FRI, 'Fri'),
]
CORY = 0
SODA = 1
ROOM_CHOICES = [
(CORY, 'Cory'),
(SODA, 'Soda'),
]
HOUR_CHOICES = [
(11, '11am'),
(12, '12pm'),
(13, '1pm'),
(14, '2pm'),
(15, '3pm'),
(16, '4pm'),
]
id = models.AutoField(primary_key=True)
hour = models.IntegerField(choices=HOUR_CHOICES)
day = models.IntegerField(choices=DAY_CHOICES)
room = models.IntegerField(choices=ROOM_CHOICES)
@staticmethod
def time(hour):
if hour < 12:
return '{}am'.format(hour)
else:
return '{}pm'.format(hour)
def start_time(self):
return self.time(self.hour)
def end_time(self):
return self.time(self.hour + 1)
class Tutor(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
slots = models.ManyToManyField(Slot)
courses = models.ManyToManyField(Course)
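# A hedged usage sketch (the course name is hypothetical):
#   Tutor.objects.filter(slots__day=Slot.MON, slots__hour=11, courses__name="CS61A")
# returns the tutors available at 11am on Monday who cover that course.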
|
python
|
import tkinter
class TkinterImplementation(object):
def begin(self, wrappedIdleImage):
self.root = tkinter.Tk()
self.root.overrideredirect(True)
self.root.geometry(
"{0}x{1}+0+0".format(self.root.winfo_screenwidth(), self.root.winfo_screenheight()))
self.root.config(background='black')
        self.panel = tkinter.Label(self.root, image=wrappedIdleImage.getImage())
self.panel.config(background='black')
self.panel.pack(side='bottom', fill='both', expand='yes')
self.root.update()
def update(self):
self.root.update()
def changeImage(self, image):
self.panel.config(image=image)
self.root.update()
|
python
|
"""
Some utility functions that are only used for unittests.
Placing them in test/ directory seems to be against convention, so they are part of the library.
"""
from __future__ import print_function, division, absolute_import
import random
import copy
import numpy as np
import six.moves as sm
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
try:
import cPickle as pickle
except ImportError:
import pickle
import imgaug as ia
import imgaug.random as iarandom
from imgaug.augmentables.kps import KeypointsOnImage
class ArgCopyingMagicMock(mock.MagicMock):
"""A MagicMock that copies its call args/kwargs before storing the call.
This is useful for imgaug as many augmentation methods change data
in-place.
Taken from https://stackoverflow.com/a/23264042/3760780
"""
def _mock_call(self, *args, **kwargs):
args_copy = copy.deepcopy(args)
kwargs_copy = copy.deepcopy(kwargs)
return super(ArgCopyingMagicMock, self)._mock_call(
*args_copy, **kwargs_copy)
def assert_cbaois_equal(observed, expected, max_distance=1e-4):
# pylint: disable=unidiomatic-typecheck
if isinstance(observed, list) or isinstance(expected, list):
assert isinstance(observed, list)
assert isinstance(expected, list)
assert len(observed) == len(expected)
for observed_i, expected_i in zip(observed, expected):
assert_cbaois_equal(observed_i, expected_i,
max_distance=max_distance)
else:
assert type(observed) == type(expected)
assert len(observed.items) == len(expected.items)
assert observed.shape == expected.shape
for item_a, item_b in zip(observed.items, expected.items):
assert item_a.coords_almost_equals(item_b,
max_distance=max_distance)
if isinstance(expected, ia.PolygonsOnImage):
for item_obs, item_exp in zip(observed.items, expected.items):
if item_exp.is_valid:
assert item_obs.is_valid
def shift_cbaoi(cbaoi, top=0, right=0, bottom=0, left=0):
if isinstance(cbaoi, ia.KeypointsOnImage):
return cbaoi.shift(x=left-right, y=top-bottom)
return cbaoi.shift(top=top, right=right, bottom=bottom, left=left)
def create_random_images(size):
return np.random.uniform(0, 255, size).astype(np.uint8)
def create_random_keypoints(size_images, nb_keypoints_per_img):
result = []
for _ in sm.xrange(size_images[0]):
kps = []
height, width = size_images[1], size_images[2]
for _ in sm.xrange(nb_keypoints_per_img):
x = np.random.randint(0, width-1)
y = np.random.randint(0, height-1)
kps.append(ia.Keypoint(x=x, y=y))
result.append(ia.KeypointsOnImage(kps, shape=size_images[1:]))
return result
def array_equal_lists(list1, list2):
assert isinstance(list1, list), (
"Expected list1 to be a list, got type %s." % (type(list1),))
assert isinstance(list2, list), (
"Expected list2 to be a list, got type %s." % (type(list2),))
if len(list1) != len(list2):
return False
for arr1, arr2 in zip(list1, list2):
if not np.array_equal(arr1, arr2):
return False
return True
def keypoints_equal(kpsois1, kpsois2, eps=0.001):
if isinstance(kpsois1, KeypointsOnImage):
assert isinstance(kpsois2, KeypointsOnImage)
kpsois1 = [kpsois1]
kpsois2 = [kpsois2]
if len(kpsois1) != len(kpsois2):
return False
for kpsoi1, kpsoi2 in zip(kpsois1, kpsois2):
kps1 = kpsoi1.keypoints
kps2 = kpsoi2.keypoints
if len(kps1) != len(kps2):
return False
for kp1, kp2 in zip(kps1, kps2):
x_equal = (float(kp2.x) - eps
<= float(kp1.x)
<= float(kp2.x) + eps)
y_equal = (float(kp2.y) - eps
<= float(kp1.y)
<= float(kp2.y) + eps)
if not x_equal or not y_equal:
return False
return True
def reseed(seed=0):
iarandom.seed(seed)
np.random.seed(seed)
random.seed(seed)
def runtest_pickleable_uint8_img(augmenter, shape=(15, 15, 3), iterations=3):
image = np.mod(np.arange(int(np.prod(shape))), 256).astype(np.uint8)
image = image.reshape(shape)
augmenter_pkl = pickle.loads(pickle.dumps(augmenter, protocol=-1))
for _ in np.arange(iterations):
image_aug = augmenter(image=image)
image_aug_pkl = augmenter_pkl(image=image)
assert np.array_equal(image_aug, image_aug_pkl)
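# A minimal usage sketch: check that an augmenter behaves identically after pickling.
#   import imgaug.augmenters as iaa
#   runtest_pickleable_uint8_img(iaa.Fliplr(0.5), shape=(15, 15, 3), iterations=3)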
|
python
|
"""io
Core IO Modules
"""
import os
import json
import pickle
###############################################################
# Common I/O operations
# ======================
#
def makedirs(filepath):
os.makedirs(os.path.dirname(filepath), exist_ok=True)
def walk(source_dir):
paths = list()
for root, dirs, files in os.walk(source_dir):
for filename in files:
paths.append(os.path.join(root, filename))
return paths
def load_json(filepath, encoding="utf-8"):
return json.load(open(filepath, "r", encoding=encoding))
def dump_json(obj, filepath, indent=None, ensure_ascii=False, makedir=True):
if makedir:
makedirs(filepath)
json.dump(
obj,
open(filepath, "w"),
indent=indent,
ensure_ascii=ensure_ascii
)
def load_pickle(filepath):
return pickle.load(open(filepath, "rb"))
def dump_pickle(obj, filepath, makedir=True):
if makedir:
makedirs(filepath)
pickle.dump(obj, open(filepath, "wb"))
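# A minimal usage sketch (paths are hypothetical):
#   dump_json({"lr": 0.001}, "runs/exp1/config.json", indent=2)  # creates runs/exp1/ if needed
#   cfg = load_json("runs/exp1/config.json")
#   dump_pickle(cfg, "runs/exp1/config.pkl")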
|
python
|
from django.shortcuts import render
from account.models import Account
from datetime import datetime
def home(request):
# Editing Earl of the Day ID should update all data on home page
earl_of_the_day_id = 2
month = datetime.today().month
upcoming_birthdays = Account.objects.filter(birthday__month=month).order_by('birthday')
context = {
"earl_of_the_day": Account.objects.get(pk=earl_of_the_day_id),
"upcoming": upcoming_birthdays,
"active_page": "home",
}
return render(request, 'home.html', context)
|
python
|
from pyrk.materials.material import Material
from pyrk.utilities.ur import units
from pyrk.density_model import DensityModel
from pyrk.inp import validation
class LiquidMaterial(Material):
''' subclass of material for liquid'''
def __init__(self,
name=None,
k=0 * units.watt / units.meter / units.kelvin,
cp=0 * units.joule / units.kg / units.kelvin,
dm=DensityModel(),
mu=0 * units.pascal * units.seconds):
"""Initalizes a material
:param name: The name of the component (i.e., "fuel" or "cool")
:type name: str.
:param k: The thermal conductivity of the component
:type k: float, pint.unit.Quantity :math:'watt/meter/K'
:param cp: specific heat capacity, :math:`c_p`, in :math:`J/kg-K`
:type cp: float, pint.unit.Quantity :math:`J/kg-K`
:param dm: The density of the material
:type dm: DensityModel object
:param mu: dynamic viscosity(for fluid), :math:`mu`, in :math:`Pa.s`
:type mu: float, pint.unit.Quantity :math:`Pa.s`
"""
Material.__init__(self, name, k, cp, dm)
self.mu = mu.to('pascal*seconds')
validation.validate_ge("mu", mu, 0 * units.pascal * units.seconds)
|
python
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_dnsresolver.generated._client_factory import (
cf_dns_resolver,
cf_inbound_endpoint,
cf_outbound_endpoint,
cf_dns_forwarding_ruleset,
cf_forwarding_rule,
cf_virtual_network_link,
)
dns_resolver_dns_resolver = CliCommandType(
operations_tmpl=(
'azext_dnsresolver.vendored_sdks.dnsresolver.operations._dns_resolvers_operations#DnsResolversOperations.{}'
),
client_factory=cf_dns_resolver,
)
dns_resolver_forwarding_rule = CliCommandType(
operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._forwarding_rules_operations#ForwardingRulesOperations.{}',
client_factory=cf_forwarding_rule,
)
dns_resolver_dns_forwarding_ruleset = CliCommandType(
operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._dns_forwarding_rulesets_operations#DnsForwardingRulesetsOperations.{}',
client_factory=cf_dns_forwarding_ruleset,
)
dns_resolver_inbound_endpoint = CliCommandType(
operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._inbound_endpoints_operations#InboundEndpointsOperations.{}',
client_factory=cf_inbound_endpoint,
)
dns_resolver_outbound_endpoint = CliCommandType(
operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._outbound_endpoints_operations#OutboundEndpointsOperations.{}',
client_factory=cf_outbound_endpoint,
)
dns_resolver_virtual_network_link = CliCommandType(
operations_tmpl='azext_dnsresolver.vendored_sdks.dnsresolver.operations._virtual_network_links_operations#VirtualNetworkLinksOperations.{}',
client_factory=cf_virtual_network_link,
)
def load_command_table(self, _):
with self.command_group(
'dns-resolver', dns_resolver_dns_resolver, client_factory=cf_dns_resolver, is_preview=True
) as g:
g.custom_command('list', 'dns_resolver_list')
g.custom_show_command('show', 'dns_resolver_show')
g.custom_command('create', 'dns_resolver_create', supports_no_wait=True)
g.custom_command('update', 'dns_resolver_update', supports_no_wait=True)
g.custom_command('delete', 'dns_resolver_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'dns_resolver_show')
with self.command_group(
'dns-resolver forwarding-rule', dns_resolver_forwarding_rule, client_factory=cf_forwarding_rule
) as g:
g.custom_command('list', 'dns_resolver_forwarding_rule_list')
g.custom_show_command('show', 'dns_resolver_forwarding_rule_show')
g.custom_command('create', 'dns_resolver_forwarding_rule_create')
g.custom_command('update', 'dns_resolver_forwarding_rule_update')
g.custom_command('delete', 'dns_resolver_forwarding_rule_delete', confirmation=True)
with self.command_group(
'dns-resolver forwarding-ruleset', dns_resolver_dns_forwarding_ruleset, client_factory=cf_dns_forwarding_ruleset
) as g:
g.custom_command('list', 'dns_resolver_forwarding_ruleset_list')
g.custom_show_command('show', 'dns_resolver_forwarding_ruleset_show')
g.custom_command('create', 'dns_resolver_forwarding_ruleset_create', supports_no_wait=True)
g.custom_command('update', 'dns_resolver_forwarding_ruleset_update', supports_no_wait=True)
g.custom_command('delete', 'dns_resolver_forwarding_ruleset_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'dns_resolver_forwarding_ruleset_show')
with self.command_group(
'dns-resolver inbound-endpoint', dns_resolver_inbound_endpoint, client_factory=cf_inbound_endpoint
) as g:
g.custom_command('list', 'dns_resolver_inbound_endpoint_list')
g.custom_show_command('show', 'dns_resolver_inbound_endpoint_show')
g.custom_command('create', 'dns_resolver_inbound_endpoint_create', supports_no_wait=True)
g.custom_command('update', 'dns_resolver_inbound_endpoint_update', supports_no_wait=True)
g.custom_command('delete', 'dns_resolver_inbound_endpoint_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'dns_resolver_inbound_endpoint_show')
with self.command_group(
'dns-resolver outbound-endpoint', dns_resolver_outbound_endpoint, client_factory=cf_outbound_endpoint
) as g:
g.custom_command('list', 'dns_resolver_outbound_endpoint_list')
g.custom_show_command('show', 'dns_resolver_outbound_endpoint_show')
g.custom_command('create', 'dns_resolver_outbound_endpoint_create', supports_no_wait=True)
g.custom_command('update', 'dns_resolver_outbound_endpoint_update', supports_no_wait=True)
g.custom_command('delete', 'dns_resolver_outbound_endpoint_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'dns_resolver_outbound_endpoint_show')
with self.command_group(
'dns-resolver vnet-link', dns_resolver_virtual_network_link, client_factory=cf_virtual_network_link
) as g:
g.custom_command('list', 'dns_resolver_vnet_link_list')
g.custom_show_command('show', 'dns_resolver_vnet_link_show')
g.custom_command('create', 'dns_resolver_vnet_link_create', supports_no_wait=True)
g.custom_command('update', 'dns_resolver_vnet_link_update', supports_no_wait=True)
g.custom_command('delete', 'dns_resolver_vnet_link_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'dns_resolver_vnet_link_show')
|
python
|
#!/usr/bin/env python
'''Generate a series of calibration frames using POV-ray.'''
from __future__ import division
import sys, os, math
def do_scene (x, y, z, fn):
'''Generate a frame with the camera at x,y,z into fn and render it.'''
f = open (fn, 'w')
print >>f, '#include "calibration_target.pov"'
print >>f, 'camera {'
print >>f, ' location <%.2f, %.2f, %.2f>' % (x, y, z)
print >>f, ' look_at <%.2f, 300, 280>' % x
print >>f, '}'
f.close ()
os.system ('povray +I%s +FN +W640 +H480 +AA +A0.3 -D &> /dev/null' % fn)
# Main program: calculate the camera positions and generate the frames.
n = 30
for i in range (0, n):
x = 75 + 100 * math.cos (i * math.pi / n)
y = 50 + 100 * math.cos (i * math.pi / n)
z = 650 + 100 * math.sin (i * math.pi / n)
print y, z
fn = 'calib-%3.3d.pov' % i
do_scene (x, y, z, fn)
|
python
|
from pypeflow.common import *
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow
import os
import uuid
import sys
import time
def run_script(job_data, job_type = "SGE" ):
if job_type == "SGE":
job_name = job_data["job_name"]
cwd = job_data["cwd"]
sge_option = job_data["sge_option"]
script_fn = job_data["script_fn"]
sge_cmd="qsub -N {job_name} {sge_option} -o {cwd}/sge_log -j y\
-S /bin/bash {script}".format(job_name=job_name,
cwd=os.getcwd(),
sge_option=sge_option,
script=script_fn)
#print sge_cmd
os.system( sge_cmd )
os.system( "sleep 1")
elif job_type == "local":
os.system( "bash %s" % job_data["script_fn"] )
def wait_for_file(filename, task = None, job_name = ""):
while 1:
time.sleep(30)
if os.path.exists(filename):
break
if task != None:
if task.shutdown_event != None and task.shutdown_event.is_set():
os.system("qdel %s" % job_name)
break
def run_p_task(self):
p_script_fn = self.parameters["p_file"]
job_id = self.parameters["job_id"]
cwd = self.parameters["cwd"]
script_dir = os.path.join( cwd )
script_fn = os.path.join( script_dir , "rp_%05d.sh" % (job_id))
log_path = os.path.join( script_dir, "rp_%05d.log" % (job_id))
script = []
script.append( "export PATH=~/task2014/dazzler/DALIGNER/:$PATH" )
script.append( "cd %s" % cwd )
script.append( ("/usr/bin/time bash %s " % p_script_fn) + ( " >& %s " % log_path ) + ( " && touch %s" % fn( self.job_done ) ) )
with open(script_fn,"w") as script_file:
script_file.write("\n".join(script))
job_name = self.URL.split("/")[-1]
job_name += "-"+str(uuid.uuid1())[:8]
job_data = {"job_name": job_name,
"cwd": cwd,
"sge_option": " -pe smp 2 -q huasm ",
"script_fn": script_fn }
run_script(job_data, job_type = "SGE")
wait_for_file( fn( self.job_done ), task=self, job_name=job_name )
def run_consensus_task(self):
job_id = self.parameters["job_id"]
cwd = self.parameters["cwd"]
script_dir = os.path.join( cwd )
script_fn = os.path.join( script_dir , "cp_%05d.sh" % (job_id))
log_path = os.path.join( script_dir, "cp_%05d.log" % (job_id))
with open( os.path.join(cwd, "c_%05d.sh" % job_id), "w") as p_script:
print >> p_script, ". /mnt/secondary/Share/HBAR_03202013/bin/activate"
print >> p_script, "cd .."
print >> p_script, """./LA4Falcon -o -f:%s las_files/%s.%d.las | """ % (prefix, prefix, job_id),
print >> p_script, """ falcon_sense.py --trim --output_multi --min_idt 0.70 --min_cov 4 --local_match_count_threshold 3 --max_n_read 800 --n_core 8 > %s""" % fn(self.out_file)
script = []
script.append( "cd %s" % cwd )
script.append( ("/usr/bin/time bash c_%05d.sh " % job_id ) + ( " >& %s " % log_path ) + ( " && touch c_%05d_done" % job_id ) )
with open(script_fn,"w") as script_file:
script_file.write("\n".join(script))
job_name = self.URL.split("/")[-1]
job_name += "-"+str(uuid.uuid1())[:8]
job_data = {"job_name": job_name,
"cwd": cwd,
"sge_option": " -pe smp 6 -q huasm ",
"script_fn": script_fn }
run_script(job_data, job_type = "SGE")
wait_for_file( os.path.join(cwd,"c_%05d_done" % job_id) , task=self, job_name=job_name )
if __name__ == "__main__":
prefix = sys.argv[1]
concurrent_jobs = 16
PypeThreadWorkflow.setNumThreadAllowed(concurrent_jobs, concurrent_jobs)
wf = PypeThreadWorkflow()
mjob_data = {}
with open("run_jobs.sh") as f:
for l in f:
l = l.strip().split()
if l[0] not in ( "LAsort", "LAmerge" ):
continue
if l[0] == "LAsort":
p_id = int( l[2].split(".")[1] )
mjob_data.setdefault( p_id, [] )
mjob_data[p_id].append( " ".join(l) )
if l[0] == "LAmerge":
l2 = l[2].split(".")
if l2[1] == "L2":
p_id = int( l[2].split(".")[2] )
mjob_data.setdefault( p_id, [] )
mjob_data[p_id].append( " ".join(l) )
else:
p_id = int( l[2].split(".")[1] )
mjob_data.setdefault( p_id, [] )
mjob_data[p_id].append( " ".join(l) )
db_file = makePypeLocalFile(os.path.abspath( "./%s.db" % prefix ))
for p_id in mjob_data:
s_data = mjob_data[p_id]
try:
os.makedirs("./p_%05d" % p_id)
os.makedirs("./p_%05d/sge_log" % p_id)
except OSError:
pass
try:
os.makedirs("./preads")
except OSError:
pass
try:
os.makedirs("./las_files")
except OSError:
pass
with open("./p_%05d/p_%05d.sh" % (p_id, p_id), "w") as p_script:
print >> p_script, """for f in `find .. -wholename "*job*/%s.%d.%s.*.*.las"`; do ln -sf $f .; done""" % (prefix, p_id, prefix)
for l in s_data:
print >> p_script, l
print >> p_script, "mv %s.%d.las ../las_files" % (prefix, p_id)
p_file = os.path.abspath( "./p_%05d/p_%05d.sh" % (p_id, p_id) )
job_done = makePypeLocalFile(os.path.abspath( "./p_%05d/p_%05d_done" % (p_id,p_id) ))
parameters = {"p_file": p_file,
"cwd": os.path.join(os.getcwd(), "p_%05d" % p_id),
"job_id": p_id}
make_p_task = PypeTask( inputs = {"db_file": db_file},
outputs = {"job_done": job_done},
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/ptask_%05d" % p_id )
p_task = make_p_task ( run_p_task )
wf.addTask(p_task)
out_file = makePypeLocalFile(os.path.abspath( "./preads/out.%04d.fa" % p_id ))
parameters = {"cwd": os.path.join(os.getcwd(), "preads" ),
"job_id": p_id}
make_c_task = PypeTask( inputs = {"job_done": job_done},
outputs = {"out_file": out_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/ct_%05d" % p_id )
c_task = make_c_task( run_consensus_task )
wf.addTask(c_task)
print p_id
wf.refreshTargets(updateFreq = 15) #all
|
python
|
"""
ray.py defines a class of rays that can be represented in space. A ray
propagates in the optical system and can be refracted, reflected or dispersed.
Each instantiation is hence described by several line segments in space which
are determined by their endpoints and directions. The final segment determines
the current direction of the ray.
"""
import numpy as np
import nklab as nk
class Ray:
"""
Instantiates an optical ray.
Provides
1. A vector representation of the ray in the system.
2. Methods for updating the representation of the ray and returning its
current point and direction each time it propagates to an optical
element surface.
"""
def __init__(self, r=[0, 0, 0], k=[0, 0, 1], wavelength = 0):
"""
Instantiates an optical ray at a starting position r with initial
(normalised) direction k. Coordinates are in the x,y,z Cartesian form.
r and k can be numpy arrays or lists of integers and/or floats.
wavelength is a float (measured in nanometres).
"""
if len(r) != 3 or len(k) != 3:
raise Exception('3D vector size')
self._r = np.array(r, dtype=float)
self._k = nk.normalise(np.array(k, dtype=float))
        if wavelength == 0:
            self._wavelength = None
        else:
            self._wavelength = float(wavelength)
        # _vertices and _directions are lists of all segment endpoints and
        # directions of the ray. They are useful for plotting but are not
        # exposed directly to the user.
self._vertices = [self._r]
self._directions = [self._k]
def __repr__(self):
"""
Represents the current point and direction of the ray
"""
return "%s(r=[%g, %g, %g], k=[%g, %g, %g])" % (
"Ray", self.r()[0], self.r()[1], self.r()[2],
self.k()[0], self.k()[1], self.k()[2])
def __str__(self):
"""
Represents the current point and direction of the ray
"""
return "r = (%g, %g, %g), k = (%g, %g, %g)" % (
self.r()[0], self.r()[1], self.r()[2],
self.k()[0], self.k()[1], self.k()[2])
def r(self):
"""
Gets the value of the current point.
"""
return self._vertices[-1]
def k(self):
"""
Gets the value of the current direction.
"""
return self._directions[-1]
def vertices(self):
"""
Gets the values of all vertices of the ray.
Vertices are numpy arrays of floats.
"""
return self._vertices
def append(self, r, k):
"""
Appends new point and direction to the ray usually after interaction
with optical element.
r, k can be numpy arrays or lists of floats and/or integers.
Appended points and directions are numpy arrays of floats.
Directions are normalised.
"""
if len(r) != 3 or len(k) != 3:
raise Exception('3D vector size')
r = np.array(r, dtype=float)
k = nk.normalise(np.array(k, dtype=float))
self._vertices.append(r)
self._directions.append(k)
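# A minimal usage sketch (relies on nklab.normalise, imported above as nk):
#   ray = Ray(r=[0, 0, 0], k=[0, 0, 1], wavelength=589)
#   ray.append([0, 0, 10], [0, 1, 1])   # record a new segment after an interaction
#   print(ray)                          # r = (0, 0, 10), k = (0, 0.707107, 0.707107)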
|
python
|
from django.views import View
from django.http import JsonResponse
from django.shortcuts import render, reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from core.models import DesignDocument, UserDocumentDownload, UserDocumentFavorite
class ProfileView(LoginRequiredMixin, View):
template_name = 'core/profile/profile.html'
def get(self, request):
filter_param = request.GET.get('filter')
design_documents = self.get_filtered_documents(filter_param, request.user) if \
filter_param else \
DesignDocument.objects.filter(uploaded_by=request.user)
print(design_documents)
context = {
'documents': design_documents,
'filter_param': filter_param
}
return render(request, self.template_name, context)
def delete(self, request):
request.user.delete()
return JsonResponse({'message': 'Account successfully deleted'}, status=200)
def get_filtered_documents(self, filter_param, user):
try:
model_class = {
'favorites': UserDocumentFavorite,
'downloads': UserDocumentDownload
}[filter_param]
return [item.design_document for item in model_class.objects.filter(user=user)]
except KeyError:
return DesignDocument.objects.filter(uploaded_by=user)
|
python
|
#Use emcee as a Metropolis-Hastings so we can avoid a lot of the difficulty of the ensemble sampler for the moment.
import numpy as np
import emcee
#create our lnprob as a multidimensional Gaussian, where icov is C^{-1}
def lnprob(x, mu, icov):
diff = x-mu
lnp = -np.dot(diff,np.dot(icov,diff))/2.0
print("lnp = ", lnp)
return lnp
ndim = 2
#Create our own parameters for this Gaussian
means = np.array([10, 3])
cov = np.array([[3.0, 0.0],[0.0, 1.0]])
icov = np.linalg.inv(cov)
print("Inverse covariance matrix", icov)
#Jump distribution parameters
MH_cov = np.array([[1.5, 0],[0., 0.7]])
sampler = emcee.MHSampler(MH_cov, ndim, lnprob, args=[means, icov])
pos, prob, state = sampler.run_mcmc(np.array([0, 0]), 5)
print("Samples", sampler.flatchain)
# sampler.reset()
# sampler.run_mcmc(pos, 5)
print("Acceptance fraction", sampler.acceptance_fraction)
#
# import triangle
# import matplotlib.pyplot as plt
#
# samples = sampler.flatchain
# figure = triangle.corner(samples, labels=(r"$\mu_1$", r"$\mu_2$"), quantiles=[0.16, 0.5, 0.84],
# show_titles=True, title_args={"fontsize": 12})
# figure.savefig("MH.png")
#
# def plot_walkers(filename, samples, labels=None):
# ndim = len(samples[0, :])
# fig, ax = plt.subplots(nrows=ndim, sharex=True)
# for i in range(ndim):
# ax[i].plot(samples[:,i])
# if labels is not None:
# ax[i].set_ylabel(labels[i])
# ax[-1].set_xlabel("Sample number")
# fig.savefig(filename)
#
# plot_walkers("walkers.png", samples, labels=(r"$\mu_1$", r"$\mu_2$"))
|
python
|
temporario = list()
principal = list()
maior = menor = 0
while True:
temporario.append(input("Nome: ").strip().title())
temporario.append(float(input("Peso: ")))
if len(principal) == 0:
maior = menor = temporario[1]
else:
if temporario[1] > maior:
maior = temporario[1]
elif temporario[1] < menor:
menor = temporario[1]
principal.append(temporario[:])
temporario.clear()
resposta = input("Deseja continuar? [S/N] ").strip().upper()
if resposta == "N":
break
if resposta == "S":
print("Continuando...")
else:
break
print(f"Ao todo, você cadastrou {len(principal)} pessoas.")
print(f"O maior peso foi {maior}Kg. Peso de", end=" ")
for pessoa in principal:
if pessoa[1] == maior:
print(pessoa[0], end=" ")
print(f"\nO menor peso foi de {menor}Kg. Peso de", end=" ")
for pessoa in principal:
if pessoa[1] == menor:
print(pessoa[0], end=" ")
|
python
|
from setuptools import setup
setup(name='myslice',
version='2.0.0',
description='MySlice version 2',
url='http://myslice.info',
author='Ciro Scognamiglio',
author_email='[email protected]',
license='MIT',
packages=['myslice'],
#install_requires=[
# 'tornado',
# 'tornado_cors',
# 'SockJS-tornado',
# 'rethinkdb',
# 'requests',
# 'pycryptodome',
# 'pytz',
# 'python-dateutil',
# 'premailer',
# 'python-oauth2',
# 'pyzmq'
# ],
#scripts=['myslice/bin/myslice-sync', 'myslice/bin/myslice-web'],
#data_files=[('/etc', ['config/planetlab.cfg-dist']),
# ('/etc/init.d', ['init/myslice'])],
zip_safe=False)
|
python
|
import logging
import operator
import time
from functools import reduce
from typing import Optional, Union, Dict, Collection, Any

logger = logging.getLogger(__name__)


class Configuration(object):

    def __init__(self, c: Optional[Union['Configuration', Dict]] = None):
        """Create a Configuration object.

        A python dict() or another Configuration can be used as the source.

        Args:
            c (Optional[Union['Configuration', Dict]], optional): Use this object as the
                Configuration source. Defaults to None (empty configuration).
        """
        self._generation = 0
        super(Configuration, self).__init__()
        if c is None:
            self._config_object = dict()
        else:
            self._config_object = c
            if isinstance(c, Configuration) and c._generation != 0:
                self._on_update()
            elif not isinstance(c, Configuration):
                self._on_update()

    def _on_update(self, generation=None):
        self._generation = time.time() if generation is None else generation
    @staticmethod
    def _to_config_object(o: Union['Configuration', Dict]) -> 'Configuration':
        """Internal method to convert an arbitrary object into a Configuration.

        If the object is already a Configuration object then it is returned unchanged.

        Returns:
            Configuration: a configuration object
        """
        if isinstance(o, Configuration):
            return o
        return Configuration(o)

    def __eq__(self, other):
        if self._generation == 0 and other is None:
            return True
        return super(Configuration, self).__eq__(other)

    def __getitem__(self, item):
        return self.get_at(item)

    def __setitem__(self, item, value):
        self.set_at(item, value)

    def __iter__(self):
        for key, value in self._config_object.items():
            yield key, value

    def __getattr__(self, item):
        try:
            res = getattr(self._config_object, item)
            return res
        except AttributeError:
            return self.get_at(item)
    @staticmethod
    def _is_native(o) -> bool:
        _native = False
        if not _native and isinstance(o, str):
            _native = True
        if not _native and isinstance(o, bytes):
            _native = True
        if not _native and isinstance(o, float):
            _native = True
        if not _native and isinstance(o, int):
            _native = True
        if not _native and isinstance(o, type(None)):
            _native = True
        if not _native and isinstance(o, list):
            _native = True
        if not _native and isinstance(o, dict):
            _native = True
        return _native

    def as_dict(self) -> Optional[Dict]:
        """Returns the current configuration object as a python dict.

        Returns:
            Optional[Dict]: dict representation
        """
        if isinstance(self._config_object, Configuration) and (self._is_native(self._config_object._config_object) or not hasattr(self._config_object._config_object, "__iter__")):
            return self._config_object._config_object
        if not hasattr(self._config_object, "__iter__"):
            return self._config_object
        if isinstance(self._config_object, list):
            return self._config_object
        if isinstance(self._config_object, str):
            return self._config_object
        if isinstance(self._config_object, int):
            return self._config_object
        if isinstance(self._config_object, float):
            return self._config_object
        if isinstance(self._config_object, bytes):
            return self._config_object
        # if self._is_native(self._config_object):
        #     return self._config_object
        d = {}
        for key, value in self._config_object.items():
            _value = value.as_dict() if isinstance(value, Configuration) else value
            d.update({key: _value})
        return d

    def __str__(self):
        return str(dict(self))

    def __unicode__(self):
        return str(dict(self))

    def __repr__(self):
        return str(dict(self))
    def get_at(self, path: str, convert: bool = True) -> Optional[Union['Configuration', Any]]:
        """Returns the Configuration branch at the given address.

        Args:
            path (Union[str, int]): path to get
            convert (bool): (deprecated) Embed the target into a Configuration object
                if the target element is an iterable.

        Returns:
            Optional[Union['Configuration', Any]]: the value at `path`, or None if the
                path does not exist.
        """
        try:
            if type(path) == int:
                res = operator.getitem(self._config_object, path)
            else:
                res = reduce(operator.getitem, path.split('.'), self._config_object)
            # if convert and (type(res) == dict or type(res) == list):
            #     res = self._to_config_object(res)
        except (KeyError, TypeError):
            return None
        if isinstance(res, Configuration) and self._is_native(res._config_object):
            return res.as_dict()
        return res

    def exists(self, path: Union[str, int]) -> bool:
        """Check if the given path exists in the Configuration.

        Args:
            path (Union[str, int]): path to check

        Returns:
            bool: True if the path exists
        """
        try:
            if type(path) == int:
                operator.getitem(self._config_object, path)
            else:
                reduce(operator.getitem, path.split('.'), self._config_object)
        except KeyError:
            return False
        return True
    def __add__(self, item):
        def merge(source, destination):
            for key, value in source.items():
                if isinstance(value, dict):
                    # get node or create one
                    node = destination.setdefault(key, {})
                    if isinstance(node, dict):
                        merge(value, node)
                    else:
                        destination[key] = value
                else:
                    destination[key] = value
            return destination

        if not isinstance(item, Configuration):
            raise ValueError("Value must be of Configuration type", item)
        destination = self.as_dict()
        source = item.as_dict()
        _type = type(self)
        res = merge(source, destination)
        c = _type(res)
        if item._generation == self._generation:
            c._on_update(0)
        elif item._generation == 0:
            c._on_update(self._generation)
        elif self._generation == 0:
            c._on_update(item._generation)
        return c

    # def set_at(self, path, value) -> None:
    #     def _setitem(value, path):
    #         return {path: value}
    #     p = path.split('.')
    #     p.reverse()
    #     res = reduce(_setitem, p, value)
    #     c = Configuration(res)
    #     self += c
    #     return self

    def set_at(self, path, value) -> None:
        value = self._value_convertor(value)
        key, _sep, _path = path.partition('.')
        if _sep != '':
            _value = self._config_object.setdefault(key, Configuration())
            if isinstance(_value, Configuration):
                _value.set_at(_path, value)
            else:
                c = Configuration(_value)
                c.set_at(_path, value)
                self._config_object[key] = c
        else:
            self._config_object[key] = value
        self._on_update()

    # def __setattr__(self, name, value):
    #     if name in ['_config_object']:
    #         super(Configuration, self).__setattr__(name, value)
    #     else:
    #         self.set_at(name, value)

    def __len__(self):
        return len(self.as_dict())

    def write(self, stream):
        raise NotImplementedError

    def _value_convertor(self, o):
        # TODO: Validate for literal type
        # raise ConfigurationException(ValueError(value))
        return o

    def append(self, c: Union['Configuration', Dict]) -> 'Configuration':
        """Mutates the Configuration object by appending a Configuration to the current object.

        Returns:
            Configuration: self, the updated object
        """
        source = self._config_object
        destination = c
        if isinstance(self._config_object, dict):
            source = Configuration(self._config_object)
        if isinstance(c, dict):
            destination = Configuration(c)
        self._config_object = source + destination
        return self
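
# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Dotted paths address nested keys, and `+` merges two Configuration objects,
# with the right-hand operand taking precedence for colliding keys.
if __name__ == '__main__':
    base = Configuration({'db': {'host': 'localhost', 'port': 5432}})
    override = Configuration({'db': {'host': 'db.example.org'}})
    merged = base + override
    print(merged.get_at('db.host'))  # -> db.example.org
    merged.set_at('db.user', 'admin')
    print(merged.as_dict())          # -> {'db': {'host': 'db.example.org', 'port': 5432, 'user': 'admin'}}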
|
python
|
"""
collection of helper functions
"""
from __future__ import print_function, division, absolute_import
import os
from glob import glob
from collections import defaultdict
import tables
from .. import NcsFile, options
def check_sorted(channel_dirname):
    """
    count how many 'sort_*' folders there are
    """
    pattern = os.path.join(channel_dirname, 'sort_???_?????_*')
    return len(glob(pattern))


def spike_count_h5f(fname):
    """
    return number of positive/negative spikes in h5file
    """
    fid = tables.open_file(fname, 'r')
    try:
        n_pos = fid.root.pos.spikes.shape[0]
    except tables.NoSuchNodeError:
        n_pos = 0
    try:
        n_neg = fid.root.neg.spikes.shape[0]
    except tables.NoSuchNodeError:
        n_neg = 0
    fid.close()
    ch_extracted = (n_pos + n_neg) > 0
    return ch_extracted, n_pos, n_neg


def check_status(channel_fname):
    """
    check whether channel is extracted/sorted
    """
    channel_dirname = os.path.splitext(channel_fname)[0]
    if os.path.isdir(channel_dirname):
        h5fname = os.path.join(channel_dirname,
                               'data_' + channel_dirname + '.h5')
        if os.path.exists(h5fname):
            ch_extracted, n_pos, n_neg = spike_count_h5f(h5fname)
            n_sorted = check_sorted(channel_dirname)
        else:
            h5fname = None
            ch_extracted = False
            n_pos = n_neg = n_sorted = 0
    else:
        h5fname = None
        ch_extracted = False
        n_pos = n_neg = n_sorted = 0
    return ch_extracted, n_pos, n_neg, n_sorted, h5fname
def get_channels(path, from_h5files=False):
    """
    simply finds the ncs files that are big enough
    """
    def h5fname2channel(h5fname):
        """
        transform h5filename to channel name
        It's a hack....
        """
        dirname = os.path.dirname(h5fname)
        basename = os.path.basename(dirname)
        cand = os.path.join(basename, basename + '.ncs')
        if os.path.exists(cand):
            return cand
        else:
            print('{} not found!'.format(cand))

    ret = {}
    if from_h5files:
        chs = []
        for name in h5files(path):
            test = h5fname2channel(name)
            if test is not None:
                chs.append(test)
            else:
                key = 'unknown'
                ret[key] = os.path.basename(os.path.dirname(name))
    else:
        chs = glob(os.path.join(path, '*.ncs'))
    for chan in chs:
        statr = os.stat(chan)
        if statr.st_size > 16 * 1024:
            fid = NcsFile(chan)
            name = fid.header['AcqEntName']
            ret[name] = os.path.basename(chan)
    return ret


def get_regions(path):
    channels = glob(os.path.join(path, 'CSC*.ncs'))
    regions = defaultdict(list)
    for ch in channels:
        statr = os.stat(ch)
        if statr.st_size > 16 * 1024:
            fh = NcsFile(ch)
            name = fh.header['AcqEntName']
            try:
                int(name[-1])
                name = name[:-1]
            except ValueError:
                if name[-4:] == '_Ref':
                    name = name[:-4]
                else:
                    print('Unknown Region: ' + name[-4:])
            regions[name].append(ch)
    for name in regions:
        regions[name] = sorted(regions[name])
    return regions


def h5files(path):
    """
    highly specific tool to find all relevant h5 files
    if their names follow the CSC?, CSC?? naming convention
    """
    def sort_function(fname):
        try:
            a = int(os.path.basename(fname)[8:-3])
            return a
        except ValueError:
            return fname

    # channel_dirs = glob(os.path.join(path, 'CSC?'))
    # channel_dirs += glob(os.path.join(path, 'CSC??'))
    channel_dirs = []
    for pat in options['folder_patterns']:
        channel_dirs += glob(os.path.join(path, pat))
    ret = []
    for chd in channel_dirs:
        basename = os.path.basename(chd)
        h5cand = os.path.join(chd, 'data_{}.h5'.format(basename))
        if os.path.exists(h5cand):
            if os.stat(h5cand).st_size > 0:
                ret.append(h5cand)
    return sorted(ret, key=sort_function)
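
# --- Added usage sketch (not part of the original module). It assumes the current
# --- working directory is the recording folder, because check_status() builds its
# --- paths relative to the channel file name returned by get_channels().
def print_status_overview(path='.'):
    """Print per-channel extraction/sorting status for every .ncs channel."""
    for name, fname in sorted(get_channels(path).items()):
        ch_extracted, n_pos, n_neg, n_sorted, _ = check_status(fname)
        print('{} ({}): extracted={}, pos={}, neg={}, sorted={}'.format(
            name, fname, ch_extracted, n_pos, n_neg, n_sorted))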
|
python
|
# -*- coding: utf-8 -*-
import unittest

from hamlish_jinja import Hamlish, Output

import testing_base


class TestDebugOutput(testing_base.TestCase):

    def setUp(self):
        self.hamlish = Hamlish(
            Output(indent_string='', newline_string='', debug=False))

    def test_pre_tags(self):
        s = self._h('''
%pre
    |def test():
    |    if 1:
    |        print "Test"
''')
        r = '''<pre>def test():
    if 1:
        print "Test"
</pre>\
'''
        self.assertEqual(s, r)


if __name__ == '__main__':
    unittest.main()
|