# Generated by Django 2.0.3 on 2018-04-13 22:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0006_auto_20180413_1527'),
]
operations = [
migrations.RenameField(
model_name='reply',
old_name='mediaItem',
new_name='reply_to',
),
migrations.AddField(
model_name='review',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
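# Editor's note (illustrative, not part of the generated migration): once this file sits in the
# catalog app's migrations directory, it is applied with Django's standard management command:
#
#   python manage.py migrate catalog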
|
python
|
import numpy as np
from gym.spaces import Box, Dict, Discrete
from database_env.foop import DataBaseEnv_FOOP
from database_env.query_encoding import DataBaseEnv_QueryEncoding
class DataBaseEnv_FOOP_QueryEncoding(DataBaseEnv_FOOP, DataBaseEnv_QueryEncoding):
"""
Database environment with states and actions as in the article (https://arxiv.org/pdf/1911.11689.pdf)
and encoding like NEO (http://www.vldb.org/pvldb/vol12/p1705-marcus.pdf).
Suitable for use with RLlib.
Attributes:
env_config (dict): Algorithm-specific configuration data; should contain an item corresponding to the DB scheme.
"""
def __init__(self, env_config, is_join_graph_encoding=False):
super().__init__(env_config)
self.is_join_graph_encoding = is_join_graph_encoding
real_obs_shape = self.N_rels * self.N_cols + self.N_cols
if self.is_join_graph_encoding:
real_obs_shape += self.query_encoding_size
real_obs_shape = (real_obs_shape, )
self.observation_space = Dict({
# plain int instead of the removed np.int alias
'real_obs': Box(low=0, high=1, shape=real_obs_shape, dtype=int),
'action_mask': Box(low=0, high=1, shape=(len(self.actions),), dtype=int),
})
def get_obs(self):
real_obs = [self.get_foop().flatten()]
if self.is_join_graph_encoding:
real_obs.append(self.join_graph_encoding)
real_obs.append(self.predicate_ohe)
real_obs = np.concatenate(real_obs).astype(int)
return {
'real_obs': real_obs.tolist(),
'action_mask': self.valid_actions().astype(int).tolist()
}
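# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Registers the environment with RLlib via Ray Tune's registry so it can be referenced by name
# in a trainer config. The contents of env_config (the DB-scheme item mentioned in the class
# docstring) are deployment-specific and not shown here; the registry name is an assumption.
if __name__ == '__main__':
    from ray.tune.registry import register_env

    def _make_env(env_config):
        # is_join_graph_encoding=True also appends the NEO-style join-graph encoding
        return DataBaseEnv_FOOP_QueryEncoding(env_config, is_join_graph_encoding=True)

    register_env('db_join_order_env', _make_env)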
|
python
|
def intercala(nomeA, nomeB, nomeS):
    # Merges two sorted number files into one sorted output file.
    # Assumed format (the original loop body was missing): each input file starts with a count,
    # followed by that many sorted integers, one per line.
    fileA = open(nomeA, 'rt')
    fileB = open(nomeB, 'rt')
    fileS = open(nomeS, 'wt')
    nA = int(fileA.readline())
    nB = int(fileB.readline())
    fileS.write('%d\n' % (nA + nB))
    valsA = [int(fileA.readline()) for _ in range(nA)]
    valsB = [int(fileB.readline()) for _ in range(nB)]
    i = j = 0
    while i < nA or j < nB:
        if j >= nB or (i < nA and valsA[i] <= valsB[j]):
            fileS.write('%d\n' % valsA[i]); i += 1
        else:
            fileS.write('%d\n' % valsB[j]); j += 1
    fileA.close(); fileB.close(); fileS.close()
def main():
nomeA = input('Nome do primeiro arquivo: ')  # name of the first file
nomeB = input('Nome do segundo arquivo: ')  # name of the second file
nomeS = input('Nome para o arquivo de saida: ')  # name for the output file
intercala(nomeA, nomeB, nomeS)
if __name__ == "__main__":
main()
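# Example of the assumed input/output format (editor's illustration, filenames hypothetical):
# each file begins with a count followed by that many sorted integers, one per line.
#
#   a.txt        b.txt        saida.txt (result)
#   3            2            5
#   1            2            1
#   4            8            2
#   9                         4
#                             8
#                             9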
|
python
|
import RPi.GPIO as GPIO
import time
class Motion:
def __init__(self, ui, pin, timeout=30):
self._ui = ui
self._pin = int(pin)
self._timeout = int(timeout)
self._last_motion = time.time()
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD
GPIO.setup(self._pin, GPIO.IN)
def check(self):
now = time.time()
if GPIO.input(self._pin):
self._last_motion = now
if (now - self._last_motion) <= self._timeout:
self._ui.on()
#if not self._ui.backlight_on:
# print "Turning UI on"
else:
# elif self._ui.backlight_on:
self._ui.off()
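# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Polls the motion sensor once a second and lets Motion drive the display backlight through a
# minimal stand-in UI object; the BCM pin number 23 and the console UI are assumptions.
if __name__ == '__main__':
    class _ConsoleUI:
        # stand-in for the real UI object: just reports backlight state changes
        def on(self):
            print("backlight on")
        def off(self):
            print("backlight off")

    motion = Motion(_ConsoleUI(), pin=23, timeout=30)
    try:
        while True:
            motion.check()
            time.sleep(1)
    finally:
        GPIO.cleanup()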
|
python
|
#
# PySNMP MIB module H3C-DOMAIN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-DOMAIN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:08:30 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
h3cCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cCommon")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
ObjectIdentity, iso, Bits, Integer32, ModuleIdentity, TimeTicks, IpAddress, NotificationType, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, MibIdentifier, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "iso", "Bits", "Integer32", "ModuleIdentity", "TimeTicks", "IpAddress", "NotificationType", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "MibIdentifier", "Counter32", "Unsigned32")
RowStatus, TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "DisplayString", "TextualConvention")
h3cDomain = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46))
if mibBuilder.loadTexts: h3cDomain.setLastUpdated('200908050000Z')
if mibBuilder.loadTexts: h3cDomain.setOrganization('H3C Technologies Co., Ltd.')
class H3cModeOfDomainScheme(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("none", 1), ("local", 2), ("radius", 3), ("tacacs", 4))
class H3cAAATypeDomainScheme(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("accounting", 1), ("authentication", 2), ("authorization", 3), ("none", 4))
class H3cAccessModeofDomainScheme(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
namedValues = NamedValues(("default", 1), ("login", 2), ("lanAccess", 3), ("portal", 4), ("ppp", 5), ("gcm", 6), ("dvpn", 7), ("dhcp", 8), ("voice", 9), ("superauthen", 10), ("command", 11), ("wapi", 12))
h3cDomainControl = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 1))
h3cDomainDefault = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDomainDefault.setStatus('current')
h3cDomainTables = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2))
h3cDomainInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1), )
if mibBuilder.loadTexts: h3cDomainInfoTable.setStatus('current')
h3cDomainInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1), ).setIndexNames((0, "H3C-DOMAIN-MIB", "h3cDomainName"))
if mibBuilder.loadTexts: h3cDomainInfoEntry.setStatus('current')
h3cDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128)))
if mibBuilder.loadTexts: h3cDomainName.setStatus('current')
h3cDomainState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("block", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainState.setStatus('current')
h3cDomainMaxAccessNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMaxAccessNum.setStatus('current')
h3cDomainVlanAssignMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("integer", 1), ("string", 2), ("vlanlist", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainVlanAssignMode.setStatus('current')
h3cDomainIdleCutEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 5), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIdleCutEnable.setStatus('current')
h3cDomainIdleCutMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 120))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIdleCutMaxTime.setStatus('current')
h3cDomainIdleCutMinFlow = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10240000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIdleCutMinFlow.setStatus('current')
h3cDomainMessengerEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 8), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMessengerEnable.setStatus('current')
h3cDomainMessengerLimitTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMessengerLimitTime.setStatus('current')
h3cDomainMessengerSpanTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 60))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMessengerSpanTime.setStatus('current')
h3cDomainSelfServiceEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 11), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSelfServiceEnable.setStatus('current')
h3cDomainSelfServiceURL = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSelfServiceURL.setStatus('current')
h3cDomainAccFailureAction = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ignore", 1), ("reject", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainAccFailureAction.setStatus('current')
h3cDomainRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 14), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainRowStatus.setStatus('current')
h3cDomainCurrentAccessNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDomainCurrentAccessNum.setStatus('current')
h3cDomainSchemeTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2), )
if mibBuilder.loadTexts: h3cDomainSchemeTable.setStatus('current')
h3cDomainSchemeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1), ).setIndexNames((0, "H3C-DOMAIN-MIB", "h3cDomainName"), (0, "H3C-DOMAIN-MIB", "h3cDomainSchemeIndex"))
if mibBuilder.loadTexts: h3cDomainSchemeEntry.setStatus('current')
h3cDomainSchemeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cDomainSchemeIndex.setStatus('current')
h3cDomainSchemeMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 2), H3cModeOfDomainScheme()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeMode.setStatus('current')
h3cDomainAuthSchemeName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainAuthSchemeName.setStatus('current')
h3cDomainAcctSchemeName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainAcctSchemeName.setStatus('current')
h3cDomainSchemeRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeRowStatus.setStatus('current')
h3cDomainSchemeAAAType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 6), H3cAAATypeDomainScheme()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeAAAType.setStatus('current')
h3cDomainSchemeAAAName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeAAAName.setStatus('current')
h3cDomainSchemeAccessMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 8), H3cAccessModeofDomainScheme()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeAccessMode.setStatus('current')
h3cDomainIpPoolTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3), )
if mibBuilder.loadTexts: h3cDomainIpPoolTable.setStatus('current')
h3cDomainIpPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1), ).setIndexNames((0, "H3C-DOMAIN-MIB", "h3cDomainName"), (0, "H3C-DOMAIN-MIB", "h3cDomainIpPoolNum"))
if mibBuilder.loadTexts: h3cDomainIpPoolEntry.setStatus('current')
h3cDomainIpPoolNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99)))
if mibBuilder.loadTexts: h3cDomainIpPoolNum.setStatus('current')
h3cDomainIpPoolLowIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 2), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolLowIpAddrType.setStatus('current')
h3cDomainIpPoolLowIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 3), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolLowIpAddr.setStatus('current')
h3cDomainIpPoolLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolLen.setStatus('current')
h3cDomainIpPoolRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolRowStatus.setStatus('current')
mibBuilder.exportSymbols("H3C-DOMAIN-MIB", H3cAAATypeDomainScheme=H3cAAATypeDomainScheme, h3cDomainSelfServiceURL=h3cDomainSelfServiceURL, h3cDomainIpPoolEntry=h3cDomainIpPoolEntry, h3cDomainInfoEntry=h3cDomainInfoEntry, h3cDomainMessengerLimitTime=h3cDomainMessengerLimitTime, h3cDomainIdleCutEnable=h3cDomainIdleCutEnable, h3cDomainSchemeRowStatus=h3cDomainSchemeRowStatus, h3cDomainIpPoolLen=h3cDomainIpPoolLen, h3cDomainName=h3cDomainName, h3cDomain=h3cDomain, h3cDomainIdleCutMaxTime=h3cDomainIdleCutMaxTime, H3cAccessModeofDomainScheme=H3cAccessModeofDomainScheme, h3cDomainRowStatus=h3cDomainRowStatus, h3cDomainAcctSchemeName=h3cDomainAcctSchemeName, h3cDomainVlanAssignMode=h3cDomainVlanAssignMode, h3cDomainIdleCutMinFlow=h3cDomainIdleCutMinFlow, h3cDomainSelfServiceEnable=h3cDomainSelfServiceEnable, h3cDomainControl=h3cDomainControl, h3cDomainMessengerEnable=h3cDomainMessengerEnable, h3cDomainSchemeAAAName=h3cDomainSchemeAAAName, h3cDomainIpPoolTable=h3cDomainIpPoolTable, h3cDomainAccFailureAction=h3cDomainAccFailureAction, h3cDomainIpPoolRowStatus=h3cDomainIpPoolRowStatus, h3cDomainIpPoolLowIpAddrType=h3cDomainIpPoolLowIpAddrType, H3cModeOfDomainScheme=H3cModeOfDomainScheme, h3cDomainDefault=h3cDomainDefault, h3cDomainSchemeTable=h3cDomainSchemeTable, h3cDomainMessengerSpanTime=h3cDomainMessengerSpanTime, h3cDomainSchemeEntry=h3cDomainSchemeEntry, h3cDomainSchemeAccessMode=h3cDomainSchemeAccessMode, h3cDomainSchemeMode=h3cDomainSchemeMode, PYSNMP_MODULE_ID=h3cDomain, h3cDomainAuthSchemeName=h3cDomainAuthSchemeName, h3cDomainTables=h3cDomainTables, h3cDomainIpPoolNum=h3cDomainIpPoolNum, h3cDomainInfoTable=h3cDomainInfoTable, h3cDomainCurrentAccessNum=h3cDomainCurrentAccessNum, h3cDomainSchemeAAAType=h3cDomainSchemeAAAType, h3cDomainIpPoolLowIpAddr=h3cDomainIpPoolLowIpAddr, h3cDomainMaxAccessNum=h3cDomainMaxAccessNum, h3cDomainSchemeIndex=h3cDomainSchemeIndex, h3cDomainState=h3cDomainState)
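# --- Editor's note: illustrative consumer snippet (not part of the generated module) ---
# A pysmi-compiled module like this one is normally loaded through pysnmp's MibBuilder rather
# than imported directly; the directory path below is a placeholder.
#
#   from pysnmp.smi import builder, view
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled-mibs'))
#   mibBuilder.loadModules('H3C-DOMAIN-MIB')
#   mibView = view.MibViewController(mibBuilder)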
|
python
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
def preprocess(array: np.ndarray):
""" Normalizes the supplied array and reshapes it into the appropriate format """
array = array.astype("float32")/255.0
array = np.reshape(array, (len(array), 28, 28, 1))
print("Final Shape:", array.shape)
return array
def noise(array):
""" Adds random noise to each image in the supplied array """
noise_factor = 0.5
noise_array = array + noise_factor * \
np.random.normal(loc=0.0, scale=1.0, size=array.shape)
return np.clip(noise_array, 0.0, 1.0)
def load_data(path="mnist.npz"):
""" Loading the data and applying the preprocessing steps """
with np.load("mnist.npz", allow_pickle=True) as f:
train_data, test_data = f['x_train'], f['x_test']
train_data = preprocess(train_data)
test_data = preprocess(test_data)
return train_data, test_data
train_data, test_data = load_data()
# create a copy of data with noise
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)
def build_model(input_shape=(28, 28, 1)):
""" Building the autoencoder model for mnist """
input = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, (3, 3), activation='relu',
padding='same', name="Conv1")(input)
x = layers.MaxPooling2D((2, 2), padding='same', name='Pool1')(x)
x = layers.Conv2D(32, (3, 3), activation='relu',
padding='same', name='Conv2')(x)
x = layers.MaxPooling2D((2, 2), padding='same', name='Pool2')(x)
# decoder
x = layers.Conv2DTranspose(
32, (3, 3), strides=2, activation='relu', padding='same', name="Conv1_transpose")(x)
x = layers.Conv2DTranspose(
32, (3, 3), strides=2, activation='relu', padding='same', name='Conv2_transpose')(x)
output = layers.Conv2D(1, (3, 3), activation='sigmoid',
padding='same', name="output_layer")(x)
autoencoder = keras.models.Model(input, output, name='AutoEncoder-Model')
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
return autoencoder
def train_model(checkpoint_dir="tmp", monitor="val_loss"):
autoencoder = build_model()
autoencoder.summary()
early_stopping = keras.callbacks.EarlyStopping(
monitor=monitor,
patience=5,
restore_best_weights=True)
model_checkpoint = keras.callbacks.ModelCheckpoint(
checkpoint_dir,
monitor=monitor,
verbose=0,
save_best_only=True,
save_weights_only=False,
mode="auto",
save_freq="epoch",
options=None)
autoencoder.fit(
x=noisy_train_data,
y=train_data,
epochs=100,
batch_size=128,
shuffle=True,
validation_data=(noisy_test_data, test_data),
callbacks=[early_stopping, model_checkpoint])
autoencoder.save('saved_model')
def display(array1, array2, n=10):
"""
Displays n random images from each of the supplied arrays.
args:
n: Number of image pairs to show
"""
indices = np.random.randint(len(array1), size=n)
images1 = array1[indices, :]
images2 = array2[indices, :]
plt.figure(figsize=(20, 4))
for i, (image1, image2) in enumerate(zip(images1, images2)):
ax = plt.subplot(2, n, i + 1)
plt.imshow(image1.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(image2.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def show_output():
""" function for showing the output """
try:
    # load the model saved by train_model()
    autoencoder = keras.models.load_model("saved_model")
except Exception:
    print("There is no saved model. Please train the model first, then use the run command.")
    return
predictions = autoencoder.predict(noisy_test_data)
display(noisy_test_data, predictions, n=10)
if __name__ == '__main__':
try:
if sys.argv[1] == "train":
train_model()
if sys.argv[1] == "run":
show_output()
except Exception:
print("Please Use train and run argument to run the process. check the Readme for more details")
|
python
|
import math
from time import sleep
from timeit import default_timer as timer
LMS8001_C1_STEP=1.2e-12
LMS8001_C2_STEP=10.0e-12
LMS8001_C3_STEP=1.2e-12
LMS8001_C2_FIX=150.0e-12
LMS8001_C3_FIX=5.0e-12
LMS8001_R2_0=24.6e3
LMS8001_R3_0=14.9e3
class PLL_METHODS(object):
def __init__(self, chip, fRef):
self.chip = chip
self.fRef = fRef
def estim_KVCO(self, FIT_KVCO=True, PROFILE=0):
# Check VCO_SEL and VCO_FREQ
reg_pll_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_pll_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
vco_sel=reg_pll_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']
vco_freq=reg_pll_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']
if not (FIT_KVCO):
# Use Average for KVCO in Calculations
if (vco_sel==1):
KVCO_avg=44.404e6
elif (vco_sel==2):
KVCO_avg=33.924e6
elif (vco_sel==3):
KVCO_avg=41.455e6
else:
self.chip.log('Ext. LO selected in PLL_PROFILE.')
return None
else:
# Use Fitted Values for KVCO in Calculations
# Changed on 17.05.2017. with new results
# Following equations fitted for VTUNE=0.7 V
CBANK=vco_freq
if (vco_sel==1):
KVCO_avg=27.296e6 * (2.26895e-10*CBANK**4+4.98467e-9*CBANK**3+9.01884e-6*CBANK**2+3.69804e-3*CBANK**1+1.01283e+00)
elif (vco_sel==2):
KVCO_avg=23.277e6 * (8.38795e-11*CBANK**4+2.20202e-08*CBANK**3+3.68009e-06*CBANK**2+3.22264e-03*CBANK**1+1.01093e+00)
elif (vco_sel==3):
KVCO_avg=29.110e6 * (-1.54988e-11*CBANK**4+4.27489e-08*CBANK**3+5.26971e-06*CBANK**2+2.83453e-03*CBANK**1+9.94192e-01)
else:
self.chip.log('Ext. LO selected in PLL_PROFILE.')
return None
return KVCO_avg
def calc_ideal_LPF(self, fc, PM_deg, Icp, KVCO_HzV, N, gamma=1.045, T31=0.1):
PM_rad=PM_deg*math.pi/180
wc=2*math.pi*fc
Kphase=Icp/(2*math.pi)
Kvco=2*math.pi*KVCO_HzV
# Approx. formula, Dean Banerjee
T1=(1.0/math.cos(PM_rad)-math.tan(PM_rad))/(wc*(1+T31))
T3=T1*T31
T2=gamma/((wc**2)*(T1+T3))
A0=(Kphase*Kvco)/((wc**2)*N)*math.sqrt((1+(wc**2)*(T2**2))/((1+(wc**2)*(T1**2))*(1+(wc**2)*(T3**2))))
A2=A0*T1*T3
A1=A0*(T1+T3)
C1=A2/(T2**2)*(1+math.sqrt(1+T2/A2*(T2*A0-A1)))
C3=(-(T2**2)*(C1**2)+T2*A1*C1-A2*A0)/((T2**2)*C1-A2)
C2=A0-C1-C3
R2=T2/C2
R3=A2/(C1*C3*T2)
LPF_vals=dict()
LPF_vals['C1']=C1
LPF_vals['C2']=C2
LPF_vals['C3']=C3
LPF_vals['R2']=R2
LPF_vals['R3']=R3
return LPF_vals
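# Illustrative example (editor's note; the instance name `pll` and all numeric values are
# assumptions): for a target crossover frequency fc=100 kHz, 50 deg phase margin, Icp=500 uA,
# KVCO=40 MHz/V and N=100, calc_ideal_LPF() returns the continuous C1/C2/C3/R2/R3 values of the
# third-order passive loop filter, which calc_real_LPF() below then maps onto the quantized
# LMS8001 component codes:
#
#   lpf = pll.calc_ideal_LPF(fc=100e3, PM_deg=50, Icp=500e-6, KVCO_HzV=40e6, N=100)
#   ok, codes = pll.calc_real_LPF(lpf)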
def calc_real_LPF(self, LPF_IDEAL_VALS):
C1_cond=(LMS8001_C1_STEP<=LPF_IDEAL_VALS['C1']<=15*LMS8001_C1_STEP)
C2_cond=(LMS8001_C2_FIX<=LPF_IDEAL_VALS['C2']<=LMS8001_C2_FIX+15*LMS8001_C2_STEP)
C3_cond=(LMS8001_C3_FIX+LMS8001_C3_STEP<=LPF_IDEAL_VALS['C3']<=LMS8001_C3_FIX+15*LMS8001_C3_STEP)
R2_cond=(LMS8001_R2_0/15.0<=LPF_IDEAL_VALS['R2']<=LMS8001_R2_0)
R3_cond=(LMS8001_R3_0/15.0<=LPF_IDEAL_VALS['R3']<=LMS8001_R3_0)
LPFvals_OK=(C1_cond and C2_cond and C3_cond and R2_cond and R3_cond)
LPF_REAL_VALS=dict()
if (LPFvals_OK):
C1_CODE=int(round(LPF_IDEAL_VALS['C1']/LMS8001_C1_STEP))
C2_CODE=int(round((LPF_IDEAL_VALS['C2']-LMS8001_C2_FIX)/LMS8001_C2_STEP))
C3_CODE=int(round((LPF_IDEAL_VALS['C3']-LMS8001_C3_FIX)/LMS8001_C3_STEP))
C1_CODE=int(min(max(C1_CODE,0),15))
C2_CODE=int(min(max(C2_CODE,0),15))
C3_CODE=int(min(max(C3_CODE,0),15))
R2_CODE=int(round(LMS8001_R2_0/LPF_IDEAL_VALS['R2']))
R3_CODE=int(round(LMS8001_R3_0/LPF_IDEAL_VALS['R3']))
R2_CODE=min(max(R2_CODE,1),15)
R3_CODE=min(max(R3_CODE,1),15)
LPF_REAL_VALS['C1_CODE']=C1_CODE
LPF_REAL_VALS['C2_CODE']=C2_CODE
LPF_REAL_VALS['C3_CODE']=C3_CODE
LPF_REAL_VALS['R2_CODE']=R2_CODE
LPF_REAL_VALS['R3_CODE']=R3_CODE
return (LPFvals_OK, LPF_REAL_VALS)
def setSDM(self, DITHER_EN=0, SEL_SDMCLK=0, REV_SDMCLK=0, PROFILE=0):
# Sets Sigma-Delta Modulator Config.
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_PLL_SDM_CFG=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
reg_PLL_SDM_CFG['DITHER_EN_'+str(PROFILE)]=DITHER_EN
reg_PLL_SDM_CFG['SEL_SDMCLK_'+str(PROFILE)]=SEL_SDMCLK
reg_PLL_SDM_CFG['REV_SDMCLK_'+str(PROFILE)]=REV_SDMCLK
self.chip.setImmediateMode(Imd_Mode)
def setVCOBIAS(self, EN=0, BYP_VCOREG=1, CURLIM_VCOREG=1, SPDUP_VCOREG=0, VDIV_VCOREG=32):
"""Sets VCO Bias Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
regVCOBIAS=self.chip.getRegisterByName('PLL_VREG')
regVCOBIAS['EN_VCOBIAS']=EN
regVCOBIAS['BYP_VCOREG']=BYP_VCOREG
regVCOBIAS['CURLIM_VCOREG']=CURLIM_VCOREG
regVCOBIAS['SPDUP_VCOREG']=SPDUP_VCOREG
regVCOBIAS['VDIV_VCOREG<7:0>']=VDIV_VCOREG
self.chip.setImmediateMode(Imd_Mode)
def setSPDUP_VCO(self, SPDUP=0, PROFILE=0):
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_VCO_CFG=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_VCO_CFG['SPDUP_VCO_'+str(PROFILE)]=SPDUP
self.chip.setImmediateMode(Imd_Mode)
def setSPDUP_VCOREG(self, SPDUP=0):
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
regVCOBIAS=self.chip.getRegisterByName('PLL_VREG')
regVCOBIAS['SPDUP_VCOREG']=SPDUP
self.chip.setImmediateMode(Imd_Mode)
def setXBUF(self, EN=0, BYPEN=0, SLFBEN=1):
"""Sets XBUF Configuration"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
regXBUF=self.chip.getRegisterByName('PLL_CFG_XBUF')
regXBUF['PLL_XBUF_EN']=EN
regXBUF['PLL_XBUF_SLFBEN']=SLFBEN
regXBUF['PLL_XBUF_BYPEN']=BYPEN
self.chip.setImmediateMode(Imd_Mode)
def setCP(self, PULSE=4, OFS=0, ICT_CP=16, PROFILE=0):
"""Sets CP Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_CP_CFG0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
reg_CP_CFG0['PULSE_'+str(PROFILE)+'<5:0>']=PULSE
reg_CP_CFG0['OFS_'+str(PROFILE)+'<5:0>']=OFS
reg_CP_CFG1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
reg_CP_CFG1['ICT_CP_'+str(PROFILE)+'<4:0>']=ICT_CP
reg_PLL_ENABLE=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
if (OFS>0):
reg_PLL_ENABLE['PLL_EN_CPOFS_'+str(PROFILE)]=1
else:
reg_PLL_ENABLE['PLL_EN_CPOFS_'+str(PROFILE)]=0
self.chip.setImmediateMode(Imd_Mode)
def getCP(self, PROFILE=0):
"""Returns CP Parameters"""
d=dict()
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_CP_CFG0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
d["PULSE"]=reg_CP_CFG0['PULSE_'+str(PROFILE)+'<5:0>']
d["OFS"]=reg_CP_CFG0['OFS_'+str(PROFILE)+'<5:0>']
reg_CP_CFG1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
d["ICT_CP"]=reg_CP_CFG1['ICT_CP_'+str(PROFILE)+'<4:0>']
reg_PLL_ENABLE=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
d["EN_CPOFS"]=reg_PLL_ENABLE['PLL_EN_CPOFS_'+str(PROFILE)]
self.chip.setImmediateMode(Imd_Mode)
return d
def setCP_FLOCK(self, PULSE=4, OFS=0, PROFILE=0):
reg_pll_flock_cfg2=self.chip.getRegisterByName('PLL_FLOCK_CFG2_'+str(PROFILE))
reg_pll_flock_cfg2['FLOCK_PULSE_'+str(PROFILE)+'<5:0>']=int(PULSE)
reg_pll_flock_cfg2['FLOCK_OFS_'+str(PROFILE)+'<5:0>']=int(OFS)
def setLD(self, LD_VCT=2, PROFILE=0):
"""Sets Lock-Detector's Comparator Threashold"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
reg_pll_enable['PLL_EN_LD_'+str(PROFILE)]=1
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
reg_pll_cp_cfg1['LD_VCT_'+str(PROFILE)+'<1:0>']=LD_VCT
self.chip.setImmediateMode(Imd_Mode)
def setPFD(self, DEL=0, FLIP=0, PROFILE=0):
"""Sets PFD Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_CP_CFG0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
reg_CP_CFG0['FLIP_'+str(PROFILE)]=FLIP
reg_CP_CFG0['DEL_'+str(PROFILE)+'<1:0>']=DEL
self.chip.setImmediateMode(Imd_Mode)
def setVTUNE_VCT(self, VTUNE_VCT, PROFILE=0):
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
def openPLL(self, VTUNE_VCT=2, PROFILE=0, dbgMode=False):
"""Breaks the PLL Loop and sets the fixed VCO tuning voltage"""
VTUNE_VCT=int(VTUNE_VCT)
VTUNE_DICT={0:300, 1:600, 2:750, 3:900}
if (VTUNE_VCT>3):
VTUNE_VCT=3
elif (VTUNE_VCT<0):
VTUNE_VCT=0
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['LPFSW_'+str(PROFILE)]=1
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
if (dbgMode):
self.chip.log("PLL Loop Broken. VTUNE=%.2f mV" %(VTUNE_DICT[VTUNE_VCT]))
def closePLL(self, PROFILE=0):
"""Closes PLL Loop"""
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['LPFSW_'+str(PROFILE)]=0
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=2
def setVCO(self, SEL=3, FREQ=128, AMP=1, VCO_AAC_EN=True, VDIV_SWVDD=2, PROFILE=0):
"""Sets VCO Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_VCO_FREQ=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_VCO_FREQ['VCO_FREQ_'+str(PROFILE)+'<7:0>']=FREQ
reg_VCO_CFG=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
if (VCO_AAC_EN):
reg_VCO_CFG['VCO_AAC_EN_'+str(PROFILE)]=1
else:
reg_VCO_CFG['VCO_AAC_EN_'+str(PROFILE)]=0
reg_VCO_CFG['VCO_SEL_'+str(PROFILE)+'<1:0>']=SEL
reg_VCO_CFG['VCO_AMP_'+str(PROFILE)+'<6:0>']=AMP
reg_VCO_CFG['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']=VDIV_SWVDD
self.chip.setImmediateMode(Imd_Mode)
def setFFDIV(self, FFMOD=0, PROFILE=0):
"""Sets FF-DIV Modulus"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_PLL_ENABLE=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
if (FFMOD>0):
reg_PLL_ENABLE['PLL_EN_FFCORE_'+str(PROFILE)]=1
else:
reg_PLL_ENABLE['PLL_EN_FFCORE_'+str(PROFILE)]=0
reg_FF_CFG=self.chip.getRegisterByName('PLL_FF_CFG_'+str(PROFILE))
if (FFMOD>0):
reg_FF_CFG['FFDIV_SEL_'+str(PROFILE)]=1
else:
reg_FF_CFG['FFDIV_SEL_'+str(PROFILE)]=0
reg_FF_CFG['FFCORE_MOD_'+str(PROFILE)+'<1:0>']=FFMOD
reg_FF_CFG['FF_MOD_'+str(PROFILE)+'<1:0>']=FFMOD
self.chip.setImmediateMode(Imd_Mode)
def setFBDIV(self, N_INT, N_FRAC, IntN_Mode=False, PROFILE=0):
"""Sets FB-DIV Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_SDM_CFG=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
if (IntN_Mode):
reg_SDM_CFG['INTMOD_EN_'+str(PROFILE)]=1
N_FRAC_H=0
N_FRAC_L=0
else:
reg_SDM_CFG['INTMOD_EN_'+str(PROFILE)]=0
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
#if (DITHER_EN):
# reg_SDM_CFG['DITHER_EN_'+str(PROFILE)]=1
#else:
# reg_SDM_CFG['DITHER_EN_'+str(PROFILE)]=0
reg_SDM_CFG['INTMOD_'+str(PROFILE)+'<9:0>']=N_INT
reg_FRACMODL=self.chip.getRegisterByName('PLL_FRACMODL_'+str(PROFILE))
reg_FRACMODL['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_FRACMODH=self.chip.getRegisterByName('PLL_FRACMODH_'+str(PROFILE))
reg_FRACMODH['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
self.chip.setImmediateMode(Imd_Mode)
def setLPF(self, C1=8, C2=8, R2=1, C3=8, R3=1, PROFILE=0):
"""Sets LPF Element Values"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_PLL_LPF_CFG1=self.chip.getRegisterByName('PLL_LPF_CFG1_'+str(PROFILE))
reg_PLL_LPF_CFG1['R3_'+str(PROFILE)+'<3:0>']=R3
reg_PLL_LPF_CFG1['R2_'+str(PROFILE)+'<3:0>']=R2
reg_PLL_LPF_CFG1['C2_'+str(PROFILE)+'<3:0>']=C2
reg_PLL_LPF_CFG1['C1_'+str(PROFILE)+'<3:0>']=C1
reg_PLL_LPF_CFG2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_PLL_LPF_CFG2['C3_'+str(PROFILE)+'<3:0>']=C3
self.chip.setImmediateMode(Imd_Mode)
def setLPF_FLOCK(self, C1=8, C2=8, R2=1, C3=8, R3=1, PROFILE=0):
reg_pll_flock_cfg1=self.chip.getRegisterByName('PLL_FLOCK_CFG1_'+str(PROFILE))
reg_pll_flock_cfg1['FLOCK_R3_'+str(PROFILE)+'<3:0>']=int(R3)
reg_pll_flock_cfg1['FLOCK_R2_'+str(PROFILE)+'<3:0>']=int(R2)
reg_pll_flock_cfg1['FLOCK_C1_'+str(PROFILE)+'<3:0>']=int(C1)
reg_pll_flock_cfg1['FLOCK_C2_'+str(PROFILE)+'<3:0>']=int(C2)
reg_pll_flock_cfg2=self.chip.getRegisterByName('PLL_FLOCK_CFG2_'+str(PROFILE))
reg_pll_flock_cfg2['FLOCK_C3_'+str(PROFILE)+'<3:0>']=int(C3)
def setLODIST(self, channel, EN=True, EN_FLOCK=False, IQ=True, phase=0, PROFILE=0):
"""Sets LODIST Configuration"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
channel_dict={'A':0, 'B':1, 'C':2, 'D':3}
phase_dict={0:0, 90:1, 180:2, 270:3}
if (channel not in channel_dict.keys()):
self.chip.log("Not valid LO-DIST channel name.")
return None
if (phase not in phase_dict.keys()):
self.chip.log("Not valid LO-DIST phase value.")
return None
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
reg_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
val_old=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']
if (EN):
reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=val_old|int(2**channel_dict[channel])
reg_pll_enable['PLL_LODIST_EN_BIAS_'+str(PROFILE)]=1
else:
reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=val_old&(15-int(2**channel_dict[channel]))
# Disable LO DIST Bias if not needed
if (reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']==0):
reg_pll_enable['PLL_LODIST_EN_BIAS_'+str(PROFILE)]=0
reg_pll_enable['PLL_LODIST_EN_DIV2IQ_'+str(PROFILE)]=0
if (IQ==True):
reg_lodist_cfg['PLL_LODIST_FSP_OUT'+str(channel_dict[channel])+'_'+str(PROFILE)+'<2:0>']=phase_dict[phase]
reg_pll_enable['PLL_LODIST_EN_DIV2IQ_'+str(PROFILE)]=1
else:
reg_lodist_cfg['PLL_LODIST_FSP_OUT'+str(channel_dict[channel])+'_'+str(PROFILE)+'<2:0>']=phase_dict[phase]+4
A_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT0_'+str(PROFILE)+'<2:0>']
A_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&1
B_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT1_'+str(PROFILE)+'<2:0>']
B_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&2
C_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT2_'+str(PROFILE)+'<2:0>']
C_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&4
D_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT3_'+str(PROFILE)+'<2:0>']
D_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&8
# Disable DivBy2 IQ Gen. Core if not needed
if ((A_IQ>=4 or A_EN==0) and (B_IQ>=4 or B_EN==0) and (C_IQ>=4 or C_EN==0) and (D_IQ>=4 or D_EN==0)):
reg_pll_enable['PLL_LODIST_EN_DIV2IQ_'+str(PROFILE)]=0
# Enable Output of desired LO channel during the Fast-Lock Operating Mode of LMS8001-PLL if EN_FLOCK=True
if (EN_FLOCK):
if (channel=='A'):
LO_FLOCK_EN_MASK=1
elif (channel=='B'):
LO_FLOCK_EN_MASK=2
elif (channel=='C'):
LO_FLOCK_EN_MASK=4
else:
LO_FLOCK_EN_MASK=8
else:
LO_FLOCK_EN_MASK=0
reg_pll_flock_cfg3=self.chip.getRegisterByName('PLL_FLOCK_CFG3_'+str(PROFILE))
LO_FLOCK_EN=reg_pll_flock_cfg3['FLOCK_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']
reg_pll_flock_cfg3['FLOCK_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=LO_FLOCK_EN | LO_FLOCK_EN_MASK
# Set Back to initial value of ImmediateMode
self.chip.setImmediateMode(Imd_Mode)
def setFLOCK(self, BWEF, LoopBW=600.0e3, PM=50.0, FLOCK_N=200, Ch_EN=[], METHOD='SIMPLE', FIT_KVCO=True, FLOCK_VCO_SPDUP=1, PROFILE=0, dbgMode=False):
"""
Automatically calculates Fast-Lock Mode parameters from BWEF argument. BWEF-BandWidth Extension Factor
METHOD='SIMPLE'
Clips charge pump current settings in Fast-Lock Operating Mode if ICP_NORMAL*BWEF^2 is greater than (ICP)max.
Only changes the values of LoopFilter resistors during Fast-Lock mode.
Capacitor values are the same as in NORMAL operating mode.
METHOD=='SMART'
Takes the phase-margin argument PM to calculate LoopFilter elements and maximum pulse CP current which will give
the PLL loop bandwidth value of LoopBW with desired phase margin PM.
"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
LO_OUT_EN=0
if ('A' in Ch_EN):
LO_OUT_EN+=1
if ('B' in Ch_EN):
LO_OUT_EN+=2
if ('C' in Ch_EN):
LO_OUT_EN+=4
if ('D' in Ch_EN):
LO_OUT_EN+=8
if (METHOD not in ['SIMPLE', 'SMART']):
self.chip.log("Bad Fast-Lock Mode Optimization Method. METHOD='SIMPLE' or METHOD='SMART'.")
return False
reg_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
PULSE=reg_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
OFS=reg_cp_cfg0['OFS_'+str(PROFILE)+'<5:0>']
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
ICT_CP_INIT=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
reg_pll_flock_cfg3=self.chip.getRegisterByName('PLL_FLOCK_CFG3_'+str(PROFILE))
reg_pll_flock_cfg3['FLOCK_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=LO_OUT_EN
reg_pll_flock_cfg3['FLOCK_VCO_SPDUP_'+str(PROFILE)]=0
reg_pll_flock_cfg3['FLOCK_N_'+str(PROFILE)+'<9:0>']=min(FLOCK_N, 1023)
reg_pll_flock_cfg3['FLOCK_VCO_SPDUP_'+str(PROFILE)]=FLOCK_VCO_SPDUP
if (METHOD=='SIMPLE'):
reg_lpf_cfg1=self.chip.getRegisterByName('PLL_LPF_CFG1_'+str(PROFILE))
R3=reg_lpf_cfg1['R3_'+str(PROFILE)+'<3:0>']
R2=reg_lpf_cfg1['R2_'+str(PROFILE)+'<3:0>']
C1=reg_lpf_cfg1['C1_'+str(PROFILE)+'<3:0>']
C2=reg_lpf_cfg1['C2_'+str(PROFILE)+'<3:0>']
reg_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
C3=reg_lpf_cfg2['C3_'+str(PROFILE)+'<3:0>']
R3_FLOCK=min(round(R3*BWEF), 15)
# R3_FLOCK=min(round(R3*math.sqrt(BWEF)), 15)
R2_FLOCK=min(round(R2*BWEF), 15)
PULSE_FLOCK=min(round(PULSE*BWEF**2), 63)
#PULSE_FLOCK=min(round(PULSE*BWEF), 63)
OFS_FLOCK=min(round(OFS*PULSE_FLOCK/PULSE), 63)
#OFS_FLOCK=OFS
self.setLPF_FLOCK(C1=C1, C2=C2, R2=R2_FLOCK, C3=C3, R3=R3_FLOCK, PROFILE=PROFILE)
self.setCP_FLOCK(PULSE=PULSE_FLOCK, OFS=OFS_FLOCK, PROFILE=PROFILE)
else:
fc=LoopBW/1.65
# Sweep CP PULSE values and find the first one that results in implementable LPF values for the desired PLL dynamics in Fast-Lock Mode
cp_pulse_vals=list(range(PULSE,64))
cp_pulse_vals.reverse()
# Estimate the value of KVCO for settings in the PLL Profile PROFILE
KVCO_avg=self.estim_KVCO(FIT_KVCO=FIT_KVCO, PROFILE=PROFILE)
# Read Feedback-Divider Modulus
N=self.getNDIV(PROFILE=PROFILE)
for cp_pulse in cp_pulse_vals:
# Calculate CP Current Value
Icp=ICT_CP_INIT*25.0e-6/16.0*cp_pulse
gamma=1.045
T31=0.1
LPF_IDEAL_VALS=self.calc_ideal_LPF(fc=fc, PM_deg=PM, Icp=Icp, KVCO_HzV=KVCO_avg, N=N, gamma=gamma, T31=T31)
(LPFvals_OK, LPF_REAL_VALS)=self.calc_real_LPF(LPF_IDEAL_VALS)
if (LPFvals_OK):
# Set CP Pulse Current to the optimized value
self.setCP_FLOCK(PULSE=cp_pulse, OFS=min(round(OFS*cp_pulse/PULSE),63), PROFILE=PROFILE)
# self.setCP_FLOCK(PULSE=cp_pulse, OFS=0, PROFILE=PROFILE)
# Set LPF Components to the optimized values
self.setLPF_FLOCK(C1=LPF_REAL_VALS['C1_CODE'], C2=LPF_REAL_VALS['C2_CODE'], R2=LPF_REAL_VALS['R2_CODE'], C3=LPF_REAL_VALS['C3_CODE'], R3=LPF_REAL_VALS['R3_CODE'], PROFILE=PROFILE)
if (dbgMode):
self.chip.log('PLL LoopBW Optimization finished successfully.')
self.chip.log('-'*45)
self.chip.log('\tIcp=%.2f uA' %(Icp/1.0e-6))
self.chip.log('\tUsed Value for KVCO=%.2f MHz/V' %(KVCO_avg/1.0e6))
self.chip.log('\tNDIV=%.2f' % (N))
self.chip.log('-'*45)
self.chip.log('')
self.chip.log('Ideal LPF Values')
self.chip.log('-'*45)
self.chip.log('\tC1= %.2f pF' %(LPF_IDEAL_VALS['C1']/1.0e-12))
self.chip.log('\tC2= %.2f pF' %(LPF_IDEAL_VALS['C2']/1.0e-12))
self.chip.log('\tR2= %.2f kOhm' %(LPF_IDEAL_VALS['R2']/1.0e3))
self.chip.log('\tC3= %.2f pF' %(LPF_IDEAL_VALS['C3']/1.0e-12))
self.chip.log('\tR3= %.2f kOhm' %(LPF_IDEAL_VALS['R3']/1.0e3))
self.chip.log('')
    self.chip.setImmediateMode(Imd_Mode)
    return True
# No CP pulse setting produced implementable LPF values for the requested dynamics
self.chip.log('Fast-Lock LoopBW optimization failed: no implementable LPF values found.')
self.chip.setImmediateMode(Imd_Mode)
return False
def disablePLL(self, PROFILE=0):
"""Disables PLL Blocks, XBUF and VCO Bias"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Disable VCO-BIAS
self.setVCOBIAS(EN=0)
# Disable XBUF
self.setXBUF(EN=0)
# Disable PLL core circuits
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
reg_pll_enable['PLL_EN_VTUNE_COMP_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_LD_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_PFD_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_CP_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_CPOFS_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_VCO_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FFDIV_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FBDIV_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]=0
reg_pll_enable['PLL_SDM_CLK_EN_'+str(PROFILE)]=0
self.chip.setImmediateMode(Imd_Mode)
def enablePLL(self, PDIV2=False, IntN_Mode=False, XBUF_SLFBEN=1, PROFILE=0):
"""Enables VCO Bias, XBUF and PLL Blocks"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Define PLL Config
# Enable VCO Biasing Block
reg_pll_vreg=self.chip.getRegisterByName("PLL_VREG")
reg_pll_vreg['EN_VCOBIAS']=1
# Enable XBUF
# Sets SLFBEN, when TCXO is AC-coupled to LMS8001 IC REFIN
reg_cfg_xbuf=self.chip.getRegisterByName("PLL_CFG_XBUF")
reg_cfg_xbuf['PLL_XBUF_EN']=1
reg_cfg_xbuf['PLL_XBUF_SLFBEN']=XBUF_SLFBEN
# Define Desired PLL Profile
# Enable Blocks
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
reg_pll_enable['PLL_EN_VTUNE_COMP_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_LD_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_PFD_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_CP_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_VCO_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_FFDIV_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_FBDIV_'+str(PROFILE)]=1
if (PDIV2):
reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]=1
else:
reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FBDIV_'+str(PROFILE)]=1
if (IntN_Mode):
reg_pll_enable['PLL_SDM_CLK_EN_'+str(PROFILE)]=0
else:
reg_pll_enable['PLL_SDM_CLK_EN_'+str(PROFILE)]=1
self.chip.setImmediateMode(Imd_Mode)
def calc_fbdiv(self, F_TARGET, IntN_Mode, PDIV2):
"""Calculates Configuration Parameters for FB-DIV for targeted VCO Frequency"""
if (PDIV2):
N_FIX=2.0
else:
N_FIX=1.0
# Integer-N or Fractional-N Mode
if (IntN_Mode):
N_INT=round(F_TARGET/N_FIX/self.fRef)
N_FRAC=0
else:
N_INT=int(math.floor(F_TARGET/N_FIX/self.fRef))
N_FRAC=int(round(2**20*(F_TARGET/N_FIX/self.fRef-N_INT)))
return (N_INT, N_FRAC, N_FIX)
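# Worked example (editor's note, fRef assumed to be 40 MHz): for F_TARGET = 5.5 GHz in
# fractional-N mode without the /2 prescaler, F_TARGET/fRef = 137.5, so
#   N_INT  = 137
#   N_FRAC = round(2**20 * 0.5) = 524288
# and the synthesized frequency is exactly N_FIX*(N_INT + N_FRAC/2**20)*fRef = 5.5 GHz.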
def vco_auto_ctune(self, F_TARGET, PROFILE=0, XBUF_SLFBEN=1, IntN_Mode=False, PDIV2=False, VTUNE_VCT=1, VCO_SEL_FORCE=0, VCO_SEL_INIT=2, FREQ_INIT_POS=7, FREQ_INIT=0, FREQ_SETTLING_N=4, VTUNE_WAIT_N=128, VCO_SEL_FREQ_MAX=250, VCO_SEL_FREQ_MIN=5, dbgMode=False):
"""Performs VCO Coarse Frequency Tuning Using On-Chip LMS8001 IC Calibration State-Machine"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Store the current PLL Profile Index before proceeding to the new one for configuration
PROFILE_OLD=self.chip.PLL.ACTIVE_PROFILE
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Determine the FB-DIV configuration for targeted VCO frequency and self.fRef reference frequency
(N_INT, N_FRAC, N_FIX)=self.calc_fbdiv(F_TARGET, IntN_Mode, PDIV2)
# The exact targeted VCO frequency that will be used in the automatic coarse-tune algorithm
# If IntN-Mode is chosen, VCO will be locked to the closest integer multiple of reference frequency
FVCO_TARGET=N_FIX*(N_INT+N_FRAC/2.0**20)*self.fRef
# Calculate the fractional division words
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
# Enable PLL
self.enablePLL(PDIV2, IntN_Mode, XBUF_SLFBEN, PROFILE)
# Define VCO
reg_vco_cfg=self.chip.getRegisterByName("PLL_VCO_CFG_"+str(PROFILE))
# Set the VCO tuning voltage value during coarse-tuning
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
# Define SDM & FB-DIV Modulus
reg_sdm_cfg=self.chip.getRegisterByName("PLL_SDM_CFG_"+str(PROFILE))
if (IntN_Mode or N_FRAC==0):
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=1
else:
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=0
reg_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']=int(N_INT)
reg_fracmod_l=self.chip.getRegisterByName("PLL_FRACMODL_"+str(PROFILE))
reg_fracmod_l['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_fracmod_h=self.chip.getRegisterByName("PLL_FRACMODH_"+str(PROFILE))
reg_fracmod_h['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
# Reset PLL, Enable Calibration Mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=1
reg_pll_cfg['CTUNE_RES<1:0>']=3
# Write VCO AUTO-CAL Registers
reg_pll_cal_auto1=self.chip.getRegisterByName('PLL_CAL_AUTO1')
reg_pll_cal_auto1['VCO_SEL_FORCE']=VCO_SEL_FORCE
reg_pll_cal_auto1['VCO_SEL_INIT<1:0>']=VCO_SEL_INIT
reg_pll_cal_auto1['FREQ_INIT_POS<2:0>']=FREQ_INIT_POS
reg_pll_cal_auto1['FREQ_INIT<7:0>']=FREQ_INIT
reg_pll_cal_auto2=self.chip.getRegisterByName('PLL_CAL_AUTO2')
reg_pll_cal_auto2['FREQ_SETTLING_N<3:0>']=FREQ_SETTLING_N
reg_pll_cal_auto2['VTUNE_WAIT_N<7:0>']=VTUNE_WAIT_N
reg_pll_cal_auto3=self.chip.getRegisterByName('PLL_CAL_AUTO3')
reg_pll_cal_auto3['VCO_SEL_FREQ_MAX<7:0>']=VCO_SEL_FREQ_MAX
reg_pll_cal_auto3['VCO_SEL_FREQ_MIN<7:0>']=VCO_SEL_FREQ_MIN
# Start VCO Auto-Tuning Process
reg_pll_cal_auto0=self.chip.getRegisterByName('PLL_CAL_AUTO0')
reg_pll_cal_auto0['FCAL_START']=1
# Wait for VCO Auto-Tuning to Finish
while(True):
reg_pll_cal_auto0=self.chip.getRegisterByName('PLL_CAL_AUTO0')
if (reg_pll_cal_auto0['FCAL_START']==0):
break
# Evaluate Calibration Results
reg_pll_cal_auto0=self.chip.getRegisterByName('PLL_CAL_AUTO0')
if (reg_pll_cal_auto0['VCO_SEL_FINAL_VAL'] and reg_pll_cal_auto0['FREQ_FINAL_VAL']):
VCO_SEL_FINAL=reg_pll_cal_auto0['VCO_SEL_FINAL<1:0>']
VCO_FREQ_FINAL=reg_pll_cal_auto0['FREQ_FINAL<7:0>']
else:
self.chip.log("Calibration Failed!!!!")
return False
# Disable Calibration
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_CALIBRATION_EN']=0
# Write Calibration Results to the Dedicated VCO Registers in the Chosen Profile
reg_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=VCO_FREQ_FINAL
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']=VCO_SEL_FINAL
if (dbgMode):
self.chip.log("Calibration Done!!!")
self.chip.log("Configured PLL Profile=%d" %(PROFILE))
self.chip.log("Target VCO Frequency [MHz]= %.5f" %(FVCO_TARGET/1.0e6))
self.chip.log("Frequency Error [Hz]= %.2e" %(abs(FVCO_TARGET-F_TARGET)))
self.chip.log("VCO_SEL_FINAL= %d" %(VCO_SEL_FINAL))
self.chip.log("VCO_FREQ_FINAL= %d" %(VCO_FREQ_FINAL))
self.chip.log('')
self.chip.log('')
if (dbgMode):
self.chip.PLL.infoLOCK()
# Go back to the initial PLL profile
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE_OLD
self.chip.setImmediateMode(Imd_Mode)
return True
def vco_manual_cloop_tune(self, F_TARGET, PROFILE=0, XBUF_SLFBEN=1, IntN_Mode=False, PDIV2=False, dbgMode=False):
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Store the current PLL Profile Index before proceeding to the new one for configuration
PROFILE_OLD=self.chip.PLL.ACTIVE_PROFILE
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Determine the FB-DIV configuration for targeted VCO frequency and self.fRef reference frequency
(N_INT, N_FRAC, N_FIX)=self.calc_fbdiv(F_TARGET, IntN_Mode, PDIV2)
# The exact targeted VCO frequency that will be used in the automatic coarse-tune algorithm
# If IntN-Mode is chosen, VCO will be locked to the closest integer multiple of reference frequency
FVCO_TARGET=N_FIX*(N_INT+N_FRAC/2.0**20)*self.fRef
# Calculate the fractional division words
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
# Enable PLL
self.enablePLL(PDIV2, IntN_Mode, XBUF_SLFBEN, PROFILE)
# Define VCO
reg_vco_cfg=self.chip.getRegisterByName("PLL_VCO_CFG_"+str(PROFILE))
# Define SDM & FB-DIV Modulus
reg_sdm_cfg=self.chip.getRegisterByName("PLL_SDM_CFG_"+str(PROFILE))
if (IntN_Mode or N_FRAC==0):
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=1
else:
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=0
reg_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']=int(N_INT)
reg_fracmod_l=self.chip.getRegisterByName("PLL_FRACMODL_"+str(PROFILE))
reg_fracmod_l['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_fracmod_h=self.chip.getRegisterByName("PLL_FRACMODH_"+str(PROFILE))
reg_fracmod_h['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
# Reset PLL, Enable Manual Calibration Mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=1
reg_pll_cfg['PLL_CALIBRATION_MODE']=1
reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# 1st step is to determine the correct VCO core for targeted frequency
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=2
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=15
sleep(0.01) # wait 10ms for PLL loop to settle
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
if (reg_pll_status['VTUNE_LOW']==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=1
else:
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=240
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
if (reg_pll_status['VTUNE_HIGH']==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=3
# 2nd step is to determine optimal cap bank configuration of selected VCO core for the targeted frequency value
freq_low=0
freq_high=255
freq=int((freq_high+freq_low+1)/2)
iter_num=0
while (freq_low<freq_high and iter_num<=8):
iter_num+=1
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
if (reg_pll_status['VTUNE_HIGH']==1):
freq_low=freq
freq=int((freq_high+freq_low+1)/2.0)
elif (reg_pll_status['VTUNE_LOW']==1):
freq_high=freq
freq=int((freq_high+freq_low+1)/2.0)
else:
if (reg_pll_status['PLL_LOCK']==1):
# Cap. bank configuration for which PLL is locked at the targeted frequency is found
# This is the starting point for the next step
break
else:
self.chip.log("Calibration Failed.")
return False
# Find the first cap. bank configuration above the initial one for which VTUNE_LOW=1
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
freq_init=freq
while(reg_pll_status['VTUNE_LOW']==0):
freq=freq+1
if (freq>=255):
break
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
freq_max=freq
# Find the first cap. bank configuration below the initial one for which VTUNE_HIGH=1
freq=freq_init
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
while(reg_pll_status['VTUNE_HIGH']==0):
freq=freq-1
if (freq<=1):
break
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
# In some VCO_FREQ<7:0> regions, FVCO vs VCO_FREQ<7:0> is not monotonic
# Next line detects that condition and exits the loop to prevent false results
if (reg_pll_status['VTUNE_LOW']==1):
break
freq_min=freq
# Optimal cap. bank configuration is between freq_min and freq_max
# It can be arithmetic or geometric average of boundary values
#freq_opt=int(math.sqrt(freq_min*freq_max))
freq_opt=int((freq_min+freq_max)/2.0)
sel_opt=reg_pll_cal_man['VCO_SEL_MAN<1:0>']
# Exit the manual calibration mode, enter the normal PLL operation mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=0
reg_pll_cfg['PLL_CALIBRATION_MODE']=0
# Write the results of calibration to the dedicated registers inside the chosen PLL profile
reg_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=freq_opt
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']=sel_opt
if (dbgMode):
self.chip.log("")
self.chip.log("Closed-Loop Manual Calibration Done!!!")
self.chip.log("Configured PLL Profile= %d" %(PROFILE))
self.chip.log("Target VCO Frequency [MHz]= %.5f" % (FVCO_TARGET/1.0e6))
self.chip.log("Frequency Error [Hz]= %.2e" %(abs(FVCO_TARGET-F_TARGET)))
self.chip.log("VCO_SEL_FINAL= %d" %(sel_opt))
self.chip.log("VCO_FREQ_FINAL= %d" %(freq_opt))
self.chip.log("VCO_FREQ_INIT= %d" %(freq_init))
self.chip.log("VCO_FREQ_MIN= %d" %(freq_min))
self.chip.log("VCO_FREQ_MAX= %d" %(freq_max))
self.chip.log('')
self.chip.log('')
if (dbgMode):
self.chip.PLL.infoLOCK()
# Go back to the initial PLL profile
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE_OLD
self.chip.setImmediateMode(Imd_Mode)
return True
def vco_manual_ctune(self, F_TARGET, XBUF_SLFBEN=1, PROFILE=0, IntN_Mode=False, PDIV2=False, VTUNE_VCT=2, dbgMode=False):
"""Selects the tuning curve where VCO frequency @ VTUNE_VCT is closest to F_TARGET (greater/equal than targeted frequecy)"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Store the current PLL Profile Index before proceeding to the new one for configuration
PROFILE_OLD=self.chip.PLL.ACTIVE_PROFILE
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Determine the FB-DIV configuration for targeted VCO frequency and self.fRef reference frequency
(N_INT, N_FRAC, N_FIX)=self.calc_fbdiv(F_TARGET, IntN_Mode, PDIV2)
# The exact targeted VCO frequency that will be used in the automatic coarse-tune algorithm
# If IntN-Mode is chosen, VCO will be locked to the closest integer multiple of reference frequency
FVCO_TARGET=N_FIX*(N_INT+N_FRAC/2.0**20)*self.fRef
# Calculate the fractional division words
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
# Enable PLL
self.enablePLL(PDIV2, IntN_Mode, XBUF_SLFBEN, PROFILE)
# Define VCO
reg_vco_cfg=self.chip.getRegisterByName("PLL_VCO_CFG_"+str(PROFILE))
# Set the VCO tuning voltage value during coarse-tuning
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
# Define SDM & FB-DIV Modulus
reg_sdm_cfg=self.chip.getRegisterByName("PLL_SDM_CFG_"+str(PROFILE))
if (IntN_Mode or N_FRAC==0):
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=1
else:
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=0
reg_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']=int(N_INT)
reg_fracmod_l=self.chip.getRegisterByName("PLL_FRACMODL_"+str(PROFILE))
reg_fracmod_l['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_fracmod_h=self.chip.getRegisterByName("PLL_FRACMODH_"+str(PROFILE))
reg_fracmod_h['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
# Reset PLL, Enable Calibration Mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['CTUNE_RES<1:0>']=3
reg_pll_cfg['PLL_CALIBRATION_EN']=1
reg_pll_cfg['PLL_CALIBRATION_MODE']=1
# Write to PLL_CAL_MAN Register
reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Enable Coarse-Tuning Frequency Comparator
reg_pll_cal_man['CTUNE_EN']=1
# Initial Value for VCO_SEL
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=2
# Find optimal VCO Core
# 24.02.2017. - overlap between VCO cores 2 and 3 is quite large, therefore value 240 for upper boundary can be decreased down to 200
#reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=240
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=200
reg_pll_cal_man['CTUNE_START']=1
# Start the coarse-tuning step
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
if (freq_low==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=3
else:
#reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=15
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=8
# Start the coarse-tuning step
reg_pll_cal_man['CTUNE_START']=1
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
if (freq_high==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=1
# Find the optimal VCO_FREQ value
bit_pos=7
bit_mask=0
freq=0
while (bit_pos>=0):
freq+=2**bit_pos
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
# Start the coarse-tuning step
reg_pll_cal_man['CTUNE_START']=1
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
bit_mask=(2**bit_pos)*(1-freq_low)
bit_val=(freq&bit_mask)>>bit_pos
if (bit_val==1):
freq-=2**bit_pos
if (bit_pos==0 and freq_low):
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']+=1
# In the last pass, set VTUNE_VCT to minimum value of 300 mV
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=0
# Start the coarse-tuning step
reg_pll_cal_man['CTUNE_START']=1
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
# Set-Back the VTUNE_VCT to the initial value
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
if (freq_high==1):
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']-=1
bit_pos-=1
sel_opt=reg_pll_cal_man['VCO_SEL_MAN<1:0>']
freq_opt=reg_pll_cal_man['VCO_FREQ_MAN<7:0>']
# Disable Frequency Comparator
reg_pll_cal_man['CTUNE_EN']=0
# Exit the manual calibration mode, enter the normal PLL operation mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=0
reg_pll_cfg['PLL_CALIBRATION_MODE']=0
# Write the results of calibration to the dedicated registers inside the chosen PLL profile
reg_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=freq_opt
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']=sel_opt
if (dbgMode):
self.chip.log("Open-Loop Manual Calibration Done!!!")
self.chip.log("Configured PLL Profile= %d" %(PROFILE))
self.chip.log("Target VCO Frequency [MHz]= %.5f" %(FVCO_TARGET/1.0e6))
self.chip.log("Frequency Error [Hz]= %.2e" %(abs(FVCO_TARGET-F_TARGET)))
self.chip.log("VCO_SEL_FINAL= %d" %(sel_opt))
self.chip.log("VCO_FREQ_FINAL= %d" %(freq_opt))
self.chip.log('')
self.chip.log('')
if (dbgMode):
self.chip.PLL.infoLOCK()
# Go back to the initial PLL profile
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE_OLD
self.chip.setImmediateMode(Imd_Mode)
return True
def optimLPF(self, PM_deg=49.8, fc=80.0e3, PROFILE=0, dbgMode=False):
PM_rad=PM_deg*math.pi/180
wc=2*math.pi*fc
# Check VCO_SEL
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
vco_sel=reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']
# Use Average for KVCO in Calculations
if (vco_sel==1):
KVCO_avg=44.404e6
elif (vco_sel==2):
KVCO_avg=33.924e6
elif (vco_sel==3):
KVCO_avg=41.455e6
else:
self.chip.log('Ext. LO selected in PLL_PROFILE %d.' % (PROFILE))
return None
# Read CP Current Value
reg_pll_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
PULSE=reg_pll_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
ICT_CP=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
Icp=ICT_CP*25.0e-6/16.0*PULSE
# Read Feedback-Divider Modulus
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
reg_pll_sdm_cfg=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
N_INT=reg_pll_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']
INTMOD_EN=reg_pll_sdm_cfg['INTMOD_EN_'+str(PROFILE)]
reg_pll_fracmodl=self.chip.getRegisterByName('PLL_FRACMODL_'+str(PROFILE))
N_FRACL=reg_pll_fracmodl['FRACMODL_'+str(PROFILE)+'<15:0>']
reg_pll_fracmodh=self.chip.getRegisterByName('PLL_FRACMODH_'+str(PROFILE))
N_FRACH=reg_pll_fracmodh['FRACMODH_'+str(PROFILE)+'<3:0>']
N_FRAC=N_FRACH*2**16+N_FRACL
N=N_INT+(1-INTMOD_EN)*N_FRAC*1.0/2.0**20
Kvco=2*math.pi*KVCO_avg
Kphase=Icp/(2*math.pi)
gamma=1.045
T31=0.1
# Approx. formula, Dean Banerjee
T1=(1.0/math.cos(PM_rad)-math.tan(PM_rad))/(wc*(1+T31))
T3=T1*T31
T2=gamma/((wc**2)*(T1+T3))
A0=(Kphase*Kvco)/((wc**2)*N)*math.sqrt((1+(wc**2)*(T2**2))/((1+(wc**2)*(T1**2))*(1+(wc**2)*(T3**2))))
A2=A0*T1*T3
A1=A0*(T1+T3)
C1=A2/(T2**2)*(1+math.sqrt(1+T2/A2*(T2*A0-A1)))
C3=(-(T2**2)*(C1**2)+T2*A1*C1-A2*A0)/((T2**2)*C1-A2)
C2=A0-C1-C3
R2=T2/C2
R3=A2/(C1*C3*T2)
if (dbgMode):
self.chip.log('Loop-Filter Optimization')
self.chip.log('-'*45)
self.chip.log('Input Parameters')
self.chip.log('\tIcp=%.2f uA' %(Icp/1.0e-6))
self.chip.log('\tKVCO=%.2f MHz/V' %(KVCO_avg/1.0e6))
self.chip.log('\tNDIV=%.2f' % (N))
self.chip.log('-'*45)
self.chip.log('Ideal LPF Values')
self.chip.log('\tC1= %.2f pF' %(C1/1.0e-12))
self.chip.log('\tC2= %.2f pF' %(C2/1.0e-12))
self.chip.log('\tR2= %.2f kOhm' %(R2/1.0e3))
self.chip.log('\tC3= %.2f pF' %(C3/1.0e-12))
self.chip.log('\tR3= %.2f kOhm' %(R3/1.0e3))
self.chip.log('')
self.chip.log('')
C1_CODE=int(round(C1/1.2e-12))
C2_CODE=int(round((C2-150.0e-12)/10.0e-12))
C3_CODE=int(round((C3-5.0e-12)/1.2e-12))
C1_CODE=int(min(max(C1_CODE,0),15))
C2_CODE=int(min(max(C2_CODE,0),15))
C3_CODE=int(min(max(C3_CODE,0),15))
R2_CODE=int(round(24.6e3/R2))
R3_CODE=int(round(14.9e3/R3))
R2_CODE=min(max(R2_CODE,1),15)
R3_CODE=min(max(R3_CODE,1),15)
self.setLPF(C1=C1_CODE, C2=C2_CODE, R2=R2_CODE, C3=C3_CODE, R3=R3_CODE, PROFILE=PROFILE)
def getNDIV(self, PROFILE=0):
"""
Returns float that represents PLL feedback division ratio for configuration in PLL profile PROFILE.
"""
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
reg_fracmodl=self.chip.getRegisterByName('PLL_FRACMODL_'+str(PROFILE))
reg_fracmodh=self.chip.getRegisterByName('PLL_FRACMODH_'+str(PROFILE))
reg_pll_sdm_cfg=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
NINT=reg_pll_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']
NFRAC=reg_fracmodh['FRACMODH_'+str(PROFILE)+'<3:0>']*2**16+reg_fracmodl['FRACMODL_'+str(PROFILE)+'<15:0>']
self.chip.setImmediateMode(Imd_Mode)
return 2**PDIV2*1.0*(NINT*1.0+NFRAC*1.0/2**20)
def getNFFDIV(self, PROFILE=0):
"""
Returns float that represents PLL feedforward division ratio for configuration in PLL profile PROFILE.
"""
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_pll_ff_cfg=self.chip.getRegisterByName('PLL_FF_CFG_'+str(PROFILE))
ffdiv_sel=reg_pll_ff_cfg['FFDIV_SEL_'+str(PROFILE)]
ffmod=int(reg_pll_ff_cfg['FFMOD_'+str(PROFILE)])
# Restore the initial Immediate-Mode setting before returning
self.chip.setImmediateMode(Imd_Mode)
if (ffdiv_sel==0):
return 1.0
else:
return 2.0**ffmod
def getNIQDIV2(self, channel, PROFILE=0):
"""
Returns float that represents PLL IQ-DivBy2 division ratio for configuration in PLL profile PROFILE for desired LO channel.
"""
if (PROFILE>=8):
self.chip.log('Wrong PLL Profile Number. Valid values 0-7.')
return None
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
if (channel=='A' or channel==0):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT0_"+str(PROFILE)+"<2:0>"]&4)>>2
elif (channel=='B' or channel==1):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT1_"+str(PROFILE)+"<2:0>"]&4)>>2
elif (channel=='C' or channel==2):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT2_"+str(PROFILE)+"<2:0>"]&4)>>2
elif (channel=='D' or channel==3):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT3_"+str(PROFILE)+"<2:0>"]&4)>>2
else:
self.chip.log('Wrong LO channel selected. Valid values: "A" or 0, "B" or 1, "C" or 2, "D" or 3.')
return None
self.chip.setImmediateMode(Imd_Mode)
return 2.0**(1.0-IQ_EXP)
def get_LOfreq(self, channel, PROFILE=0):
"""
Returns the exact value of LO frequency at chosen LO channel.
"""
if (PROFILE>=8):
self.chip.log('Wrong PLL Profile Number. Valid values 0-7.')
return None
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Get Feedback-Divider Division Modulus
N_FBDIV=self.getNDIV(PROFILE=PROFILE)
# Get Feedforward-Divider Division Modulus
N_FFDIV=self.getNFFDIV(PROFILE=PROFILE)
# Get IQ-DivBy2 Division Modulus
N_IQDIV2=self.getNIQDIV2(channel, PROFILE)
self.chip.setImmediateMode(Imd_Mode)
return (N_FBDIV)*self.fRef/N_FFDIV/N_IQDIV2
def centerVTUNE(self, PROFILE=0, dbgMode=False):
"""
This method should be used when the coarse tuning algorithm converges to a sub-band at which the PLL locks with VTUNE_HIGH=1 or VTUNE_LOW=1.
If possible, this method tweaks different VCO settings (VCO amplitude and capacitor-bank switch supply controls) in order to get the PLL locked
at the desired frequency with VTUNE_HIGH=VTUNE_LOW=0, without necessarily re-running the VCO auto-tuning state machine.
"""
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Reset PLL
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
# Here set active PLL profile to the value given by argument PROFILE
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Get register with VTUNE_HIGH and VTUNE_LOW Indicators and PLL_LOCK bit
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
# Get register with VCO_FREQ_n<7:0> word
#reg_pll_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
# Get register with VDIV_SWVDD_n<1:0> word
reg_pll_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
# Get Initial value for VCO_FREQ<1:0> word
#freq_init=reg_pll_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']
# Get Initial value for VDIV_SWVDD<1:0> word
vdiv_swvdd_init=reg_pll_vco_cfg['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']
#sel_init=reg_pll_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']
# Get Initial Value for VCO_AMP<7:0> and VCO_AAC_EN
amp_init=reg_pll_vco_cfg['VCO_AMP_'+str(PROFILE)+'<6:0>']
aac_en_init=reg_pll_vco_cfg['VCO_AAC_EN_'+str(PROFILE)]
# Get VTUNE_HIGH, VTUNE_LOW, PLL_LOCK bit values
vtune_high=reg_pll_status['VTUNE_HIGH']
vtune_low=reg_pll_status['VTUNE_LOW']
pll_lock=reg_pll_status['PLL_LOCK']
if (vtune_high==0 and vtune_low==0):
if (dbgMode):
self.chip.log('Centering of VTUNE not needed.')
self.chip.setImmediateMode(Imd_Mode)
return True
swvdd_list=range(0,4)
swvdd_list.reverse()
amp_list=range(0,4)
amp_list.reverse()
# Try to center VTUNE by changing Bias Voltages of MOS switches in Capacitor Bank and VCO Amp control and rerunning the VCO Auto-Tuning State-Machine
reg_pll_vco_cfg['VCO_AAC_EN_'+str(PROFILE)]=1
for amp in amp_list:
reg_pll_vco_cfg['VCO_AMP_'+str(PROFILE)+'<6:0>']=amp
for vdiv_swvdd in swvdd_list:
if not (amp_init==amp and vdiv_swvdd_init==vdiv_swvdd):
reg_pll_vco_cfg['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']=vdiv_swvdd
# changed FREQ_INIT_POS to 5
# The VCO Auto-Tuning State Machine will not be re-run again for each amp and swvdd combination
# The following two commands can be commented
#autotune_status=self.vco_auto_ctune(F_TARGET=F_TARGET, PROFILE=0, XBUF_SLFBEN=1, IntN_Mode=INTMOD_EN, PDIV2=PDIV2_EN, VTUNE_VCT=1, VCO_SEL_FORCE=1, VCO_SEL_INIT=sel_init, FREQ_INIT_POS=5, FREQ_INIT=freq_init, dbgMode=dbgMode)
#sleep(0.001)
vtune_high=reg_pll_status['VTUNE_HIGH']
vtune_low=reg_pll_status['VTUNE_LOW']
pll_lock=reg_pll_status['PLL_LOCK']
if (vtune_high==0 and vtune_low==0):
if (dbgMode):
self.chip.log('VTUNE voltage centered successfully.')
self.chip.log('New VCO control values: VCO_AMP<6:0>= %d, VCO_AAC_EN=1, VDIV_SWVDD<1:0>= %d' %(amp, vdiv_swvdd))
self.chip.log('')
self.chip.PLL.infoLOCK()
self.chip.setImmediateMode(Imd_Mode)
# Set back PLL_CAL_AUTO1 to starting values
# Uncomment these lines below if autotuning was invoked for each step of centering VTUNE
#reg_pll_cal_auto1['VCO_SEL_FORCE']=vco_sel_force_init
#reg_pll_cal_auto1['VCO_SEL_INIT<1:0>']=vco_sel_init
#reg_pll_cal_auto1['FREQ_INIT_POS<2:0>']=vco_freq_init_pos
#reg_pll_cal_auto1['FREQ_INIT<7:0>']=vco_freq_init
return True
if (dbgMode):
self.chip.log("Centering VTUNE failed.")
# Set back VDIV_SWVDD<1:0> and FREQ<7:0> to initial values
reg_pll_vco_cfg['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']=vdiv_swvdd_init
#reg_pll_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=freq_init
# Set back VCO amplitude controls to initial values
reg_pll_vco_cfg['VCO_AMP_'+str(PROFILE)+'<6:0>']=amp_init
reg_pll_vco_cfg['VCO_AAC_EN_'+str(PROFILE)]=aac_en_init
# Set back the initial value of Immediate mode for LMS8001 EVB
self.chip.setImmediateMode(Imd_Mode)
return False
def setLOFREQ(self, F_LO, XBUF_SLFBEN=1, IQ=False, IntN_Mode=False, CTUNE_METHOD='OPEN-LOOP', PROFILE=0, dbgMode=False):
"""
This methods configures PLL-LODIST subsystems of LMS8001 IC to generate desired LO frequency.
Frequency Range Available with Quadrature Divider By 2 enabled:
260 MHz<=F_LO<=4.55 GHz,
Frequency Range Available with Quadrature Divider By 2 disabled:
520 MHz<=F_LO<=9.11 GHz.
Frequencies below 520 MHz can only be synthesized using the IQ generator.
CTUNE_METHOD='OPEN-LOOP' calls the vco_auto_tune method to tune VCO to the desired frequency
CTUNE_METHOD='OPEN-LOOP-MANUAL' calls the vco_manual_ctune method to tune VCO to the desired frequency
CTUNE_METHOD='CLOSE-LOOP' calls the vco_manual_cloop_tune method to tune VCO to the desired frequency
"""
if (IQ):
if not (260.0e6<=F_LO<=4.55e9):
self.chip.log("F_LO should be between 260 MHz and 4.55 GHz, with argument IQ=True. Failed to set LO Freq.")
return False
DIV2IQ=1
else:
if not (260.0e6<=F_LO<=9.11e9):
self.chip.log("F_LO should be between 260 MHz and 9.11 GHz. Failed to set LO Freq.")
return False
if (260e6<=F_LO<=520e6):
self.chip.log("F_LO values between 260 MHz and 520 MHz can only be generated with argument IQ=True. Failed to set LO Freq.")
return False
DIV2IQ=0
FFMOD=0
F_VCO=(2.0**DIV2IQ)*(2.0**FFMOD)*F_LO
while not (4.1e9<=F_VCO<=9.11e9):
FFMOD+=1
F_VCO=(2.0**DIV2IQ)*(2**FFMOD)*F_LO
if (dbgMode):
self.chip.log('')
self.chip.log('Setting LO Frequency')
self.chip.log('-'*60)
self.chip.log('Required FF-DIV Modulus: %d (%d)' %(2**FFMOD, FFMOD))
self.chip.log('IQ DIV2 Gen: %s' %(str(IQ)))
self.chip.log('Targeted VCO Frequency: %.5f GHz' %(F_VCO/1.0e9))
self.chip.log('IntN-Mode: %s' %(str(IntN_Mode)))
self.chip.log('-'*60)
self.chip.log('')
# Set FF-DIV Control Signals
self.setFFDIV(FFMOD=FFMOD, PROFILE=PROFILE)
if (CTUNE_METHOD=='OPEN-LOOP'):
# Read VCO AUTO-CAL Registers - use user defined values
reg_pll_cal_auto1=self.chip.getRegisterByName('PLL_CAL_AUTO1')
VCO_SEL_FORCE=reg_pll_cal_auto1['VCO_SEL_FORCE']
VCO_SEL_INIT=reg_pll_cal_auto1['VCO_SEL_INIT<1:0>']
FREQ_INIT_POS=reg_pll_cal_auto1['FREQ_INIT_POS<2:0>']
FREQ_INIT=reg_pll_cal_auto1['FREQ_INIT<7:0>']
reg_pll_cal_auto2=self.chip.getRegisterByName('PLL_CAL_AUTO2')
FREQ_SETTLING_N=reg_pll_cal_auto2['FREQ_SETTLING_N<3:0>']
VTUNE_WAIT_N=reg_pll_cal_auto2['VTUNE_WAIT_N<7:0>']
reg_pll_cal_auto3=self.chip.getRegisterByName('PLL_CAL_AUTO3')
VCO_SEL_FREQ_MAX=reg_pll_cal_auto3['VCO_SEL_FREQ_MAX<7:0>']
VCO_SEL_FREQ_MIN=reg_pll_cal_auto3['VCO_SEL_FREQ_MIN<7:0>']
# Read PLL_EN_FB_PDIV2_n value - use user defined values
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
# Read VTUNE_VCT_n value - use user defined values
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
VTUNE_VCT=reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']
ctune_status=self.vco_auto_ctune(F_TARGET=F_VCO, PROFILE=PROFILE, XBUF_SLFBEN=XBUF_SLFBEN, IntN_Mode=IntN_Mode, PDIV2=PDIV2, VTUNE_VCT=VTUNE_VCT, VCO_SEL_FORCE=VCO_SEL_FORCE, VCO_SEL_INIT=VCO_SEL_INIT, FREQ_INIT_POS=FREQ_INIT_POS, FREQ_INIT=FREQ_INIT, FREQ_SETTLING_N=FREQ_SETTLING_N, VTUNE_WAIT_N=VTUNE_WAIT_N, VCO_SEL_FREQ_MAX=VCO_SEL_FREQ_MAX, VCO_SEL_FREQ_MIN=VCO_SEL_FREQ_MIN, dbgMode=dbgMode)
elif (CTUNE_METHOD=='OPEN-LOOP-MANUAL'):
# Read PLL_EN_FB_PDIV2_n value - use user defined values
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
# Read VTUNE_VCT_n value - use user defined values
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
VTUNE_VCT=reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']
ctune_status=self.vco_manual_ctune(F_TARGET=F_VCO, XBUF_SLFBEN=XBUF_SLFBEN, PROFILE=PROFILE, IntN_Mode=IntN_Mode, PDIV2=PDIV2, VTUNE_VCT=VTUNE_VCT, dbgMode=dbgMode)
elif (CTUNE_METHOD=='CLOSE-LOOP'):
# Read PLL_EN_FB_PDIV2_n value - use user defined values
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
ctune_status=self.vco_manual_cloop_tune(F_VCO, PROFILE=PROFILE, XBUF_SLFBEN=XBUF_SLFBEN, IntN_Mode=IntN_Mode, PDIV2=PDIV2, dbgMode=dbgMode)
else:
if (dbgMode):
self.chip.log('Bad CTUNE_METHOD selected. Possible Options: OPEN-LOOP, OPEN-LOOP-MANUAL and CLOSE-LOOP.')
self.chip.log('Setting LO Frequency failed.')
return False
if not (self.chip.PLL.VTUNE_HIGH==0 and self.chip.PLL.VTUNE_LOW==0):
self.centerVTUNE(PROFILE=PROFILE, dbgMode=dbgMode)
if (ctune_status):
if (dbgMode):
self.chip.log('Setting LO Frequency finished successfully.')
return True
else:
self.chip.log('Setting LO Frequency failed.')
return False
def optim_PLL_LoopBW(self, PM_deg=49.8, fc=120.0e3, FIT_KVCO=False, PROFILE=0, dbgMode=False):
"""
This method finds optimal PLL configuration, CP pulse current and LPF element values.
Optimization finds the maximal CP current that can achieve the targeted PLL Loop BW using Loop-Filter element values which can be implemented in the LMS8001 IC.
Result should be PLL configuration with best phase noise performance for targeted loop bandwidth.
"""
# Get initial CP current settings
reg_pll_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
PULSE_INIT=reg_pll_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
OFS_INIT=reg_pll_cp_cfg0['OFS_'+str(PROFILE)+'<5:0>']
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
ICT_CP_INIT=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
# Pulse control word of CP inside LMS8001 will be swept from 63 to 4.
# First value that gives implementable PLL configuration will be used.
cp_pulse_vals=range(4,64)
cp_pulse_vals.reverse()
# Estimate the value of KVCO for settings in the PLL Profile PROFILE
KVCO_avg=self.estim_KVCO(FIT_KVCO=FIT_KVCO, PROFILE=PROFILE)
# Read Feedback-Divider Modulus
N=self.getNDIV(PROFILE=PROFILE)
#Kvco=2*math.pi*KVCO_avg
for cp_pulse in cp_pulse_vals:
# Calculate CP Current Value
Icp=ICT_CP_INIT*25.0e-6/16.0*cp_pulse
gamma=1.045
T31=0.1
LPF_IDEAL_VALS=self.calc_ideal_LPF(fc=fc, PM_deg=PM_deg, Icp=Icp, KVCO_HzV=KVCO_avg, N=N, gamma=gamma, T31=T31)
(LPFvals_OK, LPF_REAL_VALS)=self.calc_real_LPF(LPF_IDEAL_VALS)
if (LPFvals_OK):
# Set CP Pulse Current to the optimized value
self.setCP(PULSE=cp_pulse, OFS=OFS_INIT, ICT_CP=ICT_CP_INIT, PROFILE=PROFILE)
# Set LPF Components to the optimized values
self.setLPF(C1=LPF_REAL_VALS['C1_CODE'], C2=LPF_REAL_VALS['C2_CODE'], R2=LPF_REAL_VALS['R2_CODE'], C3=LPF_REAL_VALS['C3_CODE'], R3=LPF_REAL_VALS['R3_CODE'], PROFILE=PROFILE)
if (dbgMode):
self.chip.log('PLL LoopBW Optimization finished successfully.')
self.chip.log('-'*45)
self.chip.log('\tIcp=%.2f uA' %(Icp/1.0e-6))
self.chip.log('\tUsed Value for KVCO=%.2f MHz/V' %(KVCO_avg/1.0e6))
self.chip.log('\tNDIV=%.2f' % (N))
self.chip.log('-'*45)
self.chip.log('')
self.chip.log('Ideal LPF Values')
self.chip.log('-'*45)
self.chip.log('\tC1= %.2f pF' %(LPF_IDEAL_VALS['C1']/1.0e-12))
self.chip.log('\tC2= %.2f pF' %(LPF_IDEAL_VALS['C2']/1.0e-12))
self.chip.log('\tR2= %.2f kOhm' %(LPF_IDEAL_VALS['R2']/1.0e3))
self.chip.log('\tC3= %.2f pF' %(LPF_IDEAL_VALS['C3']/1.0e-12))
self.chip.log('\tR3= %.2f kOhm' %(LPF_IDEAL_VALS['R3']/1.0e3))
self.chip.log('')
return True
if (dbgMode):
self.chip.log('PLL LoopBW Optimization failed.')
self.chip.log('Some of the LPF component(s) out of implementable range.')
# Set back to initial settings of CP
self.setCP(PULSE=PULSE_INIT, OFS=OFS_INIT, ICT_CP=ICT_CP_INIT, PROFILE=PROFILE)
return False
def optimCPandLD(self, PROFILE=0, dbgMode=False):
"""This method checks if PLL works in fractional-N Mode. If this condition is true, it sets the offset CP current to optimize phase noise performance in FracN operation mode.
When CP offset current is used, it is recommended to set ICP_OFS ~ 1.9% of ICP_PULSE for Frac-N Mode, 1.2% of ICP_PULSE for Int-N Mode"""
# Check operating mode of LMS8001 PLL
reg_pll_sdm_cfg=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
INTMOD_EN=reg_pll_sdm_cfg['INTMOD_EN_'+str(PROFILE)]
# Read CP current configuration
reg_pll_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
PULSE=reg_pll_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
OFS=reg_pll_cp_cfg0['OFS_'+str(PROFILE)+'<5:0>']
ICT_CP=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
# Read Lock Detector Threshold Voltage
LD_VCT=reg_pll_cp_cfg1['LD_VCT_'+str(PROFILE)+'<1:0>']
# Calculate OFS and LD_VCT optimal values
if (INTMOD_EN):
# Set Offset Current and Lock Detector Threshold for IntN-Operating Mode
LD_VCT=2
Icp=(25.0*ICT_CP/16.0)*PULSE
# Calculate Target Value for Offset Current, as 1.2% of Pulse current value
Icp_OFS=1.2/100.0*Icp
Icp_OFS_step=(25.0*ICT_CP/16.0)*0.25
OFS=int(round(Icp_OFS/Icp_OFS_step))
else:
# Set Offset Current and Lock Detector Threshold for FracN-Operating Mode
LD_VCT=0
Icp=(25.0*ICT_CP/16.0)*PULSE
# Calculate Target Value for Offset Current, as 1.9% of Pulse current value
Icp_OFS=1.9/100.0*Icp
Icp_OFS_step=(25.0*ICT_CP/16.0)*0.25
OFS=int(max(1, round(Icp_OFS/Icp_OFS_step)))
self.setCP(PULSE=PULSE, OFS=OFS, ICT_CP=ICT_CP, PROFILE=PROFILE)
self.setLD(LD_VCT=LD_VCT, PROFILE=PROFILE)
if (dbgMode):
self.chip.log('')
self.chip.log('Optimization of CP-OFS and LD-VCT Settings')
self.chip.log('-'*60)
self.chip.log('OFS=%d' %(OFS))
self.chip.log('LD_VCT=%d' %(LD_VCT))
self.chip.log('-'*60)
self.chip.log('')
return True
def configPLL(self, F_LO, IQ=False, autoConfXBUF=True, autoConfVREG=True, IntN_Mode=False, LoopBW=340.0e3, PM=55.0, FIT_KVCO=True, BWEF=1.0, FLOCK_N=200, SKIP_STEPS=[], CTUNE_METHOD='OPEN-LOOP', FLOCK_METHOD='SIMPLE', FLOCK_VCO_SPDUP=1, PROFILE=0, dbgMode=False):
"""This method does complete configuration of LMS8001 IC PLL in 5 steps:
1. 'VCO_CTUNE' STEP
Runs VCO Coarse Frequency Tuning and Sets FF-DIV Ratios needed for generation of F_LO frequency
CTUNE_METHOD='OPEN-LOOP' calls the vco_auto_tune method to tune VCO to the desired frequency
CTUNE_METHOD='OPEN-LOOP-MANUAL' calls the vco_manual_ctune method to tune VCO to the desired frequency
CTUNE_METHOD='CLOSE-LOOP' calls the vco_manual_cloop_tune method to tune VCO to the desired frequency
2. 'OPTIM_PLL_LOOPBW' STEP
Optimizes PLL configuration for targeted LoopBW and Phase Margin (PM)
3. 'OPTIM_CP_OFFSET' STEP
Optimize CP offset current and Lock-Detector threshold settings depending on the chosen PLL operating mode
4. 'OPTIM_FAST_LOCK' STEP
Sets Fast-Lock Settings for PLL Profile PROFILE
"""
# Calculate Loop-Crossover frequency
fc=LoopBW/1.65
# Set VCO Bias Parameters
if (autoConfVREG):
self.setVCOBIAS(EN=1, BYP_VCOREG=1)
else:
self.chip.PLL.EN_VCOBIAS=1
# Set XBUF_SLFBEN Parameter
if (autoConfXBUF):
XBUF_SLFBEN=1
else:
XBUF_SLFBEN=self.chip.PLL.PLL_XBUF_SLFBEN
# Step 1 - Tune PLL to generate F_LO frequency at LODIST outputs that should be manually enabled outside this method
if not ((1 in SKIP_STEPS) or ('VCO_CTUNE' in SKIP_STEPS)):
# Set VCO Core Parameters
self.setVCO(AMP=3, VDIV_SWVDD=2, PROFILE=PROFILE)
status1=self.setLOFREQ(F_LO, IQ=IQ, XBUF_SLFBEN=XBUF_SLFBEN, IntN_Mode=IntN_Mode, CTUNE_METHOD=CTUNE_METHOD, PROFILE=PROFILE, dbgMode=dbgMode)
if not (status1):
self.chip.log('PLL Tuning to F_LO=%.5f GHz failed.' %(F_LO/1.0e9))
return status1
else:
status1=True
# Step 2 - Optimize PLL settings for targeted LoopBW
if not ((2 in SKIP_STEPS) or ('OPTIM_PLL_LOOPBW' in SKIP_STEPS)):
status2=self.optim_PLL_LoopBW(PM_deg=PM, fc=fc, FIT_KVCO=FIT_KVCO, PROFILE=PROFILE, dbgMode=dbgMode)
if not (status2):
self.chip.log('Optimization of PLL at F_LO=%.5f GHz, LoopBW=%.2f kHz and PM=%.2f deg failed.' %(F_LO/1.0e9, LoopBW/1.0e3, PM))
else:
status2=True
# Step 3 - Optimize CP offset current Lock Detector Threashold depending on operating mode chosen (IntN or FracN)
if not ((3 in SKIP_STEPS) or ('OPTIM_CP_OFFSET' in SKIP_STEPS)):
status3=self.optimCPandLD(PROFILE=PROFILE, dbgMode=dbgMode)
if not (status3):
self.chip.log('Optimization of CP-OFS and LD-VCT at F_LO=%.5f GHz failed.' %(F_LO/1.0e9))
else:
status3=True
# Step 4 - Configure Fast-Lock Mode Registers
if not ((4 in SKIP_STEPS) or ('OPTIM_FAST_LOCK' in SKIP_STEPS)):
if (BWEF>=1.0):
self.setFLOCK(BWEF, LoopBW=BWEF*LoopBW, PM=PM, FLOCK_N=FLOCK_N, Ch_EN=[], METHOD=FLOCK_METHOD, FIT_KVCO=FIT_KVCO, FLOCK_VCO_SPDUP=FLOCK_VCO_SPDUP, PROFILE=PROFILE)
else:
status4=True
return (status1 and status2 and status3)
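# Usage sketch (hypothetical object names): assuming `pll` is an instance of this class
# bound to an LMS8001 chip object, a complete profile configuration could look roughly
# like this; only the keyword arguments shown in the method signatures above are used.
#
#   pll.configPLL(F_LO=2.4e9, IQ=False, IntN_Mode=False, LoopBW=340.0e3, PM=55.0,
#                 CTUNE_METHOD='OPEN-LOOP', PROFILE=0, dbgMode=True)
#   f_lo = pll.get_LOfreq('A', PROFILE=0)   # read back the exact synthesized LO frequency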
|
python
|
# Generated by Django 2.1.14 on 2019-12-02 11:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('djautotask', '0030_task_phase'),
('djautotask', '0028_task_secondary_resources'),
]
operations = [
]
|
python
|
# -*- coding: utf-8 -*-
import dataiku
from dataiku.customrecipe import *
import pandas as pd
import networkx as nx
from networkx.algorithms import bipartite
# Read recipe config
input_name = get_input_names_for_role('Input Dataset')[0]
output_name = get_output_names_for_role('Output Dataset')[0]
needs_eig = get_recipe_config()['eigenvector_centrality']
needs_clu = get_recipe_config()['clustering']
needs_tri = get_recipe_config()['triangles']
needs_clo = get_recipe_config()['closeness']
needs_pag = get_recipe_config()['pagerank']
needs_squ = get_recipe_config()['sq_clustering']
node_A=get_recipe_config()['node_A']
node_B=get_recipe_config()['node_B']
print get_recipe_config()
# Recipe input
df = dataiku.Dataset(input_name).get_dataframe()
print "[+] Dataset loaded..."
# Creating the bipartite graph
graph = nx.Graph()
graph.add_edges_from(zip(df[node_A].values.tolist(),df[node_B].values.tolist()))
print "[+] Created bipartite graph..."
# Always run: nodes degree
print "[+] Computing degree..."
deg = pd.Series(nx.degree(graph), name='degree')
stats = pd.DataFrame(list(deg),columns=['node_name','degree'])
if needs_eig:
print "[+] Computing eigenvector centrality..."
eig = pd.Series(nx.eigenvector_centrality_numpy(graph), name='eigenvector_centrality').reset_index()
eig.columns=['node_name','eigenvector_centrality']
if needs_clu:
print "[+] Computing clustering coefficient..."
clu = pd.Series(nx.clustering(graph), name='clustering_coefficient').reset_index()
clu.columns=['node_name','clustering_coefficient']
if needs_tri:
print "[+] Computing number of triangles..."
tri = pd.Series(nx.triangles(graph), name='triangles').reset_index()
tri.columns=['node_name','triangles']
if needs_clo:
print "[+] Computing closeness centrality..."
clo = pd.Series(nx.closeness_centrality(graph), name='closeness_centrality').reset_index()
clo.columns=['node_name','closeness_centrality']
if needs_pag:
print "[+] Computing pagerank..."
pag = pd.Series(nx.pagerank(graph), name='pagerank').reset_index()
pag.columns=['node_name','pagerank']
if needs_squ:
print "[+] Computing square clustering..."
squ = pd.Series(nx.square_clustering(graph), name='square_clustering_coefficient').reset_index()
squ.columns=['node_name','square_clustering_coefficient']
# Always run: connected components
_cco = {}
for i, c in enumerate(nx.connected_components(graph)):
for e in c:
_cco[e] = i
cco = pd.Series(_cco, name='connected_component_id').reset_index()
cco.columns=['node_name','connected_component_id']
# Putting all together
stats = stats.merge(cco,how='left')
if needs_eig:
stats = stats.merge(eig,how='left')
if needs_clu:
stats = stats.merge(clu,how='left')
if needs_tri:
stats = stats.merge(tri,how='left')
if needs_clo:
stats = stats.merge(clo,how='left')
if needs_pag:
stats = stats.merge(pag,how='left')
if needs_squ:
stats = stats.merge(squ,how='left')
_s = stats["connected_component_id"].value_counts().reset_index()
_s.columns = ['connected_component_id', 'connected_component_size']
stats = stats.merge(_s, on="connected_component_id", how="left")
# Recipe outputs
print "[+] Writing output dataset..."
graph = dataiku.Dataset(output_name)
graph.write_with_schema(stats)
|
python
|
import json
import gmplot
import os
import random
import collections
# FIX FOR MISSING MARKERS
# 1. Open gmplot.py in Lib/site-packages/gmplot
# 2. Replace line 29 (self.coloricon.....) with the following two lines:
# self.coloricon = os.path.join(os.path.dirname(__file__), 'markers/%s.png')
# self.coloricon = self.coloricon.replace('/', '\\').replace('\\', '\\\\')
def create_range_map(user_json, date, start, end, position_json, show_trips):
nice_colors = collections.deque(['#006699', '#6e4673', '#649e0b', '#f6921e', '#d14343', '#00afaf', '#66bbed', '#95609c', '#a1c964', '#faaf40', '#e56f6f', '#46dbdb'])
start_set = False
gmap = None
# Go through selected trips.
for i in range(start, end + 1):
latt_list = []
long_list = []
transport = int(user_json['TripDocuments'][date]['TripList'][i]['Transport']['$numberInt'])
print(transport)
if transport == 0: # WALK
map_marker = '#000000'
elif transport == 1: # BIKE
map_marker = '#0000FF'
elif transport == 2: # CAR
map_marker = '#0000CD'
else: # TRANSIT
map_marker = '#00BFFF'
# Go through logs in a trip.
for log in user_json['TripDocuments'][date]['TripList'][i]['TripPositions']:
latt_list.append(float(log['Latitude']['$numberDouble']))
long_list.append(float(log['Longitude']['$numberDouble']))
# Set the start of the map at the first trip.
if not start_set:
gmap = gmplot.GoogleMapPlotter(latt_list[0], long_list[0], 13)
gmap.apikey = 'AIzaSyDPVbZkJPURllC7bFlR44iZhoLfwNSS5JI'
start_set = True
for log in user_json['TripDocuments'][date]['TripList'][i]['TripPositions']:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), color=map_marker, title=f"SPEED: {log['Speed']}")
# Pick the next preset color; fall back to a random color once the palette is exhausted.
if len(nice_colors) == 0:
color = "#%06x" % random.randint(0, 0xFFFFFF)
else:
color = nice_colors.popleft()
gmap.plot(latt_list, long_list, color, edge_width=5)
''''# Add markers for trip.
if show_trips:
for idx, log in enumerate(user_json['TripDocuments'][date]['TripList'][i]['TripPositions']):
if idx == 0:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), '#7FFF00', title=f'TRIP: {str(i)} START')
elif idx == len(user_json['TripDocuments'][date]['TripList'][i]['TripPositions']) + 1:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), '#A52A2A', title=f'TRIP: {str(i)} END')
else:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), '#4682B4')
'''
# Add markers for positions.
if not show_trips:
for pos in position_json:
gmap.marker(float(pos['Latitude']['$numberDouble']), float(pos['Longitude']['$numberDouble']), '#FFA500')
gmap.draw(os.path.join(os.getcwd(), 'plots', f'result.html'))
def generate_map_gui():
# Load JSON.
collection = open('raw.json', 'r').readlines()
users = []
for user in collection:
users.append(json.loads(user))
# Select user.
print('\nShowing users:')
for idx, user in enumerate(users):
print(f"[{idx}]: {user['_id']}")
user_select = int(input('Please select a user: '))
while user_select > len(users) - 1:
print('Wrong input!')
user_select = int(input('Please select a user: '))
# Show trip date overview.
print(f"\nShowing dates for user: {users[user_select]['_id']}")
for idx, date in enumerate(users[user_select]['TripDocuments']):
print(f"[{idx}]: {date['_id']}")
# Select date.
date_select = int(input('Please select a date: '))
while date_select > len(users[user_select]['TripDocuments']) - 1:
print('Wrong input!')
date_select = int(input('Please select a date: '))
# Show trip overview for chosen date.
print(f"\nShowing trips for date: {users[user_select]['TripDocuments'][date_select]['_id']}")
for idx, trip in enumerate(users[user_select]['TripDocuments'][date_select]['TripList']):
print(f"[{idx}]: {trip['_id']}")
# Range select
print('\nPlease select a range of trips to map. Give the same number twice to only map one.')
start_range = int(input('Start range: '))
end_range = int(input('End range: '))
pos_json = None
''''# Get positions for user.
pos_collection = open('rawPos.json', 'r').readlines()
pos_json = None
for user_positions in pos_collection:
user_pos_data = json.loads(user_positions)
if user_pos_data['_id'] == users[user_select]['_id']:
# Get pos doc for selected date.
for doc in user_pos_data['Documents']:
if doc['_id'] == users[user_select]['TripDocuments'][date_select]['_id']:
pos_json = doc['PositionList']'''
create_range_map(users[user_select], date_select, start_range, end_range, pos_json, True)
print('\nMap created in plots/result.html')
if __name__ == '__main__':
generate_map_gui()
|
python
|
import pytest
from cx_const import Number, StepperDir
from cx_core.stepper import MinMax, Stepper, StepperOutput
class FakeStepper(Stepper):
def __init__(self) -> None:
super().__init__(MinMax(0, 1), 1)
def step(self, value: Number, direction: str) -> StepperOutput:
return StepperOutput(next_value=0, next_direction=None)
@pytest.mark.parametrize(
"direction_input, previous_direction, expected_direction",
[
(StepperDir.UP, StepperDir.UP, StepperDir.UP),
(StepperDir.DOWN, StepperDir.DOWN, StepperDir.DOWN),
(StepperDir.UP, StepperDir.DOWN, StepperDir.UP),
(StepperDir.DOWN, StepperDir.UP, StepperDir.DOWN),
(StepperDir.TOGGLE, StepperDir.UP, StepperDir.DOWN),
(StepperDir.TOGGLE, StepperDir.DOWN, StepperDir.UP),
],
)
def test_get_direction(
direction_input: str, previous_direction: str, expected_direction: str
) -> None:
stepper = FakeStepper()
stepper.previous_direction = previous_direction
direction_output = stepper.get_direction(0, direction_input)
assert direction_output == expected_direction
@pytest.mark.parametrize(
"direction_input, expected_sign",
[
(StepperDir.UP, 1),
(StepperDir.DOWN, -1),
(StepperDir.UP, 1),
(StepperDir.DOWN, -1),
],
)
def test_sign(direction_input: str, expected_sign: int) -> None:
stepper = FakeStepper()
sign_output = stepper.sign(direction_input)
assert sign_output == expected_sign
|
python
|
# Generated by Django 2.1.5 on 2019-01-25 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_shoppingcart'),
]
operations = [
migrations.AddField(
model_name='shoppingcart',
name='total_price',
field=models.IntegerField(default=-1),
),
migrations.AddField(
model_name='shoppingcart',
name='user_address',
field=models.CharField(default='unknown', max_length=200),
),
migrations.AddField(
model_name='shoppingcart',
name='user_name',
field=models.CharField(default='unknown', max_length=30),
),
]
|
python
|
import numpy as np
def Adam_Opt(X_0, function, gradient_function, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, max_iter=500,
disp=False, tolerance=1e-5, store_steps=False):
"""
To be passed into Scipy Minimize method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize
https://github.com/sagarvegad/Adam-optimizer/blob/master/Adam.py
https://arxiv.org/abs/1412.6980
Args:
function (callable): Stochastic objective function
gradient_function (callable): function to obtain gradient of Stochastic objective
X0 (np.array): Initial guess
learning_rate (float): Step size
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
epsilon (float): Constant (small) for numerical stability
Attributes:
t (int): Timestep
m_t (float): first moment vector
v_t (float): second moment vector
"""
input_vectors=[]
output_results=[]
# initialization
t=0 # timestep
m_t = 0 #1st moment vector
v_t = 0 #2nd moment vector
X_t = X_0
while(t<max_iter):
if store_steps is True:
input_vectors.append(X_t)
output_results.append(function(X_t))
t+=1
g_t = gradient_function(X_t)
m_t = beta_1*m_t + (1-beta_1)*g_t #updates the moving averages of the gradient (biased first moment estimate)
v_t = beta_2*v_t + (1-beta_2)*(g_t*g_t) #updates the moving averages of the squared gradient (biased 2nd
# raw moment estimate)
m_cap = m_t / (1 - (beta_1 ** t)) # Compute bias-corrected first moment estimate
v_cap = v_t / (1 - (beta_2 ** t)) # Compute bias-corrected second raw moment estimate
X_t_prev = X_t
X_t = X_t_prev - (learning_rate * m_cap) / (np.sqrt(v_cap) + epsilon) # updates the parameters
if disp is True:
output = function(X_t)
print('step: {} input:{} obj_funct: {}'.format(t, X_t, output))
if np.isclose(X_t, X_t_prev, atol=tolerance).all(): # convergence check
break
if store_steps is True:
return X_t, input_vectors, output_results
else:
return X_t
if __name__ == '__main__':
def Function_to_minimise(input_vect, const=2):
# z = x^2 + y^2 + constant
x = input_vect[0]
y = input_vect[1]
z = x ** 2 + y ** 2 + const
return z
def calc_grad(input_vect):
# gradient of z = x^2 + y^2 + constant
x = input_vect[0]
y = input_vect[1]
dz_dx = 2 * x
dz_dy = 2 * y
return np.array([dz_dx, dz_dy])
X0 = np.array([1,2])
GG = Adam_Opt(X0, Function_to_minimise, calc_grad,
learning_rate=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
print(Function_to_minimise(GG))
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
x = np.arange(-10, 10, 0.25)
y = np.arange(-10, 10, 0.25)
const = 2
x, y = np.meshgrid(x, y)
z = x ** 2 + y ** 2 + const
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.viridis)
plt.show()
print('Minimum should be:', 2.0)
### for scipy ###
# (fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
# bounds=bounds, constraints=constraints,
# callback=callback, **options)
def fmin_ADAM(f, x0, fprime=None, args=(), gtol=1e-5,
maxiter=500, full_output=0, disp=1, maxfev=500,
retall=0, callback=None, learning_rate = 0.001,
beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8):
"""
Minimize a function using the Adam algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable fprime(x,*args)
Gradient of the objective function.
"""
# Pass the solver options explicitly (passing them both as keywords and via an
# options dict would raise "got multiple values for keyword argument").
res = _adam_minimize(f, x0, fprime, args=args, callback=callback,
xtol=gtol, maxiter=maxiter,
disp=disp, maxfev=maxfev, return_all=retall,
learning_rate=learning_rate,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
if full_output:
retlist = (res['x'], res['fun'], #res['jac'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
from scipy.optimize.optimize import OptimizeResult, wrap_function, _status_message, _check_unknown_options
from numpy import squeeze
# Minimizer interface adapted from scipy.optimize (cf. _minimize_powell)
def _adam_minimize(func, x0, args=(), jac=None, callback=None,
xtol=1e-8, maxiter=None, maxfev=None,
disp=False, return_all=False,
learning_rate = 0.001,
beta_1=0.9, beta_2=0.999, epsilon=1e-8, **unknown_options):
"""
Minimization of a scalar function of one or more variables using the
Adam algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Absolute tolerance on the change in `x` between iterations, used
for the convergence check.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Minimization will stop at the first limit reached.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for the Adam method')
if maxfev is None:
maxfev = maxiter + 10
_, func = wrap_function(func, args)
retall = return_all
if retall:
allvecs = [x0]
all_jac_vecs=[jac(x0)]
fval = squeeze(func(x0))
# initialization
t=0 # timestep
m_t = 0 # 1st moment vector
v_t = 0 # 2nd moment vector
X_t = x0
fcalls=0
iter = 0
while True:
# ADAM Algorithm
t+=1
g_t = jac(X_t)
m_t = beta_1*m_t + (1-beta_1)*g_t #updates the moving averages of the gradient (biased first moment estimate)
v_t = beta_2*v_t + (1-beta_2)*(g_t*g_t) #updates the moving averages of the squared gradient (biased 2nd
# raw moment estimate)
m_cap = m_t / (1 - (beta_1 ** t)) # Compute bias-corrected first moment estimate
v_cap = v_t / (1 - (beta_2 ** t)) # Compute bias-corrected second raw moment estimate
X_t_prev = X_t
X_t = X_t_prev - (learning_rate * m_cap) / (np.sqrt(v_cap) + epsilon) # updates the parameters
# Adam END
# updates and termination criteria
fcalls+=1
fval = func(X_t)
iter += 1
if callback is not None:
callback(X_t)
if retall:
allvecs.append(X_t)
all_jac_vecs.append(g_t)
if fcalls >= maxfev: # max function evaluation
break
if iter >= maxiter: # max no. of iterations
break
if np.isclose(X_t, X_t_prev, atol=xtol).all(): # convergence check
break
warnflag = 0
if fcalls >= maxfev:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
elif np.isnan(fval) or np.isnan(X_t).any():
warnflag = 3
msg = _status_message['nan']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls)
result = OptimizeResult(fun=fval, nit=iter, nfev=fcalls,
status=warnflag, success=(warnflag == 0),
message=msg, x=X_t)
if retall:
result['allvecs'] = allvecs
result['jac'] = all_jac_vecs
return result
if __name__ == '__main__':
def Function_to_minimise(input_vect, const=2):
# z = x^2 + y^2 + constant
x = input_vect[0]
y = input_vect[1]
z = x ** 2 + y ** 2 + const
return z
def calc_grad(input_vect):
# gradient of z = x^2 + y^2 + constant
x = input_vect[0]
y = input_vect[1]
dz_dx = 2 * x
dz_dy = 2 * y
return np.array([dz_dx, dz_dy])
X0 = np.array([1,2])
x = fmin_ADAM(Function_to_minimise, X0, fprime=calc_grad, learning_rate=1, maxiter=800, full_output=1, gtol=1e-5) #retall=1)
print(x)
|
python
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import options
from sentry.api.bases.project import ProjectEndpoint
from sentry.models import ProjectKey
class ProjectDocsEndpoint(ProjectEndpoint):
def get(self, request, project):
data = options.get('sentry:docs')
project_key = ProjectKey.get_default(project)
context = {
'platforms': data['platforms'],
}
if project_key:
context['dsn'] = project_key.dsn_private
context['dsnPublic'] = project_key.dsn_public
return Response(context)
|
python
|
import tensorflow as tf
from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices,\
make_d4_z2_indices, make_d4_p4m_indices, flatten_indices
from groupy.gconv.tensorflow_gconv.transform_filter import transform_filter_2d_nchw, transform_filter_2d_nhwc
def gconv2d(input, filter, strides, padding, gconv_indices, gconv_shape_info,
use_cudnn_on_gpu=None, data_format='NHWC', name=None):
"""
Tensorflow implementation of the group convolution.
This function has the same interface as the standard convolution nn.conv2d, except for two new parameters,
gconv_indices and gconv_shape_info. These can be obtained from gconv2d_util(), and are described below
:param input: a tensor with (batch, height, width, in channels) axes.
:param filter: a tensor with (ksize, ksize, in channels * in transformations, out channels) axes.
The shape for filter can be obtained from gconv2d_util().
:param strides: A list of ints. 1-D of length 4. The stride of the sliding window for each dimension of input.
Must be in the same order as the dimension specified with format.
:param padding: A string from: "SAME", "VALID". The type of padding algorithm to use.
:param gconv_indices: indices used in the filter transformation step of the G-Conv.
Can be obtained from gconv2d_util() or using a command like flatten_indices(make_d4_p4m_indices(ksize=3)).
:param gconv_shape_info: a tuple containing
(num output channels, num output transformations, num input channels, num input transformations, kernel size)
Can be obtained from gconv2d_util()
:param use_cudnn_on_gpu: an optional bool. Defaults to True.
:param data_format: the order of axes. Currently only NHWC is supported
:param name: a name for the operation (optional)
:return: tensor with (batch, height, width, out channels) axes.
"""
if data_format != 'NHWC':
raise NotImplementedError('Currently only NHWC data_format is supported. Got:' + str(data_format))
# Transform the filters
transformed_filter = transform_filter_2d_nhwc(w=filter, flat_indices=gconv_indices, shape_info=gconv_shape_info)
# Convolve input with transformed filters
conv = tf.nn.conv2d(input=input, filter=transformed_filter, strides=strides, padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, name=name)
return conv
def gconv2d_util(h_input, h_output, in_channels, out_channels, ksize):
"""
Convenience function for setting up static data required for the G-Conv.
This function returns:
1) an array of indices used in the filter transformation step of gconv2d
2) shape information required by gconv2d
3) the shape of the filter tensor to be allocated and passed to gconv2d
:param h_input: one of ('Z2', 'C4', 'D4'). Use 'Z2' for the first layer. Use 'C4' or 'D4' for later layers.
:param h_output: one of ('C4', 'D4'). What kind of transformations to use (rotations or roto-reflections).
The choice of h_output of one layer should equal h_input of the next layer.
:param in_channels: the number of input channels. Note: this refers to the number of (3D) channels on the group.
The number of 2D channels will be 1, 4, or 8 times larger, depending the value of h_input.
:param out_channels: the number of output channels. Note: this refers to the number of (3D) channels on the group.
The number of 2D channels will be 1, 4, or 8 times larger, depending on the value of h_output.
:param ksize: the spatial size of the filter kernels (typically 3, 5, or 7).
:return: gconv_indices, gconv_shape_info, w_shape
"""
if h_input == 'Z2' and h_output == 'C4':
gconv_indices = flatten_indices(make_c4_z2_indices(ksize=ksize))
nti = 1
nto = 4
elif h_input == 'C4' and h_output == 'C4':
gconv_indices = flatten_indices(make_c4_p4_indices(ksize=ksize))
nti = 4
nto = 4
elif h_input == 'Z2' and h_output == 'D4':
gconv_indices = flatten_indices(make_d4_z2_indices(ksize=ksize))
nti = 1
nto = 8
elif h_input == 'D4' and h_output == 'D4':
gconv_indices = flatten_indices(make_d4_p4m_indices(ksize=ksize))
nti = 8
nto = 8
else:
raise ValueError('Unknown (h_input, h_output) pair:' + str((h_input, h_output)))
w_shape = (ksize, ksize, in_channels * nti, out_channels)
gconv_shape_info = (out_channels, nto, in_channels, nti, ksize)
return gconv_indices, gconv_shape_info, w_shape
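# Usage sketch (hypothetical tensor names and shapes, TF1-style variable creation):
# the indices, shape info and filter shape returned above are wired straight into gconv2d.
#
#   gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
#       h_input='Z2', h_output='C4', in_channels=3, out_channels=16, ksize=3)
#   w = tf.get_variable('w', shape=w_shape)
#   y = gconv2d(input=x, filter=w, strides=[1, 1, 1, 1], padding='SAME',
#               gconv_indices=gconv_indices, gconv_shape_info=gconv_shape_info)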
def gconv2d_addbias(input, bias, nti=8):
"""
In a G-CNN, the feature maps are interpreted as functions on a group G instead of functions on the plane Z^2.
Just like how we use a single scalar bias per 2D feature map, in a G-CNN we should use a single scalar bias per
G-feature map. Failing to do this breaks the equivariance and typically hurts performance.
A G-feature map usually consists of a number (e.g. 4 or 8) adjacent channels.
This function will add a single bias vector to a stack of feature maps that has e.g. 4 or 8 times more 2D channels
than G-channels, by replicating the bias across adjacent groups of 2D channels.
:param input: tensor of shape (n, h, w, ni * nti), where n is the batch dimension, (h, w) are the height and width,
ni is the number of input G-channels, and nti is the number of transformations in H.
:param bias: tensor of shape (ni,)
:param nti: number of transformations, e.g. 4 for C4/p4 or 8 for D4/p4m.
:return: input with bias added
"""
# Minimal sketch (assumption: the nti transformation channels belonging to one
# G-feature map are stored adjacently along the channel axis, i.e. the 2D channel
# layout is (ni, nti) flattened, and the spatial/channel dims are statically known).
n, h, w, nc = input.get_shape().as_list()
input = tf.reshape(input, [-1, h, w, nc // nti, nti])
input = input + tf.reshape(bias, [1, 1, 1, -1, 1])
return tf.reshape(input, [-1, h, w, nc])
|
python
|
# Generated by Django 2.0.9 on 2019-12-05 20:27
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Curso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('create_at', models.DateTimeField(auto_now_add=True)),
('start', models.DateTimeField(blank=True, default=datetime.datetime(2019, 12, 5, 20, 27, 55, 729200, tzinfo=utc))),
('end', models.DateTimeField(blank=True, null=True)),
('document', models.FileField(blank=True, upload_to='documents/')),
],
),
migrations.CreateModel(
name='Interfaz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('document', models.FileField(blank=True, null=True, upload_to='documents/')),
('photo', models.ImageField(blank=True, null=True, upload_to='fotos/')),
('curso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interfaz', to='cursos.Curso')),
],
),
]
|
python
|
class SeedsNotFound(Exception):
pass
class ZoneNotFound(Exception):
pass
class TooManyZones(Exception):
pass
|
python
|
"""
@author: acfromspace
"""
"""
Notes:
Find the most common word from a paragraph that can't be a banned word.
"""
from collections import Counter
class Solution:
def most_common_word(self, paragraph: str, banned: [str]) -> str:
unbanned = []
for character in "!?',;.":
paragraph = paragraph.replace(character, " ")
paragraph_list = paragraph.lower().split()
for word in paragraph_list:
if word not in banned:
unbanned.append(word)
# Get the `most_common` element, which holds a key value, which then we need the key.
return Counter(unbanned).most_common(1)[0][0]
test = Solution()
paragraph = "kraq and jeff are talking about the problems with kraq jeff JEFF KRAQ are"
banned = ["jeff", "kraq"]
print("most_common_word():", test.most_common_word(paragraph, banned))
"""
Time complexity: O(p+b). "p" is the size of the `paragraph` and "b" is the size of `banned`.
Space complexity: O(p+b). To store the `paragraph_list` and the `banned` data structures.
"""
|
python
|
import itertools
import os
import random
import pytest
from polyswarmd.utils.bloom import BloomFilter
@pytest.fixture
def log_entries():
def _mk_address():
return os.urandom(20)
def _mk_topic():
return os.urandom(32)
return [(_mk_address(), [_mk_topic()
for _ in range(1, random.randint(0, 4))])
for _ in range(1, random.randint(0, 30))]
def check_bloom(bloom, log_entries):
for address, topics in log_entries:
assert address in bloom
for topic in topics:
assert topic in bloom
def test_bloom_filter_add_method(log_entries):
bloom = BloomFilter()
for address, topics in log_entries:
bloom.add(address)
for topic in topics:
bloom.add(topic)
check_bloom(bloom, log_entries)
def test_bloom_filter_extend_method(log_entries):
bloom = BloomFilter()
for address, topics in log_entries:
bloom.extend([address])
bloom.extend(topics)
check_bloom(bloom, log_entries)
def test_bloom_filter_from_iterable_method(log_entries):
bloomables = itertools.chain.from_iterable(
itertools.chain([address], topics) for address, topics in log_entries
)
bloom = BloomFilter.from_iterable(bloomables)
check_bloom(bloom, log_entries)
def test_casting_to_integer():
bloom = BloomFilter()
assert int(bloom) == 0
bloom.add(b'value 1')
bloom.add(b'value 2')
assert int(bloom) == int(
'63119152483043774890037882090529841075600744123634985501563996'
'49538536948165624479433922134690234594539820621615046612478986'
'72305890903532059401028759565544372404512800814146245947429340'
'89705729059810916441565944632818634262808769353435407547341248'
'57159120012171916234314838712163868338766358254974260070831608'
'96074485863379577454706818623806701090478504217358337630954958'
'46332941618897428599499176135798020580888127915804442383594765'
'16518489513817430952759084240442967521334544396984240160630545'
'50638819052173088777264795248455896326763883458932483359201374'
'72931724136975431250270748464358029482656627802817691648'
)
def test_casting_to_binary():
bloom = BloomFilter()
assert bin(bloom) == '0b0'
bloom.add(b'value 1')
bloom.add(b'value 2')
assert bin(bloom) == (
'0b1000000000000000000000000000000000000000001000000100000000000000'
'000000000000000000000000000000000000000000000010000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000001000000'
'000000000000000000000000000000000000000000000000000000000000000010'
'000000000000000000000000000000000000000100000000000000000000001000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000010000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000010000000000001000000000000001000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000001000000000000000000000000000000000000000000000000000100000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000100000000000000000'
'00000000000000000000000000000000000001000000000000000000000000'
)
def test_combining_filters():
b1 = BloomFilter()
b2 = BloomFilter()
b1.add(b'a')
b1.add(b'b')
b1.add(b'c')
b2.add(b'd')
b2.add(b'e')
b2.add(b'f')
b1.add(b'common')
b2.add(b'common')
assert b'a' in b1
assert b'b' in b1
assert b'c' in b1
assert b'a' not in b2
assert b'b' not in b2
assert b'c' not in b2
assert b'd' in b2
assert b'e' in b2
assert b'f' in b2
assert b'd' not in b1
assert b'e' not in b1
assert b'f' not in b1
assert b'common' in b1
assert b'common' in b2
b3 = b1 | b2
assert b'a' in b3
assert b'b' in b3
assert b'c' in b3
assert b'd' in b3
assert b'e' in b3
assert b'f' in b3
assert b'common' in b3
b4 = b1 + b2
assert b'a' in b4
assert b'b' in b4
assert b'c' in b4
assert b'd' in b4
assert b'e' in b4
assert b'f' in b4
assert b'common' in b4
b5 = BloomFilter(int(b1))
b5 |= b2
assert b'a' in b5
assert b'b' in b5
assert b'c' in b5
assert b'd' in b5
assert b'e' in b5
assert b'f' in b5
assert b'common' in b5
b6 = BloomFilter(int(b1))
b6 += b2
assert b'a' in b6
assert b'b' in b6
assert b'c' in b6
assert b'd' in b6
assert b'e' in b6
assert b'f' in b6
assert b'common' in b6
|
python
|
# -*- coding: utf-8 -*-
"""Unit test package for fv3config."""
|
python
|
from SimPy.SimulationRT import Simulation, Process, hold
import numpy as np
import scipy as sp
import scipy.io as spio
import networkx as nx
import matplotlib.pyplot as plt
import ConfigParser
from pylayers.util.project import *
import pylayers.util.pyutil as pyu
from pylayers.network.network import Network, Node, PNetwork
from pylayers.gis.layout import Layout
import copy
import pickle
import pdb
import os
class Save(Process):
"""
Save all variables of a simulnet simulation.
The Save process can be set up with the save.ini file from /<project>/ini
Attributes
----------
net : pylayers.network.network()
sim : SimPy.SimulationRT()
savemat : dictionary with all the saved results from a simulation
( obtained after self.export() )
Methods
-------
run ():
save the current simulation every k steps (setup into save.ini)
load():
Load saved results of a simulation. file extension .pck
export(etype) :
export the results into the etype format.
available format :
- 'python'
- 'matlab'
"""
def __init__(self, **args):
defaults = {'L': None,
'net': None,
'sim': None}
## initialize attributes
for key, value in defaults.items():
if key in args:
setattr(self, key, args[key])
else:
setattr(self, key, value)
args[key] = value
self.args = args
Process.__init__(self, name='save', sim=self.args['sim'])
self.C = ConfigParser.ConfigParser()
self.C.read(pyu.getlong('save.ini','ini'))
self.opt = dict(self.C.items('config'))
self.pos = dict(self.C.items('position'))
self.ldp = dict(self.C.items('ldp'))
self.wstd = dict(self.C.items('wstd'))
self.lpos = eval(self.pos['position'])
self.lldp = eval(self.ldp['ldp'])
self.lwstd = eval(self.wstd['wstd'])
self.sim = args['sim']
self.net = args['net']
def load(self,filename=[]):
""" Load a saved trace simulation
Examples
--------
>>> from pylayers.util.save import *
>>> S=Save()
>>> S.load()
"""
if filename == []:
filename = self.filename
out=[0]
infile = open(os.path.join(basename,pstruc['DIRNETSAVE'],filename), 'r')
while 1:
try:
out.append(pickle.load(infile))
except (EOFError, pickle.UnpicklingError):
break
out.pop(0)
infile.close()
dout= dict(out[-1])
return dout
def mat_export(self):
"""
export a saved simulation to a matlab file
Examples
--------
>>> from pylayers.util.save import *
>>> S=Save()
>>> S.mat_export()
"""
self.save=self.load()
self.savemat=copy.deepcopy(self.save)
nodes=self.save['saveopt']['type'].keys()
for inn,n in enumerate(nodes):
self.savemat['node_'+n]=self.save[n]
for n2 in nodes:
if n2 != n:
try:
self.savemat['node_'+n]['node_'+n2]=self.save[n][n2]
del self.savemat[n][n2]
except:
pass
del self.savemat[n]
for o in self.save['saveopt']:
if o =='subnet' and inn == 0:
for r in self.save['saveopt']['lwstd']:
li=self.save['saveopt'][o][r]
self.savemat['saveopt'][o][r]=['node_'+l for l in li]
else :
try:
self.savemat['saveopt'][o]['node_'+n]=self.save['saveopt'][o][n]
del self.savemat['saveopt'][o][n]
except:
pass
spio.savemat(os.path.join(basename,pstruc['DIRNETSAVE'],self.filename),self.savemat)
self.save=self.load()
def run(self):
"""
Run the save Result process
"""
self.save={}
self.filename = eval(self.opt['filename'])
        self.file=open(os.path.join(basename,pstruc['DIRNETSAVE'],self.filename),'w')
self.save['saveopt'] = {}
self.save['saveopt']['lpos'] = self.lpos
self.save['saveopt']['lldp'] = self.lldp
self.save['saveopt']['lwstd'] = self.lwstd
self.save['saveopt']['nbsamples'] = np.ceil(eval(self.sim.sim_opt['duration'])/eval(self.opt['save_update_time']))+1
self.save['saveopt']['duration'] = eval(self.sim.sim_opt['duration'])
self.save['saveopt']['save_update_time'] = eval(self.opt['save_update_time'])
pickle.dump(self.save, self.file)
self.file.close()
self.idx=0
        ### init save dictionary
self.save['saveopt']['Layout'] = self.L._filename
self.save['saveopt']['type'] = nx.get_node_attributes(self.net,'type')
self.save['saveopt']['epwr'] = nx.get_node_attributes(self.net,'epwr')
self.save['saveopt']['sens'] = nx.get_node_attributes(self.net,'sens')
self.save['saveopt']['subnet']={}
for wstd in self.lwstd:
self.save['saveopt']['subnet'][wstd]=self.net.SubNet[wstd].nodes()
[self.save.update({n:{}}) for n in self.net.nodes()]
        # find the size of the save array from the simulation duration and
        # the save sample time
nb_sample=np.ceil(eval(self.sim.sim_opt['duration'])/eval(self.opt['save_update_time']))+1
# create void array to be fill with simulation data
for n in self.net.nodes():
for position in self.lpos:
self.save[n][position]=np.zeros((nb_sample,2))*np.nan
for e in self.net.edges():
self.save[e[0]][e[1]]={}
self.save[e[1]][e[0]]={}
for wstd in self.lwstd:
self.save[e[0]][e[1]][wstd]={}
self.save[e[1]][e[0]][wstd]={}
for ldp in self.lldp:
self.save[e[0]][e[1]][wstd][ldp]=np.zeros((nb_sample,2))*np.nan
self.save[e[1]][e[0]][wstd][ldp]=np.zeros((nb_sample,2))*np.nan
while True:
rl={}
for wstd in self.lwstd:
for ldp in self.lldp:
rl[wstd+ldp]=nx.get_edge_attributes(self.net.SubNet[wstd],ldp)
for n in self.net.nodes():
for position in self.lpos:
try:
p = nx.get_node_attributes(self.net,position)
self.save[n][position][self.idx]=p[n]
except:
pass
for e in self.net.edges():
for wstd in self.lwstd:
for ldp in self.lldp:
try:
le=tuple([e[0],e[1],wstd])
self.save[e[0]][e[1]][wstd][ldp][self.idx]=rl[wstd+ldp][le]
self.save[e[1]][e[0]][wstd][ldp][self.idx]=rl[wstd+ldp][le]
except:
pass
self.file=open(os.path.join(basename,pstruc['DIRNETSAVE'],self.filename),'a')
pickle.dump(self.save, self.file)
self.file.close()
self.idx=self.idx+1
yield hold, self, eval(self.opt['save_update_time'])
|
python
|
# Import libraries
from bs4 import BeautifulSoup
import requests
import psycopg2
import dateutil.parser as p
from colorama import Fore, Back, Style
# Insert the results to the database
def insert_datatable(numberOfLinks, selected_ticker, filtered_links_with_dates, conn, cur):
if filtered_links_with_dates:
for link in filtered_links_with_dates:
cur.execute("INSERT INTO articles (SYMBOL, LINK, ARTICLE_DATE) VALUES ('{a}', '{b}', '{c}')".format(a=selected_ticker, b=link[0], c=link[1]))
conn.commit()
print(f"{Fore.RED}{numberOfLinks}.{Style.RESET_ALL}\t{Fore.CYAN}{link[1]}{Style.RESET_ALL}\t{Fore.GREEN}{link[0]}{Style.RESET_ALL}")
numberOfLinks += 1
else:
print(f"{Fore.GREEN}No links have been found in the date range given{Style.RESET_ALL}")
print('\n')
# Filter out any irrelevant articles based on their dates
def extract_date(x, dateToBegin, dateToEnd):
if x[1] >= dateToBegin and x[1] <= dateToEnd:
return x
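# Illustration (hedged sketch, values are placeholders and not part of the
# original script): with dateToBegin=date(2021, 3, 1) and dateToEnd=date(2021, 3, 31),
#   extract_date((url, date(2021, 3, 15)), dateToBegin, dateToEnd)
# returns the tuple unchanged, while an out-of-range date falls through and
# returns None, which the later filter(None, ...) call relies on to drop it.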
# Scrape the web pages and get the links
def get_news(dateToBegin, dateToEnd, endpoint, port, dbName, usr, masterUserPassword, selected_tickers):
# Get the year, month and day of the ending date in the query
endingDate = dateToEnd.strftime('%Y-%m-%d').split("-")
year = endingDate[0]
month = endingDate[1]
day = endingDate[2]
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
# Open database connection
conn = psycopg2.connect(host=endpoint, port=port, database=dbName, user=usr, password=masterUserPassword)
cur = conn.cursor()
# Scrape article links and dates
for selected_ticker in selected_tickers:
print('\n')
print(f"{Fore.MAGENTA}The following links have been collected and written to the database for{Style.RESET_ALL} {Fore.CYAN}{selected_ticker}: {Style.RESET_ALL}")
print('\n')
# Find the url of the page of the ending date
base_url = "https://www.marketwatch.com/search?q="+selected_ticker+"&m=Ticker&rpp=100&mp=2005&bd=true&bd=false&bdv="+month+"%2F"+day+"%2F"+year+"&rs=true"
page = 1
nav = "Next"
numberOfLinks = 1
# Keep crawling for more pages
while nav == "Next":
if page > 1:
new_page = "&o="+str(page)
else:
new_page = ""
# Scrape the target page
active_url = base_url + new_page
r = requests.get(active_url, headers=headers)
c = r.content
soup = BeautifulSoup(c, "html.parser")
# Find all results with the article links and dates
try:
resultlist = soup.findAll('div', attrs={'class' : 'resultlist'})[0]
except:
break
# Extract the links
search_results = resultlist.findAll('div', attrs={'class' : 'searchresult'})
links = [x.find('a')['href'] for x in search_results]
# Extract the dates
dates_and_times = resultlist.findAll('div', attrs={'class' : 'deemphasized'})
dates_extracted = [x.find('span').text.split("m")[-1].replace(".", "").lstrip() for x in dates_and_times]
article_dates = [p.parse(x).date() for x in dates_extracted]
# Merge links and dates
links_with_dates = list(zip(links, article_dates))
# Filter out any links that the dates are outside the query range
filtered_links_with_dates = list(filter(None, [extract_date(x, dateToBegin, dateToEnd) for x in links_with_dates]))
# Insert the results to the database
insert_datatable(numberOfLinks, selected_ticker, filtered_links_with_dates, conn, cur)
# Check if the next page is relevant
numberOfRelevantArticles = len(filtered_links_with_dates)
if numberOfRelevantArticles == 100:
try:
nav_links = soup.findAll('div', attrs={'class' : 'nextprevlinks'})
for nav_link in nav_links:
if "Next" in nav_link.text:
nav = "Next"
page += 100
numberOfLinks += 100
break
except:
nav = ""
else:
nav = ""
|
python
|
import random
import torch.nn as nn
import torch.nn.functional as F
from torch import LongTensor
from torch import from_numpy, ones, zeros
from torch.utils import data
from . import modified_linear
PATH_TO_SAVE_WEIGHTS = 'saved_weights/'
def get_layer_dims(dataname):
res_ = [1,2,2,4] if dataname in ['dsads'] else [1,2,4] if dataname in ['opp'] else [0.5, 1, 2] \
if dataname in ['hapt', 'milan', 'pamap', 'aruba'] else [500, 500] if dataname in ['cifar100'] else [100, 100, 100] \
if dataname in ['mnist', 'permuted_mnist'] else [1,2,2]
return res_
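# Illustration (sketch, the input dimension is assumed): get_layer_dims('dsads')
# returns [1, 2, 2, 4]; with an input dimension of 405 the hidden sizes become
# [405, 202, 202, 101]. For 'mnist'/'cifar100' the returned numbers are used
# directly as layer widths (e.g. [100, 100, 100]).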
class Net(nn.Module):
def __init__(self, input_dim, n_classes, dataname, lwf=False, cosine_liner=False):
super(Net, self).__init__()
self.dataname = dataname
layer_nums = get_layer_dims(self.dataname)
self.layer_sizes = layer_nums if self.dataname in ['cifar100', 'mnist'] else\
[int(input_dim / num) for num in layer_nums]
self.fc0 = nn.Linear(input_dim, self.layer_sizes[0])
if len(self.layer_sizes) == 2:
self.fc_penultimate = nn.Linear(self.layer_sizes[0], self.layer_sizes[1])
elif len(self.layer_sizes) == 3:
self.fc1 = nn.Linear(self.layer_sizes[0], self.layer_sizes[1])
self.fc_penultimate = nn.Linear(self.layer_sizes[1], self.layer_sizes[2])
elif (len(self.layer_sizes) == 4):
self.fc1 = nn.Linear(self.layer_sizes[0], self.layer_sizes[1])
self.fc2 = nn.Linear(self.layer_sizes[1], self.layer_sizes[2])
self.fc_penultimate = nn.Linear(self.layer_sizes[2], self.layer_sizes[3])
final_dim = self.fc_penultimate.out_features
self.fc = modified_linear.CosineLinear(final_dim, n_classes) if cosine_liner \
else nn.Linear(final_dim, n_classes, bias=lwf==False) # no biases for LwF
def forward(self, x):
x = F.relu(self.fc0(x))
if len(self.layer_sizes) > 2:
x = F.relu(self.fc1(x))
if len(self.layer_sizes) > 3:
x = F.relu(self.fc2(x))
x = F.relu(self.fc_penultimate(x))
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class Dataset(data.Dataset):
def __init__(self, features, labels):
self.labels = labels
self.features = features
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
X = from_numpy(self.features[idx])
y = self.labels[idx]
y = LongTensor([y])
return X, y
def get_sample(self, sample_size):
return random.sample(self.features, sample_size)
class BiasLayer(nn.Module):
def __init__(self, device):
super(BiasLayer, self).__init__()
self.beta = nn.Parameter(ones(1, requires_grad=True, device=device))
self.gamma = nn.Parameter(zeros(1, requires_grad=True, device=device))
def forward(self, x):
return self.beta * x + self.gamma
def printParam(self, i):
print(i, self.beta.item(), self.gamma.item())
def get_beta(self):
return self.beta
def get_gamma(self):
return self.gamma
def set_beta(self, new_beta):
self.beta = new_beta
def set_gamma(self, new_gamma):
self.gamma = new_gamma
def set_grad(self, bool_value):
self.beta.requires_grad = bool_value
self.gamma.requires_grad = bool_value
|
python
|
from core.advbase import *
def module():
return Pia
class Pia(Adv):
conf = {}
conf['slots.a'] = [
'Dragon_and_Tamer',
'Flash_of_Genius',
'Astounding_Trick',
'The_Plaguebringer',
'Dueling_Dancers'
]
conf['slots.d'] = 'Vayu'
conf['acl'] = """
`dragon(c3-s-end), not energy()=5 and s1.check()
`s3, not buff(s3)
`s2
`s4
`s1, buff(s3)
`fs, x=5
"""
conf['coabs'] = ['Blade','Dragonyule_Xainfried','Bow']
conf['share'] = ['Tobias']
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Benjamin Vial
# License: MIT
"""
The :mod:`pytheas.homogenization.twoscale2D` module implements tools for the
two scale convergence homogenization of 2D metamaterials for TM polarization
"""
from .femmodel import TwoScale2D
|
python
|
import os.path as osp
import numpy as np
import numpy.linalg as LA
import random
import open3d as o3
import torch
import common3Dfunc as c3D
from asm_pcd import asm
from ASM_Net import pointnet
"""
Path setter
"""
def set_paths( dataset_root, category ):
paths = {}
paths["trainset_path"] = osp.join(dataset_root,category,"train")
"""
paths["testset_path"] = osp.join(dataset_root,category,"test")
paths["valset_path"] = osp.join(dataset_root,category,"val")
paths["original_path"] = osp.join(dataset_root,category,"original")
paths["sorted_path"] = osp.join(dataset_root,category,"sorted")
paths["trainmodels_path"] = osp.join(dataset_root,category,"train_models")
paths["testmodels_path"] = osp.join(dataset_root,category,"test_models")
paths["valmodels_path"] = osp.join(dataset_root,category,"val_models")
"""
for p in paths.values():
if osp.exists(p) is not True:
print("!!ERROR!! Path not found. Following path is not found.")
print(p)
return False
return paths
def load_asmds( root, synset_names ):
""" load multiple Active Shape Model Deformations
Args:
root(str): Root directory
synset_names(str): List of class names.
The first element "BG" is ignored.
Return:
dict: A dictionary of ASMDeformation
"""
print("Root dir:", root )
asmds = {}
for s in range(len(synset_names)-1):
paths = set_paths( root, synset_names[s+1] )
trainset_path = paths["trainset_path"]
info = np.load( osp.join(trainset_path,"info.npz"))
asmd = asm.ASMdeformation( info )
asmds[synset_names[s+1]] = asmd
return asmds
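# Hypothetical usage sketch (the root path and class names below are
# placeholders, not values defined by this project):
#   asmds = load_asmds("/path/to/dataset_root", ["BG", "bottle", "bowl"])
#   bottle_asmd = asmds["bottle"]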
def load_models( root, dirname, n_epoch, synset_names, ddim, n_points, device ):
""" Load multiple network weights (for experiments)
Args:
root(str): Path to dataset root
dirname(str): Directory name of weights
n_epoch(int): choose the epoch of weights
        synset_names(list): List of class names; the first element, "BG", is ignored.
        ddim(int): # of dimensions used for deformation
n_points(int): # of points fed to the networks
device(str): device("cuda:0" or "cpu")
Return:
A dictionary of weights
"""
print("Root dir:", root )
models = {}
for s in range(len(synset_names)-1):
path = osp.join(root,
synset_names[s+1],
"weights",
dirname,
"model_"+str(n_epoch)+".pth")
print(" loading:", path )
total_dim = ddim+1 # deformation(ddim) + scale(1)
model = pointnet.ASM_Net(k = total_dim, num_points = n_points)
model.load_state_dict( torch.load(path) )
model.to(device)
model.eval()
models[synset_names[s+1]] = model
return models
def load_models_release( root, synset_names, ddim, n_points, device ):
""" Load multiple network weights (for release)
Args:
root(str): Path to model root
        synset_names(list): List of class names; the first element, "BG", is ignored.
        ddim(int): # of dimensions used for deformation
n_points(int): # of points fed to the networks
device(str): device("cuda:0" or "cpu")
Return:
A dictionary of weights
"""
print("Root dir:", root )
models = {}
for s in range(len(synset_names)-1):
path = osp.join(root,
synset_names[s+1],
"model.pth")
print(" loading:", path )
total_dim = ddim+1 # deformation(ddim) + scale(1)
model = pointnet.ASM_Net(k = total_dim, num_points = n_points)
model.load_state_dict( torch.load(path) )
model.to(device)
model.eval()
models[synset_names[s+1]] = model
return models
def get_pcd_from_rgbd( im_c, im_d, intrinsic ):
""" generate point cloud from cv2 image
Args:
im_c(ndarray 3ch): RGB image
im_d(ndarray 1ch): Depth image
intrinsic(PinholeCameraIntrinsic): intrinsic parameter
Return:
open3d.geometry.PointCloud: point cloud
"""
color_raw = o3.geometry.Image(im_c)
depth_raw = o3.geometry.Image(im_d)
rgbd_image = o3.geometry.RGBDImage.create_from_color_and_depth( color_raw, depth_raw,
depth_scale=1000.0,
depth_trunc=3.0,
convert_rgb_to_intensity=False )
pcd = o3.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic )
return pcd
def generate_pose():
""" generate pose from hemisphere-distributed viewpoints
"""
# y axis(yr): -pi - pi
# x axis(xr): 0 - 0.5pi
# view_direction(ar): -0.1pi - 0.1pi
yr = (random.random()*2.0*np.pi)-np.pi
xr = (random.random()*0.5*np.pi)
ar = (random.random()*0.2*np.pi)-(0.1*np.pi)
# x,y-axis
y = c3D.RPY2Matrix4x4( 0, yr, 0 )[:3,:3]
x = c3D.RPY2Matrix4x4( xr, 0, 0 )[:3,:3]
rot = np.dot( x, y )
# rotation around view axis
v = np.array([0.,0.,-1.]) #basis vector
rot_v = np.dot(x,v) # prepare axis
q = np.hstack([ar,rot_v]) # generate quaternion
q = q/LA.norm(q) # unit quaternion
pose = c3D.quaternion2rotation(q)
rot = np.dot(pose,rot)
return rot
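# Sanity-check sketch (not part of the original module): the returned matrix
# should be a proper rotation, i.e. orthonormal with determinant +1.
#   R = generate_pose()
#   assert np.allclose(np.dot(R, R.T), np.eye(3), atol=1e-6)
#   assert np.isclose(np.linalg.det(R), 1.0)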
def get_mask( mask_info, choice="pred" ):
"""
Args:
        mask_info(dict): object masks from "GT" and from Mask R-CNN (as used in NOCS, CVPR 2019)
        choice(str): mask source to use, "gt" (GT) or "pred" (Mask R-CNN).
Return:
tuple: mask
"""
key_id = choice+"_class_ids"
key_mask = choice+"_masks"
class_ids = mask_info[key_id]
mask = mask_info[key_mask]
return np.asarray(mask), np.asarray(class_ids)
def get_model_scale( image_path, model_root ):
model_path = None
meta_path = image_path + '_meta.txt'
sizes = []
class_ids = []
pcds = []
with open(meta_path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
words = line[:-1].split(' ')
model_path = osp.join( model_root, words[-1]+".obj")
pcd = o3.io.read_triangle_mesh(model_path)
bb = pcd.get_axis_aligned_bounding_box()
bbox = bb.get_max_bound() - bb.get_min_bound()
size = np.linalg.norm(bbox)
sizes.append(size)
class_ids.append(int(words[1]))
pcds.append(pcd)
return np.asarray(sizes), np.asarray(class_ids), pcds
|
python
|
import gym
import gym_maze
import copy
#env = gym.make("maze-random-10x10-plus-v0")
#env = gym.make("maze-sample-100x100-v0")
#env = gym.make("maze-random-30x30-plus-v0")
env_name= "maze-sample-10x10-v0"
env = gym.make(env_name)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
max_steps = env._max_episode_steps
threshold = env.spec.reward_threshold
print(state_size, action_size, max_steps, threshold)
#print(dir(env.action_space))
#env1 = copy.copy(env)
#env2 = copy.copy(env)
for i in range(10):
print(f"*** RUNNING ENVIRONMENT {i+1}")
copy_env = copy.copy(env)
observation = copy_env.reset()
st = 0
while True:
st += 1
copy_env.render()
action = copy_env.action_space.sample()
#print(observation, action)
observation, reward, done, info = copy_env.step(action)
if done or (st == copy_env._max_episode_steps - 1):
copy_env.close()
break
#done = False
#observation = env.reset()
#while True:
# env.render()
# action = env.action_space.sample()
# print(observation, action)
# observation, reward, done, info = env.step(action)
|
python
|
# program that picks a random number and prompts the user to guess until they guess the right number
# helen o'shea
# 20210211
import random
number = random.randint(0,100)
guess = int(input("Please guess the number between 0 and 100: "))
attempt = 1
while guess!=number:
if guess<number:
attempt +=1
guess = int(input("your guess is too low please guess again: "))
else:
attempt += 1
guess = int(input("your guess is too high please guess again: "))
print("you guessed {} correctly in {} attempts".format(guess, attempt))
|
python
|
# -*- coding: utf-8 -*-
"""
Script to retrieve regobs data relevant for forecast analysis at NVE.
"""
__author__ = 'kmu'
import datetime as dt
import pandas as pd
from varsomdata import getobservations as go
def get_snow_obs(from_date, to_date):
all_data_snow = go.get_all_observations(from_date, to_date, geohazard_tids=10)
return all_data_snow
def get_weak_layers_from_snow_profiles(from_date, to_date):
    snow_profiles = go.get_snow_profile(from_date, to_date)
    return snow_profiles
def get_danger_signs(from_date, to_date, region_ids):
    ds_list = go.get_danger_sign(from_date, to_date, region_ids=region_ids, location_id=None, group_id=None,
                                 observer_ids=None, observer_nick=None, observer_competence=None, output='List', geohazard_tids=10,
                                 lang_key=1)
df = go._make_data_frame(ds_list)
return df
def get_incident(from_date, to_date, region_ids=None, location_id=None, group_id=None, observer_ids=None, observer_nick=None, observer_competence=None, output='List', geohazard_tids=None, lang_key=1):
    inc_list = go.get_incident(from_date, to_date, region_ids=region_ids, location_id=location_id, group_id=group_id, observer_ids=observer_ids, observer_nick=observer_nick, observer_competence=observer_competence, output=output, geohazard_tids=10, lang_key=lang_key)
inc_list = [i.to_dict() for i in inc_list]
df = pd.DataFrame(inc_list)
return df
def get_stability_tests_for_article(from_date, to_date, region_ids):
st_list = go.get_column_test(from_date, to_date, region_ids)
_st = []
for st in st_list:
_st.append(st.OriginalData)
return _st
if __name__ == "__main__":
region_ids = [3003, 3007, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3022, 3023, 3024, 3027, 3028, 3029,
3031, 3032, 3034, 3035]
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2019, 1, 31)
#all_data_snow = get_snow_obs(from_date, to_date)
#ds = get_danger_signs(from_date, to_date, region_ids)
#inc = get_incident(from_date, to_date, region_ids=region_ids)
#inc.to_csv('../localstorage/aval_incidents_2013_2019.csv', index_label='index')
st_list = get_stability_tests_for_article(from_date, to_date, region_ids)
df = pd.DataFrame(st_list)
df.to_csv('../localstorage/stability_tests.csv', index_label='index')
k = 'm'
#aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
#df = pandas.DataFrame(aw_dict)
#df.to_csv('../localstorage/norwegian_avalanche_warnings_season_17_18.csv', index_label='index')
|
python
|
def f(*a<caret>rgs):
"""
"""
|
python
|
#!/usr/bin/python
# This creates the level1 fsf's and the script to run the feats on condor
import os
import glob
studydir ='/mnt/40TB-raid6/Experiments/FCTM_S/FCTM_S_Data/Analyses'
fsfdir="%s/group/lvl2_B_hab_feats_v1"%(studydir)
subdirs=glob.glob("%s/1[0-9][0-9][0-9][0-9]"%(studydir))
#subdirs=glob.glob("%s/18301"%(studydir))
setnum = 'B'
for dir in list(subdirs):
splitdir = dir.split('/')
splitdir_sub = splitdir[7] # You will need to edit this
subnum=splitdir_sub[-5:] # You also may need to edit this
subfeats=glob.glob("%s/model/B_hab_lvl1_v1/B_run[0-9].feat"%(dir))
if len(subfeats)==6: # Add your own second loop for 2 feat cases
print(subnum)
replacements = {'17271':subnum}
with open("%s/lvl2_B.fsf"%(fsfdir)) as infile:
with open("%s/B_hab-lvl2fe-TEMP%s.fsf"%(fsfdir, subnum), 'w') as outfile:
for line in infile:
                    for src, target in replacements.items():
line = line.replace(src, target)
outfile.write(line)
|
python
|
import tempfile
import mdtraj
import pandas as pd
from kmbio import PDB
from kmtools import sequence_tools, structure_tools
from .distances_and_orientations import (
construct_residue_df,
construct_residue_pairs_df,
residue_df_to_row,
residue_pairs_df_to_row,
validate_residue_df,
validate_residue_pairs_df,
)
def get_interaction_dataset(structure, r_cutoff=5):
"""Copied from "datapkg/pdb-analysis/notebooks/extract_pdb_interactions.ipynb" """
interactions = structure_tools.get_interactions(structure, r_cutoff=r_cutoff, interchain=False)
interactions_core, interactions_interface = structure_tools.process_interactions(interactions)
interactions_core_aggbychain = structure_tools.process_interactions_core(
structure, interactions_core
)
    # Not necessary to drop duplicates in our case
# interactions_core, interactions_core_aggbychain = structure_tools.drop_duplicates_core(
# interactions_core, interactions_core_aggbychain
# )
return interactions_core, interactions_core_aggbychain
def get_interaction_dataset_wdistances(structure_file, model_id, chain_id, r_cutoff=12):
structure = PDB.load(structure_file)
chain = structure[0][chain_id]
num_residues = len(list(chain.residues))
dd = structure_tools.DomainDef(model_id, chain_id, 1, num_residues)
domain = structure_tools.extract_domain(structure, [dd])
distances_core = structure_tools.get_distances(
domain.to_dataframe(), r_cutoff, groupby="residue"
)
assert (distances_core["residue_idx_1"] <= distances_core["residue_idx_2"]).all()
return domain, distances_core
GET_ADJACENCY_WITH_DISTANCES_ROW_ATTRIBUTES = [
"structure_id",
"model_id",
"chain_id",
"sequence",
"s_start",
"s_end",
"q_start",
"q_end",
"sseq",
"a2b",
"b2a",
"residue_idx_1_corrected",
"residue_idx_2_corrected",
]
def get_adjacency_with_distances_and_orientations(
row, max_cutoff=12, min_cutoff=None, structure_url_prefix="rcsb://"
):
""""""
missing_attributes = [
attr for attr in GET_ADJACENCY_WITH_DISTANCES_ROW_ATTRIBUTES if not hasattr(row, attr)
]
assert not missing_attributes, missing_attributes
# === Parse input structure ===
# Load structure
url = f"{structure_url_prefix}{row.structure_id.lower()}.cif.gz"
structure = PDB.load(url)
# Template sequence
chain_sequence = structure_tools.get_chain_sequence(
structure[row.model_id][row.chain_id], if_unknown="replace"
)
template_sequence = chain_sequence[int(row.s_start - 1) : int(row.s_end)]
assert len(template_sequence) == len(row.a2b)
# Target sequence
target_sequence = row.sequence[int(row.q_start - 1) : int(row.q_end)]
assert len(target_sequence) == len(row.b2a)
# Extract domain
dd = structure_tools.DomainDef(row.model_id, row.chain_id, int(row.s_start), int(row.s_end))
domain = structure_tools.extract_domain(structure, [dd])
assert template_sequence == structure_tools.get_chain_sequence(domain, if_unknown="replace")
assert template_sequence == row.sseq.replace("-", "")
# === Generate mdtraj trajectory ===
with tempfile.NamedTemporaryFile(suffix=".pdb") as pdb_file:
PDB.save(domain, pdb_file.name)
traj = mdtraj.load(pdb_file.name)
assert template_sequence == traj.top.to_fasta()[0]
# === Extract residues and residue-residue interactions ===
# Residue info
residue_df = construct_residue_df(traj)
validate_residue_df(residue_df)
residue_df["residue_idx_corrected"] = pd.array(
residue_df["residue_idx"].apply(
lambda idx: sequence_tools.convert_residue_index_a2b(idx, row.b2a)
),
dtype=pd.Int64Dtype(),
)
# Residue pair info
residue_pairs_df = construct_residue_pairs_df(traj)
validate_residue_pairs_df(residue_pairs_df)
for i in [1, 2]:
residue_pairs_df[f"residue_idx_{i}_corrected"] = pd.array(
residue_pairs_df[f"residue_idx_{i}"].apply(
lambda idx: sequence_tools.convert_residue_index_a2b(idx, row.b2a)
),
dtype=pd.Int64Dtype(),
)
# === Sanity check ===
# Get the set of interactions
interactions_1 = set(
residue_pairs_df[
(
residue_pairs_df["residue_idx_1_corrected"]
< residue_pairs_df["residue_idx_2_corrected"]
)
& (residue_pairs_df["distance"] <= 5.0)
][["residue_idx_1_corrected", "residue_idx_2_corrected"]].apply(tuple, axis=1)
)
# Get the reference set of interactions
interactions_2 = {
(int(r1), int(r2)) if r1 <= r2 else (int(r2), int(r1))
for r1, r2 in zip(row.residue_idx_1_corrected, row.residue_idx_2_corrected)
if pd.notnull(r1) and pd.notnull(r2)
}
assert not interactions_1 ^ interactions_2, interactions_1 ^ interactions_2
return {**residue_df_to_row(residue_df), **residue_pairs_df_to_row(residue_pairs_df)}
def get_adjacency_with_distances(
row, max_cutoff=12, min_cutoff=None, structure_url_prefix="rcsb://"
):
"""
Notes:
        - This is the 2018 version, where we calculated distances only.
"""
missing_attributes = [
attr for attr in GET_ADJACENCY_WITH_DISTANCES_ROW_ATTRIBUTES if not hasattr(row, attr)
]
assert not missing_attributes, missing_attributes
# Load structure
url = f"{structure_url_prefix}{row.structure_id.lower()}.cif.gz"
structure = PDB.load(url)
# Template sequence
chain_sequence = structure_tools.get_chain_sequence(
structure[row.model_id][row.chain_id], if_unknown="replace"
)
template_sequence = chain_sequence[int(row.s_start - 1) : int(row.s_end)]
assert len(template_sequence) == len(row.a2b)
# Target sequence
target_sequence = row.sequence[int(row.q_start - 1) : int(row.q_end)]
assert len(target_sequence) == len(row.b2a)
# Extract domain
dd = structure_tools.DomainDef(row.model_id, row.chain_id, int(row.s_start), int(row.s_end))
domain = structure_tools.extract_domain(structure, [dd])
assert template_sequence == structure_tools.get_chain_sequence(domain, if_unknown="replace")
assert template_sequence == row.sseq.replace("-", "")
# Get interactions
distances_core = structure_tools.get_distances(
domain, max_cutoff, min_cutoff, groupby="residue"
)
assert (distances_core["residue_idx_1"] <= distances_core["residue_idx_2"]).all()
# Map interactions to target
for i in [1, 2]:
distances_core[f"residue_idx_{i}_corrected"] = distances_core[f"residue_idx_{i}"].apply(
lambda idx: sequence_tools.convert_residue_index_a2b(idx, row.b2a)
)
# Remove missing values
distances_core = distances_core[
distances_core["residue_idx_1_corrected"].notnull()
& distances_core["residue_idx_2_corrected"].notnull()
]
# Convert to integers
distances_core[["residue_idx_1_corrected", "residue_idx_2_corrected"]] = distances_core[
["residue_idx_1_corrected", "residue_idx_2_corrected"]
].astype(int)
# Sanity check
assert (
distances_core["residue_idx_1_corrected"] < distances_core["residue_idx_2_corrected"]
).all()
# Get the set of interactions
interactions_1 = set(
distances_core[(distances_core["distance"] <= 5)][
["residue_idx_1_corrected", "residue_idx_2_corrected"]
].apply(tuple, axis=1)
)
# Get the reference set of interactions
interactions_2 = {
(int(r1), int(r2)) if r1 <= r2 else (int(r2), int(r1))
for r1, r2 in zip(row.residue_idx_1_corrected, row.residue_idx_2_corrected)
if pd.notnull(r1) and pd.notnull(r2)
}
assert not interactions_1 ^ interactions_2
return (
distances_core["residue_idx_1_corrected"].values,
distances_core["residue_idx_2_corrected"].values,
distances_core["distance"].values,
)
|
python
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2010 - 2014, Pascal Volk
# See COPYING for distribution information.
"""
vmm.password
~~~~~~~~~~~~~~~~~~~~~~~~~~~
vmm's password module to generate password hashes from
passwords or random passwords. This module provides following
functions:
hashed_password = pwhash(password[, scheme][, user])
random_password = randompw()
scheme, encoding = verify_scheme(scheme)
schemes, encodings = list_schemes()
scheme = extract_scheme(hashed_password)
"""
import hashlib
import re
from base64 import b64encode
from binascii import b2a_hex
from crypt import crypt
from random import SystemRandom
from subprocess import Popen, PIPE
from gettext import gettext as _
from vmm import ENCODING
from vmm.emailaddress import EmailAddress
from vmm.common import get_unicode, version_str
from vmm.constants import VMM_ERROR
from vmm.errors import VMMError
SALTCHARS = "./0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
PASSWDCHARS = "._-+#*23456789abcdefghikmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
DEFAULT_B64 = (None, "B64", "BASE64")
DEFAULT_HEX = (None, "HEX")
CRYPT_ID_MD5 = 1
CRYPT_ID_BLF = "2a"
CRYPT_ID_SHA256 = 5
CRYPT_ID_SHA512 = 6
CRYPT_SALT_LEN = 2
CRYPT_BLF_ROUNDS_MIN = 4
CRYPT_BLF_ROUNDS_MAX = 31
CRYPT_BLF_SALT_LEN = 22
CRYPT_MD5_SALT_LEN = 8
CRYPT_SHA2_ROUNDS_DEFAULT = 5000
CRYPT_SHA2_ROUNDS_MIN = 1000
CRYPT_SHA2_ROUNDS_MAX = 999999999
CRYPT_SHA2_SALT_LEN = 16
SALTED_ALGO_SALT_LEN = 4
cfg_dget = lambda option: None
_sys_rand = SystemRandom()
_choice = _sys_rand.choice
def _get_salt(s_len):
return "".join(_choice(SALTCHARS) for _ in range(s_len))
def _doveadmpw(password, scheme, encoding):
"""Communicates with Dovecot's doveadm and returns
the hashed password: {scheme[.encoding]}hash
"""
if encoding:
scheme = ".".join((scheme, encoding))
cmd_args = [
cfg_dget("bin.doveadm"),
"pw",
"-s",
scheme,
"-p",
get_unicode(password),
]
process = Popen(cmd_args, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if process.returncode:
raise VMMError(stderr.strip().decode(ENCODING), VMM_ERROR)
hashed = stdout.strip().decode(ENCODING)
if not hashed.startswith("{%s}" % scheme):
raise VMMError(
"Unexpected result from %s: %s" % (cfg_dget("bin.doveadm"), hashed),
VMM_ERROR,
)
return hashed
def _md4_new():
"""Returns an new MD4-hash object if supported by the hashlib -
otherwise `None`.
"""
try:
return hashlib.new("md4")
except ValueError as err:
if err.args[0].startswith("unsupported hash type"):
return None
else:
raise
def _format_digest(digest, scheme, encoding):
"""Formats the arguments to a string: {scheme[.encoding]}digest."""
if not encoding:
return "{%s}%s" % (scheme, digest)
return "{%s.%s}%s" % (scheme, encoding, digest)
def _clear_hash(password, scheme, encoding):
"""Generates a (encoded) CLEARTEXT/PLAIN 'hash'."""
password = password.decode(ENCODING)
if encoding:
if encoding == "HEX":
password = b2a_hex(password.encode()).decode()
else:
password = b64encode(password.encode()).decode()
return _format_digest(password, scheme, encoding)
return "{%s}%s" % (scheme, password)
def _get_crypt_blowfish_salt():
"""Generates a salt for Blowfish crypt."""
rounds = cfg_dget("misc.crypt_blowfish_rounds")
if rounds < CRYPT_BLF_ROUNDS_MIN:
rounds = CRYPT_BLF_ROUNDS_MIN
elif rounds > CRYPT_BLF_ROUNDS_MAX:
rounds = CRYPT_BLF_ROUNDS_MAX
return "$%s$%02d$%s" % (CRYPT_ID_BLF, rounds, _get_salt(CRYPT_BLF_SALT_LEN))
def _get_crypt_sha2_salt(crypt_id):
"""Generates a salt for crypt using the SHA-256 or SHA-512 encryption
method.
*crypt_id* must be either `5` (SHA-256) or `6` (SHA-512).
"""
assert crypt_id in (CRYPT_ID_SHA256, CRYPT_ID_SHA512), (
"invalid crypt " "id: %r" % crypt_id
)
    if crypt_id == CRYPT_ID_SHA512:
rounds = cfg_dget("misc.crypt_sha512_rounds")
else:
rounds = cfg_dget("misc.crypt_sha256_rounds")
if rounds < CRYPT_SHA2_ROUNDS_MIN:
rounds = CRYPT_SHA2_ROUNDS_MIN
elif rounds > CRYPT_SHA2_ROUNDS_MAX:
rounds = CRYPT_SHA2_ROUNDS_MAX
if rounds == CRYPT_SHA2_ROUNDS_DEFAULT:
return "$%d$%s" % (crypt_id, _get_salt(CRYPT_SHA2_SALT_LEN))
return "$%d$rounds=%d$%s" % (crypt_id, rounds, _get_salt(CRYPT_SHA2_SALT_LEN))
def _crypt_hash(password, scheme, encoding):
"""Generates (encoded) CRYPT/MD5/{BLF,MD5,SHA{256,512}}-CRYPT hashes."""
if scheme == "CRYPT":
salt = _get_salt(CRYPT_SALT_LEN)
elif scheme == "BLF-CRYPT":
salt = _get_crypt_blowfish_salt()
elif scheme in ("MD5-CRYPT", "MD5"):
salt = "$%d$%s" % (CRYPT_ID_MD5, _get_salt(CRYPT_MD5_SALT_LEN))
elif scheme == "SHA256-CRYPT":
salt = _get_crypt_sha2_salt(CRYPT_ID_SHA256)
else:
salt = _get_crypt_sha2_salt(CRYPT_ID_SHA512)
encrypted = crypt(password.decode(ENCODING), salt)
if encoding:
if encoding == "HEX":
encrypted = b2a_hex(encrypted.encode()).decode()
else:
encrypted = b64encode(encrypted.encode()).decode()
return _format_digest(encrypted, scheme, encoding)
def _md4_hash(password, scheme, encoding):
"""Generates encoded PLAIN-MD4 hashes."""
md4 = _md4_new()
if md4:
md4.update(password)
if encoding in DEFAULT_HEX:
digest = md4.hexdigest()
else:
digest = b64encode(md4.digest()).decode()
return _format_digest(digest, scheme, encoding)
return _doveadmpw(password, scheme, encoding)
def _md5_hash(password, scheme, encoding, user=None):
"""Generates DIGEST-MD5 aka PLAIN-MD5 and LDAP-MD5 hashes."""
md5 = hashlib.md5()
if scheme == "DIGEST-MD5":
md5.update(user.localpart.encode() + b":" + user.domainname.encode() + b":")
md5.update(password)
if (scheme in ("PLAIN-MD5", "DIGEST-MD5") and encoding in DEFAULT_HEX) or (
scheme == "LDAP-MD5" and encoding == "HEX"
):
digest = md5.hexdigest()
else:
digest = b64encode(md5.digest()).decode()
return _format_digest(digest, scheme, encoding)
def _ntlm_hash(password, scheme, encoding):
"""Generates NTLM hashes."""
md4 = _md4_new()
if md4:
password = b"".join(bytes(x) for x in zip(password, bytes(len(password))))
md4.update(password)
if encoding in DEFAULT_HEX:
digest = md4.hexdigest()
else:
digest = b64encode(md4.digest()).decode()
return _format_digest(digest, scheme, encoding)
return _doveadmpw(password, scheme, encoding)
def _create_hashlib_hash(algorithm, with_salt=False):
def hash_password(password, scheme, encoding):
# we default to an empty byte-string to keep the internal logic
# clean as it behaves like we would not have used a salt
salt = _get_salt(SALTED_ALGO_SALT_LEN).encode() if with_salt else b""
_hash = algorithm(password + salt)
if encoding in DEFAULT_B64:
digest = b64encode(_hash.digest() + salt).decode()
else:
digest = _hash.hexdigest() + b2a_hex(salt).decode()
return _format_digest(digest, scheme, encoding)
return hash_password
_sha1_hash = _create_hashlib_hash(hashlib.sha1)
_sha256_hash = _create_hashlib_hash(hashlib.sha256)
_sha512_hash = _create_hashlib_hash(hashlib.sha512)
_smd5_hash = _create_hashlib_hash(hashlib.md5, with_salt=True)
_ssha1_hash = _create_hashlib_hash(hashlib.sha1, with_salt=True)
_ssha256_hash = _create_hashlib_hash(hashlib.sha256, with_salt=True)
_ssha512_hash = _create_hashlib_hash(hashlib.sha512, with_salt=True)
_scheme_info = {
"CLEAR": (_clear_hash, 0x2010DF00),
"CLEARTEXT": (_clear_hash, 0x10000F00),
"CRAM-MD5": (_doveadmpw, 0x10000F00),
"CRYPT": (_crypt_hash, 0x10000F00),
"DIGEST-MD5": (_md5_hash, 0x10000F00),
"HMAC-MD5": (_doveadmpw, 0x10000F00),
"LANMAN": (_doveadmpw, 0x10000F00),
"LDAP-MD5": (_md5_hash, 0x10000F00),
"MD5": (_crypt_hash, 0x10000F00),
"MD5-CRYPT": (_crypt_hash, 0x10000F00),
"NTLM": (_ntlm_hash, 0x10000F00),
"OTP": (_doveadmpw, 0x10100A01),
"PLAIN": (_clear_hash, 0x10000F00),
"PLAIN-MD4": (_md4_hash, 0x10000F00),
"PLAIN-MD5": (_md5_hash, 0x10000F00),
"RPA": (_doveadmpw, 0x10000F00),
"SCRAM-SHA-1": (_doveadmpw, 0x20200A01),
"SHA": (_sha1_hash, 0x10000F00),
"SHA1": (_sha1_hash, 0x10000F00),
"SHA256": (_sha256_hash, 0x10100A01),
"SHA512": (_sha512_hash, 0x20000B03),
"SKEY": (_doveadmpw, 0x10100A01),
"SMD5": (_smd5_hash, 0x10000F00),
"SSHA": (_ssha1_hash, 0x10000F00),
"SSHA256": (_ssha256_hash, 0x10200A04),
"SSHA512": (_ssha512_hash, 0x20000B03),
}
def extract_scheme(password_hash):
"""Returns the extracted password scheme from *password_hash*.
If the scheme couldn't be extracted, **None** will be returned.
"""
scheme = re.match(r"^\{([^\}]{3,37})\}", password_hash)
if scheme:
return scheme.groups()[0]
return scheme
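# Illustrative values (sketch, not from the original module):
#   extract_scheme('{SSHA256.HEX}abcdef...')  ->  'SSHA256.HEX'
#   extract_scheme('no-scheme-prefix')        ->  None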
def list_schemes():
"""Returns the tuple (schemes, encodings).
`schemes` is an iterator for all supported password schemes (depends on
the used Dovecot version and features of the libc).
`encodings` is a tuple with all usable encoding suffixes.
"""
dcv = cfg_dget("misc.dovecot_version")
schemes = (k for (k, v) in _scheme_info.items() if v[1] <= dcv)
encodings = (".B64", ".BASE64", ".HEX")
return schemes, encodings
def verify_scheme(scheme):
"""Checks if the password scheme *scheme* is known and supported by the
configured `misc.dovecot_version`.
    The *scheme* may be a password scheme's name (e.g.: 'PLAIN') or a scheme
    name with an encoding suffix (e.g. 'PLAIN.BASE64'). If the scheme is
known and supported by the used Dovecot version,
a tuple ``(scheme, encoding)`` will be returned.
The `encoding` in the tuple may be `None`.
Raises a `VMMError` if the password scheme:
* is unknown
* depends on a newer Dovecot version
      * has an unknown encoding suffix
"""
assert isinstance(scheme, str), "Not a str: {!r}".format(scheme)
scheme_encoding = scheme.upper().split(".")
scheme = scheme_encoding[0]
if scheme not in _scheme_info:
raise VMMError(_("Unsupported password scheme: '%s'") % scheme, VMM_ERROR)
if cfg_dget("misc.dovecot_version") < _scheme_info[scheme][1]:
raise VMMError(
_("The password scheme '%(scheme)s' requires Dovecot " ">= v%(version)s.")
% {"scheme": scheme, "version": version_str(_scheme_info[scheme][1])},
VMM_ERROR,
)
if len(scheme_encoding) > 1:
if scheme_encoding[1] not in ("B64", "BASE64", "HEX"):
raise VMMError(
_("Unsupported password encoding: '%s'") % scheme_encoding[1], VMM_ERROR
)
encoding = scheme_encoding[1]
else:
encoding = None
return scheme, encoding
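# Illustrative behaviour (sketch; assumes misc.dovecot_version is configured
# high enough for the schemes shown):
#   verify_scheme('PLAIN.BASE64')  ->  ('PLAIN', 'BASE64')
#   verify_scheme('SSHA512')       ->  ('SSHA512', None)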
def pwhash(password, scheme=None, user=None):
"""Generates a password hash from the plain text *password* string.
If no *scheme* is given the password scheme from the configuration will
be used for the hash generation. When 'DIGEST-MD5' is used as scheme,
also an EmailAddress instance must be given as *user* argument.
"""
if not isinstance(password, str):
raise TypeError("Password is not a string: %r" % password)
password = password.encode(ENCODING).strip()
if not password:
raise ValueError("Could not accept empty password.")
if scheme is None:
scheme = cfg_dget("misc.password_scheme")
scheme, encoding = verify_scheme(scheme)
if scheme == "DIGEST-MD5":
assert isinstance(user, EmailAddress)
return _md5_hash(password, scheme, encoding, user)
return _scheme_info[scheme][0](password, scheme, encoding)
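# Hedged usage sketch (requires cfg_dget to be wired to a real configuration
# before the module is used; the digest shown is a placeholder):
#   pwhash('s3cr3t', scheme='SSHA256')  ->  '{SSHA256}<base64 digest+salt>'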
def randompw(pw_len):
"""Generates a plain text random password.
The length of the password can be configured in the ``vmm.cfg``
(account.password_length).
"""
if pw_len < 8:
pw_len = 8
return "".join(_sys_rand.sample(PASSWDCHARS, pw_len))
# Check for Blowfish/SHA-256/SHA-512 support in crypt.crypt()
if "$2a$04$0123456789abcdefABCDE.N.drYX5yIAL1LkTaaZotW3yI0hQhZru" == crypt(
"08/15!test~4711", "$2a$04$0123456789abcdefABCDEF$"
):
_scheme_info["BLF-CRYPT"] = (_crypt_hash, 0x20000B06)
if (
"$5$rounds=1000$0123456789abcdef$K/DksR0DT01hGc8g/kt9McEgrbFMKi9qrb1jehe7hn4"
== crypt("08/15!test~4711", "$5$rounds=1000$0123456789abcdef$")
):
_scheme_info["SHA256-CRYPT"] = (_crypt_hash, 0x20000B06)
if (
"$6$rounds=1000$0123456789abcdef$ZIAd5WqfyLkpvsVCVUU1GrvqaZTqvhJoouxdSqJO71l9Ld3"
"tVrfOatEjarhghvEYADkq//LpDnTeO90tcbtHR1"
== crypt("08/15!test~4711", "$6$rounds=1000$0123456789abcdef$")
):
_scheme_info["SHA512-CRYPT"] = (_crypt_hash, 0x20000B06)
del cfg_dget
|
python
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import gettext as _
from .models import *
class MyUserCreationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(MyUserCreationForm, self).__init__(*args, **kwargs)
self.fields['email'].required = True
class Meta(UserCreationForm.Meta):
model = student
fields = ('username', 'email', 'first_name', 'last_name', 'state', 'city',
'educational_role', 'institute', 'language', 'password1',
'password2')
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = student
class MyUserAdmin(UserAdmin):
form = MyUserChangeForm
add_form = MyUserCreationForm
fieldsets = UserAdmin.fieldsets + (
(None, {'fields': ('username', 'email', )}),
(_('Personal info'), {'fields': ('first_name', 'last_name',
'state', 'city', 'educational_role', 'institute', 'language')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'first_name', 'last_name', 'state', 'city',
'educational_role', 'institute', 'language', 'password1',
'password2')}
),
)
admin.site.register(board)
admin.site.register(exam)
try:
admin.site.unregister(User)
except:
pass
admin.site.register(student, MyUserAdmin)
admin.site.register(educational_institute)
admin.site.register(search_result)
|
python
|
from memoria import *
class Filas():
def __init__(self, dic_process_id):
self.filas = [[],[],[],[]]
self.dic_process_id = dic_process_id
self.memoria = Memoria()
self.ultimo_executado = None
        self.qtd_processos = len(dic_process_id) # number of processes.
        self.aging = 5 # amount of time without running before a priority boost.
self.qtd_proc_fin = 0
return
def insere_processo(self, processo) :
        ## inserts a process into the queue that matches its priority
        """Should we check here whether there is room to run it, or only at execution time?"""
prioridade = processo.prioridade
if (prioridade == 0 ):
self.filas[0].append(processo.id)
elif (prioridade == 1):
self.filas[1].append(processo.id)
elif (prioridade == 2):
self.filas[2].append(processo.id)
elif (prioridade == 3):
self.filas[3].append(processo.id)
return
def executa_processo(self):
""" verifica qual é o processoa ser executado.
e executa o mesmo.
se acabar o tempo, apaga ele da fila.
"""
def _executa( id , fila_atual):
"""retorna true se um processo acabou. Falso se não. """
self.ultimo_executado = id
if ( self.dic_process_id[id].tempo_processador > 0):
self.dic_process_id[id].tempo_processador -= 1
if (self.dic_process_id[id].tempo_processador == 0 ) :
self.qtd_proc_fin += 1
self.remove_processo(fila_atual)
                    # report that it finished
return True
            # report that it has not finished
return False
self.ultimo_executado = None
for i in range(0,4):
if (len(self.filas[i]) > 0):
acabou =_executa(self.filas[i][0] , i)
return acabou
return False
def remove_processo(self, fila_atual):
        ## removes a process from the given queue
self.filas[fila_atual] = self.filas[fila_atual][1:]
return
def aumenta_prioridade(self):
#print(self.filas)
for filas in self.filas:
for processo in filas:
pid = processo
#print('pid',pid,'\t','ultimo',self.ultimo_executado)
if (pid != self.ultimo_executado):
self.dic_process_id[pid].tempo_ultima_execucao += 1
                    # if it has waited past the aging threshold without running, raise its priority by 1.
if (self.dic_process_id[pid].tempo_ultima_execucao >= self.aging):
if (self.dic_process_id[pid].prioridade > 1):
prio = self.dic_process_id[pid].prioridade
                            # remove the process from its old queue
self.filas[prio] = [x for x in self.filas[prio] if x != pid]
self.dic_process_id[pid].prioridade -= 1
self.insere_processo(self.dic_process_id[pid])
self.dic_process_id[pid].tempo_ultima_execucao = 0
else:
self.dic_process_id[pid].tempo_ultima_execucao = 0
return
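    # Note on the aging rule above (added for clarity): a process that waits
    # `aging` ticks (5 here) without running moves up one queue, but only down
    # to priority 1; queue 0 is only reachable for processes created with priority 0.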
def log_filas(self):
log = set()
for filas in self.filas:
for pid in filas:
log.add(pid)
return log if len(log) > 0 else set([None])
def __repr__(self):
return str(self.filas)
|
python
|
def assert_keys_exist(obj, keys):
assert set(keys) <= set(obj.keys())
|
python
|
#!/usr/bin/env python3
import requests
from datetime import datetime, timedelta
import time
import numpy as np
import pandas as pd
import psycopg2
from psycopg2.extras import execute_values
import config as cfg
def connect_to_rds():
conn = psycopg2.connect(
host=cfg.HOST,
database=cfg.DATABASE,
user=cfg.UID,
password=cfg.PWD)
return conn
def get_epoch_and_pst_24hr():
utc = datetime.utcnow()
pst = timedelta(hours=7)
current_hour = (utc - pst).hour
epoch = round(utc.timestamp())
return current_hour, epoch
def remove_agency_tag(id_string):
underscore_loc = id_string.find('_')
final = int(id_string[underscore_loc+1:])
return(final)
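# e.g. remove_agency_tag('1_40444444') -> 40444444 (OneBusAway ids are prefixed
# with the agency id and an underscore; the id shown is illustrative only).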
def query_active_trips(key, endpoint):
call_text = endpoint + key
response = requests.get(call_text)
response = response.json()
return response
def clean_active_trips(response):
active_trip_statuses = response['data']['list']
to_remove = []
# Find indices of trips that are inactive or have no data
for i, bus in enumerate(response['data']['list']):
if bus['tripId'] == '' or bus['status'] == 'CANCELED' or bus['location'] == None:
to_remove.append(i)
# Remove inactive trips starting with the last index
for index in sorted(to_remove, reverse=True):
del active_trip_statuses[index]
return active_trip_statuses
def upload_to_rds(to_upload, conn, collected_time):
to_upload_list = []
for bus_status in to_upload:
to_upload_list.append(
(str(remove_agency_tag(bus_status['tripId'])),
str(remove_agency_tag(bus_status['vehicleId'])),
str(round(bus_status['location']['lat'], 10)),
str(round(bus_status['location']['lon'], 10)),
str(round(bus_status['tripStatus']['orientation'])),
str(bus_status['tripStatus']['scheduleDeviation']),
str(round(bus_status['tripStatus']['totalDistanceAlongTrip'], 10)),
str(round(bus_status['tripStatus']['distanceAlongTrip'], 10)),
str(remove_agency_tag(bus_status['tripStatus']['closestStop'])),
str(remove_agency_tag(bus_status['tripStatus']['nextStop'])),
str(bus_status['tripStatus']['lastLocationUpdateTime'])[:-3],
str(collected_time)))
with conn.cursor() as curs:
try:
args_str = ','.join(curs.mogrify('(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', x).decode('utf-8') for x in to_upload_list)
query_str = 'INSERT INTO active_trips_study (tripid, vehicleid, lat, lon, orientation, scheduledeviation, totaltripdistance, tripdistance, closeststop, nextstop, locationtime, collectedtime) VALUES ' + args_str
curs.execute(query_str)
conn.commit()
except:
# Catch all errors and continue to keep server up and running
conn.rollback()
return query_str
def main_function():
endpoint = 'http://api.pugetsound.onebusaway.org/api/where/vehicles-for-agency/1.json?key='
conn = connect_to_rds()
current_hour, current_epoch = get_epoch_and_pst_24hr()
while current_hour < 19:
response = query_active_trips(cfg.API_KEY, endpoint)
current_hour, current_epoch = get_epoch_and_pst_24hr()
cleaned_response = clean_active_trips(response)
args_str = upload_to_rds(cleaned_response, conn, current_epoch)
time.sleep(8)
conn.close()
if __name__ == "__main__":
main_function()
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from itertools import product, count
from matplotlib.colors import LinearSegmentedColormap
# The commented-out generator below produces more vectors pointing diagonally
# than vectors pointing along an axis, so angles are drawn uniformly instead.
# # generate uniform unit vectors
# def generate_unit_vectors(n):
# 'Generates matrix NxN of unit length vectors'
# v = np.random.uniform(-1, 1, (n, n, 2))
# l = np.sqrt(v[:, :, 0] ** 2 + v[:, :, 1] ** 2).reshape(n, n, 1)
# v /= l
# return v
def generate_unit_vectors(n,m):
    'Generates an n x m matrix of unit-length vectors'
phi = np.random.uniform(0, 2*np.pi, (n, m))
v = np.stack((np.cos(phi), np.sin(phi)), axis=-1)
return v
# quintic interpolation
def qz(t):
return t * t * t * (t * (t * 6 - 15) + 10)
# cubic interpolation
def cz(t):
return -2 * t * t * t + 3 * t * t
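# Both fade curves map [0, 1] onto [0, 1] with zero slope at the endpoints,
# e.g. qz(0.0) == 0.0, qz(0.5) == 0.5, qz(1.0) == 1.0 (and likewise for cz);
# this is what smooths the interpolation between grid nodes.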
def generate_2D_perlin_noise(size = (200,200), ns=1):
'''
generate_2D_perlin_noise(size, ns)
Generate 2D array of size x size filled with Perlin noise.
Parameters
----------
size : int or (int, int)
Size of 2D array size x size.
ns : int
Distance between nodes.
Returns
-------
m : ndarray
The 2D array filled with Perlin noise.
'''
if type(size) == int:
size = (size, size)
    assert size[0] % ns == 0 and size[1] % ns == 0, 'size must be divisible by ns; choose another node distance (ns)'
nc = [int(size[0] / ns), int(size[1] / ns)] # number of nodes
grid_size_h = int(size[0] / ns + 1) # number of points in grid
grid_size_w = int(size[1] / ns + 1) # number of points in grid
# generate grid of vectors
v = generate_unit_vectors(grid_size_h, grid_size_w)
# generate some constans in advance
ad, ar = np.arange(ns), np.arange(-ns, 0, 1)
bd, br = np.arange(ns), np.arange(-ns, 0, 1)
# vectors from each of the 4 nearest nodes to a point in the NSxNS patch
vd = np.zeros((ns, ns, 4, 1, 2))
# for (l1, l2), c in zip(product((ad, ar), repeat=2), count()):
vd[:, :, 0, 0] = np.stack(np.meshgrid(bd, ad, indexing='xy'), axis=2)
vd[:, :, 1, 0] = np.stack(np.meshgrid(br, ad, indexing='xy'), axis=2)
vd[:, :, 2, 0] = np.stack(np.meshgrid(bd, ar, indexing='xy'), axis=2)
vd[:, :, 3, 0] = np.stack(np.meshgrid(br, ar, indexing='xy'), axis=2)
# interpolation coefficients
d = qz(np.stack((np.zeros((ns, ns, 2)),
np.stack(np.meshgrid(ad, bd, indexing='ij'), axis=2)),
axis=2)/ns)
dd = np.stack(np.meshgrid(ad, bd, indexing='ij'), axis=2)
dd = dd.astype('float')
d[:, :, 0] = 1 - d[:, :, 1]
# make copy and reshape for convenience
d0 = d[..., 0].copy().reshape(ns, ns, 1, 2)
d1 = d[..., 1].copy().reshape(ns, ns, 2, 1)
# print(d0,d1)
    # make an empty matrix
m = np.zeros((size[0], size[1]))
# reshape for convenience
t = m.reshape(nc[0], ns, nc[1], ns)
# calculate values for a NSxNS patch at a time
for i in np.arange(nc[0]):
for j in np.arange(nc[1]): # loop through the grid
# get four node vectors
av = v[i:i+2, j:j+2].reshape(4, 2, 1)
# 'vector from node to point' dot 'node vector'
at = np.matmul(vd, av).reshape(ns, ns, 2, 2)
# horizontal and vertical interpolation
t[i, :, j, :] = np.matmul(np.matmul(d0, at), d1).reshape(ns, ns)
return m
if __name__ == "__main__":
img = generate_2D_perlin_noise(200, 20)
plt.figure()
plt.imshow(img, cmap=cm.gray)
img = generate_2D_perlin_noise((200,300), 10)
print(type(img), img.shape, img.min(), img.max())
plt.figure()
plt.imshow(img, cmap=cm.gray)
plt.axis('off')
img = generate_2D_perlin_noise((200,50), 25)
print(type(img), img.shape, img.min(), img.max())
plt.figure()
plt.imshow(img, cmap=cm.gray)
plt.axis('off')
plt.figure()
plt.imshow(img>3, cmap=cm.gray)
plt.figure()
plt.imshow(img>1, cmap=cm.gray)
# generate "sky"
#img0 = generate_2D_perlin_noise(400, 80)
#img1 = generate_2D_perlin_noise(400, 40)
#img2 = generate_2D_perlin_noise(400, 20)
#img3 = generate_2D_perlin_noise(400, 10)
#
#img = (img0 + img1 + img2 + img3) / 4
#cmap = LinearSegmentedColormap.from_list('sky',
# [(0, '#0572D1'),
# (0.75, '#E5E8EF'),
# (1, '#FCFCFC')])
#img = cm.ScalarMappable(cmap=cmap).to_rgba(img)
#plt.imshow(img)
|
python
|
from aiogram import Dispatcher
from bulletin_board_bot.misc.user_data import UserDataStorage
from bulletin_board_bot.dependencies import DIContainer
from bulletin_board_bot.middlewares.di import DIContainerMiddleware
from bulletin_board_bot.middlewares.userdata import UserDataMiddleware
def setup_middlewares(dp: Dispatcher,
user_data_storage: UserDataStorage,
container: DIContainer):
dp.setup_middleware(UserDataMiddleware(user_data_storage))
dp.setup_middleware(DIContainerMiddleware(container))
|
python
|
from django.contrib import admin
from core.models import Profile, BraFitting, Suggestion, Resource
# Register your models here.
@admin.register(BraFitting)
class BraFittingAdmin(admin.ModelAdmin):
pass
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
pass
@admin.register(Suggestion)
class SuggestionAdmin(admin.ModelAdmin):
pass
@admin.register(Resource)
class ResourceAdmin(admin.ModelAdmin):
pass
|
python
|
"""Tests for _get_tablename_schema_names association schemas function."""
import pytest
from open_alchemy.schemas import association
class TestGetTablenameSchemaNames:
"""Tests for _get_tablename_schema_names."""
# pylint: disable=protected-access
TESTS = [
pytest.param({}, set(), {}, id="empty"),
pytest.param({"Schema1": {}}, set(), {}, id="single not constructable"),
pytest.param(
{"Schema1": {"x-tablename": "table 1"}},
set("table 2"),
{},
id="single miss",
),
pytest.param(
{"Schema1": {"x-tablename": "table 1"}},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit",
),
pytest.param(
{
"Schema1": {
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-inherits": True},
]
},
"RefSchema": {"x-tablename": "table 1"},
},
{"table 1"},
{"table 1": ("RefSchema", ["Schema1", "RefSchema"])},
id="single hit $ref first",
),
pytest.param(
{
"RefSchema": {"x-tablename": "table 1"},
"Schema1": {
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-inherits": True},
]
},
},
{"table 1"},
{"table 1": ("RefSchema", ["RefSchema", "Schema1"])},
id="single hit $ref second",
),
pytest.param(
{"Schema1": {"allOf": [{"x-tablename": "table 1"}]}},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit allOf",
),
pytest.param(
{
"Schema1": {
"allOf": [
{"x-tablename": "table 1"},
{"$ref": "#/components/schemas/RefSchema"},
]
},
"RefSchema": {"x-tablename": "ref_table"},
},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit allOf local $ref local first",
),
pytest.param(
{
"Schema1": {
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-tablename": "table 1"},
]
},
"RefSchema": {"x-tablename": "ref_table"},
},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit allOf local $ref $ref first",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
set(),
{},
id="multiple miss",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="multiple first hit",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
{"table 2"},
{"table 2": ("Schema2", ["Schema2"])},
id="multiple second hit",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
{"table 1", "table 2"},
{"table 1": ("Schema1", ["Schema1"]), "table 2": ("Schema2", ["Schema2"])},
id="multiple all hit",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 1"},
},
{"table 1"},
{"table 1": ("Schema2", ["Schema1", "Schema2"])},
id="multiple same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
"Schema3": {"x-tablename": "table 3"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema1", ["Schema1"]),
"table 2": ("Schema2", ["Schema2"]),
"table 3": ("Schema3", ["Schema3"]),
},
id="many different tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 1"},
"Schema3": {"x-tablename": "table 3"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema2", ["Schema1", "Schema2"]),
"table 3": ("Schema3", ["Schema3"]),
},
id="many different first middle same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
"Schema3": {"x-tablename": "table 1"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema3", ["Schema1", "Schema3"]),
"table 2": ("Schema2", ["Schema2"]),
},
id="many first last same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
"Schema3": {"x-tablename": "table 2"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema1", ["Schema1"]),
"table 2": ("Schema3", ["Schema2", "Schema3"]),
},
id="many middle last same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 1"},
"Schema3": {"x-tablename": "table 1"},
},
{"table 1", "table 2", "table 3"},
{"table 1": ("Schema3", ["Schema1", "Schema2", "Schema3"])},
id="many all same tablename",
),
]
@staticmethod
@pytest.mark.parametrize("schemas, tablenames, expected_mapping", TESTS)
@pytest.mark.schemas
@pytest.mark.association
def test_(schemas, tablenames, expected_mapping):
"""
        GIVEN schemas, tablenames and expected mapping
WHEN _get_tablename_schema_names is called with the schemas and tablenames
THEN the expected mapping is returned.
"""
returned_mapping = association._get_tablename_schema_names(
schemas=schemas, tablenames=tablenames
)
assert returned_mapping == expected_mapping
|
python
|
from django.template import Library
from ..classes import Menu, SourceColumn
register = Library()
def _navigation_resolve_menu(context, name, source=None, sort_results=None):
result = []
menu = Menu.get(name)
link_groups = menu.resolve(
context=context, source=source, sort_results=sort_results
)
if link_groups:
result.append(
{
'link_groups': link_groups, 'menu': menu
}
)
return result
@register.simple_tag(takes_context=True)
def navigation_get_sort_field_querystring(context, column):
return column.get_sort_field_querystring(context=context)
@register.simple_tag
def navigation_get_source_columns(
source, exclude_identifier=False, names=None, only_identifier=False
):
return SourceColumn.get_for_source(
source=source, exclude_identifier=exclude_identifier,
names=names, only_identifier=only_identifier
)
@register.simple_tag(takes_context=True)
def navigation_resolve_menu(context, name, source=None, sort_results=None):
return _navigation_resolve_menu(
context=context, name=name, source=source, sort_results=sort_results
)
@register.simple_tag(takes_context=True)
def navigation_resolve_menus(context, names, source=None, sort_results=None):
result = []
for name in names.split(','):
result.extend(
_navigation_resolve_menu(
context=context, name=name, source=source, sort_results=sort_results
)
)
return result
@register.simple_tag(takes_context=True)
def navigation_source_column_get_sort_icon(context, column):
if column:
result = column.get_sort_icon(context=context)
return result
else:
return ''
@register.simple_tag(takes_context=True)
def navigation_source_column_resolve(context, column):
if column:
result = column.resolve(context=context)
return result
else:
return ''
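

# Template usage sketch (assumptions: this module is registered as a Django template
# tag library -- load it under whatever name this file is installed as, e.g. a
# hypothetical ``navigation_tags`` -- and the menu names passed to the tag exist
# as ``Menu`` instances):
#
#   {% load navigation_tags %}
#   {% navigation_resolve_menus names='main menu,secondary menu' as resolved_menus %}
#   {% navigation_get_source_columns source=object_list as source_columns %}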
|
python
|
from esipy.client import EsiClient
from waitlist.utility.swagger.eve import get_esi_client
from waitlist.utility.swagger import get_api
from waitlist.utility.swagger.eve.universe.responses import ResolveIdsResponse,\
CategoriesResponse, CategoryResponse, GroupResponse, GroupsResponse,\
TypesResponse, TypeResponse
from typing import List
from esipy.exceptions import APIException
class UniverseEndpoint(object):
def __init__(self, client: EsiClient = None) -> None:
if client is None:
self.__client: EsiClient = get_esi_client(
token=None, noauth=True, retry_request=True)
self.__api: App = get_api()
else:
self.__client: EsiClient = client
self.__api: App = get_api()
    def resolve_ids(self, ids_list: List[int]) -> ResolveIdsResponse:
        """
        Resolve a list of ids to names.
        :param ids_list: ids to resolve; at most 1000 ids are allowed per request
        """
resp = self.__client.request(
self.__api.op['post_universe_names'](ids=ids_list))
return ResolveIdsResponse(resp)
def get_categories(self) -> CategoriesResponse:
"""
Get response containing a list of all category ids
"""
resp = self.__client.request(
self.__api.op['get_universe_categories']())
return CategoriesResponse(resp)
def get_category(self, category_id: int) -> CategoryResponse:
"""
Get response containing information about the category
"""
resp = self.__client.request(
self.__api.op['get_universe_categories_category_id'](
category_id=category_id))
return CategoryResponse(resp)
def get_category_multi(self,
category_ids: List[int]) -> List[CategoryResponse]:
ops = []
for category_id in category_ids:
ops.append(self.__api.op['get_universe_categories_category_id'](
category_id=category_id))
response_infos = self.__client.multi_request(ops)
return [CategoryResponse(info[1]) for info in response_infos]
def get_groups(self) -> List[GroupsResponse]:
"""
Get response containing a list of all group ids
"""
resp = self.__client.head(
self.__api.op['get_universe_groups'](page=1))
if (resp.status != 200):
raise APIException("", resp.status)
pages = 1
if 'X-Pages' in resp.header:
pages = int(resp.header['X-Pages'][0])
ops = []
for page in range(1, pages+1):
ops.append(self.__api.op['get_universe_groups'](page=page))
responses = self.__client.multi_request(ops)
response_list: List[GroupsResponse] = []
for data_tuple in responses: # (request, response)
response_list.append(GroupsResponse(data_tuple[1]))
return response_list
def get_group(self, group_id: int) -> GroupResponse:
"""
Get response containing information about the group
"""
resp = self.__client.request(
self.__api.op['get_universe_groups_group_id'](
group_id=group_id))
return GroupResponse(resp)
def get_group_multi(self, group_ids: List[int]) -> List[GroupResponse]:
ops = []
for group_id in group_ids:
ops.append(self.__api.op['get_universe_groups_group_id'](
group_id=group_id))
response_infos = self.__client.multi_request(ops)
return [GroupResponse(info[1]) for info in response_infos]
def get_types(self) -> List[TypesResponse]:
"""
Get response containing a list of all type ids
"""
resp = self.__client.head(
self.__api.op['get_universe_types'](page=1))
if (resp.status != 200):
raise APIException("", resp.status)
pages = 1
if 'X-Pages' in resp.header:
pages = int(resp.header['X-Pages'][0])
ops = []
for page in range(1, pages+1):
ops.append(self.__api.op['get_universe_types'](page=page))
responses = self.__client.multi_request(ops)
response_list: List[TypesResponse] = []
for data_tuple in responses: # (request, response)
response_list.append(TypesResponse(data_tuple[1]))
return response_list
def get_type(self, type_id: int) -> TypeResponse:
"""
Get response containing information about the type
"""
resp = self.__client.request(
self.__api.op['get_universe_types_type_id'](
type_id=type_id))
return TypeResponse(resp)
def get_type_multi(self, type_ids: List[int]) -> List[TypeResponse]:
ops = []
for type_id in type_ids:
ops.append(self.__api.op['get_universe_types_type_id'](
type_id=type_id))
response_infos = self.__client.multi_request(ops)
return [TypeResponse(info[1]) for info in response_infos]
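

# Minimal usage sketch (assumptions: the surrounding waitlist application is
# configured so that get_esi_client()/get_api() work, ESI is reachable, and the
# ids below are arbitrary example values).
if __name__ == '__main__':
    endpoint = UniverseEndpoint()
    # Resolve a small batch of ids to names (at most 1000 ids per request).
    print(endpoint.resolve_ids([587, 603]))
    # Fetch type information for the same ids in a single multi-request.
    for type_response in endpoint.get_type_multi([587, 603]):
        print(type_response)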
|
python
|
titulo = str(input('what is the title: '))
autor = str(input('author: '))
comando = ['/give @p written_book{pages:[', "'", '"', '] ,title:', ',author:', '}', titulo, autor]  # command structure
l = str(input('paste the book text here: '))  # the book text itself
tl = len(l)  # total number of characters in the book
print(tl)
qcn = -(-tl // 256)  # number of 256-character pages needed (rounded up so the last partial page is kept)
print(qcn)
qbn = 256  # maximum number of characters per page
aux = 1  # auxiliary variable: 1-based start index of the current slice
input('continue? ')
for ndp in range(0, int(qcn)):
    qnd = l[aux - 1:qbn]  # current 256-character page
    comando.append(qnd)
    print(qnd)
    qbn = qbn + 256
    if tl > 255:
        aux = aux + 256
print('end of check')
# Assemble the final /give command from the page slices stored in comando[8:].
# This replaces the original hard-coded print of comando[8]..comando[107]; it
# produces the same space-separated token stream but works for any number of
# pages instead of raising an IndexError for books shorter than 100 pages.
tokens = [comando[0]]
for indice, pagina in enumerate(comando[8:]):
    if indice > 0:
        tokens.append(',')
    tokens.extend([comando[1], comando[2], pagina, comando[2], comando[1]])
tokens.extend([comando[3], comando[2], comando[6], comando[2],
               comando[4], comando[2], comando[7], comando[2], comando[5]])
print(*tokens)
|
python
|
r"""
Super Partitions
AUTHORS:
- Mike Zabrocki
A super partition of size `n` and fermionic sector `m` is a
pair consisting of a strict partition of some integer `r` of
length `m` (that may end in a `0`) and an integer partition of
`n - r`.
This module provides tools for manipulating super partitions.
Super partitions are the indexing set for symmetric functions in
super space.
Super partitions may be input in two different formats: one as a pair
consisting of a fermionic (strict partition) part and a bosonic (partition) part
and the other as a list of integer values where the negative entries come
first and are listed in strict order followed by the positive values in
weak order.
A super partition is displayed as two partitions separated by a semicolon
as a default. Super partitions may also be displayed as a weakly increasing
sequence of integers that are strict if the numbers are not positive.
These combinatorial objects index the space of symmetric polynomials in
two sets of variables, one commuting and one anti-commuting, and they
are known as symmetric functions in super space (hence the origin of the
name super partitions).
EXAMPLES::
sage: SuperPartitions()
Super Partitions
sage: SuperPartitions(2)
Super Partitions of 2
sage: SuperPartitions(2).cardinality()
8
sage: SuperPartitions(4,2)
Super Partitions of 4 and of fermionic sector 2
sage: [[2,0],[1,1]] in SuperPartitions(4,2)
True
sage: [[1,0],[1,1]] in SuperPartitions(4,2)
False
sage: [[1,0],[2,1]] in SuperPartitions(4)
True
sage: [[1,0],[2,2,1]] in SuperPartitions(4)
False
sage: [[1,0],[2,1]] in SuperPartitions()
True
sage: [[1,1],[2,1]] in SuperPartitions()
False
sage: [-2, 0, 1, 1] in SuperPartitions(4,2)
True
sage: [-1, 0, 1, 1] in SuperPartitions(4,2)
False
sage: [-2, -2, 2, 1] in SuperPartitions(7,2)
False
REFERENCES:
- [JL2016]_
"""
#*****************************************************************************
# Copyright (C) 2018 Mike Zabrocki <zabrocki at mathstat.yorku.ca>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function, absolute_import, division
from six import add_metaclass
from functools import reduce
from sage.structure.list_clone import ClonableArray
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.structure.richcmp import richcmp, richcmp_method
from sage.combinat.partition import Partition, Partitions, _Partitions
from sage.combinat.composition import Composition
from sage.categories.enumerated_sets import EnumeratedSets
from sage.rings.integer import Integer
from sage.structure.global_options import GlobalOptions
from sage.rings.all import ZZ
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
from sage.misc.all import uniq
@richcmp_method
@add_metaclass(InheritComparisonClasscallMetaclass)
class SuperPartition(ClonableArray):
r"""
A super partition.
A *super partition* of size `n` and fermionic sector `m` is a
pair consisting of a strict partition of some integer `r` of
length `m` (that may end in a `0`) and an integer partition of
`n - r`.
EXAMPLES::
sage: sp = SuperPartition([[1,0],[2,2,1]]); sp
[1, 0; 2, 2, 1]
sage: sp[0]
(1, 0)
sage: sp[1]
(2, 2, 1)
sage: sp.fermionic_degree()
2
sage: sp.bosonic_degree()
6
sage: sp.length()
5
sage: sp.conjugate()
[4, 2; ]
"""
@staticmethod
def __classcall_private__(cls, lst):
r"""
Construct a superpartition in the correct parent
EXAMPLES::
sage: SuperPartition([[1],[1]]).parent()
Super Partitions
sage: SuperPartition([[1],[1]])
[1; 1]
sage: SuperPartition([-1, 1])
[1; 1]
sage: SuperPartition([[1,1],[1]])
Traceback (most recent call last):
...
ValueError: [[1, 1], [1]] not in Super Partitions
sage: SuperPartition([-1,1])
[1; 1]
sage: SuperPartition([])
[; ]
sage: SP = SuperPartitions(8,4)([[3,2,1,0],[2]])
sage: SuperPartition(SP) is SP
True
"""
if isinstance(lst, SuperPartition):
return lst
SPs = SuperPartitions()
if not lst:
return SPs([[],[]])
elif isinstance(lst[0], (list, tuple)):
return SPs([[Integer(a) for a in lst[0]],
[Integer(a) for a in lst[1]]])
else:
return SPs([[-a for a in lst if a <= 0],
[a for a in lst if a > 0]])
def __init__(self, parent, lst, check=True, immutable=True):
"""
Initialize ``self``.
EXAMPLES::
sage: SP = SuperPartition([[1],[1]])
sage: TestSuite(SP).run()
"""
if check and lst not in parent:
raise ValueError("%s not in %s" % (lst, parent))
lst = [tuple(lst[0]), tuple(lst[1])]
ClonableArray.__init__(self, parent, lst, False, immutable)
def check(self):
"""
Check that ``self`` is a valid super partition.
EXAMPLES::
sage: SP = SuperPartition([[1],[1]])
sage: SP.check()
"""
if self not in self.parent():
raise ValueError("%s not in %s"%(self, self.parent()))
def __richcmp__(self, other, op):
r"""
Check whether ``self`` is equal to ``other``.
.. TODO::
This overwrites the equality check of
:class:`~sage.structure.list_clone.ClonableArray`
in order to circumvent the coercion framework.
Eventually this should be solved more elegantly.
For now, two elements are compared by their defining lists.
"""
if isinstance(other, SuperPartition):
return richcmp(list(self), list(other), op)
else:
return richcmp(list(self), other, op)
def _hash_(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: SP = SuperPartition([[1],[1]])
sage: hash(tuple(SP)) == hash(SP)
True
"""
return hash(tuple(self))
def _repr_(self):
r"""
Return a string representation of ``self``.
A super partition is represented by the antisymmetric and symmetric
parts separated by a semicolon.
EXAMPLES::
sage: SuperPartition([[1],[1]])
[1; 1]
sage: SuperPartition([[],[1]])
[; 1]
sage: SuperPartition([])
[; ]
sage: SuperPartitions.options.display = "list"
sage: SuperPartition([[1],[1]])
[-1, 1]
sage: SuperPartition([[],[1]])
[1]
sage: SuperPartition([-2,-1,0,2,1])
[-2, -1, 0, 2, 1]
sage: SuperPartitions.options.display = "pair"
sage: SuperPartition([[1],[1]])
[[1], [1]]
sage: SuperPartition([[],[1]])
[[], [1]]
sage: SuperPartition([-2,-1,0,2,1])
[[2, 1, 0], [2, 1]]
sage: SuperPartitions.options._reset()
"""
display = self.parent().options.display
if display == "default":
return '['+', '.join(str(a) for a in self.antisymmetric_part())+\
'; '+', '.join(str(a) for a in self.symmetric_part())+']'
elif display == "pair":
return self._repr_pair()
elif display == "list":
return self._repr_list()
def _repr_pair(self):
r"""
        Representation of a super partition as a pair.
A super partition is represented by a list consisting of the
antisymmetric and symmetric parts.
EXAMPLES::
sage: SuperPartition([[1],[1]])._repr_pair()
'[[1], [1]]'
sage: SuperPartition([[],[1]])._repr_pair()
'[[], [1]]'
sage: SuperPartition([[],[]])._repr_pair()
'[[], []]'
"""
return repr(self.to_list())
def _repr_list(self):
r"""
        Representation of a super partition as a list.
A super partition is represented by a list consisting of the
negative values for the antisymmetric part listed first followed
by positive values for the symmetric part
EXAMPLES::
sage: SuperPartition([[1],[1]])._repr_list()
'[-1, 1]'
sage: SuperPartition([[],[1]])._repr_list()
'[1]'
sage: SuperPartition([[],[]])._repr_list()
'[]'
"""
return repr([-a for a in self[0]] + list(self[1]))
def _latex_(self):
r"""
Latex a super partition.
A super partition is represented by the antisymmetric and symmetric
parts separated by a semicolon.
EXAMPLES::
sage: latex(SuperPartition([[1],[1]]))
(1; 1)
sage: latex(SuperPartition([[],[1]]))
(; 1)
"""
return ('(' + ','.join(str(a) for a in self.antisymmetric_part())
+ '; ' + ', '.join(str(a) for a in self.symmetric_part()) + ')')
def to_list(self):
r"""
The list of two lists with the antisymmetric and symmetric parts.
EXAMPLES::
sage: SuperPartition([[1],[1]]).to_list()
[[1], [1]]
sage: SuperPartition([[],[1]]).to_list()
[[], [1]]
"""
return [list(self[0]), list(self[1])]
def to_composition(self):
r"""
Concatenate the antisymmetric and symmetric parts to a composition.
OUTPUT:
- a (possibly weak) composition
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).to_composition()
[3, 1, 2, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).to_composition()
[2, 1, 0, 3, 3]
sage: SuperPartition([[2,1,0],[3,3]]).to_composition().parent()
Compositions of non-negative integers
"""
return Composition(self[0] + self[1])
def to_partition(self):
r"""
Concatenate and sort the antisymmetric and symmetric parts
to a partition.
OUTPUT:
- a partition
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).to_partition()
[3, 2, 2, 1, 1]
sage: SuperPartition([[2,1,0],[3,3]]).to_partition()
[3, 3, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).to_partition().parent()
Partitions
"""
return Partition(sorted(self[0] + self[1], reverse=True))
def antisymmetric_part(self):
r"""
The antisymmetric part as a list of strictly decreasing integers.
OUTPUT:
- a list
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).antisymmetric_part()
[3, 1]
sage: SuperPartition([[2,1,0],[3,3]]).antisymmetric_part()
[2, 1, 0]
"""
return list(self[0])
a_part = antisymmetric_part
def symmetric_part(self):
r"""
The symmetric part as a list of weakly decreasing integers.
OUTPUT:
- a list
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).symmetric_part()
[2, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).symmetric_part()
[3, 3]
"""
return list(self[1])
s_part = symmetric_part
def bosonic_degree(self):
r"""
Return the bosonic degree of ``self``.
The *bosonic degree* is the sum of the sizes of the
antisymmetric and symmetric parts.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).bosonic_degree()
9
sage: SuperPartition([[2,1,0],[3,3]]).bosonic_degree()
9
"""
return sum(self.antisymmetric_part() + self.symmetric_part())
degree = bosonic_degree
def fermionic_degree(self):
r"""
Return the fermionic degree of ``self``.
The *fermionic degree* is the length of the antisymmetric part.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).fermionic_degree()
2
sage: SuperPartition([[2,1,0],[3,3]]).fermionic_degree()
3
"""
return len(self.antisymmetric_part())
fermionic_sector = fermionic_degree
def bi_degree(self):
r"""
Return the bidegree of ``self``, which is a pair consisting
of the bosonic and fermionic degree.
OUTPUT:
- a tuple of two integers
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).bi_degree()
(9, 2)
sage: SuperPartition([[2,1,0],[3,3]]).bi_degree()
(9, 3)
"""
return (self.bosonic_degree(), self.fermionic_degree())
def length(self):
r"""
Return the length of ``self``, which is the sum of the
lengths of the antisymmetric and symmetric part.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).length()
5
sage: SuperPartition([[2,1,0],[3,3]]).length()
5
"""
return self.fermionic_degree()+len(self.symmetric_part())
def bosonic_length(self):
r"""
Return the length of the partition of the symmetric part.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).bosonic_length()
3
sage: SuperPartition([[2,1,0],[3,3]]).bosonic_length()
2
"""
return len(self.symmetric_part())
def shape_circled_diagram(self):
r"""
A concatenated partition with an extra cell for each antisymmetric part
OUTPUT:
- a partition
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).shape_circled_diagram()
[4, 2, 2, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).shape_circled_diagram()
[3, 3, 3, 2, 1]
"""
return Partition(sorted([a+1 for a in self.antisymmetric_part()]
+ self.symmetric_part(), reverse=True))
@staticmethod
def from_circled_diagram(shape, corners):
r"""
Construct a super partition from a circled diagram.
A circled diagram consists of a partition of the concatenation of
the antisymmetric and symmetric parts and a list of addable cells
of the partition which indicate the location of the circled cells.
INPUT:
- ``shape`` -- a partition or list of integers
        - ``corners`` -- a list of addable cells (outer corners) of ``shape``
OUTPUT:
- a :class:`SuperPartition`
EXAMPLES::
sage: SuperPartition.from_circled_diagram([3, 2, 2, 1, 1], [(0, 3), (3, 1)])
[3, 1; 2, 2, 1]
sage: SuperPartition.from_circled_diagram([3, 3, 2, 1], [(2, 2), (3, 1), (4, 0)])
[2, 1, 0; 3, 3]
sage: from_cd = SuperPartition.from_circled_diagram
sage: all(sp == from_cd(*sp.to_circled_diagram()) for sp in SuperPartitions(4))
True
"""
return SuperPartition([sorted([c[1] for c in corners], reverse=True),
[shape[i] for i in range(len(shape))
if i not in [c[0] for c in corners]]])
def to_circled_diagram(self):
r"""
The shape of the circled diagram and a list of addable cells
A circled diagram consists of a partition for the outer shape
        and a list of addable cells of the partition indicating the
location of the circled cells
OUTPUT:
- a list consisting of a partition and a list of pairs of integers
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).to_circled_diagram()
[[3, 2, 2, 1, 1], [(0, 3), (3, 1)]]
sage: SuperPartition([[2,1,0],[3,3]]).to_circled_diagram()
[[3, 3, 2, 1], [(2, 2), (3, 1), (4, 0)]]
sage: from_cd = SuperPartition.from_circled_diagram
sage: all(sp == from_cd(*sp.to_circled_diagram()) for sp in SuperPartitions(4))
True
"""
shape = self.to_partition()
corners = [c for c in shape.addable_cells() if c[1] in self.antisymmetric_part()]
return [shape, corners]
def conjugate(self):
r"""
Conjugate of a super partition.
The *conjugate* of a super partition is defined by conjugating
the circled diagram.
        OUTPUT:
- a :class:`SuperPartition`
EXAMPLES::
sage: SuperPartition([[3, 1, 0], [4, 3, 2, 1]]).conjugate()
[6, 4, 1; 3]
sage: all(sp == sp.conjugate().conjugate() for sp in SuperPartitions(4))
True
sage: all(sp.conjugate() in SuperPartitions(3,2) for sp in SuperPartitions(3,2))
True
"""
sd = self.to_circled_diagram()
return SuperPartition.from_circled_diagram(sd[0].conjugate(),
[(j,i) for (i,j) in sd[1]])
def zee(self):
r"""
Return the centralizer size of a permutation of cycle
type symmetric part of ``self``.
OUTPUT:
- a positive integer
EXAMPLES::
sage: SuperPartition([[1,0],[3,1,1]]).zee()
6
sage: SuperPartition([[1],[2,2,1]]).zee()
8
sage: sum(1/sp.zee() for sp in SuperPartitions(6,0))
1
"""
return Partition(self.symmetric_part()).centralizer_size()
def sign(self):
r"""
Return the sign of a permutation of cycle type the
symmetric part of ``self``.
OUTPUT:
- either `1` or `-1`
EXAMPLES::
sage: SuperPartition([[1,0],[3,1,1]]).sign()
-1
sage: SuperPartition([[1,0],[3,2,1]]).sign()
1
sage: sum(sp.sign()/sp.zee() for sp in SuperPartitions(6,0))
0
"""
return (-1)**(self.degree()-len(self.symmetric_part()))
def dominates(self, other):
r"""
Return ``True`` if and only if ``self`` dominates ``other``.
If the symmetric and anti-symmetric parts of ``self`` and ``other``
are not the same size then the result is ``False``.
EXAMPLES::
sage: LA = SuperPartition([[2,1],[2,1,1]])
sage: LA.dominates([[2,1],[3,1]])
False
sage: LA.dominates([[2,1],[1,1,1,1]])
True
sage: LA.dominates([[3],[2,1,1]])
False
sage: LA.dominates([[1],[1]*6])
False
"""
return (self.degree() == sum(other[0]) + sum(other[1]) and
Partition(self.antisymmetric_part()).dominates(other[0]) and
Partition(self.symmetric_part()).dominates(other[1]))
def add_horizontal_border_strip_star(self, h):
r"""
Return a list of super partitions that differ from ``self``
by a horizontal strip.
The notion of horizontal strip comes from the Pieri rule for the
Schur-star basis of symmetric functions in super space (see
Theorem 7 from [JL2016]_).
INPUT:
- ``h`` -- number of cells in the horizontal strip
        OUTPUT:
- a list of super partitions
EXAMPLES::
sage: SuperPartition([[4,1],[3]]).add_horizontal_border_strip_star(3)
[[4, 1; 3, 3],
[4, 1; 4, 2],
[3, 1; 5, 2],
[4, 1; 5, 1],
[3, 1; 6, 1],
[4, 0; 4, 3],
[3, 0; 5, 3],
[4, 0; 5, 2],
[3, 0; 6, 2],
[4, 1; 6],
[3, 1; 7]]
sage: SuperPartition([[2,1],[3]]).add_horizontal_border_strip_star(2)
[[2, 1; 3, 2], [2, 1; 4, 1], [2, 0; 3, 3], [2, 0; 4, 2], [2, 1; 5]]
"""
sp1, circ_list = self.to_circled_diagram()
nsp = [list(la) + [0] for la in sp1.add_horizontal_border_strip(h)]
sp1 = sp1 + [0]
out = []
for elt in nsp:
row_changed = [row1-row2 for row1,row2 in zip(elt,sp1)]
new_sp = [elt, [(i[0]+1, elt[i[0]+1]) for i in circ_list
if row_changed[i[0]] != 0]
                      # TODO: Check that this is not supposed to be
# a tuple of size 1
+ [(i) for i in circ_list if row_changed[i[0]] == 0]]
if len(uniq([k for (j,k) in new_sp[1]])) == len(new_sp[1]):
out += [SuperPartition.from_circled_diagram(*new_sp)]
return out
def add_horizontal_border_strip_star_bar(self, h):
r"""
List super partitions that differ from ``self`` by a horizontal strip.
The notion of horizontal strip comes from the Pieri rule for the
Schur-star-bar basis of symmetric functions in super space (see
Theorem 10 from [JL2016]_).
INPUT:
- ``h`` -- number of cells in the horizontal strip
        OUTPUT:
- a list of super partitions
EXAMPLES::
sage: SuperPartition([[4,1],[5,4]]).add_horizontal_border_strip_star_bar(3)
[[4, 3; 5, 4, 1],
[4, 1; 5, 4, 3],
[4, 2; 5, 5, 1],
[4, 1; 5, 5, 2],
[4, 2; 6, 4, 1],
[4, 1; 6, 4, 2],
[4, 1; 6, 5, 1],
[4, 1; 7, 4, 1],
[4, 3; 5, 5],
[4, 3; 6, 4],
[4, 2; 6, 5],
[4, 2; 7, 4],
[4, 1; 7, 5],
[4, 1; 8, 4]]
sage: SuperPartition([[3,1],[5]]).add_horizontal_border_strip_star_bar(2)
[[3, 2; 5, 1],
[3, 1; 5, 2],
[4, 1; 5, 1],
[3, 1; 6, 1],
[4, 2; 5],
[3, 2; 6],
[4, 1; 6],
[3, 1; 7]]
"""
sp1, circ_list = self.to_circled_diagram()
nsp = [list(la) + [0] for la in sp1.add_horizontal_border_strip(h)]
sp1 = sp1 + [0]
out = []
for asp in nsp:
asp = asp + [0]
change_in_rows = [asp[i] - sp1[i] for i in range(len(sp1))]
moved_circ_list = [[] for i in range(len(circ_list))]
for i,pos in enumerate(circ_list):
if change_in_rows[pos[0]] == 0:
moved_circ_list[i].append(pos)
else:
if pos[0] == 0:
moved_circ_list[i].append((0, pos[1]+change_in_rows[0]))
if pos[1] == asp[1]:
moved_circ_list[i].append((1, asp[1]))
else:
if pos[1] + change_in_rows[pos[0]] < sp1[pos[0]-1]:
moved_circ_list[i].append((pos[0], pos[1]+change_in_rows[pos[0]]))
if asp[pos[0]+1] == sp1[pos[0]]:
moved_circ_list[i].append((pos[0]+1, pos[1]))
out += [[moved_circ_list, asp]]
result = []
for i in out:
if not i[0]:
result += [[i[1],i[0]]]
else:
x = reduce(lambda a,b: [item_a + item_b for item_a in a for item_b in b], i[0])
for j in x:
result += [[i[1], list(zip(j,j[1:]))[::2]]]
return [SuperPartition.from_circled_diagram(*i)
for i in result if len(i[1]) == len(self[0])]
class SuperPartitions(UniqueRepresentation, Parent):
r"""
Super partitions.
A super partition of size `n` and fermionic sector `m` is a
pair consisting of a strict partition of some integer `r` of
length `m` (that may end in a `0`) and an integer partition of
`n - r`.
INPUT:
- ``n`` -- an integer (optional: default ``None``)
- ``m`` -- if ``n`` is specified, an integer (optional: default ``None``)
Super partitions are the indexing set for symmetric functions
in super space.
EXAMPLES::
sage: SuperPartitions()
Super Partitions
sage: SuperPartitions(2)
Super Partitions of 2
sage: SuperPartitions(2).cardinality()
8
sage: SuperPartitions(4,2)
Super Partitions of 4 and of fermionic sector 2
sage: [[2,0],[1,1]] in SuperPartitions(4,2)
True
sage: [[1,0],[1,1]] in SuperPartitions(4,2)
False
sage: [[1,0],[2,1]] in SuperPartitions(4)
True
sage: [[1,0],[2,2,1]] in SuperPartitions(4)
False
sage: [[1,0],[2,1]] in SuperPartitions()
True
sage: [[1,1],[2,1]] in SuperPartitions()
False
"""
@staticmethod
def __classcall_private__(self, n=None, m=None, **kwargs):
r"""
Return the corresponding parent based upon input.
TESTS::
sage: from sage.combinat.superpartition import *
sage: isinstance(SuperPartitions(), SuperPartitions_all)
True
sage: isinstance(SuperPartitions(3), SuperPartitions_n)
True
sage: isinstance(SuperPartitions(3,2), SuperPartitions_n_m)
True
::
sage: SP = SuperPartitions(5,2)
sage: SP2 = SuperPartitions(int(5),int(2))
sage: SP3 = SuperPartitions(ZZ(5),int(2))
sage: SP is SP2
True
sage: SP is SP3
True
::
sage: SP = SuperPartitions(5)
sage: SP2 = SuperPartitions(int(5))
sage: SP3 = SuperPartitions(ZZ(5))
sage: SP is SP2
True
sage: SP is SP3
True
"""
if n is None:
return SuperPartitions_all()
elif n in ZZ:
if m is None:
return SuperPartitions_n(n)
elif m in ZZ:
return SuperPartitions_n_m(n, m)
raise ValueError("m must be an integer")
raise ValueError("n must be an integer")
def __init__(self, is_infinite=False):
"""
Initialize ``self``.
EXAMPLES::
sage: SP = SuperPartitions()
sage: TestSuite(SP).run()
"""
cat = EnumeratedSets()
if is_infinite:
cat = cat.Infinite()
else:
cat = cat.Finite()
Parent.__init__(self, category=cat)
Element = SuperPartition
class options(GlobalOptions):
"""
Set the global options for elements of the SuperPartition class.
The defaults are for Super Partitions to be displayed in a list
notation with the fermionic part and the bosonic part separated
by a semicolon. There is a slight disadvantage to this notation
because a list containing a semicolon can not be used as input
for a super partition.
@OPTIONS@
EXAMPLES::
sage: sp = SuperPartition([[1, 0], [2, 2, 1]])
sage: SuperPartitions.options.display
default
sage: sp
[1, 0; 2, 2, 1]
sage: SuperPartitions.options.display = 'list'
sage: sp
[-1, 0, 2, 2, 1]
            sage: SuperPartitions.options._reset()
        """
NAME = 'SuperPartition'
module = 'sage.combinat.superpartition'
display = dict(default="default",
description="Specifies how the super partitions should "
"be printed",
values=dict(list="the super partitions are displayed in "
"a list of two lists",
pair="the super partition is displayed as a "
"list of integers",
default="the super partition is displayed in "
"a form [fermionic part; bosonic part]"),
case_sensitive=False)
def _element_constructor_(self, lst, check=True):
"""
Construct an element with ``self`` as parent.
EXAMPLES::
sage: SP = SuperPartitions()
sage: SP([[],[3,3,1]])
[; 3, 3, 1]
sage: SP([[],[3,3,1]]) in SP
True
sage: SP([[],[3,3,1]]).parent()
Super Partitions
sage: SuperPartitions(7)([[],[3,3,1]])
[; 3, 3, 1]
sage: SuperPartitions(7,0)([[],[3,3,1]])
[; 3, 3, 1]
sage: SuperPartitions(7,1)([[],[3,3,1]])
Traceback (most recent call last):
...
ValueError: [[], [3, 3, 1]] not in Super Partitions of 7 and of fermionic sector 1
"""
if not lst:
return self.element_class(self, [[], []], check=check)
if isinstance(lst, SuperPartition):
lst = list(lst)
if isinstance(lst[0], (list, tuple)):
return self.element_class(self, [lst[0], [a for a in lst[1] if a > 0]],
check=check)
else:
return self.element_class(self, [[-a for a in lst if a <= 0],
[a for a in lst if a > 0]],
check=check)
def __contains__(self, x):
"""
TESTS::
sage: [[1],[2,1]] in SuperPartitions()
True
sage: [[],[]] in SuperPartitions()
True
sage: [[0],[]] in SuperPartitions()
True
sage: [[],[0]] in SuperPartitions()
True
sage: [-1, 2, 1] in SuperPartitions()
True
sage: [2, -1, 1, 0] in SuperPartitions()
True
sage: [2, 0, 1, -1] in SuperPartitions()
False
sage: [] in SuperPartitions()
True
sage: [0] in SuperPartitions()
True
"""
if isinstance(x, SuperPartition):
return True
if isinstance(x, (list, tuple)) and all(isinstance(i, (int, Integer))
or i in ZZ for i in x):
sp = [a for a in x if a <= 0]
return (all(sp[i] > sp[i-1] for i in range(1,len(sp)))
and [a for a in x if a > 0] in _Partitions)
elif (isinstance(x, (list, tuple)) and len(x) == 2
and isinstance(x[0], (list, tuple)) and isinstance(x[1], (list, tuple))):
for i in x[0] + x[1]:
if i not in ZZ:
return False
if i < 0:
return False
return (all(x[0][i] > x[0][i+1] for i in range(len(x[0])-1))
and all(x[1][i] >= x[1][i+1] for i in range(len(x[1])-1))
and ((not x[0]) or x[0][-1] >= 0) and ((not x[1]) or x[1][-1] >= 0))
else:
return False
class SuperPartitions_n_m(SuperPartitions):
def __init__(self, n, m):
"""
Initialize ``self``.
TESTS::
sage: SP = SuperPartitions(3,2)
sage: TestSuite(SP).run()
"""
self.n = n
self.m = m
SuperPartitions.__init__(self, False)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: repr(SuperPartitions(3,2))
'Super Partitions of 3 and of fermionic sector 2'
"""
return "Super Partitions of %s and of fermionic sector %s"%(self.n, self.m)
def __contains__(self, x):
"""
TESTS::
sage: [[3,2,1,0],[2]] in SuperPartitions(8,4)
True
sage: [[3,2,1,0],[]] in SuperPartitions(6,3)
False
sage: [[],[]] in SuperPartitions(0,0)
True
sage: [[0],[]] in SuperPartitions(0,1)
True
sage: [[],[]] in SuperPartitions(0,1)
False
sage: [-3,-2,-1,0,2] in SuperPartitions(8,4)
True
sage: [0] in SuperPartitions(0,0)
False
sage: [] in SuperPartitions(0,0)
True
sage: [0] in SuperPartitions(0,1)
True
"""
if x in SuperPartitions():
if not x:
return self.n == 0 and self.m == 0
if isinstance(x[0], (list, tuple)):
n = sum(x[0] + x[1])
m = len(x[0])
else:
n = sum(abs(a) for a in x)
m = len([a for a in x if a <= 0])
return n == self.n and m == self.m
else:
return False
def __iter__(self):
r"""
An iterator for super partitions of degree ``n`` and sector ``m``.
EXAMPLES::
sage: SuperPartitions(6,2).cardinality()
28
sage: SuperPartitions(6,4).first()
[3, 2, 1, 0; ]
"""
for r in range(self.n+1):
for p1 in Partitions(r):
for p0 in Partitions(self.n-r, max_slope=-1, length=self.m):
yield self.element_class(self, [list(p0), list(p1)])
for p0 in Partitions(self.n-r, max_slope=-1, length=self.m-1):
yield self.element_class(self, [list(p0)+[0], list(p1)])
class SuperPartitions_n(SuperPartitions):
def __init__(self, n):
"""
Initialize ``self``.
TESTS::
sage: SP = SuperPartitions(3)
sage: TestSuite(SP).run()
"""
self.n = n
SuperPartitions.__init__(self, False)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: repr(SuperPartitions(3))
'Super Partitions of 3'
"""
return "Super Partitions of %s"%self.n
def __contains__(self, x):
"""
EXAMPLES::
sage: SuperPartitions(7)([[],[3,3,1]]) in SuperPartitions()
True
sage: SuperPartitions()([[],[3,3,1]]) in SuperPartitions(7)
True
sage: [[],[]] in SuperPartitions(0)
True
sage: [[0],[]] in SuperPartitions(0)
True
sage: [0] in SuperPartitions(0)
True
sage: [] in SuperPartitions(0)
True
sage: [1] in SuperPartitions(0)
False
"""
if x in SuperPartitions():
if not x:
return self.n == 0
if isinstance(x[0], (list, tuple)):
n = sum(x[0] + x[1])
else:
n = sum(abs(a) for a in x)
return n == self.n
else:
return False
def __iter__(self):
r"""
An iterator for super partitions of degree ``n``.
EXAMPLES::
sage: SuperPartitions(1).list()
[[; 1], [1; ], [0; 1], [1, 0; ]]
sage: SuperPartitions(6).cardinality()
80
"""
m = 0
while self.n >= m * (m-1) // 2:
for LA in SuperPartitions(self.n, m):
yield self.element_class(self, LA)
m += 1
class SuperPartitions_all(SuperPartitions):
def __init__(self):
"""
Initialize ``self``.
TESTS::
sage: SP = SuperPartitions()
sage: TestSuite(SP).run()
"""
SuperPartitions.__init__(self, True)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: repr(SuperPartitions())
'Super Partitions'
"""
return "Super Partitions"
def __iter__(self):
"""
Iterate over all super partitions.
EXAMPLES::
sage: SP = SuperPartitions()
sage: it = SP.__iter__()
sage: [next(it) for i in range(6)]
[[; ], [0; ], [; 1], [1; ], [0; 1], [1, 0; ]]
"""
n = 0
while True:
for sp in SuperPartitions(n):
yield self.element_class(self, list(sp))
n += 1
|
python
|
from ..job.job_context import JobContext
from ..task.task_context import TaskContext
from ..tc.tc_context import TcContext
class VerticalContext:
def __init__(self,
sys_conf_dict,
task_context: TaskContext = None,
job_context: JobContext = None,
tc_context: TcContext = None):
self.sys_conf_dict = sys_conf_dict
self.task_context = task_context
self.job_context = job_context
self.tc_context = tc_context
|
python
|
import numpy as np
import random
import heapq
from itertools import count
def time_fun(x, slope, shift):
    # Logistic gate in (0, 1): close to 1 while slope*x < shift and decaying toward 0
    # as x grows; used below as the acceptance probability for new pushes.
    return 1 / (1 + np.exp(slope * x - shift))
class eligibility_trace():
def __init__(self, lambda_v, r, slope=3, shift=5):
self.E = 0
self.lambda_v = lambda_v
self.r = r
self.slope = slope
self.shift = shift
def get_trace(self):
return time_fun(self.E, slope=self.slope, shift=self.shift)
def general_iterate(self):
self.E = self.r*self.lambda_v*self.E
def is_pushed(self):
self.E += 1
class Transition_tuple():
def __init__(self, state, action, action_mean, reward, curiosity, next_state, done_mask, t):
        # expects a list of items for each initialization variable
self.state = np.array(state)
self.action = np.array(action)
self.action_mean = np.array(action_mean)
self.reward = np.array(reward)
self.curiosity = np.array(curiosity)
self.next_state = np.array(next_state)
self.done_mask = np.array(done_mask)
self.t = np.array(t)
def get_all_attributes(self):
return [self.state, self.action, self.action_mean, self.reward, self.curiosity, self.next_state, self.done_mask, self.t]
class Reservoir_with_Cur_n_Time_Restirction_Replay_Memory():
def __init__(self, capacity=10000, lambda_v=0.5, r=1, slope=3, shift=5):
self.capacity = capacity
self.storage = []
self.tiebreaker = count()
self.time_eligibilty_trace = eligibility_trace(lambda_v=lambda_v, r=r, slope=slope, shift=shift)
def push(self, state, action, action_mean, reward, curiosity, next_state, done_mask, t):
ran = random.uniform(0,1)
self.time_eligibilty_trace.general_iterate()
if ran < self.time_eligibilty_trace.get_trace():
data = (state, action, action_mean, reward, curiosity, next_state, done_mask, t)
priority = curiosity.item()
d = (priority, next(self.tiebreaker), data)
if len(self.storage) < self.capacity:
heapq.heappush(self.storage, d)
return True
elif priority > self.storage[0][0]:
heapq.heapreplace(self.storage, d)
self.time_eligibilty_trace.is_pushed()
return True
else:
return False
else:
return False
def sample(self, batch_size):
indices = self.get_sample_indices(batch_size)
state, action, action_mean, reward, curiosity, next_state, done_mask, t_array = self.encode_sample(indices=indices)
return Transition_tuple(state, action, action_mean, reward, curiosity, next_state, done_mask, t_array)
def encode_sample(self, indices):
state, action, action_mean, reward, curiosity, next_state, done_mask, t_array = [], [], [], [], [], [], [], []
for i in indices:
data = self.storage[i][2]
s, a, a_m, r, c, n_s, d, t = data
state.append(s)
action.append(a)
action_mean.append(a_m)
reward.append(r)
curiosity.append(c)
next_state.append(n_s)
done_mask.append(d)
t_array.append(t)
return state, action, action_mean, reward, curiosity, next_state, done_mask, t_array
def get_sample_indices(self, batch_size):
if len(self.storage) < self.capacity:
indices = np.random.choice(len(self.storage), batch_size)
else:
indices = np.random.choice(self.capacity, batch_size)
return indices
def __len__(self):
return len(self.storage)
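

# Minimal usage sketch (assumptions: transition shapes are arbitrary here; in a real
# training loop state/action/curiosity come from the environment, the policy and an
# intrinsic-reward module, and curiosity is passed as a numpy scalar so .item() works).
if __name__ == '__main__':
    memory = Reservoir_with_Cur_n_Time_Restirction_Replay_Memory(capacity=100)
    for step in range(200):
        state = np.random.randn(4)
        action = np.random.randn(2)
        memory.push(state, action, action, reward=1.0,
                    curiosity=np.float64(np.random.rand()),
                    next_state=np.random.randn(4), done_mask=1.0, t=step)
    batch = memory.sample(batch_size=32)
    print(len(memory), batch.state.shape, batch.curiosity.shape)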
|
python
|
'''
true_env = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/online_learning/esarsa/step50k/gridsearch_realenv/"]
k1_notimeout = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k1_notimeout/esarsa/step10k/optimalfixed_eps0/"]
k1_timeout1000 = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k1_timeout1000/esarsa/step10k/optimalfixed_eps0/"]
k3ensemble_notimeout = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_adversarial_notimeout = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_adverarial_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
AcrobotdistantStart_regularTrans_timeout200 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
AcrobottrueStart_adversarialTrans_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/",
]
'''
'''
data2500_eps0_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps0"]
data2500_eps10_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps10"]
data2500_eps25_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps25"]
data2500_eps50_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps50"]
data2500_eps75_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps75"]
data2500_eps100_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps100"]
data2500_eps0_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps0"]
data2500_eps10_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps10"]
data2500_eps25_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps25"]
data2500_eps50_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps50"]
data2500_eps75_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps75"]
data2500_eps100_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps100"]
data2500_eps0_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps0"]
data2500_eps10_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps10"]
data2500_eps25_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps25"]
data2500_eps50_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps50"]
data2500_eps75_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps75"]
data2500_eps100_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps100"]
data5k_eps0_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps0"]
data5k_eps10_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps10"]
data5k_eps25_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps25"]
data5k_eps50_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps50"]
data5k_eps75_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps75"]
data5k_eps100_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps100"]
data5k_eps0_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps0"]
data5k_eps10_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps10"]
data5k_eps25_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps25"]
data5k_eps50_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps50"]
data5k_eps75_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps75"]
data5k_eps100_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps100"]
data5k_eps0_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps0"]
data5k_eps10_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps10"]
data5k_eps25_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps25"]
data5k_eps50_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps50"]
data5k_eps75_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps75"]
data5k_eps100_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps100"]
data10k_eps0_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps0"]
data10k_eps10_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps10"]
data10k_eps25_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps25"]
data10k_eps50_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps50"]
data10k_eps75_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps75"]
data10k_eps100_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps100"]
data10k_eps0_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps0"]
data10k_eps10_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps10"]
data10k_eps25_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps25"]
data10k_eps50_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps50"]
data10k_eps75_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps75"]
data10k_eps100_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps100"]
data10k_eps0_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps0"]
data10k_eps10_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps10"]
data10k_eps25_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps25"]
data10k_eps50_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps50"]
data10k_eps75_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps75"]
data10k_eps100_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps100"]
'''
'''
data10k_eps10_k5_p20_ens = [
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed1/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed2/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed3/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed4/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed5/transfer_optimalfixed_eps10"
]
data10k_eps25_k5_p20_ens = [
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed1/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed2/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed3/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed4/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed5/transfer_optimalfixed_eps25"
]
data10k_eps50_k5_p20_ens = [
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed1/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed2/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed3/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed4/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed5/transfer_optimalfixed_eps50"
]
'''
'''
ac_true = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/online_learning/esarsa/step15k/sweep/"]
ac_rnd = [34, 4, 43, 30, 24, 32, 40, 11, 20, 30, 3, 16, 53, 45, 0, 21, 43, 23, 44, 50, 9, 41, 37, 37, 11, 2, 26, 33, 18, 20]
ac_offline = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/offline_learning/k3_timeout750/esarsa/step15k/optimalfixed_eps0/sweep/"]
ac_cemOffline = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/list/CEMoffline_onlineEvaluation/esarsa/step15k/sweep/"]
ac_cemOnline = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/list/CEMonline_onlineEvaluation/esarsa/step15k/sweep/"]
'''
ac_true_temp = ["../../data/hyperparam_v5/acrobot/online_learning/esarsa/step15k/sweep/"]
ac_true_dqn = ["../../data/hyperparam_v5/acrobot/online_learning/dqn/step600k/sweep/"]
ac_laplace_knn_5k_dqn = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout20k/dqn/step5k_env/data_optimal/drop0/sweep_rep1/"]
ac_knn_15k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep/"]
ac_knn_10k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step10k_env/data_optimal/drop0/sweep/"]
ac_knn_5k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep/"]
ac_knn_2p5k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep/"]
ac_knn_1k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep/"]
ac_knn_500 = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step500_env/data_optimal/drop0/sweep/"]
# ac_knn_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep/"]
# ac_knn_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step10k_env/data_optimal/drop0/sweep/"]
# ac_knn_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep/"]
# ac_knn_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep/"]
# ac_knn_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep/"]
# ac_knn_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step500_env/data_optimal/drop0/sweep/"]
ac_laplace_knn_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step10k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step500_env/data_optimal/drop0/sweep_rep1/"]
ac_scale_network_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step15k_env/data_optimal/sweep"]
ac_scale_network_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step10k_env/data_optimal/sweep"]
ac_scale_network_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step5k_env/data_optimal/sweep"]
ac_scale_network_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step2.5k_env/data_optimal/sweep"]
ac_scale_network_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step1k_env/data_optimal/sweep"]
ac_scale_network_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step500_env/data_optimal/sweep"]
ac_scale_laplace_network_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_optimal/sweep"]
ac_scale_laplace_network_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step10k_env/data_optimal/sweep"]
ac_scale_laplace_network_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step5k_env/data_optimal/sweep"]
ac_scale_laplace_network_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step2.5k_env/data_optimal/sweep"]
ac_scale_laplace_network_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step1k_env/data_optimal/sweep"]
ac_scale_laplace_network_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step500_env/data_optimal/sweep"]
ac_rnd = [34, 4, 43, 30, 24, 32, 40, 11, 20, 30, 3, 16, 53, 45, 0, 21, 43, 23, 44, 50, 9, 41, 37, 37, 11, 2, 26, 33, 18, 20]
basepath = "../../../../../../Downloads/transferabledata/new/data_dcp/final/data/hyperparam_v5/"
ac_true = [basepath + "acrobot/online_learning/esarsa/step15k/sweep/"]
ac_optim_knn = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep_rep1/"]
ac_suboptim_knn = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_suboptimal/drop0/sweep_rep1/"]
ac_subsuboptim_knn = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_subsuboptimal/drop0/sweep_rep1/"]
ac_optim_network = [basepath + "acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_optimal/sweep/"]
ac_suboptim_network = [basepath + "acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_suboptimal/sweep/"]
ac_subsuboptim_network = [basepath + "acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_subsuboptimal/sweep/"]
ac_fqi_eps0 = ["../../data/hyperparam_v5/acrobot/offline_learning/fqi/eps0/fqi/fqi-adam/alpha_hidden_epsilon/step15k_env/optimalfixed_eps0/lambda1e-3/lockat_baseline_online/"]
ac_fqi_eps0p1 = ["../../data/hyperparam_v5/acrobot/offline_learning/fqi/eps0.1/fqi/fqi-adam/alpha_hidden_epsilon/step15k_env/optimalfixed_eps0/lambda1e-3/lockat_baseline_online/"]
ac_rnd = [34, 4, 43, 30, 24, 32, 40, 11, 20, 30, 3, 16, 53, 45, 0, 21, 43, 23, 44, 50, 9, 41, 37, 37, 11, 2, 26, 33, 18, 20]
basepath = "../../data/finalPlots/data/hyperparam_v5/"
ac_true = [basepath + "acrobot/online_learning/esarsa/step15k/sweep/"]
ac_knnlaplace_optim_5k_plot1 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_500_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step500_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_1k_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_2500_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_5k_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep_rep1/"]
|
python
|
# Copyright 2021 Universidade da Coruña
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Miguel Ángel Abella González <[email protected]>
# Gabriel Rodríguez <[email protected]>
#
# Contact:
# Gabriel Rodríguez <[email protected]>
"""<replace_with_module_description>"""
from benchmarks.polybench import PolyBench
from benchmarks.polybench_classes import ArrayImplementation
from benchmarks.polybench_classes import PolyBenchOptions, PolyBenchSpec
from numpy.core.multiarray import ndarray
import numpy as np
class Trmm(PolyBench):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
implementation = options.POLYBENCH_ARRAY_IMPLEMENTATION
if implementation == ArrayImplementation.LIST:
return _StrategyList.__new__(_StrategyList, options, parameters)
elif implementation == ArrayImplementation.LIST_PLUTO:
return _StrategyListPluto.__new__(_StrategyListPluto, options, parameters)
elif implementation == ArrayImplementation.LIST_FLATTENED:
return _StrategyListFlattened.__new__(_StrategyListFlattened, options, parameters)
elif implementation == ArrayImplementation.NUMPY:
return _StrategyNumPy.__new__(_StrategyNumPy, options, parameters)
elif implementation == ArrayImplementation.LIST_FLATTENED_PLUTO:
return _StrategyListFlattenedPluto.__new__(_StrategyListFlattenedPluto, options, parameters)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
# The parameters hold the necessary information obtained from "polybench.spec" file
params = parameters.DataSets.get(self.DATASET_SIZE)
if not isinstance(params, dict):
raise NotImplementedError(f'Dataset size "{self.DATASET_SIZE.name}" not implemented '
f'for {parameters.Category}/{parameters.Name}.')
# Set up problem size from the given parameters (adapt this part with appropriate parameters)
self.M = params.get('M')
self.N = params.get('N')
def run_benchmark(self):
# Create data structures (arrays, auxiliary variables, etc.)
alpha = 1.5
A = self.create_array(2, [self.M, self.M], self.DATA_TYPE(0))
B = self.create_array(2, [self.M, self.N], self.DATA_TYPE(0))
# Initialize data structures
self.initialize_array(alpha, A, B)
# Benchmark the kernel
self.time_kernel(alpha, A, B)
# Return printable data as a list of tuples ('name', value).
# Each tuple element must have the following format:
# (A: str, B: matrix)
# - A: a representative name for the data (this string will be printed out)
# - B: the actual data structure holding the computed result
#
# The syntax for the return statement would then be:
# - For single data structure results:
# return [('data_name', data)]
# - For multiple data structure results:
# return [('matrix1', m1), ('matrix2', m2), ... ]
return [('B', B)]
class _StrategyList(Trmm):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyList)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, alpha, A: list, B: list):
for i in range(0, self.M):
for j in range(0, i):
A[i][j] = self.DATA_TYPE((i + j) % self.M) / self.M
A[i][i] = 1.0
for j in range(0, self.N):
B[i][j] = self.DATA_TYPE((self.N + (i - j)) % self.N) / self.N
def print_array_custom(self, B: list, name: str):
for i in range(0, self.M):
for j in range(0, self.N):
if (i * self.M + j) % 20 == 0:
self.print_message('\n')
self.print_value(B[i][j])
def kernel(self, alpha, A: list, B: list):
# BLAS parameters
# SIDE = 'L'
# UPLO = 'L'
# TRANSA = 'T'
# DIAG = 'U'
        # => Form B := alpha * A ** T * B.
# A is MxM
# B is MxN
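        # Illustrative example (values are made up, not part of the benchmark):
        # with M = 2, N = 1, A = [[1, 0], [a10, 1]] (unit lower triangular) and
        # B = [[b0], [b1]], the loops below compute
        #   B[0][0] = alpha * (b0 + a10 * b1)
        #   B[1][0] = alpha * b1
        # which is exactly B := alpha * A**T * B.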
        # scop begin
for i in range(0, self.M):
for j in range(0, self.N):
for k in range(i + 1, self.M):
B[i][j] += A[k][i] * B[k][j]
B[i][j] = alpha * B[i][j]
# scop end
class _StrategyListPluto(_StrategyList):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListPluto)
def kernel(self, alpha, A: list, B: list):
# scop begin
if((self.M-1>= 0) and (self.N-1>= 0)):
if((self.M-2>= 0)):
for c1 in range ((self.N-1)+1):
for c2 in range ((self.M-2)+1):
for c3 in range (c2 + 1 , (self.M-1)+1):
B[c2][c1] += A[c3][c2] * B[c3][c1]
for c1 in range ((self.M-1)+1):
for c2 in range ((self.N-1)+1):
B[c1][c2] = alpha * B[c1][c2]
# scop end
class _StrategyListFlattened(Trmm):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListFlattened)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
if options.LOAD_ELIMINATION: self.kernel = self.kernel_le
else: self.kernel = self.kernel_regular
def initialize_array(self, alpha, A: list, B: list):
for i in range(0, self.M):
for j in range(0, i):
A[self.M * i + j] = self.DATA_TYPE((i+j) % self.M) / self.M
A[self.M * i + i] = 1.0
for j in range(0, self.N):
B[self.N * i + j] = self.DATA_TYPE((self.N+(i-j)) % self.N) / self.N
def print_array_custom(self, B: list, name: str):
for i in range(0, self.M):
for j in range(0, self.N):
if (i * self.M + j) % 20 == 0:
self.print_message('\n')
self.print_value(B[self.N * i + j])
# Regular version
def kernel_regular(self, alpha, A: list, B: list):
        # scop begin
for i in range(0, self.M):
for j in range(0, self.N):
for k in range(i + 1, self.M):
B[self.N * i + j] += A[self.M * k + i] * B[self.N * k + j]
B[self.N * i + j] = alpha * B[self.N * i + j]
# scop end
# Load elimination
def kernel_le(self, alpha, A: list, B: list):
        # scop begin
for i in range(0, self.M):
for j in range(0, self.N):
tmp = B[self.N*i+j]
for k in range(i + 1, self.M):
tmp += A[self.M * k + i] * B[self.N * k + j]
B[self.N * i + j] = alpha * tmp
# scop end
class _StrategyNumPy(Trmm):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyNumPy)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, alpha, A: list, B: list):
for i in range(0, self.M):
for j in range(0, i):
A[i, j] = self.DATA_TYPE((i + j) % self.M) / self.M
A[i, i] = 1.0
for j in range(0, self.N):
B[i, j] = self.DATA_TYPE((self.N + (i - j)) % self.N) / self.N
def print_array_custom(self, B: ndarray, name: str):
for i in range(0, self.M):
for j in range(0, self.N):
if (i * self.M + j) % 20 == 0:
self.print_message('\n')
self.print_value(B[i, j])
def kernel(self, alpha, A: ndarray, B: ndarray):
# BLAS parameters
# SIDE = 'L'
# UPLO = 'L'
# TRANSA = 'T'
# DIAG = 'U'
        # => Form B := alpha * A ** T * B.
# A is MxM
# B is MxN
# scop begin
for i in range(0, self.M):
B[i,0:self.N] += (A[i+1:self.M,i,np.newaxis] * B[i+1:self.M,0:self.N]).sum(axis=0)
B[i,0:self.N] = alpha * B[i,0:self.N]
# scop end
class _StrategyListFlattenedPluto(_StrategyListFlattened):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListFlattenedPluto)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
self.kernel_vectorizer = self.kernel_pluto
self.kernel = getattr( self, "kernel_%s" % (options.POCC) )
def kernel_pluto(self, alpha, A: list, B: list):
# --pluto
# scop begin
if((self.M-1>= 0) and (self.N-1>= 0)):
if((self.M-2>= 0)):
for c1 in range ((self.N-1)+1):
for c2 in range ((self.M-2)+1):
for c3 in range (c2 + 1 , (self.M-1)+1):
B[self.N*(c2) + c1] += A[self.M*(c3) + c2] * B[self.N*(c3) + c1]
for c1 in range ((self.M-1)+1):
for c2 in range ((self.N-1)+1):
B[self.N*(c1) + c2] = alpha * B[self.N*(c1) + c2]
# scop end
def kernel_maxfuse(self, alpha, A: list, B: list):
# --pluto --pluto-fuse maxfuse
# scop begin
if((self.M-1>= 0) and (self.N-1>= 0)):
if((self.M-2>= 0)):
for c0 in range ((self.N-1)+1):
for c1 in range ((self.M-2)+1):
for c4 in range (c1 + 1 , (self.M-1)+1):
B[(c1)*self.N + c0] += A[(c4)*self.M + c1] * B[(c4)*self.N + c0]
B[(c1)*self.N + c0] = alpha * B[(c1)*self.N + c0]
B[(self.M + -1)*self.N + c0] = alpha * B[(self.M + -1)*self.N + c0]
if self.M == 1:
for c0 in range ((self.N-1)+1):
B[(0)*self.N + c0] = alpha * B[(0)*self.N + c0]
# scop end
|
python
|
"""
Code for the paper "Mesh Classification with Dilated Mesh Convolutions."
published in 2021 IEEE International Conference on Image Processing.
Code Author: Vinit Veerendraveer Singh.
Copyright (c) VIMS Lab and its affiliates.
We adapt MeshNet to perform dilated convolutions by replacing its Mesh Convolution
block with the Stacked Dilated Mesh Convolution block.
This file tests the redesigned model after training.
Note: For the ease of exposition and to keep this file coherent with the train.py
in the original MeshNet code, we do not add code comments to this file.
"""
import os
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.utils.data as data
from config import get_test_config
from data import ModelNet40
from models import MeshNet
dataset = 'ModelNet40'
cfg = get_test_config(dataset=dataset)
os.environ['CUDA_VISIBLE_DEVICES'] = cfg['cuda_devices']
data_set = ModelNet40(cfg=cfg[dataset], part='test')
data_loader = data.DataLoader(data_set,
batch_size=1,
num_workers=4,
shuffle=True,
pin_memory=False)
def test_model(model):
correct_num = 0
for (centers, corners, normals, neighbors, rings, targets) in data_loader:
corners = corners - torch.cat([centers, centers, centers], 1)
centers = Variable(torch.cuda.FloatTensor(centers.cuda()))
corners = Variable(torch.cuda.FloatTensor(corners.cuda()))
normals = Variable(torch.cuda.FloatTensor(normals.cuda()))
for idx, ring in enumerate(rings):
rings[idx] = Variable(torch.cuda.LongTensor(ring.cuda()))
targets = Variable(torch.cuda.LongTensor(targets.cuda()))
outputs, _ = model(centers, corners, normals, neighbors, rings)
_, preds = torch.max(outputs, 1)
if preds[0] == targets[0]:
correct_num += 1
print('Accuracy: {:.4f}'.format(float(correct_num) / len(data_set)))
if __name__ == '__main__':
os.environ['PYTHONHASHSEED'] = str(0)
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_deterministic(False)
model_ft = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
model_ft.cuda()
model_ft = nn.DataParallel(model_ft)
model_ft.load_state_dict(torch.load(cfg[dataset]['load_model']))
model_ft.eval()
test_model(model_ft)
|
python
|
from collections import namedtuple
import hexbytes
from eth_utils import is_checksum_address
from relay.signing import eth_sign, eth_validate, keccak256
EcSignature = namedtuple("EcSignature", "v r s")
class Order(object):
def __init__(
self,
exchange_address: str,
maker_address: str,
taker_address: str,
maker_token: str,
taker_token: str,
fee_recipient: str,
maker_token_amount: int,
taker_token_amount: int,
maker_fee: int,
taker_fee: int,
expiration_timestamp_in_sec: int,
salt: int,
v: int,
r: hexbytes.HexBytes,
s: hexbytes.HexBytes,
filled_maker_token_amount: int = 0,
filled_taker_token_amount: int = 0,
cancelled_maker_token_amount: int = 0,
cancelled_taker_token_amount: int = 0,
) -> None:
self.exchange_address = exchange_address
self.maker_address = maker_address
self.taker_address = taker_address
self.maker_token = maker_token
self.taker_token = taker_token
self.fee_recipient = fee_recipient
self.maker_token_amount = maker_token_amount
self.taker_token_amount = taker_token_amount
self.maker_fee = maker_fee
self.taker_fee = taker_fee
self.expiration_timestamp_in_sec = expiration_timestamp_in_sec
self.salt = salt
self.v = v
self.r = r
self.s = s
self.filled_maker_token_amount = filled_maker_token_amount
self.filled_taker_token_amount = filled_taker_token_amount
self.cancelled_maker_token_amount = cancelled_maker_token_amount
self.cancelled_taker_token_amount = cancelled_taker_token_amount
@property
def price(self) -> float:
return self.taker_token_amount / self.maker_token_amount
@property
def available_maker_token_amount(self) -> float:
return (
self.maker_token_amount
- self.filled_maker_token_amount
- self.cancelled_maker_token_amount
)
@property
def available_taker_token_amount(self) -> float:
return (
self.taker_token_amount
- self.filled_taker_token_amount
- self.cancelled_taker_token_amount
)
@property
def ec_signature(self):
return EcSignature(self.v, self.r, self.s)
def validate(self) -> bool:
return self.validate_signature() and self.validate_addresses()
def validate_signature(self) -> bool:
return eth_validate(self.hash(), (self.v, self.r, self.s), self.maker_address)
def validate_addresses(self) -> bool:
for address in [
self.exchange_address,
self.maker_token,
self.taker_token,
self.fee_recipient,
]:
if not is_checksum_address(address):
return False
return True
def is_expired(self, current_timestamp_in_sec: int) -> bool:
return current_timestamp_in_sec > self.expiration_timestamp_in_sec
def is_filled(self) -> bool:
return (
self.available_maker_token_amount <= 0
or self.available_taker_token_amount <= 0
)
def hash(self) -> hexbytes.HexBytes:
return hexbytes.HexBytes(
keccak256(
self.exchange_address,
self.maker_address,
self.taker_address,
self.maker_token,
self.taker_token,
self.fee_recipient,
self.maker_token_amount,
self.taker_token_amount,
self.maker_fee,
self.taker_fee,
self.expiration_timestamp_in_sec,
self.salt,
)
)
def __eq__(self, other: object) -> bool:
if isinstance(other, Order):
return self.hash() == other.hash()
else:
return False
class SignableOrder(Order):
def __init__(
self,
exchange_address: str,
maker_address: str,
taker_address: str,
maker_token: str,
taker_token: str,
fee_recipient: str,
maker_token_amount: int,
taker_token_amount: int,
maker_fee: int,
taker_fee: int,
expiration_timestamp_in_sec: int,
salt: int,
) -> None:
super().__init__(
exchange_address,
maker_address,
taker_address,
maker_token,
taker_token,
fee_recipient,
maker_token_amount,
taker_token_amount,
maker_fee,
taker_fee,
expiration_timestamp_in_sec,
salt,
v=0,
r=hexbytes.HexBytes(b""),
s=hexbytes.HexBytes(b""),
)
def sign(self, key) -> None:
v, r, s = eth_sign(self.hash(), key)
self.v = v
self.r = hexbytes.HexBytes(r)
self.s = hexbytes.HexBytes(s)
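# Illustrative signing flow (a sketch; the key and order fields are placeholders,
# not values taken from this codebase):
#   order = SignableOrder(exchange_address, maker_address, taker_address,
#                         maker_token, taker_token, fee_recipient,
#                         maker_token_amount, taker_token_amount,
#                         maker_fee, taker_fee, expiration_timestamp_in_sec, salt)
#   order.sign(private_key)            # fills v, r, s via eth_sign(order.hash(), key)
#   assert order.validate_signature()  # recovers the maker address from (v, r, s)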
|
python
|
#!/usr/bin/env python3
# Test whether a client subscribed to a topic receives its own message sent to that topic, for long topics.
from mosq_test_helper import *
def do_test(topic, succeeds):
rc = 1
mid = 53
keepalive = 60
connect_packet = mosq_test.gen_connect("subpub-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, topic, 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish(topic, qos=0, payload="message")
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
if succeeds == True:
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
mosq_test.do_send_receive(sock, publish_packet, publish_packet, "publish")
else:
mosq_test.do_send_receive(sock, subscribe_packet, b"", "suback")
rc = 0
sock.close()
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
do_test("/"*200, True) # 200 max hierarchy limit
do_test("abc/"*199+"d", True) # 200 max hierarchy limit, longer overall string than 200
do_test("/"*201, False) # Exceeds 200 max hierarchy limit
do_test("abc/"*201+"d", False) # Exceeds 200 max hierarchy limit, longer overall string than 200
exit(0)
|
python
|
import numpy as np
from pydeeprecsys.rl.agents.agent import ReinforcementLearning
from typing import Any, List, Optional
from pydeeprecsys.rl.experience_replay.experience_buffer import ExperienceReplayBuffer
from pydeeprecsys.rl.experience_replay.buffer_parameters import (
ExperienceReplayBufferParameters,
)
from pydeeprecsys.rl.neural_networks.policy_estimator import PolicyEstimator
from torch import FloatTensor
class ReinforceAgent(ReinforcementLearning):
"""Policy estimator using a value estimator as a baseline.
It's on-policy, for discrete action spaces, and episodic environments."""
def __init__(
self,
n_actions: int,
state_size: int,
hidden_layers: Optional[List[int]] = None,
        discount_factor: float = 0.99,  # a.k.a. gamma
learning_rate=1e-3,
):
self.episode_count = 0
if not hidden_layers:
hidden_layers = [state_size * 2, state_size * 2]
self.policy_estimator = PolicyEstimator(
state_size,
hidden_layers,
n_actions,
learning_rate=learning_rate,
)
self.discount_factor = discount_factor
# starts the buffer
self.reset_buffer()
def reset_buffer(self):
self.buffer = ExperienceReplayBuffer(
ExperienceReplayBufferParameters(10000, 1, 1)
)
def top_k_actions_for_state(self, state: Any, k: int = 1) -> List[int]:
return self.policy_estimator.predict(state, k=k)
def action_for_state(self, state: Any) -> int:
return self.top_k_actions_for_state(state)[0]
def store_experience(
self, state: Any, action: Any, reward: float, done: bool, new_state: Any
):
state_flat = state.flatten()
new_state_flat = new_state.flatten()
self.buffer.store_experience(state_flat, action, reward, done, new_state_flat)
# FIXME: should learn after every episode, or after every N experiences?
if done: # and self.buffer.ready_to_predict():
self.learn_from_experiences()
self.reset_buffer()
    def discounted_rewards(self, rewards: np.ndarray) -> np.ndarray:
        """From the rewards obtained in an episode, compute the discounted
        returns and standardize them (subtract the mean, divide by the
        standard deviation) so they can be used as advantages."""
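        # Example with made-up numbers: rewards [1, 1, 1] and a discount factor of
        # 0.9 give discounted returns [2.71, 1.9, 1.0], which are then standardized
        # to zero mean and unit variance before being returned.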
discount_r = np.zeros_like(rewards)
timesteps = range(len(rewards))
reward_sum = 0
for i in reversed(timesteps):
reward_sum = rewards[i] + self.discount_factor * reward_sum
discount_r[i] = reward_sum
return_mean = discount_r.mean()
return_std = discount_r.std()
baseline = (discount_r - return_mean) / return_std
return baseline
def learn_from_experiences(self):
experiences = list(self.buffer.experience_queue)
states, actions, rewards, dones, next_states = zip(*experiences)
advantages = self.discounted_rewards(rewards)
advantages_tensor = FloatTensor(advantages).to(
device=self.policy_estimator.device
)
self.policy_estimator.update(states, advantages_tensor, actions)
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import monitored_resource_pb2 # type: ignore
from google.cloud.logging_v2.types import log_entry
from google.protobuf import duration_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.logging.v2',
manifest={
'DeleteLogRequest',
'WriteLogEntriesRequest',
'WriteLogEntriesResponse',
'WriteLogEntriesPartialErrors',
'ListLogEntriesRequest',
'ListLogEntriesResponse',
'ListMonitoredResourceDescriptorsRequest',
'ListMonitoredResourceDescriptorsResponse',
'ListLogsRequest',
'ListLogsResponse',
'TailLogEntriesRequest',
'TailLogEntriesResponse',
},
)
class DeleteLogRequest(proto.Message):
r"""The parameters to DeleteLog.
Attributes:
log_name (str):
Required. The resource name of the log to delete:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example,
``"projects/my-project-id/logs/syslog"``,
``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
For more information about log names, see
[LogEntry][google.logging.v2.LogEntry].
"""
log_name = proto.Field(
proto.STRING,
number=1,
)
class WriteLogEntriesRequest(proto.Message):
r"""The parameters to WriteLogEntries.
Attributes:
log_name (str):
Optional. A default log resource name that is assigned to
all log entries in ``entries`` that do not specify a value
for ``log_name``:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example:
::
"projects/my-project-id/logs/syslog"
"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"
The permission ``logging.logEntries.create`` is needed on
each project, organization, billing account, or folder that
is receiving new log entries, whether the resource is
specified in ``logName`` or in an individual log entry.
resource (google.api.monitored_resource_pb2.MonitoredResource):
Optional. A default monitored resource object that is
assigned to all log entries in ``entries`` that do not
specify a value for ``resource``. Example:
::
{ "type": "gce_instance",
"labels": {
"zone": "us-central1-a", "instance_id": "00000000000000000000" }}
See [LogEntry][google.logging.v2.LogEntry].
labels (Mapping[str, str]):
Optional. Default labels that are added to the ``labels``
field of all log entries in ``entries``. If a log entry
already has a label with the same key as a label in this
parameter, then the log entry's label is not changed. See
[LogEntry][google.logging.v2.LogEntry].
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
Required. The log entries to send to Logging. The order of
log entries in this list does not matter. Values supplied in
this method's ``log_name``, ``resource``, and ``labels``
fields are copied into those log entries in this list that
do not include values for their corresponding fields. For
more information, see the
[LogEntry][google.logging.v2.LogEntry] type.
If the ``timestamp`` or ``insert_id`` fields are missing in
log entries, then this method supplies the current time or a
unique identifier, respectively. The supplied values are
chosen so that, among the log entries that did not supply
their own values, the entries earlier in the list will sort
before the entries later in the list. See the
``entries.list`` method.
Log entries with timestamps that are more than the `logs
retention
period <https://cloud.google.com/logging/quota-policy>`__ in
the past or more than 24 hours in the future will not be
available when calling ``entries.list``. However, those log
entries can still be `exported with
LogSinks <https://cloud.google.com/logging/docs/api/tasks/exporting-logs>`__.
To improve throughput and to avoid exceeding the `quota
limit <https://cloud.google.com/logging/quota-policy>`__ for
calls to ``entries.write``, you should try to include
several log entries in this list, rather than calling this
method for each individual log entry.
partial_success (bool):
Optional. Whether valid entries should be written even if
some other entries fail due to INVALID_ARGUMENT or
PERMISSION_DENIED errors. If any entry is not written, then
the response status is the error associated with one of the
failed entries and the response includes error details keyed
by the entries' zero-based index in the ``entries.write``
method.
dry_run (bool):
Optional. If true, the request should expect
normal response, but the entries won't be
persisted nor exported. Useful for checking
whether the logging API endpoints are working
properly before sending valuable data.
"""
log_name = proto.Field(
proto.STRING,
number=1,
)
resource = proto.Field(
proto.MESSAGE,
number=2,
message=monitored_resource_pb2.MonitoredResource,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
entries = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=log_entry.LogEntry,
)
partial_success = proto.Field(
proto.BOOL,
number=5,
)
dry_run = proto.Field(
proto.BOOL,
number=6,
)
class WriteLogEntriesResponse(proto.Message):
r"""Result returned from WriteLogEntries.
"""
class WriteLogEntriesPartialErrors(proto.Message):
r"""Error details for WriteLogEntries with partial success.
Attributes:
log_entry_errors (Mapping[int, google.rpc.status_pb2.Status]):
When ``WriteLogEntriesRequest.partial_success`` is true,
records the error status for entries that were not written
due to a permanent error, keyed by the entry's zero-based
index in ``WriteLogEntriesRequest.entries``.
Failed requests for which no entries are written will not
include per-entry errors.
"""
log_entry_errors = proto.MapField(
proto.INT32,
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
class ListLogEntriesRequest(proto.Message):
r"""The parameters to ``ListLogEntries``.
Attributes:
resource_names (Sequence[str]):
Required. Names of one or more parent resources from which
to retrieve log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
Projects listed in the ``project_ids`` field are added to
this list.
filter (str):
Optional. A filter that chooses which log entries to return.
See `Advanced Logs
Queries <https://cloud.google.com/logging/docs/view/advanced-queries>`__.
Only log entries that match the filter are returned. An
empty filter matches all log entries in the resources listed
in ``resource_names``. Referencing a parent resource that is
not listed in ``resource_names`` will cause the filter to
return no results. The maximum length of the filter is 20000
characters.
order_by (str):
Optional. How the results should be sorted. Presently, the
only permitted values are ``"timestamp asc"`` (default) and
``"timestamp desc"``. The first option returns entries in
order of increasing values of ``LogEntry.timestamp`` (oldest
first), and the second option returns entries in order of
decreasing timestamps (newest first). Entries with equal
timestamps are returned in order of their ``insert_id``
values.
page_size (int):
Optional. The maximum number of results to return from this
request. Default is 50. If the value is negative or exceeds
1000, the request is rejected. The presence of
``next_page_token`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``page_token`` must be the value of ``next_page_token`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=8,
)
filter = proto.Field(
proto.STRING,
number=2,
)
order_by = proto.Field(
proto.STRING,
number=3,
)
page_size = proto.Field(
proto.INT32,
number=4,
)
page_token = proto.Field(
proto.STRING,
number=5,
)
class ListLogEntriesResponse(proto.Message):
r"""Result returned from ``ListLogEntries``.
Attributes:
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
A list of log entries. If ``entries`` is empty,
``nextPageToken`` may still be returned, indicating that
more entries may exist. See ``nextPageToken`` for more
information.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
If a value for ``next_page_token`` appears and the
``entries`` field is empty, it means that the search found
no log entries so far but it did not have time to search all
the possible log entries. Retry the method with this value
for ``page_token`` to continue the search. Alternatively,
consider speeding up the search by changing your filter to
specify a single log name or resource type, or to narrow the
time range of the search.
"""
@property
def raw_page(self):
return self
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=log_entry.LogEntry,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListMonitoredResourceDescriptorsRequest(proto.Message):
r"""The parameters to ListMonitoredResourceDescriptors
Attributes:
page_size (int):
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. The presence of
``nextPageToken`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``pageToken`` must be the value of ``nextPageToken`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
"""
page_size = proto.Field(
proto.INT32,
number=1,
)
page_token = proto.Field(
proto.STRING,
number=2,
)
class ListMonitoredResourceDescriptorsResponse(proto.Message):
r"""Result returned from ListMonitoredResourceDescriptors.
Attributes:
resource_descriptors (Sequence[google.api.monitored_resource_pb2.MonitoredResourceDescriptor]):
A list of resource descriptors.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
"""
@property
def raw_page(self):
return self
resource_descriptors = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=monitored_resource_pb2.MonitoredResourceDescriptor,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListLogsRequest(proto.Message):
r"""The parameters to ListLogs.
Attributes:
parent (str):
Required. The resource name that owns the logs:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]".
page_size (int):
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. The presence of
``nextPageToken`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``pageToken`` must be the value of ``nextPageToken`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
resource_names (Sequence[str]):
Optional. The resource name that owns the logs:
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
To support legacy queries, it could also be:
"projects/[PROJECT_ID]" "organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]".
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
resource_names = proto.RepeatedField(
proto.STRING,
number=8,
)
class ListLogsResponse(proto.Message):
r"""Result returned from ListLogs.
Attributes:
log_names (Sequence[str]):
A list of log names. For example,
``"projects/my-project/logs/syslog"`` or
``"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
"""
@property
def raw_page(self):
return self
log_names = proto.RepeatedField(
proto.STRING,
number=3,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class TailLogEntriesRequest(proto.Message):
r"""The parameters to ``TailLogEntries``.
Attributes:
resource_names (Sequence[str]):
Required. Name of a parent resource from which to retrieve
log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views:
"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]".
filter (str):
Optional. A filter that chooses which log entries to return.
See `Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Only log entries that match the filter are returned. An
empty filter matches all log entries in the resources listed
in ``resource_names``. Referencing a parent resource that is
not in ``resource_names`` will cause the filter to return no
results. The maximum length of the filter is 20000
characters.
buffer_window (google.protobuf.duration_pb2.Duration):
Optional. The amount of time to buffer log
entries at the server before being returned to
prevent out of order results due to late
arriving log entries. Valid values are between
0-60000 milliseconds. Defaults to 2000
milliseconds.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=1,
)
filter = proto.Field(
proto.STRING,
number=2,
)
buffer_window = proto.Field(
proto.MESSAGE,
number=3,
message=duration_pb2.Duration,
)
class TailLogEntriesResponse(proto.Message):
r"""Result returned from ``TailLogEntries``.
Attributes:
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
A list of log entries. Each response in the stream will
order entries with increasing values of
``LogEntry.timestamp``. Ordering is not guaranteed between
separate responses.
suppression_info (Sequence[google.cloud.logging_v2.types.TailLogEntriesResponse.SuppressionInfo]):
If entries that otherwise would have been
included in the session were not sent back to
the client, counts of relevant entries omitted
from the session with the reason that they were
not included. There will be at most one of each
reason per response. The counts represent the
number of suppressed entries since the last
streamed response.
"""
class SuppressionInfo(proto.Message):
r"""Information about entries that were omitted from the session.
Attributes:
reason (google.cloud.logging_v2.types.TailLogEntriesResponse.SuppressionInfo.Reason):
The reason that entries were omitted from the
session.
suppressed_count (int):
A lower bound on the count of entries omitted due to
``reason``.
"""
class Reason(proto.Enum):
r"""An indicator of why entries were omitted."""
REASON_UNSPECIFIED = 0
RATE_LIMIT = 1
NOT_CONSUMED = 2
reason = proto.Field(
proto.ENUM,
number=1,
enum='TailLogEntriesResponse.SuppressionInfo.Reason',
)
suppressed_count = proto.Field(
proto.INT32,
number=2,
)
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=log_entry.LogEntry,
)
suppression_info = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=SuppressionInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
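# Minimal construction sketch (illustrative; the project id and payload are
# placeholders, and LogEntry comes from google.cloud.logging_v2.types):
#   request = WriteLogEntriesRequest(
#       log_name="projects/my-project-id/logs/syslog",
#       entries=[log_entry.LogEntry(text_payload="hello world")],
#   )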
|
python
|
from source.exceptions.not_found import NotFoundException
from source.repositories.player_tournament import PlayerTournamentRepository
import source.commons.message as message
class PlayerTournamentBusiness:
def __init__(self):
self.player_tournament_repository = PlayerTournamentRepository()
def find_all(self):
result = self.player_tournament_repository.find_all()
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def get_ranking(self, tournament_id):
result = self.player_tournament_repository.get_ranking(tournament_id)
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def get_player_tournament(self, data):
result = self.player_tournament_repository.get_player_tournament(data)
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def find_by_id(self, field_id):
result = self.player_tournament_repository.find_by_id(field_id)
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def save(self, data):
return self.player_tournament_repository.save(
data.get('player_id'),
data.get('tournament_id'),
data.get('position'),
data.get('points_acum'),
data.get('adm')
)
def update(self, field_id, data):
if not self.player_tournament_repository.find_by_id(field_id):
raise NotFoundException(None, message.PLAYER_NOT_FOUND)
for i in data:
self.player_tournament_repository.update(field_id, i, data[i])
return []
def delete(self, field_id):
if not self.player_tournament_repository.find_by_id(field_id):
raise NotFoundException(None, message.PLAYER_NOT_FOUND)
self.player_tournament_repository.delete(field_id)
return []
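# Illustrative usage (a sketch; ids and values are placeholders, keys match save() above):
#   business = PlayerTournamentBusiness()
#   business.save({"player_id": 1, "tournament_id": 2, "position": 1,
#                  "points_acum": 10, "adm": False})
#   business.get_ranking(2)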
|
python
|
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple, Union, Sequence, List
from pydantic import BaseModel, validator
class MdocSectionData(BaseModel):
"""Data model for section data in a SerialEM mdoc file.
https://bio3d.colorado.edu/SerialEM/hlp/html/about_formats.htm
"""
ZValue: Optional[int]
TiltAngle: Optional[float]
PieceCoordinates: Optional[Tuple[float, float, int]]
StagePosition: Tuple[float, float]
StageZ: Optional[float]
Magnification: Optional[float]
CameraLength: Optional[float]
MagIndex: Optional[int]
Intensity: Optional[float]
SuperMontCoords: Optional[Tuple[float, float]]
PixelSpacing: Optional[float]
ExposureDose: Optional[float]
DoseRate: Optional[float]
SpotSize: Optional[float]
Defocus: Optional[float]
TargetDefocus: Optional[float]
ImageShift: Optional[Tuple[float, float]]
RotationAngle: Optional[float]
ExposureTime: Optional[float]
Binning: Optional[int]
UsingCDS: Optional[bool]
CameraIndex: Optional[int]
DividedBy2: Optional[bool]
LowDoseConSet: Optional[int]
MinMaxMean: Optional[Tuple[float, float, float]]
PriorRecordDose: Optional[float]
XedgeDxy: Optional[Tuple[float, float]]
YedgeDxy: Optional[Tuple[float, float]]
XedgeDxyVS: Optional[Tuple[float, float]]
YedgeDxyVS: Optional[Tuple[float, float]]
StageOffsets: Optional[Tuple[float, float]]
AlignedPieceCoords: Optional[Tuple[float, float]]
AlignedPieceCoordsVS: Optional[Tuple[float, float]]
SubFramePath: Optional[Path]
NumSubFrames: Optional[int]
FrameDosesAndNumbers: Optional[Sequence[Tuple[float, int]]]
DateTime: Optional[datetime]
NavigatorLabel: Optional[str]
FilterSlitAndLoss: Optional[Tuple[float, float]]
ChannelName: Optional[str]
MultiShotHoleAndPosition: Optional[Union[Tuple[int, int], Tuple[int, int, int]]]
CameraPixelSize: Optional[float]
Voltage: Optional[float]
@validator(
'PieceCoordinates',
'SuperMontCoords',
'ImageShift',
'MinMaxMean',
'StagePosition',
'XedgeDxy',
'YedgeDxy',
'XedgeDxyVS',
        'YedgeDxyVS',
'StageOffsets',
'AlignedPieceCoords',
'AlignedPieceCoordsVS',
'FrameDosesAndNumbers',
'FilterSlitAndLoss',
'MultiShotHoleAndPosition',
pre=True)
def multi_number_string_to_tuple(cls, value: str):
return tuple(value.split())
@validator('DateTime', pre=True)
def mdoc_datetime_to_datetime(cls, value: str):
return datetime.strptime(value, '%d-%b-%y %H:%M:%S', )
@classmethod
def from_lines(cls, lines: List[str]):
lines = [line.strip('[]')
for line
in lines
if len(line) > 0]
key_value_pairs = [line.split('=') for line in lines]
key_value_pairs = [
(k.strip(), v.strip())
for k, v
in key_value_pairs
]
lines = {k: v for k, v in key_value_pairs}
return cls(**lines)
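# Illustrative parse of one mdoc section (field values are made up):
#   section = MdocSectionData.from_lines([
#       "[ZValue = 0]",
#       "TiltAngle = -60.0",
#       "StagePosition = 12.3 -4.5",
#       "DateTime = 01-Jan-21 12:00:00",
#   ])
#   section.StagePosition  # -> (12.3, -4.5)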
|
python
|
from logging import getLogger
from typing import List
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import backref, relationship
from app.models import orm
from app.settings import env_settings
logger = getLogger(__name__)
class ActorEntryAssociation(orm.Base):
actor_id = Column(Integer, ForeignKey("actor.id"), primary_key=True)
entry_id = Column(Integer, ForeignKey("entry.id"), primary_key=True)
role = Column(String)
def __init__(self, actor_id: int, role: str):
self.actor_id = actor_id
self.role = role
def __repr__(self):
if not env_settings().is_dev():
logger.warning(
f"Calling {self.__class__.__name__} __repr__ might cause additional select queries"
)
return "entry actor: %s: %s:%s " % (
self.entry.title[:40] + "..."
if len(self.entry.title) > 40
else self.entry.title,
self.actor.registered_name,
self.role,
)
def csv_format(self, sep: str):
return self.actor.registered_name + sep + self.role
class EntryTagAssociation(orm.Base):
entry_id = Column(Integer, ForeignKey("entry.id"), primary_key=True)
tag_id = Column(Integer, ForeignKey("tag.id"), primary_key=True)
entry = relationship(orm.Entry, back_populates="tags")
tag = relationship(orm.Tag, backref=backref("entries_tag"))
group_name = Column(String, nullable=True)
config = Column(JSONB)
def __init__(self, tag: orm.Tag, group_name: str):
self.tag = tag
self.group_name = group_name
def __repr__(self):
if not env_settings().is_dev():
logger.warning(
f"Calling {self.__class__.__name__} __repr__ might cause additional select queries"
)
return "entry tag: %s -> %s " % (
self.entry.title[:40] + "..."
if len(self.entry.title) > 40
else self.entry.title,
self.tag.value,
)
class EntryEntryAssociation(orm.Base):
id = Column(Integer, primary_key=True, autoincrement=True)
source_id = Column(Integer, ForeignKey("entry.id"))
destination_id = Column(Integer, ForeignKey("entry.id"))
source = relationship(orm.Entry, foreign_keys=[source_id])
destination = relationship(orm.Entry, foreign_keys=[destination_id])
# maybe also primary_key=True, if there could be multiple types of links between 2 entries
# reference_type = Column(String, index=True, nullable=True)
reference = Column(JSONB, nullable=True, default={})
def __init__(self, source: orm.Entry, destination: orm.Entry, reference: dict):
self.source = source
self.destination = destination
self.reference = reference
def __repr__(self):
return (
f"Entry-Entry ref: {self.source.id}/{self.source.slug} -> "
f"{self.destination.id}/{self.destination.slug}: {self.reference}"
)
class EntryTranslation(orm.Base):
id = Column(Integer, primary_key=True)
entries = relationship("Entry", back_populates="translation_group")
    # keep this explicit constructor so that building a translation group from a list of entries raises no issue
def __init__(self, entries: List[orm.Entry]):
self.entries = entries
# class ActorTagAssociation(orm.Base):
# actor_id = Column(Integer, ForeignKey("actor.id"), primary_key=True)
# tag_id = Column(Integer, ForeignKey("tag.id"), primary_key=True)
#
# def __repr__(self):
# if not env_settings().is_dev():
# logger.warning(
# f"Calling {self.__class__.__name__} __repr__ might cause additional select queries"
# )
# return "actor tag: %s: %s " % (
# self.entry.title[:40] + "..."
# if len(self.entry.title) > 40
# else self.entry.title,
# self.actor.registered_name,
# )
|
python
|
import numpy as np
import pandas as pd
import os
import sklearn
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# normalize numerical columns
# one-hot categorical columns
def get_data(classification=True, regression=False, download=False):
url = 'https://raw.githubusercontent.com/Shane-Neeley/DrugMarket/master/drugmarket/drugmarket_dataframe.tsv'
if download:
df = pd.read_csv('drugmarket_dataframe.tsv', dtype={'MC':np.int64}, sep="\t")
else:
df = pd.read_csv(url, dtype={'MC':np.int64}, sep="\t")
# remove outliers
df = df[df['MC'] > 0]
# df = df[ (df['Phase 4'] > 0) | (df['Phase 3'] > 0) | (df['Phase 2'] > 0) | (df['Phase 1'] > 0)] # has any trials
# df = df[ (df['Phase 4'] < 500) | (df['Phase 3'] < 500) | (df['Phase 2'] < 500) | (df['Phase 1'] < 500)] # has too many trials
    df = df[df['Symbol'] != "SYK"]  # Stryker is an outlier
# easier to work with numpy array
data = df.values
# create a final output column of a category
# 1 = >$1Billion market cap, 0 = less
categ = np.array(data[:, -1] > 1e9, dtype=bool).astype(int)
categ = np.array([categ]).T
data = np.concatenate((data,categ),1)
# shuffle it
np.random.shuffle(data)
# split features and labels
    X = data[:, 3:-2].astype(np.int64)  # feature columns: index 3 up to, but excluding, the last two (market cap and class label)
if (classification == True):
Y = data[:, -1].astype(np.int64) # this is the last column, 0 or 1 class for billion dollar valuation
if (regression == True):
Y = data[:, -2].astype(np.int64) # continuous value for marketcap
# print(df)
print(X)
# print('X.shape before')
# print(X.shape)
# Too many tags, do dimensionality reduction just on the tags (column 4 and on ..)
pca = PCA()
reduced = pca.fit_transform(X[:, 4:])
# print('reduced.shape before')
# print(reduced.shape)
# plt.scatter(reduced[:,0], reduced[:,1], s=100, c=Y, alpha=0.5)
# plt.title('reduced')
# plt.show()
reduced = reduced[:, :25] # .. however much cutoff u want
# print('reduced.shape after cutoff')
# print(reduced.shape)
# make new X
X = np.concatenate((X[:,:4], reduced),1)
#X = X[:,:4] # without tag data
# print('X.shape after concatenate')
# print(X.shape)
# print(X)
# plt.plot(pca.explained_variance_ratio_)
# plt.title('explained_variance_ratio_')
# plt.show()
# cumulative variance
# choose k = number of dimensions that gives us 95-99% variance
cumulative = []
last = 0
for v in pca.explained_variance_ratio_:
cumulative.append(last + v)
last = cumulative[-1]
# plt.plot(cumulative)
# plt.title('cumulative')
# plt.show()
print('size X: ' + str(X.shape))
print('size Y: ' + str(Y.shape))
# normalize phase columns by X - mean / std
for i in (0, 1, 2, 3):
m = X[:, i].mean()
s = X[:, i].std()
X[:, i] = (X[:, i] - m) / s
return X, Y, data
if __name__ == '__main__':
get_data()
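# Typical use (illustrative):
#   X, Y, data = get_data(classification=True)
#   # X: 4 standardized trial-phase counts plus 25 PCA components of the tag columns
#   # Y: 1 if the company's market cap exceeds $1B, else 0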
|
python
|
import logging, sys, os, json, uuid
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
CLI_MODE = len(sys.argv) > 1 and sys.argv[1] == '--cli'
if CLI_MODE:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logging.debug("CLI MODE!")
parameters, datapackage, resources = {}, {}, []
else:
parameters, datapackage, resources = ingest()
default_parameters = {"num-rows": int(os.environ.get("NUM_ROWS", "10"))}
parameters = dict(default_parameters, **parameters)
logging.info(parameters)
stats = {}
aggregations = {"stats": stats}
def get_resource():
for i in range(0, parameters["num-rows"]):
yield {"uuid": str(uuid.uuid1()), "row_num": i}
if CLI_MODE:
for row in get_resource():
print(row)
else:
resource_descriptor = {PROP_STREAMING: True,
"name": "noise",
"path": "noise.csv",
"schema": {"fields": [{"name": "uuid", "type": "string"},
{"name": "row_num", "type": "integer"}],
"primaryKey": ["uuid"]}}
spew(dict(datapackage, resources=[resource_descriptor]),
[get_resource()], aggregations["stats"])
|
python
|
#!/usr/bin/python
import logging
import os
import json
import io
import uuid
# --------------------------------------------------------------------------------------
# Save this code in file "process_wrapper.py" and adapt as indicated in inline comments.
#
# Notes:
# - This is a Python 3 script.
# - The inputs will be given values by name, thus their order has no importance ...
# - ... except that the inputs with a default value must be listed last.
# - Parameter names are automatically converted into valid Python variable names.
# - Any empty line or line starting with a '#' character will be ignored.
# --------------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
stream = io.StringIO()
handler = logging.StreamHandler(stream)
logger.addHandler(handler)
max_cpu=8
def execute(out_dir, collection_dir, models_dir, tilesAndShapes_json, daterange_json):
"""
Inputs:
collection_dir -- collection_dir -- 45/User String
models_dir -- models_dir -- 45/User String
tilesAndShapes_json -- tilesAndShapes_json -- 45/User String
daterange_json -- daterange_json -- 45/User String
Outputs:
segmentedfiles_json -- segmentedfiles_json -- 45/User String
exceptionLog -- exceptionLog -- 45/User String
Main Dependency:
mep-wps/uc-bundle-1
Software Dependencies:
pywps-4
Processing Resources:
ram -- 15
disk -- 10
cpu -- 8
"""
segmentedfiles_json = None
exceptionLog=None
# ----------------------------------------------------------------------------------
# Insert your own code below.
# The files generated by your code must be stored in the "out_dir" folder.
# Only the content of that folder is persisted in the datastore.
# Give appropriate values to the output parameters. These will be passed to the next
# process(es) following the workflow connections.
# ----------------------------------------------------------------------------------
try:
logger.info("Starting...")
out_dir=os.path.join('/'.join(models_dir.split('/')[:-1]),'output',str(uuid.uuid4().hex))
os.makedirs(out_dir,exist_ok=True)
logger.info("Overriding out_dir to "+str(out_dir))
logger.info("Contents of out_dir: "+str(os.listdir(path=str(out_dir))))
#os.environ['JAVA_HOME']='/usr/local/jre'
#os.environ['JRE_HOME']='/usr/local/jre'
ncpu=1
import subprocess
try: ncpu=int(subprocess.check_output("/usr/bin/nproc"))
except: pass
if ncpu>max_cpu: ncpu=max_cpu
logger.info("Using {} cores".format(str(ncpu)))
logger.info("Loading dependencies...")
from parcel.feature.segmentation.segmentation_filebased import main_segmentation
from asb_usecases.logic.common import polygon2bboxwindow
logger.info("Loading input jsons...")
tilesAndShapes=json.loads(tilesAndShapes_json)
daterange=json.loads(daterange_json)
logger.info("Computing...")
segmentedfiles=[]
for i in range(len(tilesAndShapes)):
workdir=os.path.join(str(out_dir),str(i))
#workdir=os.path.join(models_dir,str(i))
os.makedirs(workdir,exist_ok=True)
iresults={}
for tile,shape in tilesAndShapes[i].items():
# TODO this needs to be merged with segmentation, not to glob twice
bbox=polygon2bboxwindow.compute(collection_dir+'/*/01/*/*'+tile+'*/**/*'+tile+'*.tif', shape)
outimg=main_segmentation(
imgdir=collection_dir,
maskdir=os.path.join(models_dir,'convmasks10m'),
modeldir=os.path.join(models_dir,'models'),
outdir=workdir,
tiles=tile,
startdate=daterange['start'],
enddate=daterange['end'],
maxcloudcover=int(100),
bbox=bbox,
#nwindowspermodel=5,
ncpu=ncpu
)
iresults[tile]=outimg
segmentedfiles.append(iresults)
logger.info("Contents of out_dir: "+str(os.listdir(path=str(out_dir))))
logger.info("Dumping results into json...")
segmentedfiles_json=json.dumps(segmentedfiles)
logger.info("Finished...")
except Exception as e:
logger.exception("Exception in wrapper.")
logging.shutdown()
stream.flush()
exceptionLog=stream.getvalue()
# ----------------------------------------------------------------------------------
# The wrapper must return a dictionary that contains the output parameter values.
# ----------------------------------------------------------------------------------
return {
"segmentedfiles_json": segmentedfiles_json,
"exceptionLog": exceptionLog
}
|
python
|
from spec2wav.modules import Generator, Audio2Mel, Audio2Cqt
from pathlib import Path
import yaml
import torch
import os
def get_default_device():
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
def load_model(spec2wav_path, device=get_default_device()):
"""
Args:
spec2wav_path (str or Path): path to the root folder of dumped text2mel
device (str or torch.device): device to load the model
"""
root = Path(spec2wav_path)
with open(root / "args.yml", "r") as f:
args = yaml.load(f, Loader=yaml.FullLoader)
netG = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
netG.load_state_dict(torch.load(root / "best_netG.pt", map_location=device))
return netG
class MelVocoder:
def __init__(
self,
path,
device=get_default_device(),
github=False,
model_name="multi_speaker",
):
#self.fft = Audio2Mel().to(device)
self.fft = Audio2Cqt().to(device)
if github:
netG = Generator(80, 32, 3).to(device)
root = Path(os.path.dirname(__file__)).parent
netG.load_state_dict(
torch.load(root / f"models/{model_name}.pt", map_location=device)
)
self.spec2wav = netG
else:
self.spec2wav = load_model(path, device)
self.device = device
def __call__(self, audio):
"""
        Performs audio to spectrogram conversion using the configured frontend
        (currently Audio2Cqt; see Audio2Mel/Audio2Cqt in spec2wav/modules.py)
        Args:
            audio (torch.tensor): PyTorch tensor containing audio (batch_size, timesteps)
        Returns:
            torch.tensor: log-spectrogram computed on input audio (batch_size, n_bins, timesteps)
"""
return self.fft(audio.unsqueeze(1).to(self.device))
def inverse(self, mel):
"""
Performs mel2audio conversion
Args:
mel (torch.tensor): PyTorch tensor containing log-mel spectrograms (batch_size, 80, timesteps)
Returns:
torch.tensor: Inverted raw audio (batch_size, timesteps)
"""
with torch.no_grad():
return self.spec2wav(mel.to(self.device)).squeeze(1)
|
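A usage sketch for the wrapper above, assuming a trained checkpoint folder (containing args.yml and best_netG.pt) and torchaudio for I/O; the import path and file names are placeholders, and the inversion only makes sense if the generator was trained on the same spectrogram frontend.
python
|
import torchaudio
from spec2wav.interface import MelVocoder  # placeholder import path for the class above
wav, sr = torchaudio.load("example.wav")            # (channels, timesteps); path is a placeholder
wav = wav.mean(dim=0, keepdim=True)                 # mix down to mono -> (1, timesteps)
vocoder = MelVocoder(path="checkpoints/spec2wav_run")
spec = vocoder(wav)                                 # (1, n_bins, frames) log-spectrogram
recon = vocoder.inverse(spec)                       # (1, timesteps) reconstructed waveform
torchaudio.save("reconstructed.wav", recon.cpu(), sr)
|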
python
|
from app import app
app.run('0.0.0.0')
|
python
|
import concurrent.futures as cf
import numpy as np
from multiprocessing import cpu_count
from tqdm import tqdm
from worms.util import jit, InProcessExecutor
from worms.search.result import ResultJIT
from worms.clashgrid import ClashGrid
def prune_clashes(
ssdag,
crit,
rslt,
max_clash_check=-1,
ca_clash_dis=4.0,
parallel=False,
approx=0,
verbosity=0,
merge_bblock=None,
pbar=False,
pbar_interval=10.0,
context_structure=None,
**kw,
):
# print('todo: clash check should handle symmetry')
if max_clash_check == 0:
return rslt
max_clash_check = min(max_clash_check, len(rslt.idx))
if max_clash_check < 0:
max_clash_check = len(rslt.idx)
if not pbar:
print(
f"mbb{f'{merge_bblock:04}' if merge_bblock else 'none'} checking clashes",
max_clash_check,
"of",
len(rslt.err),
)
verts = tuple(ssdag.verts)
# exe = cf.ProcessPoolExecutor if parallel else InProcessExecutor
exe = InProcessExecutor
with exe() as pool:
futures = list()
for i in range(max_clash_check):
dirns = tuple([v.dirn for v in verts])
iress = tuple([v.ires for v in verts])
chains = tuple([
ssdag.bbs[k][verts[k].ibblock[rslt.idx[i, k]]].chains for k in range(len(ssdag.verts))
])
ncacs = tuple([
ssdag.bbs[k][verts[k].ibblock[rslt.idx[i, k]]].ncac for k in range(len(ssdag.verts))
])
if isinstance(context_structure, ClashGrid):
clash = False
for pos, ncac in zip(rslt.pos[i], ncacs):
xyz = pos @ ncac[..., None]
if context_structure.clashcheck(xyz.squeeze()):
clash = True
break
if clash:
continue
futures.append(
pool.submit(
_check_all_chain_clashes,
dirns=dirns,
iress=iress,
idx=rslt.idx[i],
pos=rslt.pos[i],
chn=chains,
ncacs=ncacs,
thresh=ca_clash_dis * ca_clash_dis,
approx=approx,
))
futures[-1].index = i
if pbar:
desc = "checking clashes "
if merge_bblock is not None and merge_bblock >= 0:
desc = f"{desc} mbb{merge_bblock:04d}"
if merge_bblock is None:
merge_bblock = 0
futures = tqdm(
cf.as_completed(futures),
desc=desc,
total=len(futures),
mininterval=pbar_interval,
position=merge_bblock + 1,
)
ok = np.zeros(max_clash_check, dtype="?")
for f in futures:
ok[f.index] = f.result()
return ResultJIT(
rslt.pos[:max_clash_check][ok],
rslt.idx[:max_clash_check][ok],
rslt.err[:max_clash_check][ok],
rslt.stats,
)
@jit
def _chain_bounds(dirn, ires, chains, spliced_only=False, trim=8):
"return bounds for only spliced chains, with spliced away sequence removed"
chains = np.copy(chains)
bounds = []
seenchain = -1
if dirn[0] < 2:
ir = ires[0]
for i in range(len(chains)):
lb, ub = chains[i]
if lb <= ir < ub:
chains[i, dirn[0]] = ir + trim * (1, -1)[dirn[0]]
bounds.append((chains[i, 0], chains[i, 1]))
seenchain = i
if dirn[1] < 2:
ir = ires[1]
for i in range(len(chains)):
lb, ub = chains[i]
if lb <= ir < ub:
chains[i, dirn[1]] = ir + trim * (1, -1)[dirn[1]]
if seenchain == i:
if dirn[1]:
tmp = bounds[0][0], chains[i, 1]
else:
tmp = chains[i, 0], bounds[0][1]
# bounds[0][dirn[1]] = chains[i, dirn[1]]
bounds[0] = tmp
else:
bounds.append((chains[i, 0], chains[i, 1]))
if spliced_only:
return np.array(bounds, dtype=np.int32)
else:
return chains
@jit
def _has_ca_clash(position, ncacs, i, ichntrm, j, jchntrm, thresh, step=1):
for ichain in range(len(ichntrm)):
ilb, iub = ichntrm[ichain]
for jchain in range(len(jchntrm)):
jlb, jub = jchntrm[jchain]
for ir in range(ilb, iub, step):
ica = position[i] @ ncacs[i][ir, 1]
for jr in range(jlb, jub, step):
jca = position[j] @ ncacs[j][jr, 1]
d2 = np.sum((ica - jca)**2)
if d2 < thresh:
return True
return False
@jit
def _check_all_chain_clashes(dirns, iress, idx, pos, chn, ncacs, thresh, approx):
pos = pos.astype(np.float64)
for step in (3, 1): # 20% speedup.... ug... need BVH...
# only adjacent verts, only spliced chains
for i in range(len(dirns) - 1):
ichn = _chain_bounds(dirns[i], iress[i][idx[i]], chn[i], 1, 8)
for j in range(i + 1, i + 2):
jchn = _chain_bounds(dirns[j], iress[j][idx[j]], chn[j], 1, 8)
if _has_ca_clash(pos, ncacs, i, ichn, j, jchn, thresh, step):
return False
if step == 1 and approx == 2:
return True
# only adjacent verts, all chains
for i in range(len(dirns) - 1):
ichn = _chain_bounds(dirns[i], iress[i][idx[i]], chn[i], 0, 8)
for j in range(i + 1, i + 2):
jchn = _chain_bounds(dirns[j], iress[j][idx[j]], chn[j], 0, 8)
if _has_ca_clash(pos, ncacs, i, ichn, j, jchn, thresh, step):
return False
if step == 1 and approx == 1:
return True
# all verts, all chains
for i in range(len(dirns) - 1):
ichn = _chain_bounds(dirns[i], iress[i][idx[i]], chn[i], 0, 8)
for j in range(i + 1, len(dirns)):
jchn = _chain_bounds(dirns[j], iress[j][idx[j]], chn[j], 0, 8)
if _has_ca_clash(pos, ncacs, i, ichn, j, jchn, thresh, step):
return False
return True
|
python
|
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from ResNet import ResNet
import argparse
from utils import *
import time
from common.utils import allocate_gpu
import tensorflow as tf  # used below for tf.ConfigProto/tf.Session; may also be re-exported by `from utils import *`
def find_next_time(path_list, default=-1):
    """Pick the run index: -1 -> one past the latest run, -2 -> reuse the latest run, any other value -> use it as-is."""
    if default > -1:
        return default
    run_times = [int(path.split('_')[0]) for path in path_list]
    last_time = max(run_times) if run_times else 0
    if default == -1:
        return last_time + 1
    elif default == -2:
        return last_time
    return default
"""parsing and configuration"""
def parse_args():
GPU = -1
GPU_ID = allocate_gpu(GPU)
print('Using GPU %d'%GPU_ID)
gpuNo = 'gpu10_%d'%GPU_ID
optimizer_name='adashift' #adam adashift amsgrad sgd
lr=0.01
beta1=0.9
beta2=0.999
keep_num=10
pred_g_op='max'
epoch_num = 50
desc = "Tensorflow implementation of ResNet"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--phase', type=str, default='train', help='train or test ?')
parser.add_argument('--dataset', type=str, default='cifar10', help='[cifar10, mnist, fashion-mnist, tiny')
parser.add_argument('--epoch', type=int, default=epoch_num, help='The number of epochs to run')
parser.add_argument('--test_span', type=int, default=20, help='step interval for test')
parser.add_argument('--batch_size', type=int, default=128, help='The size of batch per gpu')
parser.add_argument('--res_n', type=int, default=18, help='18, 34, 50, 101, 152')
parser.add_argument('--gpuNo', type=str, default=gpuNo, help='which gpu to use')
    parser.add_argument('--run_time', type=int, default=-1, help="which run index to use in the experiment identifier: -1 automatically adds one to the last run, -2 keeps the last record")
# parser.add_argument('--GPU', type=int, default=-1, help="which gpu to use")
# parser.add_argument('--T', type=str, default=T, help='identifier of experiment')
parser.add_argument('--optimizer_name', type=str, default=optimizer_name, help='[sgd, adam, amsgrad, adashift')
parser.add_argument('--lr', type=float, default=lr, help='initial learning rate')
parser.add_argument('--beta1', type=float, default=beta1, help='beta1 for optimizer')
parser.add_argument('--beta2', type=float, default=beta2, help='beta2 for optimizer')
parser.add_argument('--epsilon', type=float, default=1e-8, help='epsilon for optimizer')
parser.add_argument('--keep_num', type=int, default=keep_num, help='keep_num for adashift optimizer')
parser.add_argument('--pred_g_op', type=str, default=pred_g_op, help='pred_g_op for adashift optimizer')
parser.add_argument('--checkpoint_dir', type=str, default="",
help='Directory name to save the checkpoints')
parser.add_argument('--log_dir', type=str, default="",
help='Directory name to save training logs')
return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
# # --checkpoint_dir
# check_folder(args.checkpoint_dir)
#
# # --result_dir
# check_folder(args.log_dir)
# --epoch
try:
assert args.epoch >= 1
except:
print('number of epochs must be larger than or equal to one')
# --batch_size
try:
assert args.batch_size >= 1
except:
print('batch size must be larger than or equal to one')
return args
if __name__ == '__main__':
# parse arguments
args = parse_args()
if not os.path.exists('./logs'):
os.makedirs('./logs')
# return args
if args is None:
exit()
run_time=find_next_time(os.listdir('./logs'),args.run_time)
T='%d_%s_%s_%d_%.3f_%.2f_%.3f'%(run_time,args.optimizer_name,args.pred_g_op,args.keep_num,args.lr,args.beta1,args.beta2)
args.T = T
print('Check params: %s'%T)
if args.run_time ==-1:
time.sleep(6)
log_dir='./logs/%s'%T
if not os.path.exists('./logs'):
os.makedirs('./logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
checkpoint_dir='./checkpoints/model_%s'%T
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
args.log_dir = log_dir
args.checkpoint_dir = checkpoint_dir
# open session
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=4, inter_op_parallelism_threads=4)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
cnn = ResNet(sess, args)
# build graph
cnn.build_model()
# show network architecture
show_all_variables()
if args.phase == 'train' :
# launch the graph in a session
result=cnn.train()
print(" [:)] Training finished! \n")
cnn.test()
print(" [:)] Test finished!")
if args.phase == 'test' :
cnn.test()
print(" [:)] Test finished!")
|
python
|
#!/usr/bin/env python
# Copyright (C) 2015 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import os
def lookFor(relativePath):
return os.path.isfile(sys.argv[1] + relativePath)
def fileContains(relativePath, regexp):
with open(sys.argv[1] + relativePath) as file:
for line in file:
if regexp.search(line):
return True
return False
print("/* Identifying AVFoundation Support */")
if lookFor("/include/AVFoundationCF/AVCFBase.h"):
print("#define HAVE_AVCF 1")
if lookFor("/include/AVFoundationCF/AVCFPlayerItemLegibleOutput.h"):
print("#define HAVE_AVCF_LEGIBLE_OUTPUT 1")
if lookFor("/include/AVFoundationCF/AVCFAssetResourceLoader.h"):
print("#define HAVE_AVFOUNDATION_LOADER_DELEGATE 1")
if lookFor("/include/AVFoundationCF/AVCFAsset.h"):
regexp = re.compile("AVCFURLAssetIsPlayableExtendedMIMEType")
if fileContains("/include/AVFoundationCF/AVCFAsset.h", regexp):
print("#define HAVE_AVCFURL_PLAYABLE_MIMETYPE 1")
if lookFor("/include/QuartzCore/CACFLayer.h"):
regexp = re.compile("CACFLayerSetContentsScale")
if fileContains("/include/QuartzCore/CACFLayer.h", regexp):
print("#define HAVE_CACFLAYER_SETCONTENTSSCALE 1")
if lookFor("/include/AVFoundationCF/AVCFPlayerItemLegibleOutput.h"):
regexp = re.compile("kAVCFPlayerItemLegibleOutput_CallbacksVersion_2")
if fileContains("/include/AVFoundationCF/AVCFPlayerItemLegibleOutput.h", regexp):
print("#define HAVE_AVCFPLAYERITEM_CALLBACK_VERSION_2 1")
|
python
|
import os,sys
model = sys.argv[1]
stamp = int(sys.argv[2])
lr = float(sys.argv[3])
dropout = float(sys.argv[4])
bsize = int(sys.argv[5])
filein = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_x_test.npy'
fileout = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_x_test_' + str(stamp) + '.npy'
os.rename(filein, fileout)
filein = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_y_test.npy'
fileout = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_y_test_' + str(stamp) + '.npy'
os.rename(filein, fileout)
|
python
|
from tkinter import *
from tkinter.ttk import Combobox
from qiskit import IBMQ
import qiskit
import math
# Quantum random number generation: Hadamard gates + measurement on a qiskit backend
_backend = qiskit.BasicAer.get_backend('qasm_simulator')
_circuit = None
_bitCache = ''
def setqbits(n):
global _circuit
qr = qiskit.QuantumRegister(n)
cr = qiskit.ClassicalRegister(n)
_circuit = qiskit.QuantumCircuit(qr, cr)
_circuit.h(qr) # Apply Hadamard gate to qubits
_circuit.measure(qr, cr) # Collapses qubit to either 1 or 0 w/ equal prob.
setqbits(8) # Default Circuit is 8 Qubits
def set_backend(b='qasm_simulator'):
global _backend
if b == 'ibmqx4' or b == 'ibmqx5':
_backend = IBMQ.get_backend(b)
setqbits(5)
elif b == 'ibmq_16_melbourne':
_backend = IBMQ.get_backend(b)
setqbits(16)
elif b == 'ibmq_qasm_simulator':
_backend = IBMQ.get_backend(b)
setqbits(32)
else:
_backend = qiskit.BasicAer.get_backend('qasm_simulator')
setqbits(8)
# Strips QISKit output to just a bitstring.
def bitcount(counts):
return [k for k, v in counts.items() if v == 1][0]
# Populates the bitCache with at least n more bits.
def _request_bits(n):
global _bitCache
iterations = math.ceil(n / _circuit.width())
for _ in range(iterations):
# Create new job and run the quantum circuit
job = qiskit.execute(_circuit, _backend, shots=1)
_bitCache += bitcount(job.result().get_counts())
# Returns a random n-bit string by popping n bits from bitCache.
def bitstring(n):
global _bitCache
if len(_bitCache) < n:
_request_bits(n - len(_bitCache))
bitString = _bitCache[0:n]
_bitCache = _bitCache[n:]
return bitString
# Returns a random integer between and including [min, max].
# Running time is probabilistic but complexity is still O(n)
def randint(min, max):
delta = max - min
n = math.floor(math.log(delta, 2)) + 1
result = int(bitstring(n), 2)
while (result > delta):
result = int(bitstring(n), 2)
return result + min
def roll(nb_dice, nb_face):
roll_list = []
for i in range(nb_dice):
roll_list.append(randint(1, nb_face))
return roll_list
root = Tk()
class App:
# define the widgets
def __init__(self, master):
self.title = Label(master, fg="black", text="The Quantum Dice", font=('arial', 40))
self.nb_dices_entry = Combobox(master,
values=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
self.nb_faces_entry = Combobox(master, values=[4, 6, 8, 10, 12, 20, 100])
self.mod_entry = Combobox(master,
values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
        self.nb_dices_label = Label(master, fg="black", text="How many dice? ", font=('arial', 20))
        self.nb_faces_label = Label(master, fg="black", text="How many sides?", font=('arial', 20))
self.mod_label = Label(master, fg="black", text="Would you like to include a modifier?", font=('arial', 20))
self.generate_button = Button(master, text="ROLL DICE", command=self.get_output)
self.list_output_int = Label(master, fg="black", bg="white", text="? ? ?") # TODO: add text function
self.mod_output_int = Label(master, fg="black", bg="white", text="0")
self.final_output_int = Label(master, fg="black", bg="white", text="0")
self.space = Label(master, fg="black", bg="white", text="")
        self.list_output_lab = Label(master, fg="black", bg="white", text="Dice Thrown: ") # TODO: add text function
self.mod_output_lab = Label(master, fg="black", bg="white", text="Modifier: ")
self.final_output_lab = Label(master, fg="black", bg="white", text="Final: ")
# Call the widgets
self.title.grid(row=0, columnspan=3)
self.nb_dices_entry.grid(row=2, column=1)
self.nb_dices_entry.current(3)
self.nb_dices_label.grid(row=2, sticky=E)
self.nb_faces_entry.grid(row=3, column=1)
self.nb_faces_entry.current(4)
self.nb_faces_label.grid(row=3, sticky=E)
self.mod_entry.grid(row=4, column=1)
self.mod_entry.current(0)
self.mod_label.grid(row=4, sticky=E)
self.generate_button.grid(row=6, columnspan=3)
self.space.grid(row=7, columnspan=3)
self.list_output_lab.grid(row=8, sticky=E)
self.list_output_int.grid(row=8, column=1)
self.mod_output_lab.grid(row=9, sticky=E)
self.mod_output_int.grid(row=9, column=1)
self.final_output_lab.grid(row=10, sticky=E)
self.final_output_int.grid(row=10, column=1)
def get_output(self):
nb_dice = int(self.nb_dices_entry.get())
nb_face = int(self.nb_faces_entry.get())
output = roll(nb_dice, nb_face)
mod = int(self.mod_entry.get())
final = sum(output) + mod
self.list_output_int["text"] = output
self.mod_output_int["text"] = mod
self.final_output_int["text"] = final
app = App(root)
root.mainloop()
|
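The RNG helpers above can also be exercised without the Tk window; a small headless sketch, assuming the functions are placed in (or imported from) a module that does not start the GUI main loop:
python
|
# Headless use of the quantum RNG helpers defined above.
set_backend()            # default: local qasm_simulator with an 8-qubit circuit
print(bitstring(16))     # 16 quantum-random bits as a '0'/'1' string
print(randint(1, 20))    # one d20 roll
print(roll(4, 6))        # four d6 rolls, e.g. [3, 1, 6, 4]
|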
python
|
from selenium import webdriver
from bs4 import BeautifulSoup
import time
driver = webdriver.PhantomJS()
client_info_search_url = "https://xclient.info/search/s/"
app_list = ["cleanmymac", "alfred", "betterzip", "beyond compare", "iina", "Navicat Premium", "charles", "DaisyDisk",
"paw", "Typora"]
class update():
def execute(self):
for app_name in app_list:
            # app_name = input("Enter the app name: ")
driver.get(client_info_search_url + app_name)
tags = BeautifulSoup(driver.page_source, 'lxml').findAll("div", class_="main")
for tag in tags:
name = tag.a["title"]
if app_name.lower() in name.lower():
name_list = name.split(" ")
name_list.pop(len(name_list) - 1)
name_version = ""
for item in name_list:
name_version += item
href = tag.a["href"] + "#versions"
date = tag.find("span", class_="item date").text
print(date + " - " + name_version + " - " + href)
time.sleep(2)
update().execute()
|
python
|
__author__='Pablo Leal'
import argparse
from keras.callbacks import LambdaCallback
import trainer.board as board
import trainer.loader as loader
import trainer.modeller as modeller
import trainer.saver as saver
from trainer.constans import BATCH_SIZE, CHECKPOINT_PERIOD
from trainer.constans import EPOCHS
from trainer.constans import PREDICTION_LENGTH
from trainer.constans import WINDOW_LENGTH
def saveModelToCloud(epoch, period=1):
    # Persist a checkpoint every `period` epochs via the imported trainer.saver module.
    if epoch % period == 0:
        saver.saveModelToCloud(model, pathToJobDir + '/epochs_' + jobName, '{:03d}'.format(epoch))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-train-file',
help='GCS or local paths to training data',
required=True
)
parser.add_argument(
'-job-name',
help='GCS to write checkpoints and export models',
required=True
)
parser.add_argument(
'-job-dir',
help='GCS to write checkpoints and export models',
required=True
)
args=parser.parse_args()
arguments = args.__dict__
pathToJobDir = arguments.pop('job_dir')
jobName = arguments.pop('job_name')
pathToData = arguments.pop('train_file')
trainingDataDict, trainingLabelsDict, testingDataDict, testingLabelsDict = \
loader.loadObjectFromPickle(pathToData)
model = modeller.buildModel(WINDOW_LENGTH - PREDICTION_LENGTH, PREDICTION_LENGTH)
epochCallback = LambdaCallback (on_epoch_end=lambda epoch, logs: saveModelToCloud(epoch, CHECKPOINT_PERIOD))
model.fit(
[
trainingDataDict["weightedAverage"],
trainingDataDict["volume"],
],
[
trainingLabelsDict["weightedAverage"]
],
validation_data=(
[
testingDataDict["weightedAverage"],
testingDataDict["volume"],
],
[
testingLabelsDict["weightedAverage"]
]),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
shuffle=True,
        callbacks=[
board.createTensorboardConfig(pathToJobDir + "/logs"),
epochCallback
])
    saver.saveModelToCloud(model, pathToJobDir)
|
python
|
# -*- coding: utf-8 -*-
"""
awsecommerceservice
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class ItemSearchRequest(object):
"""Implementation of the 'ItemSearchRequest' model.
TODO: type model description here.
Attributes:
actor (string): TODO: type description here.
artist (string): TODO: type description here.
availability (AvailabilityEnum): TODO: type description here.
audience_rating (list of AudienceRatingEnum): TODO: type description
here.
author (string): TODO: type description here.
brand (string): TODO: type description here.
browse_node (string): TODO: type description here.
composer (string): TODO: type description here.
condition (ConditionEnum): TODO: type description here.
conductor (string): TODO: type description here.
director (string): TODO: type description here.
item_page (int): TODO: type description here.
keywords (string): TODO: type description here.
manufacturer (string): TODO: type description here.
maximum_price (int): TODO: type description here.
merchant_id (string): TODO: type description here.
minimum_price (int): TODO: type description here.
min_percentage_off (int): TODO: type description here.
music_label (string): TODO: type description here.
orchestra (string): TODO: type description here.
power (string): TODO: type description here.
publisher (string): TODO: type description here.
related_item_page (object): TODO: type description here.
relationship_type (list of string): TODO: type description here.
response_group (list of string): TODO: type description here.
search_index (string): TODO: type description here.
sort (string): TODO: type description here.
title (string): TODO: type description here.
release_date (string): TODO: type description here.
include_reviews_summary (string): TODO: type description here.
truncate_reviews_at (int): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"actor":'Actor',
"artist":'Artist',
"availability":'Availability',
"audience_rating":'AudienceRating',
"author":'Author',
"brand":'Brand',
"browse_node":'BrowseNode',
"composer":'Composer',
"condition":'Condition',
"conductor":'Conductor',
"director":'Director',
"item_page":'ItemPage',
"keywords":'Keywords',
"manufacturer":'Manufacturer',
"maximum_price":'MaximumPrice',
"merchant_id":'MerchantId',
"minimum_price":'MinimumPrice',
"min_percentage_off":'MinPercentageOff',
"music_label":'MusicLabel',
"orchestra":'Orchestra',
"power":'Power',
"publisher":'Publisher',
"related_item_page":'RelatedItemPage',
"relationship_type":'RelationshipType',
"response_group":'ResponseGroup',
"search_index":'SearchIndex',
"sort":'Sort',
"title":'Title',
"release_date":'ReleaseDate',
"include_reviews_summary":'IncludeReviewsSummary',
"truncate_reviews_at":'TruncateReviewsAt'
}
def __init__(self,
actor=None,
artist=None,
availability=None,
audience_rating=None,
author=None,
brand=None,
browse_node=None,
composer=None,
condition=None,
conductor=None,
director=None,
item_page=None,
keywords=None,
manufacturer=None,
maximum_price=None,
merchant_id=None,
minimum_price=None,
min_percentage_off=None,
music_label=None,
orchestra=None,
power=None,
publisher=None,
related_item_page=None,
relationship_type=None,
response_group=None,
search_index=None,
sort=None,
title=None,
release_date=None,
include_reviews_summary=None,
truncate_reviews_at=None):
"""Constructor for the ItemSearchRequest class"""
# Initialize members of the class
self.actor = actor
self.artist = artist
self.availability = availability
self.audience_rating = audience_rating
self.author = author
self.brand = brand
self.browse_node = browse_node
self.composer = composer
self.condition = condition
self.conductor = conductor
self.director = director
self.item_page = item_page
self.keywords = keywords
self.manufacturer = manufacturer
self.maximum_price = maximum_price
self.merchant_id = merchant_id
self.minimum_price = minimum_price
self.min_percentage_off = min_percentage_off
self.music_label = music_label
self.orchestra = orchestra
self.power = power
self.publisher = publisher
self.related_item_page = related_item_page
self.relationship_type = relationship_type
self.response_group = response_group
self.search_index = search_index
self.sort = sort
self.title = title
self.release_date = release_date
self.include_reviews_summary = include_reviews_summary
self.truncate_reviews_at = truncate_reviews_at
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
actor = dictionary.get('Actor')
artist = dictionary.get('Artist')
availability = dictionary.get('Availability')
audience_rating = dictionary.get('AudienceRating')
author = dictionary.get('Author')
brand = dictionary.get('Brand')
browse_node = dictionary.get('BrowseNode')
composer = dictionary.get('Composer')
condition = dictionary.get('Condition')
conductor = dictionary.get('Conductor')
director = dictionary.get('Director')
item_page = dictionary.get('ItemPage')
keywords = dictionary.get('Keywords')
manufacturer = dictionary.get('Manufacturer')
maximum_price = dictionary.get('MaximumPrice')
merchant_id = dictionary.get('MerchantId')
minimum_price = dictionary.get('MinimumPrice')
min_percentage_off = dictionary.get('MinPercentageOff')
music_label = dictionary.get('MusicLabel')
orchestra = dictionary.get('Orchestra')
power = dictionary.get('Power')
publisher = dictionary.get('Publisher')
related_item_page = dictionary.get('RelatedItemPage')
relationship_type = dictionary.get('RelationshipType')
response_group = dictionary.get('ResponseGroup')
search_index = dictionary.get('SearchIndex')
sort = dictionary.get('Sort')
title = dictionary.get('Title')
release_date = dictionary.get('ReleaseDate')
include_reviews_summary = dictionary.get('IncludeReviewsSummary')
truncate_reviews_at = dictionary.get('TruncateReviewsAt')
# Return an object of this model
return cls(actor,
artist,
availability,
audience_rating,
author,
brand,
browse_node,
composer,
condition,
conductor,
director,
item_page,
keywords,
manufacturer,
maximum_price,
merchant_id,
minimum_price,
min_percentage_off,
music_label,
orchestra,
power,
publisher,
related_item_page,
relationship_type,
response_group,
search_index,
sort,
title,
release_date,
include_reviews_summary,
truncate_reviews_at)
|
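A brief sketch of round-tripping the generated model through its dictionary mapping, assuming the class above is importable; the payload keys follow the `_names` mapping and the values are illustrative.
python
|
# Build a request from an API-style dictionary and read the values back as attributes.
payload = {"Keywords": "science fiction", "SearchIndex": "Books", "ItemPage": 2, "Sort": "salesrank"}
request = ItemSearchRequest.from_dictionary(payload)
print(request.keywords, request.search_index, request.item_page, request.sort)
print(request.actor)  # keys missing from the payload default to None
|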
python
|
a, b, c = map(int, input().split())
print((a+b)%c)
print((a%c+b%c)%c)
print((a*b)%c)
print((a%c*b%c)%c)
|
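The snippet above reads three integers and prints each result twice to illustrate two modular-arithmetic identities; a worked check with arbitrarily chosen numbers makes the equivalence explicit:
python
|
# With a=17, b=23, c=5: (17+23) % 5 == ((17%5) + (23%5)) % 5, and likewise for the product.
a, b, c = 17, 23, 5
assert (a + b) % c == (a % c + b % c) % c == 0
assert (a * b) % c == ((a % c) * (b % c)) % c == 1
print("modular addition and multiplication identities hold")
|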
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
from __future__ import unicode_literals
#===============================================================================
def read_input(strip=True):
return raw_input().strip() if strip else raw_input()
def read_input_multi(strip=True):
return read_input(strip).split()
def read_int():
return int(read_input())
def read_int_multi():
return [int(s) for s in read_input_multi()]
def print_solution(i, solution):
print('Case #{}: {}'.format(i, solution))
#===============================================================================
def solve_matrix(n, soldier_lists):
solution = []
used_edges = []
for iteration in xrange(n):
valid_soldiers = [(i, l) for i, l in enumerate(soldier_lists) if i not in used_edges]
# print valid_soldiers
top_left = min([min([x for j, x in enumerate(l) if j >= iteration]) for i, l in valid_soldiers])
#print("top: {}".format(top_left))
edges = [l for i, l in valid_soldiers if l[iteration] == top_left]
used_edges += [i for i, l in valid_soldiers if l[iteration] == top_left]
if len(edges) == 2:
edge_heights = edges[0] + edges[1]
# print edge_heights
for soldiers in soldier_lists:
value = soldiers[iteration]
# print "value: " + str(value)
edge_heights.remove(value)
solution.append(edge_heights[0])
else:
solution.append(edges[0][iteration])
return ' '.join([str(x) for x in solution])
#------------------------------------------------------------------------------
def solve():
n = read_int()
num_lists = 2 * n - 1
soldier_lists = [read_int_multi() for _ in xrange(num_lists)]
line = solve_matrix(n, soldier_lists)
return line
#===============================================================================
if __name__ == '__main__':
test_cases = read_int()
for t in xrange(test_cases):
solution = solve()
print_solution(t + 1, solution)
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import re
from typing import Any
import pytest
from _pytest.python_api import RaisesContext
from omegaconf import DictConfig, OmegaConf
from hydra._internal import utils
from hydra._internal.utils import _locate
from hydra.types import ObjectConf
from tests import AClass, Adam, AnotherClass, ASubclass, NestingClass, Parameters
@pytest.mark.parametrize( # type: ignore
"matrix,expected",
[
([["a"]], [1]),
([["a", "bb"]], [1, 2]),
([["a", "bb"], ["aa", "b"]], [2, 2]),
([["a"], ["aa", "b"]], [2, 1]),
([["a", "aa"], ["bb"]], [2, 2]),
([["a"]], [1]),
([["a"]], [1]),
([["a"]], [1]),
],
)
def test_get_column_widths(matrix: Any, expected: Any) -> None:
assert utils.get_column_widths(matrix) == expected
@pytest.mark.parametrize( # type: ignore
"config, expected, warning",
[
pytest.param(
OmegaConf.create({"_target_": "foo"}), "foo", False, id="ObjectConf:target"
),
pytest.param(
OmegaConf.create({"cls": "foo"}), "foo", "cls", id="DictConfig:cls"
),
pytest.param(
OmegaConf.create({"class": "foo"}), "foo", "class", id="DictConfig:class"
),
pytest.param(
OmegaConf.create({"target": "foo"}),
"foo",
"target",
id="DictConfig:target",
),
pytest.param(
OmegaConf.create({"cls": "foo", "_target_": "bar"}),
"bar",
False,
id="DictConfig:cls_target",
),
pytest.param(
OmegaConf.create({"class": "foo", "_target_": "bar"}),
"bar",
"class",
id="DictConfig:class_target",
),
# check that `target` is prioritized over `cls`/`class`.
pytest.param(
OmegaConf.create({"cls": "foo", "_target_": "bar"}),
"bar",
"cls",
id="DictConfig:pri_cls",
),
pytest.param(
OmegaConf.create({"class": "foo", "_target_": "bar"}),
"bar",
"class",
id="DictConfig:pri_class",
),
pytest.param(
OmegaConf.create({"target": "foo", "_target_": "bar"}),
"bar",
"target",
id="DictConfig:pri_target",
),
],
)
def test_get_class_name(
config: DictConfig, expected: Any, warning: Any, recwarn: Any
) -> None:
assert utils._get_cls_name(config) == expected
target_field_deprecated = (
"\nConfig key '{key}' is deprecated since Hydra 1.0 and will be removed in Hydra 1.1."
"\nUse '_target_' instead of '{field}'."
"\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
)
if warning is not False:
assert recwarn[0].category == UserWarning
assert recwarn[0].message.args[0] == target_field_deprecated.format(
key=warning, field=warning
)
# TODO: why?
# @pytest.mark.skipif( # type: ignore
# sys.version_info < (3, 7), reason="requires python3.7"
# )
@pytest.mark.parametrize( # type: ignore
"name,expected",
[
("tests.Adam", Adam),
("tests.Parameters", Parameters),
("tests.AClass", AClass),
("tests.ASubclass", ASubclass),
("tests.NestingClass", NestingClass),
("tests.AnotherClass", AnotherClass),
("", pytest.raises(ImportError, match=re.escape("Empty path"))),
(
"not_found",
pytest.raises(
ImportError, match=re.escape("Error loading module 'not_found'")
),
),
(
"tests.b.c.Door",
pytest.raises(ImportError, match=re.escape("No module named 'tests.b'")),
),
],
)
def test_locate(name: str, expected: Any) -> None:
if isinstance(expected, RaisesContext):
with expected:
_locate(name)
else:
assert _locate(name) == expected
def test_object_conf_deprecated() -> None:
msg = (
"\nObjectConf is deprecated in favor of TargetConf since Hydra 1.0.0rc3 and will be removed in Hydra 1.1."
"\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
)
with pytest.warns(
expected_warning=UserWarning, match=msg,
):
ObjectConf(target="foo")
|
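A small sketch of what the `_locate` cases above exercise: the helper resolves a dotted path to the live Python object, or raises ImportError for empty or unknown paths; `collections.OrderedDict` is just a stand-in target here.
python
|
from hydra._internal.utils import _locate
# Resolve a dotted path to the object it names (module attribute or class).
ordered_dict_cls = _locate("collections.OrderedDict")
print(ordered_dict_cls)  # expected: <class 'collections.OrderedDict'>
|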
python
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'QuestionnaireText'
db.create_table('cmsplugin_questionnairetext', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('depends_on_answer', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='trigger_text', null=True, to=orm['cms_saq.Answer'])),
))
db.send_create_signal('cms_saq', ['QuestionnaireText'])
def backwards(self, orm):
# Deleting model 'QuestionnaireText'
db.delete_table('cmsplugin_questionnairetext')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 23, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms_saq.answer': {
'Meta': {'ordering': "('question', 'order', 'slug')", 'unique_together': "(('question', 'slug'),)", 'object_name': 'Answer'},
'help_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['cms_saq.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.bulkanswer': {
'Meta': {'object_name': 'BulkAnswer', 'db_table': "'cmsplugin_bulkanswer'", '_ormbases': ['cms.CMSPlugin']},
'answer_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.formnav': {
'Meta': {'object_name': 'FormNav', 'db_table': "'cmsplugin_formnav'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'end_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'formnav_ends'", 'null': 'True', 'to': "orm['cms.Page']"}),
'end_page_condition_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms_saq.Question']", 'null': 'True', 'blank': 'True'}),
'end_page_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'end_submission_set': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'next_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'formnav_nexts'", 'null': 'True', 'to': "orm['cms.Page']"}),
'next_page_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'prev_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'formnav_prevs'", 'null': 'True', 'to': "orm['cms.Page']"}),
'prev_page_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'submission_set_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'cms_saq.groupedanswer': {
'Meta': {'ordering': "('group', 'order', 'slug')", 'object_name': 'GroupedAnswer', '_ormbases': ['cms_saq.Answer']},
'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms_saq.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.progressbar': {
'Meta': {'object_name': 'ProgressBar', 'db_table': "'cmsplugin_progressbar'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'count_optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'cms_saq.question': {
'Meta': {'object_name': 'Question', 'db_table': "'cmsplugin_question'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'depends_on_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trigger_questions'", 'null': 'True', 'to': "orm['cms_saq.Answer']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'cms_saq.questionnairetext': {
'Meta': {'object_name': 'QuestionnaireText', 'db_table': "'cmsplugin_questionnairetext'"},
'body': ('django.db.models.fields.TextField', [], {}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'depends_on_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trigger_text'", 'null': 'True', 'to': "orm['cms_saq.Answer']"})
},
'cms_saq.scoresection': {
'Meta': {'ordering': "('order', 'label')", 'object_name': 'ScoreSection'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['cms_saq.SectionedScoring']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.sectionedscoring': {
'Meta': {'object_name': 'SectionedScoring', 'db_table': "'cmsplugin_sectionedscoring'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'cms_saq.submission': {
'Meta': {'ordering': "('submission_set', 'user', 'question')", 'unique_together': "(('question', 'user', 'submission_set'),)", 'object_name': 'Submission'},
'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'submission_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['cms_saq.SubmissionSet']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'saq_submissions'", 'to': "orm['auth.User']"})
},
'cms_saq.submissionset': {
'Meta': {'object_name': 'SubmissionSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'saq_submissions_sets'", 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['cms_saq']
|
python
|
def fibonacci(n):
for i in range(n+1):
fibo = [0, 1]
if i == 0:
print ("fibo( 0 ) = ", 0)
elif i == 1:
print ("fibo( 1 ) = ", 1)
else:
flag = True
for j in range(2, i):
                if flag: # overwrite the older first element with the next value in the sequence
                    fibo[0] = fibo[1] + fibo[0]
                else: # overwrite the older second element with the next value in the sequence
                    fibo[1] = fibo[0] + fibo[1]
                flag = not flag
            print ("fibo(", i, ") = ", fibo[0]+fibo[1])
if __name__ == "__main__":
fibonacci(40)
|
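For comparison, a more conventional iterative Fibonacci that keeps only the last two values; this is a sketch of the same computation the loop above performs, not part of the original file.
python
|
def fib(n):
    # Return the n-th Fibonacci number with fib(0) = 0 and fib(1) = 1.
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
if __name__ == "__main__":
    for i in range(11):
        print("fib(", i, ") =", fib(i))
|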
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Contains function to identify bad channels based on time and freq domain
methods.
authors: Niko Kampel, [email protected]
Praveen Sripad, [email protected]
'''
import numpy as np
import mne
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import euclidean_distances
from .jumeg_utils import check_read_raw
def compute_euclidean_stats(epoch, sensitivity, mode='adaptive',
fraction=None):
'''
Compute the Euclidean matrix along with necessary statistics for data
from one single epoch.
Function can also be used for psd. (generic function)
Parameters
epoch: np.array
The data from which to compute the Euclidean matrices.
sensitivity: float in range of [0,100]
Percentile to compute threshold used for clustering,
which must be between 0 and 100 inclusive.
mode: str
The mode in which to return the statistics results.
Can be 'fixed' for fixed threshold or 'nearest'
for nearest neighbour points.
When a fixed threshold is used, a single percentile based value is
used for all the epochs/windows of the data. If adaptive is chosen,
a threshold value for every epoch is used.
Note: Fixed threshold is currently incompletely implemented and
we do not suggest using it.
fraction: float | None
Ratio of the number of samples to be chosen for clustering.
Returns
    If mode is 'fixed', returns the reduced data and a fixed percentile threshold.
    If mode is 'adaptive', returns the reduced data, the nearest-neighbour
    distances and the per-epoch percentile threshold.
'''
if fraction:
number_of_samples = int(epoch.shape[1]*fraction)
sorted_peaks = np.sort(np.square(np.diff(epoch)), axis=1)
# just keep 1% of the samples
afp = sorted_peaks[:, sorted_peaks.shape[1]-number_of_samples:]
else:
        # do not do reduced sampling for PSDs
afp = epoch # slightly confusing, this part actually handles psd code
mydist = euclidean_distances(afp, afp)
# average_distances = np.average(mydist, axis=1)
if mode == 'adaptive':
# adaptive threshold depending on epochs
nearest_neighbour = np.sort(mydist, axis=1)[:, 1]
selected_threshold = np.percentile(np.tril(mydist), sensitivity)
return afp, nearest_neighbour, selected_threshold
elif mode == 'fixed':
# fixed threshold for all epochs
# not to be used
fixed_threshold = np.percentile(np.tril(mydist), sensitivity)
return afp, fixed_threshold
else:
        raise RuntimeError('Mode should be one of fixed or adaptive')
def clustered_afp(epochs, sensitivity_steps, fraction, mode='adaptive',
min_samples=1, n_jobs = None):
'''
    Perform clustering on differences in the signal from one sample to another.
    This method helps us to identify flux jumps and large spikes in the data.
    Parameters
    epochs: mne.Epochs
    sensitivity_steps: float in range of [0,100]
        Percentile to compute threshold used for clustering signals,
        which must be between 0 and 100 inclusive.
    fraction: float
        Ratio of the number of samples to be chosen for clustering.
min_samples: int
Number of samples to be chosen for DBSCAN clustering.
Returns
    afps: np.array
        Squared sample-to-sample differences (largest fraction per channel)
        used for clustering, one array per epoch.
afp_suspects: list
Suspected bad channels.
afp_nearest_neighbour: list
The nearest neighbour identified before DBSCAN clustering.
zlimit_afp: float
A scaling value used for plotting.
'''
# epochs = epochs.get_data()
afps, afp_suspects, afp_percentiles, afp_nearest_neighbour = [], [], [], []
# statistics for every epoch
for epoch in epochs:
if mode == 'adaptive':
afp, nearest_neighbour, selected_threshold = \
compute_euclidean_stats(epoch, sensitivity_steps, mode='adaptive')
afp_nearest_neighbour.append(nearest_neighbour)
afp_percentiles.append(selected_threshold)
elif mode == 'fixed':
# TODO complete fixed threshold computation
# statistics and clustering for every epoch for fixed threshold
afp, selected_threshold = compute_euclidean_stats(epoch, sensitivity_steps,
mode='fixed')
afp_percentiles.append(selected_threshold)
else:
raise RuntimeError('Mode unknown.')
# do the clustering for every epoch
db = DBSCAN(eps=selected_threshold, min_samples=min_samples,
metric='euclidean',n_jobs = n_jobs).fit(afp)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
suspect = [i for i, x in enumerate(db.labels_) if x]
afps.append(afp)
afp_suspects.append(suspect)
afps = np.asarray(afps)
afp_nearest_neighbour = np.asarray(afp_nearest_neighbour)
    # hack to get a limit for plotting (this is not supposed to be here)
zlimit_afp = np.percentile(afp_percentiles, 50) * 4
return afps, afp_suspects, afp_nearest_neighbour, zlimit_afp
def clustered_psd(epochs, sensitivity_psd, picks, min_samples=1, n_jobs = None):
'''
Perform clustering on PSDs to identify bad channels.
Parameters
epochs: mne.Epochs
sensitivity_psd: float in range of [0,100]
Percentile to compute threshold used for clustering PSDs,
which must be between 0 and 100 inclusive.
picks: list
Picks of the channels to be used.
min_samples: int
Number of samples to be chosen for DBSCAN clustering.
Returns
psds: np.array
Power spectral density values (n_epochs, n_chans, n_freqs)
psd_suspects: list
Suspected bad channels.
psd_nearest_neighbour: list
The nearest neighbour identified before DBSCAN clustering.
zlimit_psd: float
A scaling value used for plotting.
'''
psds, freqs = mne.time_frequency.psd_welch(epochs, fmin=2., fmax=200.,
picks=picks)
psd_percentiles, psd_nearest_neighbour, psd_suspects = [], [], []
for ipsd in psds:
psd, nearest_neighbour, selected_threshold = \
compute_euclidean_stats(ipsd, sensitivity_psd, mode='adaptive')
psd_nearest_neighbour.append(nearest_neighbour)
psd_percentiles.append(selected_threshold)
db = DBSCAN(eps=selected_threshold, min_samples=min_samples,
metric='euclidean', n_jobs = n_jobs).fit(psd)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
suspect = [i for i, x in enumerate(db.labels_) if x]
psd_suspects.append(suspect)
psd_nearest_neighbour = np.asarray(psd_nearest_neighbour)
zlimit_psd = np.percentile(psd_percentiles, 50) * 4
return psds, psd_suspects, psd_nearest_neighbour, zlimit_psd
def make_minimap(picks, afp_suspects, psd_suspects):
'''
    Make a minimap with bad channels identified using time domain and freq
domain methods.
Helper function for plotting the values
'''
# values inside minimap are a workaround for colormap 'brg'
minimap = np.zeros((len(picks), len(afp_suspects))) # 0 if channel is regular
for e in range(0, len(afp_suspects)):
for c in afp_suspects[e]:
minimap[c, e] = 3 # yellow if afp is unusual
for e in range(0, len(afp_suspects)):
for c in psd_suspects[e]:
if minimap[c, e] == 3:
minimap[c, e] = 2 # red if afp+psd is unusual
else:
minimap[c, e] = 1 # purple if psd is unusual
# minimap marker
# coordinates for markers
x_afp, y_afp, x_psd, y_psd, x_both, y_both = [], [], [], [], [], []
for e in range(0, minimap.shape[1]):
for c in range(0, len(minimap)):
if minimap[c, e] == 3: # condition for afp
x_afp.append(e)
y_afp.append(c)
if minimap[c, e] == 1: # condition for psd
x_psd.append(e)
y_psd.append(c)
if minimap[c, e] == 2: # condition for both
x_both.append(e)
y_both.append(c)
return minimap, x_afp, y_afp, x_psd, y_psd, x_both, y_both
def validation_marker(minimap, picks_bad, picks_fp):
'''
Helper function for plotting bad channels identified using time domain (afp)
or freq domain (psd) methods.
Using the validation marker helps compare already marked bad channels with
automatically identified ones for testing purposes.
'''
x_miss, y_miss, x_hit, y_hit, x_fp, y_fp = [], [], [], [], [], []
for e in range(0, minimap.shape[1]):
for c in range(0, len(minimap)):
if c in picks_bad and minimap[c, e] > 0: # condition for hit
x_hit.append(e)
y_hit.append(c)
if c in picks_bad and minimap[c, e] == 0: # condition for miss
x_miss.append(e)
y_miss.append(c)
            if c in picks_fp and minimap[c, e] > 0: # condition for false positive
x_fp.append(e)
y_fp.append(c)
return x_miss, y_miss, x_hit, y_hit, x_fp, y_fp
def plot_autosuggest_summary(afp_nearest_neighbour, psd_nearest_neighbour,
picks, afp_suspects, psd_suspects, picks_bad,
picks_fp, zlimit_afp, zlimit_psd,
epoch_length, marks, validation=False):
'''
Plot showing the automated identification of bad channels using time and
frequency domain methods.
#TODO Improve documentation.
'''
import matplotlib.pyplot as plt
plt.style.use(['seaborn-deep'])
# calculate data for summary_plot
minimap, x_afp, y_afp, x_psd, y_psd, x_both, y_both = \
make_minimap(picks, afp_suspects, psd_suspects)
# calculate validation markers if necessary (for testing purposes only)
if validation:
x_miss, y_miss, x_hit, y_hit, x_fp, y_fp = \
validation_marker(minimap, picks_bad, picks_fp)
# do the actual plotting
summary_plot = plt.figure(figsize=(16, 10))
plt.subplots_adjust(hspace=0.2)
t = np.arange(len(minimap[1]))
# minimap
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=2)
ax1.xaxis.tick_top()
ax1.set_xticks((t))
plt.xticks(t, (t+1)*epoch_length-epoch_length/2) # align minimap with clusterplots
ax1.grid(which='both')
plt.xlim([0, len(t)-1])
plt.ylim([len(minimap), 0])
    plt.yticks(marks, [x+1 for x in marks]) # only tick channels of interest; +1 because numpy and mne coordinates are different
plt.ylabel('channel number')
# plt.xlabel('raw_fname = '+"'"+raw_fname+"'" + ' ; marked_chn = '+str(list(marks)))
ax1.xaxis.set_label_position('top')
#TODO find better way to find zlimit
# zlimit_afp = np.percentile(afp_percentiles, 50) * 4
plt.imshow(np.clip(afp_nearest_neighbour, 0, zlimit_afp).T*-1,
aspect='auto', interpolation='nearest', cmap='Blues')
# mark the default points
plt.scatter(x_afp, y_afp, s=60, marker='o', color='gold')
plt.scatter(x_both, y_both, s=60, marker='o', color='red')
# validation marker
if validation:
plt.scatter(x_miss, y_miss, s=10, marker='s', color='r')
plt.scatter(x_hit, y_hit, s=10, marker='s', color='limegreen')
plt.scatter(x_fp, y_fp, s=10, marker='s', color='gold')
# plot the AFP clustering
ax2 = plt.subplot2grid((4, 1), (2, 0), rowspan=2)
ax2.xaxis.tick_top()
ax2.set_xticks((t))
plt.xticks(t, (t+1)*epoch_length-epoch_length/2) # align minimap with clusterplots
ax2.grid(which='both')
plt.xlim([0, len(t)-1])
plt.ylim([len(minimap), 0])
    plt.yticks(marks, [x+1 for x in marks]) # only tick channels of interest; +1 because numpy and mne coordinates are different
plt.ylabel('channel number')
plt.xlabel('time')
plt.scatter(x_psd, y_psd, s=60, marker='o', color='purple')
plt.scatter(x_both, y_both, s=60, marker='o', color='red')
# validation marker
if validation:
plt.scatter(x_miss, y_miss, s=20, marker='s', color='r')
plt.scatter(x_hit, y_hit, s=20, marker='s', color='limegreen')
plt.scatter(x_fp, y_fp, s=20, marker='s', color='gold')
#TODO find better way to find zlimit
# zlimit_psd = np.percentile(psd_percentiles, 50) * 4
ax2.imshow(np.clip(psd_nearest_neighbour, 0, zlimit_psd).T*-1,
aspect='auto', interpolation='nearest', cmap='Blues')
plt.close()
return summary_plot
def suggest_bads(raw, sensitivity_steps=97, sensitivity_psd=95,
fraction=0.001, epoch_length=None, summary_plot=False,
show_raw=False, n_jobs = 1, validation=True):
'''
Function to suggest bad channels. The bad channels are identified using
time domain methods looking for sharp jumps in short windows of data and
in the frequency domain looking for channels with unusual power
spectral densities.
Note: This function is still in the development stage and contains a lot of
hard coded values.
Parameters
----------
raw: str | mne.io.Raw
Filename or the raw object.
epoch_length: int | None
Length of the window to apply methods on.
summary_plot: bool
Set True to generate a summary plot showing suggested bads.
    sensitivity_steps: float in range of [0, 100]
        Percentile threshold for step detection (AFP); 0 marks all channels,
        100 marks none.
    sensitivity_psd: float in range of [0, 100]
        Percentile threshold for the frequency domain (PSD) analysis;
        0 marks all channels, 100 marks none.
Returns
-------
suggest_bads: list
List of suggested bad channels.
raw: mne.io.Raw
Raw object updated with suggested bad channels.
'''
raw = check_read_raw(raw, preload=False)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
ecg=False, exclude=[])
    # if epoch length is not provided, choose a suitable length
if not epoch_length:
epoch_length = int(raw.n_times/(raw.info['sfreq'] * 20))
print('epoch_length of %d chosen' % epoch_length)
# add 0.01 to avoid 'dropping' of first epoch
events = mne.make_fixed_length_events(raw, 42, start=0.01,
duration=epoch_length)
epochs = mne.Epochs(raw, events, event_id=42, tmin=-epoch_length/2,
tmax=epoch_length/2, picks=picks)
picks_bad = [raw.ch_names.index(l) for l in raw.info['bads']]
# compute differences in time domain to identify abrupt jumps in the data
afps, afp_suspects, afp_nearest_neighbour, zlimit_afp = \
clustered_afp(epochs, sensitivity_steps, fraction, n_jobs = n_jobs)
# compute the psds and do the clustering to identify unusual channels
psds, psd_suspects, psd_nearest_neighbour, zlimit_psd = \
clustered_psd(epochs, sensitivity_psd, picks, n_jobs = n_jobs)
# if any of the channels' psds are all zeros, mark as suspect
zero_suspects = [ind for ind in range(psds.shape[1]) if not np.any(psds[:, ind, :])]
# reduce lists of marked epochs to lists of bad channels
picks_autodetect = \
list(set().union([item for sublist in psd_suspects for item in sublist],
[item for sublist in afp_suspects for item in sublist]))
    # get the bads suggested but not previously marked
picks_fp = [x for x in set(picks_autodetect) if x not in set(picks_bad)]
# marks are all channels of interest, including premarked bad channels
# and zero channels (channel indices)
jumps = list(set([item for sublist in afp_suspects for item in sublist]))
jumps_ch_names = [raw.ch_names[i] for i in jumps]
unusual = list(set([item for sublist in psd_suspects for item in sublist]))
unusual_ch_names = [raw.ch_names[i] for i in unusual]
dead_ch_names = [raw.ch_names[i] for i in zero_suspects]
print("Suggested bads [jumps]:", jumps_ch_names)
print("Suggested bads [unusual]:", unusual_ch_names)
print("Suggested bads [dead]:", dead_ch_names)
marks = list(set(picks_autodetect) | set(picks_bad) | set(zero_suspects))
# show summary plot for enhanced manual inspection
#TODO zero suspects do not have any colour coding for the moment
if summary_plot:
fig = \
plot_autosuggest_summary(afp_nearest_neighbour, psd_nearest_neighbour,
picks, afp_suspects, psd_suspects, picks_bad,
picks_fp, zlimit_afp, zlimit_psd,
epoch_length, marks,
validation=False)
fig.show()
# channel names in str
suggested = [raw.ch_names[i] for i in marks]
# add suggested channels to the raw.info
raw.info['bads'] = suggested
print('Suggested bad channels: ', suggested)
if show_raw:
raw.plot(block=True)
visual = raw.info['bads']
visual.sort()
print('Bad channels after visual inspection: ', visual)
return visual, raw
else:
return suggested, raw
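# Hedged usage sketch (added for illustration): the file name below is an
# assumption; any raw FIF file or preloaded mne Raw object should work.
#     suggested, raw = suggest_bads('sample_audvis_raw.fif', summary_plot=True)
#     print(raw.info['bads'])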
|
python
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from __future__ import with_statement
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from powerdns import PowerDNSCollector
################################################################################
class TestPowerDNSCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PowerDNSCollector', {
'interval': 1,
'bin': 'true',
'use_sudo': False,
})
self.collector = PowerDNSCollector(config, None)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_fake_data(self, publish_mock):
with patch('subprocess.Popen.communicate', Mock(return_value=(
self.getFixture('pdns_control-2.9.22.6-1.el6-A').getvalue(),
''))):
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
with patch('subprocess.Popen.communicate', Mock(return_value=(
self.getFixture('pdns_control-2.9.22.6-1.el6-B').getvalue(),
''))):
self.collector.collect()
metrics = {
'corrupt-packets': 1.0,
'deferred-cache-inserts': 2.0,
'deferred-cache-lookup': 3.0,
'latency': 4.0,
'packetcache-hit': 5.0,
'packetcache-miss': 6.0,
'packetcache-size': 7.0,
'qsize-q': 8.0,
'query-cache-hit': 9.0,
'query-cache-miss': 10.0,
'recursing-answers': 11.0,
'recursing-questions': 12.0,
'servfail-packets': 13.0,
'tcp-answers': 14.0,
'tcp-queries': 15.0,
'timedout-packets': 16.0,
'udp-answers': 17.0,
'udp-queries': 18.0,
'udp4-answers': 19.0,
'udp4-queries': 20.0,
'udp6-answers': 21.0,
'udp6-queries': 22.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
python
|
"""Testing phantombuild."""
import tempfile
from pathlib import Path
import pytest
import phantombuild as pb
from phantombuild.phantombuild import (
CompileError,
HDF5LibraryNotFound,
PatchError,
RepoError,
)
VERSION = '3252f52501cac9565f9bc40527346c0e224757b9'
def test_get_phantom():
"""Test getting Phantom from GitHub."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
pb.get_phantom(path)
(path / '.git/config').unlink()
with pytest.raises(RepoError):
pb.get_phantom(path)
def test_checkout_phantom_version_clean():
"""Test checking out a Phantom version."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
pb.checkout_phantom_version(path=path, version=VERSION)
pb.checkout_phantom_version(path=path, version=VERSION)
def test_checkout_phantom_version_dirty():
"""Test checking out a Phantom version."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
(path / 'src/main/phantom.F90').unlink()
pb.checkout_phantom_version(path=path, version=VERSION)
def test_phantom_patch():
"""Test patching Phantom."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
pb.checkout_phantom_version(path=path, version=VERSION)
patch = Path(__file__).parent / 'stub' / 'test.patch'
pb.patch_phantom(path=path, patch=patch)
kwargs = {'path': path, 'patch': patch}
with pytest.raises(PatchError):
pb.patch_phantom(**kwargs)
def test_build_phantom():
"""Test building Phantom."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
hdf5_path = Path('non_existent_dir')
pb.get_phantom(path)
pb.build_phantom(
path=path,
setup='empty',
system='gfortran',
extra_options={'MAXP': '1000000'},
)
kwargs = {
'path': path,
'setup': 'empty',
'system': 'gfortran',
'hdf5_path': hdf5_path,
}
with pytest.raises(HDF5LibraryNotFound):
pb.build_phantom(**kwargs)
kwargs = {
'path': path,
'setup': 'FakeSetup',
'system': 'gfortran',
}
with pytest.raises(CompileError):
pb.build_phantom(**kwargs)
def test_setup_calculation():
"""Test setting up Phantom calculation."""
with tempfile.TemporaryDirectory() as tmpdirname:
phantom_path = Path(tmpdirname) / 'phantom'
run_path = Path(tmpdirname) / 'run_path'
input_dir = Path(__file__).parent / 'stub'
in_file = input_dir / 'disc.in'
setup_file = input_dir / 'disc.setup'
pb.get_phantom(phantom_path)
pb.build_phantom(
path=phantom_path, version=VERSION, setup='disc', system='gfortran'
)
pb.setup_calculation(
prefix='disc',
setup_file=setup_file,
in_file=in_file,
run_path=run_path,
phantom_path=phantom_path,
)
|
python
|
import os
import time
from github import Github
from django.db import models
from calaccess_raw import get_model_list
from django.template.loader import render_to_string
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand):
help = 'Create GitHub issues for fields missing verbose and/or help text'
def add_arguments(self, parser):
"""
Adds custom arguments specific to this command.
"""
super(Command, self).add_arguments(parser)
parser.add_argument(
"--dry-run",
action="store_true",
dest="dry_run",
default=False,
help="Print text of issues without sending to Github"
)
    def handle(self, *args, **options):
        """
        Connect to GitHub using the token stored in the environment, loop over
        model fields, and create an issue for any field missing verbose and/or
        help text.
        """
        super(Command, self).handle(*args, **options)
self.dry_run = options["dry_run"]
        # set up connection to the GitHub account
self.gh = Github(os.getenv('GITHUB_TOKEN'))
self.org = self.gh.get_organization("california-civic-data-coalition")
self.repo = self.org.get_repo("django-calaccess-raw-data")
self.labels = [
self.repo.get_label("small"),
self.repo.get_label("documentation"),
self.repo.get_label("enhancement"),
]
self.header(
"Creating GitHub issues for model choice fields"
)
model_list = sorted(
get_model_list(),
key=lambda x: (x().klass_group, x().klass_name)
)
models_to_fix = []
for m in model_list:
fields_to_fix = {}
for f in m._meta.fields:
if f.name == 'id':
continue
# test for verbose name
if not f.__dict__['_verbose_name']:
fields_to_fix[f] = {'no_verbose': True, 'no_help': False}
elif len(f.__dict__['_verbose_name']) == 0:
fields_to_fix[f] = {'no_verbose': True, 'no_help': False}
# test for help text
if len(f.help_text) == 0:
try:
fields_to_fix[f]['no_help'] = True
except KeyError:
fields_to_fix[f] = {'no_verbose': False, 'no_help': True}
if len(fields_to_fix) > 0:
fs = []
for k, v in fields_to_fix.items():
fs.append((k, v))
models_to_fix.append(
(m, tuple(fs))
)
for model, fields in models_to_fix:
context = dict(
model_name=model.__name__,
model_docs=model().DOCUMENTCLOUD_PAGES,
file_name=model.__module__.split('.')[-1] + '.py',
fields=fields,
)
title = "Add verbose and/or help text fields on {model_name} (in \
{file_name})".format(**context)
body = render_to_string(
'toolbox/createverboseandhelptextissues.md',
context,
)
self.log("-- Creating issue for {model_name}".format(**context))
if self.dry_run:
print '=========================='
print title
print '--------------------------'
print body
print '=========================='
else:
self.repo.create_issue(
title,
body=body,
labels=self.labels,
)
time.sleep(2.5)
|
python
|
"""
Just run this script with ``python converter.py``.
It will convert Pytorch.ipynb to the HTML page docs/pytorch-examples.html.
"""
import nbformat
import markdown
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
notebook = nbformat.read('Pytorch.ipynb', as_version=nbformat.NO_CONVERT)
content = ''
cache = ''
for cell in notebook['cells']:
if cell['cell_type'] == 'code':
source = cell['source']
if source.startswith('#left') or source.startswith('#right'):
trimmed_source = source[source.index('\n') + 1:]
cache += "<div>{}</div>".format(highlight(trimmed_source, PythonLexer(), HtmlFormatter()))
if source.startswith('#right'):
content += "<div class='leftright-wrapper'><div class='leftright-cells'>{}</div></div> ".format(cache)
cache = ''
elif cell['cell_type'] == 'markdown':
content += "<div class='markdown-cell'>{}</div>".format(markdown.markdown(cell['source']))
else:
        raise RuntimeError('Unexpected cell type: ' + cell['cell_type'])
styles = HtmlFormatter().get_style_defs('.highlight')
styles += '''
body {
padding: 50px 10px;
}
.leftright-wrapper {
text-align: center;
overflow-x: auto;
}
.leftright-cells {
display: inline-flex;
text-align: left;
}
.leftright-cells > div {
padding: 0px 10px;
min-width: 350px;
}
.markdown-cell{
max-width: 700px;
margin: 0px auto;
}
h1 {
text-align: center;
padding: 10px 0px 0px;
}
'''
meta_tags = '''
<meta property="og:title" content="Writing better code with pytorch and einops">
<meta property="og:description" content="Learning by example: rewriting and fixing popular code fragments">
<meta property="og:image" content="http://arogozhnikov.github.io/images/einops/einops_video.gif">
<meta property="og:video" content="http://arogozhnikov.github.io/images/einops/einops_video.mp4" />
<meta property="og:url" content="https://arogozhnikov.github.io/einops/pytorch-examples.html">
<meta name="twitter:card" content="summary_large_image">
<!-- Non-Essential, But Recommended -->
<meta property="og:site_name" content="Writing better code with pytorch and einops">
<meta name="twitter:image:alt" content="Learning by example: rewriting and fixing popular code fragments">
'''
github_ribbon = '''
<a href="https://github.com/arogozhnikov/einops" class="github-corner" aria-label="View source on GitHub">
<svg width="80" height="80" viewBox="0 0 250 250" style="fill:#151513; color:#fff; position: absolute; top: 0; border: 0; right: 0;" aria-hidden="true">
<path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path>
<path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path>
</svg></a>
<style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>
'''
result = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
{meta_tags}
<title>Writing better code with pytorch+einops</title>
<style>{styles}</style>
</head>
<body>
{github_ribbon}
{content}
</body>
</html>
'''
with open('../pytorch-examples.html', 'w') as f:
f.write(result)
|
python
|
#!/usr/bin/env python3
"""This is an example to train a task with CMA-ES.
Here it runs the CartPole-v1 environment for 100 epochs.
Results:
AverageReturn: 100
RiseTime: epoch 38 (itr 760),
but regression is observed in the course of training.
"""
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.np.algos import CMAES
from metarl.np.baselines import LinearFeatureBaseline
from metarl.sampler import OnPolicyVectorizedSampler
from metarl.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def cma_es_cartpole(ctxt=None, seed=1):
"""Train CMA_ES with Cartpole-v1 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = MetaRLEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 20
algo = CMAES(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=100, batch_size=1000)
cma_es_cartpole()
|
python
|
import uvicorn
from .main import app
uvicorn.run(app)
|
python
|
import collections
from torch.optim.lr_scheduler import LambdaLR
def chunk(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
def flatten(d, parent_key='', sep='__'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def unflatten(dictionary, sep='__'):
out_dict = dict()
for key, value in dictionary.items():
parts = key.split(sep)
d = out_dict
for part in parts[:-1]:
if part not in d:
d[part] = dict()
d = d[part]
d[parts[-1]] = value
return out_dict
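# Illustrative round-trip (added for clarity; not part of the original module):
# flatten() joins nested keys with the separator and unflatten() reverses it, e.g.
#   flatten({'a': {'b': 1}})  -> {'a__b': 1}
#   unflatten({'a__b': 1})    -> {'a': {'b': 1}}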
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
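# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how the warmup schedule above could be attached to an optimizer; the
# toy model, learning rate and step counts are illustrative assumptions.
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(10, 2)  # toy model, purely illustrative
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100)
    for step in range(100):
        optimizer.step()   # in real training this follows loss.backward()
        scheduler.step()   # lr ramps up for 10 steps, then decays linearly to 0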
|
python
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy
import random
# Titan modules
import create_model
## Generate initial data set
N = 400
sig3 = 1 # 3-sigma error (controls the statistical fluctuation)
a = 10 # radius of the helix
b = 33/(2*np.pi) # 2*pi*b step of the helix
epsihelical = 1 # -1 or 1
theta = np.zeros((N, 1)) #Initialise column vector
for i in range(0, N):
theta[i] = 100/b*random.random()
X1 = a*np.cos(theta)
X2 = a*epsihelical*np.sin(theta)
X3 = b*theta
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1,X2,X3, c='b', marker='o', s=0.5)
ax.set_xlim(-20,20)
ax.set_ylim(-20,20)
ax.set_zlim(-10,100)
# We add the statistical fluctuation
X1_ = np.zeros((N, 1)) #Initialise column vector
X2_ = np.zeros((N, 1)) #Initialise column vector
X3_ = np.zeros((N, 1)) #Initialise column vector
for i in range(0, N):
X1_[i] = X1[i] + sig3/3*random.gauss(0, 1)
X2_[i] = X2[i] + sig3/3*random.gauss(0, 1)
X3_[i] = X3[i] + sig3/3*random.gauss(0, 1)
fig = plt.figure(2)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1_,X2_,X3_, c='b', marker='o', s=0.5)
ax.set_xlim(-20,20)
ax.set_ylim(-20,20)
ax.set_zlim(-10,100)
## 3 - Create probabilistic representation of the data
PLoM_model = create_model.TitanPLoM()
PLoM_model.Type = 'PLoM'
PLoM_model.ExpDesign.X = np.concatenate((X1_, X2_), axis=1) #X must be column vectors
PLoM_model.ExpDesign.Y = X3_
PLoM_model.Opt.scaling = 0
PLoM_model.Opt.optimizationm = 0
PLoM_model.Opt.epsvalue = 1.57 # value of the smoothing parameter (is not determined with the optimization procedure)
PLoM_model.Opt.m = 4
PLoM_model.titan_PLoM()
plt.show()
# 4 - Sample new realizations from the probabilistic representation of the data
PLoM_model.Itoopt.nMC = 10
PLoM_model.Itoopt.M0 = 110
PLoM_model.Itoopt.l0 = 0
PLoM_model.Itoopt.dt = 0.1196
Y = titan_PLoM_eval(PLoM_model)
# # 5 - Processing of new realizations
# nMC = Metamodel.Itoopt.nMC
# X1new = zeros(N*nMC,1)
# X2new = zeros(N*nMC,1)
# X3new = zeros(N*nMC,1)
# for ll=1:nMC
# for ii=1:N
# X1new((ll-1)*N+ii) = Y(1,ii,ll)
# X2new((ll-1)*N+ii) = Y(2,ii,ll)
# X3new((ll-1)*N+ii) = Y(3,ii,ll)
# end
# end
# figure
# scatter3(X1,X2,X3,'blue')
# hold on
# scatter3(X1new,X2new,X3new,'red')
# xlim([-20,20])
# ylim([-20,20])
# zlim([-10,100])
# figure
# plot(X1new)
# figure
# plot(X2new)
# figure
# plot(X3new)
|
python
|
from ast.Expresion import Expresion
from ast.Symbol import Symbol
from ast.Symbol import TIPOVAR as Tipo
from ast.Sentencia import Sentencia
import Reportes.ReporteD as Sentencias
class Select(Sentencia):
#SELECT selectclausules FROM selectbody wherecondicion
#Selecttable : SELECT selectclausules FROM selectbody wherecondicion
#St[0] = Select(t[2],t[4],t.slice[1].lineno,find_column(t.slice[1]),t[5])
def __init__(self,id,value,line, column,declare):
self.id = id
self.line= line
self.column = column
self.value = value
self.type = declare
def ejecutar(self,entorno,tree):
print("zVV Select")
#print("sentencias v "+Expres.id)
y= {}
try:
if self.id.type=="*":
print("xxc")
try:
print("zVV 1")
if self.value.type=="ID":
y = self.value.value
print(" y= "+str(y))
SentenciasR = Sentencias.ReporteD()
print("7000a ")
SentenciasR.write(y,"Select*from "+y,entorno,tree)
print("7001 ")
except:
pass
except:
pass
tree.agregarnodos(self)
return False
|
python
|
"""Function wrapping logic."""
import importlib
import os
import sys
import logging
import tempfile
import atexit
import functools
import typing
import traceback
from flask import request
import dploy_kickstart.errors as pe
import dploy_kickstart.transformers as pt
import dploy_kickstart.annotations as pa
log = logging.getLogger(__name__)
def nb_to_py(nb_file: str, location: str) -> str:
"""Convery .ipynb to temporary .py file."""
try:
import nbformat
import nbconvert
except ImportError as e:
raise pe.ScriptImportError(
f"{e}\nCannot import notebook conversion libraries."
+ "Please add `jupyter` (or `nbformat` and `nbconvert`)"
+ " to your dependencies.",
)
handle, filename = tempfile.mkstemp(text=True, suffix=".py")
with os.fdopen(handle, "w") as tf:
with open(os.path.join(location, nb_file)) as fh:
nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT)
exporter = nbconvert.PythonExporter()
src, _ = exporter.from_notebook_node(nb)
tf.writelines(src)
# delete file on exit
atexit.register(functools.partial(os.remove, filename))
return os.path.basename(filename), os.path.dirname(filename)
def get_func_annotations(mod: typing.Generic) -> typing.List[pa.AnnotatedCallable]:
"""Scan usercode for function annotations."""
cm = []
# check which functions have relevant args and return 'em
for name, val in mod.__dict__.items():
if callable(val):
ac = pa.AnnotatedCallable(val)
if ac.has_args():
cm.append(ac)
return cm
def import_entrypoint(entrypoint: str, location: str) -> typing.Generic:
"""Import entrypoint from user code."""
    # check whether entrypoint contains a path prefix and, if so, add it to location
if os.path.dirname(entrypoint) != "":
location = os.path.join(location, os.path.dirname(entrypoint))
entrypoint = os.path.basename(entrypoint)
# add location to path for mod importing
sys.path.insert(0, location)
# switch to location to allow for relative asset loading in usercode
os.chdir(location)
_, ext = os.path.splitext(entrypoint)
if ext == ".ipynb":
entrypoint, location = nb_to_py(entrypoint, location)
# add location of temporary .py file so it can be imported
sys.path.insert(0, location)
elif ext == ".py":
pass
else:
log.error(f"unsupportered entrypoint: {entrypoint}")
raise pe.UnsupportedEntrypoint(entrypoint)
mod_file, _ = os.path.splitext(entrypoint)
msg = "loading module '{}' (modfile: {}) from location '{}'".format(
entrypoint, mod_file, location
)
log.debug(msg)
try:
mod = importlib.import_module(mod_file, location)
except Exception as e:
raise pe.ScriptImportError(f"{msg}: {e}")
return mod
def func_wrapper(f: pa.AnnotatedCallable) -> typing.Callable:
"""Wrap functions with request logic."""
def exposed_func() -> typing.Callable:
# preprocess input for callable
try:
res = pt.MIME_TYPE_REQ_MAPPER[request.is_json](f, request)
except Exception:
raise pe.UserApplicationError(
message=f"error in executing '{f.__name__()}' method.",
traceback=traceback.format_exc(),
)
# determine whether or not to process response before sending it back to caller
try:
return pt.MIME_TYPE_RES_MAPPER[res.__class__.__name__](res)
except Exception:
raise pe.UserApplicationError(
message=f"error in executing '{f.__name__()}' method, the return type "
f"{res.__class__.__name__} is not supported",
traceback=traceback.format_exc(),
)
return exposed_func
|
python
|
#!/usr/bin/python
# minimal imports for faster startup
import os
from logger import logger
def run():
import time
import sys
import signal
import json
os.environ["QT_QPA_PLATFORM"] = "xcb" # window oddly resizes when regaining focus
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QHBoxLayout
from PyQt5.QtCore import QThread, QObject, pyqtSignal, pyqtSlot, Qt
from PyQt5.QtGui import QFont
# local
from unix_socket import UnixSocket
SOCKET_PATH = "/tmp/speech_gui.sock"
class Server(QThread):
update_signal = pyqtSignal(str)
def __init__(self):
super().__init__()
self._quit = False
self.state = {
"pause": False,
"hold": False,
"shift": False,
"ctrl": False,
"alt": False,
"win": False,
"key": ""
}
def run(self):
self._update()
while not self._quit:
try:
os.unlink(SOCKET_PATH)
except OSError:
if os.path.exists(SOCKET_PATH):
raise
sock = UnixSocket(SOCKET_PATH, 100)
sock.listen()
while True:
logger.info('Wait for a connection')
sock.accept()
logger.info('Connected. Listening for keys ...')
try:
# Receive the data in small chunks and retransmit it
while True:
msg = sock.receive()
self.state["pause"] = msg[0] == "1"
self.state["hold"] = msg[1] == "1"
self.state["shift"] = msg[2] == "1"
self.state["ctrl"] = msg[3] == "1"
self.state["alt"] = msg[4] == "1"
self.state["win"] = msg[5] == "1"
self.state["key"] = msg[6:]
self._update()
except RuntimeError as err:
logger.info(err)
finally:
logger.info('Clean up the connection')
sock.close_connection()
exit()
def _update(self):
message = json.dumps(self.state)
logger.info(message)
self.update_signal.emit(message)
def quit(self):
self._quit = True
class App(QObject):
colors = {
"background": "#B7EBB9",
"mods": "#8875E4DE",
"mods_hold": "#88F3E803",
"mod_active": "#8804C1E1",
"mod_active_hold": "#88F6AC0D"
}
labels = {}
def __init__(self):
super().__init__()
modsLayout = QHBoxLayout()
self.modsWidget = QWidget()
self.modsWidget.setLayout(modsLayout)
self.modsWidget.setStyleSheet('''
QLabel { font-size: 12pt; min-width: 14px; }
.QWidget { border-bottom-left-radius: 10px; border-top-left-radius: 10px; }
''')
layout = QHBoxLayout()
layout.addWidget(self.modsWidget)
self.widget = QWidget()
self.widget.setLayout(layout)
self.widget.setStyleSheet('''
QLabel { font-size: 12pt; }
QWidget { background-color: #88B7EBB9 }
''')
self.widget.setAttribute(Qt.WA_TranslucentBackground, True)
self.widget.setWindowFlags(Qt.FramelessWindowHint)
for labelKey in ['shift', 'ctrl', 'alt', 'win']:
label = QLabel()
label.setText(labelKey[0])
label.setAlignment(Qt.AlignCenter)
modsLayout.addWidget(label)
self.labels[labelKey] = label
self.labelKey = QLabel()
self.labelKey.setText('')
self.labelKey.setFixedWidth(100)
layout.addWidget(self.labelKey)
# layout.addStretch()
layout.setAlignment(Qt.AlignLeft)
layout.setSpacing(0)
self.widget.setWindowTitle("speechwindow")
self.widget.show()
class PostResizeThread(QThread):
def __init__(self, widget):
super().__init__()
self.widget = widget
def run(self):
time.sleep(.1)
self.widget.setGeometry(0, 0, 130, 40)
self.post_resize_thread = PostResizeThread(self.widget)
self.post_resize_thread.start()
@pyqtSlot(str)
def update(self, message):
data = json.loads(message)
modsBGColor = self.colors['mods_hold'] if data['hold'] else self.colors['mods']
self.modsWidget.setStyleSheet("""
QLabel { font-size: 12pt; min-width: 14px; }
.QWidget { border-bottom-left-radius: 10px; border-top-left-radius: 10px; background-color: %s}
""" % (modsBGColor))
for key, label in self.labels.items():
if data[key]:
colorKey = f"mod_active{'_hold' if data['hold'] else ''}"
label.setStyleSheet(f"background-color: {self.colors[colorKey]}")
else:
label.setStyleSheet(f"background-color: none")
self.labelKey.setText(" " + data["key"])
qapp = QApplication(sys.argv)
app = App()
serverthread = Server()
# thread safe communication, QtGui requires all gui related code to be called from the same thread
serverthread.update_signal.connect(app.update, Qt.QueuedConnection)
# design flaw, see https://stackoverflow.com/q/4938723/6040478
signal.signal(signal.SIGINT, signal.SIG_DFL)
serverthread.start()
qapp.exec_()
logger.info('Quit, collecting threads.')
serverthread.quit()
serverthread.wait()
# signal.pause()
run()
|
python
|
from __future__ import print_function, absolute_import
import abc
import six
from lazy import lazy
from pyreference.utils.genomics_utils import iv_from_pos_range, \
iv_from_pos_directional_before_after, dict_to_iv
class GenomicRegion(object):
""" Base class for both Gene and Transcript """
def __init__(self, reference, accession_id, data_dict):
self.reference = reference
self.accession_id = accession_id
self._dict = data_dict
def get_id(self):
return self.accession_id
@property
def biotype(self):
return '/'.join(sorted(self.get_biotypes()))
def get_biotypes(self):
# On gene it's a string
biotype = self._dict["biotype"]
if isinstance(biotype, six.string_types):
biotypes = biotype.split(",")
elif isinstance(biotype, list):
biotypes = biotype
return biotypes
@lazy
def iv(self):
return dict_to_iv(self._dict)
@lazy
def tss(self):
""" (Representative) Transcript Start Site
This is NOT the most 5' position (use iv.start_d_as_pos for that) """
transcript_iv = self.get_representative_transcript().iv
return transcript_iv.start_d_as_pos
def get_promoter_iv(self, promoter_range=1000):
return iv_from_pos_range(self.tss, promoter_range)
def get_promoter_sequence(self, promoter_range=1000):
iv = self.get_promoter_iv(promoter_range)
return self.reference.get_sequence_from_iv(iv)
def get_promoter_iv_custom_range(self, upstream_distance, downstream_distance):
"""Get any interval surrounding TSS
Note: total length of interval = upstream_distance + downstream_distance (The TSS base is included in downstream_distance)"""
return iv_from_pos_directional_before_after(self.tss, upstream_distance, downstream_distance)
def get_promoter_sequence_custom_range(self, upstream_distance, downstream_distance):
iv = self.get_promoter_iv_custom_range(upstream_distance, downstream_distance)
return self.reference.get_sequence_from_iv(iv)
@abc.abstractmethod
def get_representative_transcript(self):
pass
|
python
|
import os
import dill
import unittest
import collections
from swamp.utils import remove
from swamp.mr.mrresults import MrResults
RESULTS = collections.namedtuple('results', ['results'])
WORKDIR = os.path.join(os.environ['CCP4_SCR'], 'test_workdir')
MR_DIR = os.path.join(WORKDIR, 'swamp_mr')
class MrResultsTestCase(unittest.TestCase):
def test_1(self):
search_1 = os.path.join(MR_DIR, 'search_1')
search_1_run_1 = os.path.join(MR_DIR, 'search_1', 'run_1')
search_2 = os.path.join(MR_DIR, 'search_2')
search_2_run_1 = os.path.join(MR_DIR, 'search_2', 'run_1')
search_2_run_2 = os.path.join(MR_DIR, 'search_2', 'run_2')
directories = [WORKDIR, MR_DIR, search_1, search_1_run_1, search_2, search_2_run_1, search_2_run_2]
for directory in directories:
if not os.path.isdir(directory):
os.mkdir(directory)
self.addCleanup(remove, WORKDIR)
results = RESULTS(
results=[['SEARCH_1', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor', 'local_CC',
'overall_CC', '15', 'acl', 'is_extended', 'solution']])
with open(os.path.join(search_1_run_1, 'results.pckl'), 'wb') as fhandle:
dill.dump(results, fhandle)
results = RESULTS(
results=[['SEARCH_2', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor', 'local_CC',
'overall_CC', '45', 'acl', 'is_extended', 'solution']])
with open(os.path.join(search_2_run_1, 'results.pckl'), 'wb') as fhandle:
dill.dump(results, fhandle)
results = RESULTS(
results=[['SEARCH_2', 'RUN_2', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor', 'local_CC',
'overall_CC', '9', 'acl', 'is_extended', 'solution']])
with open(os.path.join(search_2_run_2, 'results.pckl'), 'wb') as fhandle:
dill.dump(results, fhandle)
results = MrResults(swamp_workdir=WORKDIR)
self.assertListEqual(sorted((os.path.join(search_1_run_1, 'results.pckl'),
os.path.join(search_2_run_2, 'results.pckl'),
os.path.join(search_2_run_1, 'results.pckl'))), sorted(results.pickle_list))
self.assertListEqual([['SEARCH_2', 'RUN_2', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor',
'local_CC', 'overall_CC', '9', 'acl', 'is_extended', 'solution'],
['SEARCH_2', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor',
'local_CC', 'overall_CC', '45', 'acl', 'is_extended', 'solution'],
['SEARCH_1', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor',
'local_CC', 'overall_CC', '15', 'acl', 'is_extended', 'solution']]
, results.results)
self.assertListEqual(["SEARCH ID", "RUN ID", "LLG", "TFZ", "PHSR_CC_LOC", "PHSR_CC_ALL", "RFMC_RFREE",
"RFMC_RFACT", "RFMC_CC_LOC", "RFMC_CC_ALL", "SHXE_CC", "SHXE_ACL", "IS_EXTENDED",
"SOLUTION"], results._result_table_fields)
self.assertEqual(results.logger_header, """\n******************************************************************\
****
******************* SWAMP-MR RESULTS ***************
**********************************************************************
Recovering results now...
""")
|
python
|
import functools
import itertools
import math
from evm.constants import (
UINT_255_MAX,
UINT_256_CEILING,
)
def int_to_big_endian(value):
byte_length = math.ceil(value.bit_length() / 8)
return (value).to_bytes(byte_length, byteorder='big')
def big_endian_to_int(value):
return int.from_bytes(value, byteorder='big')
def int_to_byte(value):
return bytes([value])
byte_to_int = ord
def ceilXX(value, ceiling):
remainder = value % ceiling
if remainder == 0:
return value
else:
return value + ceiling - remainder
ceil32 = functools.partial(ceilXX, ceiling=32)
ceil8 = functools.partial(ceilXX, ceiling=8)
def unsigned_to_signed(value):
if value <= UINT_255_MAX:
return value
else:
return value - UINT_256_CEILING
def signed_to_unsigned(value):
if value < 0:
return value + UINT_256_CEILING
else:
return value
def safe_ord(value):
if isinstance(value, int):
return value
else:
return ord(value)
def is_even(value):
return value % 2 == 0
def is_odd(value):
return value % 2 == 1
def get_highest_bit_index(value):
value >>= 1
for bit_length in itertools.count():
if not value:
return bit_length
value >>= 1
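# Hedged sanity checks (added for illustration; not part of the original module).
# They exercise a few of the helpers above with hand-checked values.
if __name__ == "__main__":
    assert ceil32(33) == 64                       # rounded up to the next multiple of 32
    assert int_to_big_endian(255) == b'\xff'
    assert big_endian_to_int(b'\x01\x00') == 256
    assert get_highest_bit_index(8) == 3          # 8 == 0b1000, highest set bit is index 3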
|
python
|
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: populate/pop_rules.py
Author: Jon Gunderson
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import django
from django.core.exceptions import ObjectDoesNotExist
import re
fp = os.path.realpath(__file__)
path, filename = os.path.split(fp)
fae2_path = path.split('/populate')[0]
sys.path.append(fae2_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae2.settings')
from django.conf import settings
django.setup()
import json
from abouts.models import FAQ
def addFAQ(seq, title, description):
try:
faq = FAQ.objects.get(seq=seq)
print("Updated FAQ: " + title)
faq.title = title
faq.description = description
except:
print("Created FAQ: " + title)
faq = FAQ(seq=seq, title=title, description=description)
faq.save()
desc = """
There are two major reasons why FAE 2.0 and AInspector Sidebar evaluation results may be different:
1. When a page includes dynamically loaded content, the DOM that FAE 2.0 sees will often be different from the DOM that AInspector Sidebar sees, resulting in different evaluation results. The more dynamic the content in the page, the more possibility of a discrepancy.
1. Pages that are responsive to screen dimensions will have different content rendered depending on the width of the screen. FAE 2.0 generally has a wide screen and AInspector Sidebar will analyze the content based on the current screen width.
**Note:** AInspector Sidebar will generally be more accurate than FAE for looking at Individual Pages.
"""
addFAQ(1, "FAE 2.0 and AInspector Sidebar evaluation results different?", desc)
desc = """
The rules are designed to help users understand what accessibility issues they need to consider in the design of a website.
Manual checks help users identify what they need to learn about accessibility in order to ensure their web resource is accessible.
Currently, manual checks help inform users of what they need to understand about accessibility, but in FAE 2.1 users will be able to update manual checks to Pass, Fail or Not Applicable to update the report details, summary and implementation scores for rules and rule categories.
"""
addFAQ(2, "Why report manual checking results?", desc)
|
python
|
"""
A complex number is a number in the form a + b * i where a and b are real and i satisfies i^2 = -1.
`a` is called the real part and `b` is called the imaginary part of `z`.
The conjugate of the number `a + b * i` is the number `a - b * i`.
The absolute value of a complex number `z = a + b * i` is a real number `|z| = sqrt(a^2 + b^2)`.
The square of the absolute value `|z|^2` is the result of multiplication of `z` by its complex conjugate.
The sum/difference of two complex numbers involves adding/subtracting their real and imaginary parts separately:
`(a + i * b) + (c + i * d) = (a + c) + (b + d) * i`,
`(a + i * b) - (c + i * d) = (a - c) + (b - d) * i.`
Multiplication result is by definition `(a + i * b) * (c + i * d) = (a * c - b * d) + (b * c + a * d) * i`.
The reciprocal of a non-zero complex number is `1 / (a + i * b) = a/(a^2 + b^2) - b/(a^2 + b^2) * i`.
Dividing a complex number a + i * b by another c + i * d gives:
`(a + i * b) / (c + i * d) = (a * c + b * d)/(c^2 + d^2) + (b * c - a * d)/(c^2 + d^2) * i`.
Raising `e` to a complex exponent can be expressed as:
`e^(a + i * b) = e^a * e^(i * b)`,
the last term of which is given by Euler's formula `e^(i * b) = cos(b) + i * sin(b)`.
Task:
Implement the following operations:
- addition, subtraction, multiplication and division of two complex numbers,
- conjugate, absolute value, exponent of a given complex number.
Assume the programming language you are using does not have an implementation of complex numbers.
"""
from math import sqrt, cos, sin, exp
from typing import Union
class ComplexNumber:
"""
A Class to emulate Complex Numbers
"""
REAL_SET = {int, float}
def __init__(self, real: Union[int, float], imaginary: Union[int, float] = 0):
self._real = real
self._imag = imaginary
self._validate()
def _validate(self):
if (type(self.real) not in self.REAL_SET or
type(self.imaginary) not in self.REAL_SET):
raise ValueError("Both the real and imaginary parts of the complex number must be real!!")
@property
def real(self):
"""
:return: Real part of the Complex Number
"""
return self._real
@property
def imaginary(self):
"""
:return: Imaginary part of the Complex Number
"""
return self._imag
def __eq__(self, other):
return self.real == other.real and self.imaginary == other.imaginary
def __add__(self, other):
return self.__class__(self.real + other.real, self.imaginary + other.imaginary)
def __mul__(self, other):
return self.__class__(self.real * other.real - self.imaginary * other.imaginary,
self.real * other.imaginary + self.imaginary * other.real)
def __sub__(self, other):
return self + self.__class__(-other.real, -other.imaginary)
# Since the square of absolute value of a Complex Number will have `imaginary == 0`
def _abs_square(self):
return (self * self.conjugate()).real
def _reciprocal(self):
return self.__class__(self.real / self._abs_square(),
-self.imaginary / self._abs_square())
def __truediv__(self, other):
return self * other._reciprocal()
def __abs__(self):
return sqrt(self._abs_square())
def conjugate(self):
"""
:return: The Complex Conjugate of the Complex Number
"""
return self.__class__(self.real, -self.imaginary)
# Calculate value of e^ib as per Euler's Formula
def _exp_imag_only(self):
return self.__class__(cos(self.imaginary), sin(self.imaginary))
def exp(self):
"""
:return: The value of `e` raised to the power of the Complex Number
"""
return self.__class__(exp(self.real)) * self._exp_imag_only()
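# Illustrative usage (added for clarity; not part of the original solution):
# a few hand-checked examples of the arithmetic defined above.
if __name__ == "__main__":
    z1 = ComplexNumber(1, 2)    # 1 + 2i
    z2 = ComplexNumber(3, -1)   # 3 - 1i
    assert z1 + z2 == ComplexNumber(4, 1)
    assert z1 * z2 == ComplexNumber(5, 5)
    assert z1.conjugate() == ComplexNumber(1, -2)
    assert abs(ComplexNumber(3, 4)) == 5.0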
|
python
|
"""
Module: 'sys' on pyboard 1.13.0-95
"""
# MCU: (sysname='pyboard', nodename='pyboard', release='1.13.0', version='v1.13-95-g0fff2e03f on 2020-10-03', machine='PYBv1.1 with STM32F405RG')
# Stubber: 1.3.4
argv = None
byteorder = 'little'
def exit():
pass
implementation = None
maxsize = 2147483647
modules = None
path = None
platform = 'pyboard'
def print_exception():
pass
stderr = None
stdin = None
stdout = None
version = '3.4.0'
version_info = None
|
python
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GridFS is a specification for storing large objects in Mongo.
The :mod:`gridfs` package is an implementation of GridFS on top of
:mod:`pymongo`, exposing a file-like interface.
"""
from __future__ import absolute_import, division
from twisted.internet import defer
from txmongo._gridfs.errors import NoFile
from txmongo._gridfs.grid_file import GridIn, GridOut, GridOutIterator
from txmongo import filter
from txmongo.filter import ASCENDING, DESCENDING
from txmongo.database import Database
assert GridOutIterator
class GridFS(object):
"""An instance of GridFS on top of a single Database.
"""
def __init__(self, database, collection="fs"):
"""Create a new instance of :class:`GridFS`.
Raises :class:`TypeError` if `database` is not an instance of
:class:`~pymongo.database.Database`.
:Parameters:
- `database`: database to use
- `collection` (optional): root collection to use
.. note::
            Instantiating a GridFS object will implicitly create its indexes.
            This could lead to errors if the underlying connection is closed
            before the index creation request has returned. To avoid this you
            should use the Deferred returned by :meth:`GridFS.indexes_created`.
.. versionadded:: 1.6
The `collection` parameter.
"""
if not isinstance(database, Database):
raise TypeError("TxMongo: database must be an instance of Database.")
self.__database = database
self.__collection = database[collection]
self.__files = self.__collection.files
self.__chunks = self.__collection.chunks
self.__indexes_created_defer = defer.DeferredList([
self.__files.create_index(
filter.sort(ASCENDING("filename") + ASCENDING("uploadDate"))),
self.__chunks.create_index(
filter.sort(ASCENDING("files_id") + ASCENDING("n")), unique=True)
])
def indexes_created(self):
"""Returns a defer on the creation of this GridFS instance's indexes
"""
d = defer.Deferred()
self.__indexes_created_defer.chainDeferred(d)
return d
def new_file(self, **kwargs):
"""Create a new file in GridFS.
Returns a new :class:`~gridfs.grid_file.GridIn` instance to
which data can be written. Any keyword arguments will be
passed through to :meth:`~gridfs.grid_file.GridIn`.
:Parameters:
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
return GridIn(self.__collection, **kwargs)
def put(self, data, **kwargs):
"""Put data in GridFS as a new file.
Equivalent to doing:
>>> f = new_file(**kwargs)
>>> try:
>>> f.write(data)
>>> finally:
>>> f.close()
`data` can be either an instance of :class:`str` or a
file-like object providing a :meth:`read` method. Any keyword
arguments will be passed through to the created file - see
:meth:`~gridfs.grid_file.GridIn` for possible
arguments. Returns the ``"_id"`` of the created file.
:Parameters:
- `data`: data to be written as a file.
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
grid_file = GridIn(self.__collection, **kwargs)
def _finally(result):
return grid_file.close().addCallback(lambda _: result)
return grid_file.write(data)\
.addBoth(_finally)\
.addCallback(lambda _: grid_file._id)
def get(self, file_id):
"""Get a file from GridFS by ``"_id"``.
Returns an instance of :class:`~gridfs.grid_file.GridOut`,
which provides a file-like interface for reading.
:Parameters:
- `file_id`: ``"_id"`` of the file to get
.. versionadded:: 1.6
"""
def ok(doc):
if doc is None:
raise NoFile("TxMongo: no file in gridfs with _id {0}".format(repr(file_id)))
return GridOut(self.__collection, doc)
return self.__collection.files.find_one({"_id": file_id}).addCallback(ok)
def get_version(self, filename=None, version=-1):
"""Get a file from GridFS by ``"filename"``.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``. Note that searching by
random (unindexed) meta data is not supported here.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
"""
query = {"filename": filename}
skip = abs(version)
if version < 0:
skip -= 1
myorder = DESCENDING("uploadDate")
else:
myorder = ASCENDING("uploadDate")
def ok(cursor):
if cursor:
return GridOut(self.__collection, cursor[0])
raise NoFile("no version %d for filename %r" % (version, filename))
return self.__files.find(query, filter=filter.sort(myorder), limit=1, skip=skip)\
.addCallback(ok)
def count(self, filename):
"""Count the number of versions of a given file.
Returns an integer number of versions of the file in GridFS whose filename matches
`filename`, or raises NoFile if the file doesn't exist.
:Parameters:
- `filename`: ``"filename"`` of the file to get version count of
"""
return self.__files.count({"filename": filename})
def get_last_version(self, filename):
"""Get a file from GridFS by ``"filename"``.
Returns the most recently uploaded file in GridFS with the
name `filename` as an instance of
:class:`~gridfs.grid_file.GridOut`. Raises
:class:`~gridfs.errors.NoFile` if no such file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get
.. versionadded:: 1.6
"""
def ok(doc):
if doc is None:
raise NoFile("TxMongo: no file in gridfs with filename {0}".format(repr(filename)))
return GridOut(self.__collection, doc)
return self.__files.find_one({"filename": filename},
filter = filter.sort(DESCENDING("uploadDate"))).addCallback(ok)
# TODO add optional safe mode for chunk removal?
def delete(self, file_id):
"""Delete a file from GridFS by ``"_id"``.
Removes all data belonging to the file with ``"_id"``:
`file_id`.
.. warning:: Any processes/threads reading from the file while
this method is executing will likely see an invalid/corrupt
file. Care should be taken to avoid concurrent reads to a file
while it is being deleted.
:Parameters:
- `file_id`: ``"_id"`` of the file to delete
.. versionadded:: 1.6
"""
return defer.DeferredList([
self.__files.remove({"_id": file_id}, safe=True),
self.__chunks.remove({"files_id": file_id})
])
def list(self):
"""List the names of all files stored in this instance of
:class:`GridFS`.
.. versionchanged:: 1.6
Removed the `collection` argument.
"""
return self.__files.distinct("filename")
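# Hedged usage sketch, not part of the library code above: it assumes the GridFS
# class follows the pymongo-style GridFS(database) constructor described in its
# docstrings and that txmongo.MongoConnection is available; host, database and
# file names are illustrative only.
from twisted.internet import defer

@defer.inlineCallbacks
def _gridfs_usage_example():
    import txmongo
    conn = yield txmongo.MongoConnection("localhost", 27017)
    fs = GridFS(conn.mydb)                                    # "fs" is the default root collection
    file_id = yield fs.put(b"hello gridfs", filename="greeting.txt")
    newest = yield fs.get_last_version("greeting.txt")
    data = yield newest.read()                                # -> b"hello gridfs"
    yield fs.delete(file_id)
    yield conn.disconnect()
    defer.returnValue(data)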
|
python
|
#!/usr/bin/python
#:deploy:OHsentinel:/usr/local/bin
#standard imports
import sys
import ConfigParser
import logging
import logging.handlers
import os
#custom imports
if os.path.isdir("/usr/local/share/OHsentinel"):
sys.path.append("/usr/local/share/OHsentinel")
elif os.path.isdir("/usr/share/OHsentinel"):
sys.path.append("/usr/share/OHsentinel")
try:
import OHcommons
import OHssdp
import OHcustoms
except:
print 'Could not import OHcli modules. Please check /usr/local/share/OHsentinel and /usr/share/OHsentinel. lxml and tabulate are also required.'
sys.exit(4)
def init():
"reading config and parameters"
config = {}
devices = {}
try:
conf = ConfigParser.ConfigParser()
if os.path.isfile('/etc/OHsentinel.conf'):
conf.read('/etc/OHsentinel.conf')
else:
conf.read('./OHsentinel.conf')
except:
print 'Could not read config file'
sys.exit(1)
if conf.has_section('OHproduct'):
for item in conf.items('OHproduct'):
devices['product', item[0]] = item[1]
if conf.has_section('OHsender'):
for item in conf.items('OHsender'):
devices['sender', item[0]] = item[1]
if conf.has_section('OHradio'):
config['stations'] = conf.get('OHradio', 'stations').split(',')
if conf.has_section('fakeradio'):
for station in conf.items('fakeradio'):
config['fakeradio', station[0]] = station[1]
if conf.has_option('resources', 'xmlpath'):
config['xmlpath'] = conf.get('resources', 'xmlpath')
else: config['xmlpath'] = '/usr/local/share/OHsentinel/xml'
if conf.has_option('resources', 'xslpath'):
config['xslpath'] = conf.get('resources', 'xslpath')
else: config['xslpath'] = '/usr/local/share/OHsentinel/xsl'
if conf.has_option('resources', 'logfile'):
config['logfile'] = conf.get('resources', 'logfile')
else:
config['logfile'] = './OHsentinel.log'
if conf.has_option('resources', 'cmdport'):
config['cmdport'] = conf.get('resources', 'cmdport')
else:
config['cmdport'] = 8891
if conf.has_option('resources', 'remote'):
config['remote'] = conf.get('resources', 'remote')
else:
config['remote'] = 'http://localhost'
if conf.has_option('misc', 'xmltagdelimiter'):
config['xmltagdelimiter'] = conf.get('misc', 'xmltagdelimiter')
else: config['xmltagdelimiter'] = ';;'
if conf.has_option('misc', 'maxcolumnwidth'):
config['maxcolumnwidth'] = conf.getint('misc', 'maxcolumnwidth')
if conf.has_option('misc', 'standardtags'):
config['standardtags'] = conf.get('misc', 'standardtags')
config['searchstring', 'product'] = "urn:av-openhome-org:service:Product:1"
config['searchstring', 'sender'] = "urn:av-openhome-org:service:Sender:1"
config['searchstring', 'all'] = "ssdp:all"
config['searchtypes'] = ['product', 'sender', 'all']
if conf.has_section('customsearch'):
for st in conf.items('customsearch'):
config['searchstring', st[0]] = st[1]
config['searchtypes'].append(st[0])
"parse arguments"
args = OHcustoms.set_arguments(None, config['searchtypes'])
"setup logging"
numeric_level = getattr(logging, args.loglevel[0].upper(), None)
if args.log == ["screen"]:
logging.basicConfig(level=numeric_level, format='%(asctime)s - %(levelname)s - %(message)s')
logging.debug('logging started')
elif args.log == ["file"]:
log_handler = logging.handlers.WatchedFileHandler(config['logfile'])
log_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(numeric_level)
logger.addHandler(log_handler)
logging.debug('logging started')
elif args.log == ["syslog"]:
log_handler = logging.handlers.SysLogHandler(address = '/dev/log')
log_handler.setFormatter(logging.Formatter('OHsentinel - cli: %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(numeric_level)
logger.addHandler(log_handler)
logging.debug('logging started')
elif args.log == ["systemd"]:
from systemd.journal import JournalHandler
logger = logging.getLogger()
logger.setLevel(numeric_level)
logger.addHandler(JournalHandler(SYSLOG_IDENTIFIER='OHsentinel'))
logging.debug('logging started')
logging.debug('Used configuration: ' + str(config))
logging.debug('Known devices: ' + str(devices))
return args, config, devices
args, config, devices = init()
logging.debug(args)
if args.mode == 'search':
OHcommons.search(args, devices, config)
elif args.mode == 'command':
if args.unit[0] == 'Custom':
"Process command in custom unit"
OHcustoms.command(args, devices, config)
else:
"Process standard command"
OHcommons.command(args, devices, config)
elif args.mode == 'explore':
OHcommons.explore(args, devices, config)
elif args.mode == 'remote':
OHcommons.remote(args, devices, config)
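# Hedged example of a minimal OHsentinel.conf that init() above can parse; the
# section and option names come from the code, the values are illustrative only:
#
#   [resources]
#   logfile = /var/log/OHsentinel.log
#   cmdport = 8891
#   remote = http://localhost
#
#   [OHproduct]
#   livingroom = <device address or UUID>
#
#   [misc]
#   xmltagdelimiter = ;;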
|
python
|
import unittest
from policosm.utils.levels import get_level
class LevelsTestCase(unittest.TestCase):
def test_known(self):
for highway in ['construction', 'demolished', 'raceway', 'abandoned', 'disused', 'foo', 'no','projected', 'planned','proposed','razed','dismantled','historic']:
self.assertEqual(0, get_level(highway))
for highway in ['stairway', 'elevator', 'corridor', 'hallway', 'slide']:
self.assertEqual(1, get_level(highway))
for highway in ['services', 'busway', 'bus_guideway', 'access','bus_stop', 'via_ferrata', 'access_ramp', 'emergency_access_point', 'emergency_bay','service', 'footway',
'traffic_island', 'virtual', 'cyleway', 'cycleway', 'byway', 'path', 'track', 'pedestrian', 'steps',
'platform', 'bridleway', 'rest_area', 'escape','footway']:
self.assertEqual(2, get_level(highway))
for highway in ['residential', 'yes', 'unclassified', 'crossing', 'unknown',
'bridge', 'lane', 'ford', 'psv', 'living_street','alley']:
self.assertEqual(3, get_level(highway))
for highway in ['tertiary', 'tertiary_link', 'turning_circle', 'road', 'roundabout', 'ice_road']:
self.assertEqual(4, get_level(highway))
for highway in ['secondary', 'secondary_link']:
self.assertEqual(5, get_level(highway))
for highway in ['primary', 'primary_link']:
self.assertEqual(6, get_level(highway))
for highway in ['trunk', 'trunk_link']:
self.assertEqual(7, get_level(highway))
for highway in ['motorway', 'motorway_link','ramp']:
self.assertEqual(8, get_level(highway))
def test_unknown(self):
self.assertEqual(3, get_level('zzz'))
if __name__ == '__main__':
unittest.main()
|
python
|
import torch
import torch.nn as nn
from torchsummary import summary
from lib.medzoo.BaseModelClass import BaseModel
"""
Code was borrowed and modified from this repo: https://github.com/josedolz/HyperDenseNet_pytorch
"""
def conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d,
BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):
convlayer = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)
layers = []
# if ws:
# layers.append(WScaleLayer(convlayer, gain=gainWS))
if BN:
layers.append(nn.BatchNorm2d(nout))
if activ is not None:
if activ == nn.PReLU:
# to avoid sharing the same parameter, activ must be set to nn.PReLU (without '()')
layers.append(activ(num_parameters=1))
else:
# if activ == nn.PReLU(), the parameter will be shared for the whole network !
layers.append(activ)
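# note: `ws` doubles as the insertion index below, so with the default ws=False the
# conv layer ends up at position 0, i.e. before BatchNorm/activation (the
# WScaleLayer branch is commented out above)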
layers.insert(ws, convlayer)
return nn.Sequential(*layers)
class ResidualConv(nn.Module):
def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
super(ResidualConv, self).__init__()
convs = [conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ),
conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None)]
self.convs = nn.Sequential(*convs)
res = []
if nin != nout:
res.append(conv(nin, nout, kernel_size=1, padding=0, bias=False, BN=BN, ws=ws, activ=None))
self.res = nn.Sequential(*res)
activation = []
if activ is not None:
if activ == nn.PReLU:
# to avoid sharing the same parameter, activ must be set to nn.PReLU (without '()')
activation.append(activ(num_parameters=1))
else:
# if activ == nn.PReLU(), the parameter will be shared for the whole network !
activation.append(activ)
self.activation = nn.Sequential(*activation)
def forward(self, input):
out = self.convs(input)
return self.activation(out + self.res(input))
def upSampleConv_Res(nin, nout, upscale=2, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
return nn.Sequential(
nn.Upsample(scale_factor=upscale),
ResidualConv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ)
)
def conv_block(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
nn.BatchNorm2d(out_dim),
act_fn,
)
return model
def conv_block_1(in_dim, out_dim):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=1),
nn.BatchNorm2d(out_dim),
nn.PReLU(),
)
return model
def conv_block_Asym(in_dim, out_dim, kernelSize):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([2, 0])),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, 2])),
nn.BatchNorm2d(out_dim),
nn.PReLU(),
)
return model
def conv_block_Asym_Inception(in_dim, out_dim, kernel_size, padding, dilation=1):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=[kernel_size, 1], padding=tuple([padding * dilation, 0]),
dilation=(dilation, 1)),
nn.BatchNorm2d(out_dim),
nn.ReLU(),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding * dilation]),
dilation=(dilation, 1)),
nn.BatchNorm2d(out_dim),
nn.ReLU(),
)
return model
def conv_block_Asym_Inception_WithIncreasedFeatMaps(in_dim, mid_dim, out_dim, kernel_size, padding, dilation=1):
model = nn.Sequential(
nn.Conv2d(in_dim, mid_dim, kernel_size=[kernel_size, 1], padding=tuple([padding * dilation, 0]),
dilation=(dilation, 1)),
nn.BatchNorm2d(mid_dim),
nn.ReLU(),
nn.Conv2d(mid_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding * dilation]),
dilation=(dilation, 1)),
nn.BatchNorm2d(out_dim),
nn.ReLU(),
)
return model
def conv_block_Asym_ERFNet(in_dim, out_dim, kernelSize, padding, drop, dilation):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding, 0]), bias=True),
nn.ReLU(),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding]), bias=True),
nn.BatchNorm2d(out_dim, eps=1e-03),
nn.ReLU(),
nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding * dilation, 0]), bias=True,
dilation=(dilation, 1)),
nn.ReLU(),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding * dilation]), bias=True,
dilation=(1, dilation)),
nn.BatchNorm2d(out_dim, eps=1e-03),
nn.Dropout2d(drop),
)
return model
def conv_block_3_3(in_dim, out_dim):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1),
nn.BatchNorm2d(out_dim),
nn.PReLU(),
)
return model
# TODO: Change order of block: BN + Activation + Conv
def conv_decod_block(in_dim, out_dim, act_fn):
model = nn.Sequential(
nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(out_dim),
act_fn,
)
return model
def dilation_conv_block(in_dim, out_dim, act_fn, stride_val, dil_val):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride_val, padding=1, dilation=dil_val),
nn.BatchNorm2d(out_dim),
act_fn,
)
return model
def maxpool():
pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
return pool
def avrgpool05():
pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
return pool
def avrgpool025():
pool = nn.AvgPool2d(kernel_size=2, stride=4, padding=0)
return pool
def avrgpool0125():
pool = nn.AvgPool2d(kernel_size=2, stride=8, padding=0)
return pool
def maxpool_1_4():
pool = nn.MaxPool2d(kernel_size=2, stride=4, padding=0)
return pool
def maxpool_1_8():
pool = nn.MaxPool2d(kernel_size=2, stride=8, padding=0)
return pool
def maxpool_1_16():
pool = nn.MaxPool2d(kernel_size=2, stride=16, padding=0)
return pool
def maxpool_1_32():
pool = nn.MaxPool2d(kernel_size=2, stride=32, padding=0)
return pool
def conv_block_3(in_dim, out_dim, act_fn):
model = nn.Sequential(
conv_block(in_dim, out_dim, act_fn),
conv_block(out_dim, out_dim, act_fn),
nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_dim),
)
return model
def classificationNet(D_in):
H = 400
D_out = 1
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, int(H / 4)),
torch.nn.ReLU(),
torch.nn.Linear(int(H / 4), D_out)
)
return model
# from layers import *
def croppCenter(tensorToCrop, finalShape):
org_shape = tensorToCrop.shape
diff = org_shape[2] - finalShape[2]
croppBorders = int(diff / 2)
return tensorToCrop[:,
:,
croppBorders:org_shape[2] - croppBorders,
croppBorders:org_shape[3] - croppBorders,
croppBorders:org_shape[4] - croppBorders]
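# Example: cropping a (1, 50, 24, 24, 24) tensor to match a (1, 25, 22, 22, 22)
# feature map removes (24 - 22) / 2 = 1 voxel from each spatial border, leaving the
# batch and channel dimensions untouched.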
def convBlock(nin, nout, kernel_size=3, batchNorm=False, layer=nn.Conv3d, bias=True, dropout_rate=0.0, dilation=1):
if batchNorm == False:
return nn.Sequential(
nn.PReLU(),
nn.Dropout(p=dropout_rate),
layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation)
)
else:
return nn.Sequential(
nn.BatchNorm3d(nin),
nn.PReLU(),
nn.Dropout(p=dropout_rate),
layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation)
)
def convBatch(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, dilation=1):
return nn.Sequential(
layer(nin, nout, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, dilation=dilation),
nn.BatchNorm2d(nout),
# nn.LeakyReLU(0.2)
nn.PReLU()
)
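# Note: convBlock above defaults to nn.Conv3d and is what the two HyperDenseNet
# variants below are built from, while convBatch defaults to nn.Conv2d and is unused
# in this file.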
class HyperDenseNet_2Mod(BaseModel):
def __init__(self, in_channels=2, classes=4):
super(HyperDenseNet_2Mod, self).__init__()
self.num_classes = classes
assert in_channels == 2, "input channels must be two for this architecture"
# Path-Top
self.conv1_Top = convBlock(1, 25)
self.conv2_Top = convBlock(50, 25, batchNorm=True)
self.conv3_Top = convBlock(100, 25, batchNorm=True)
self.conv4_Top = convBlock(150, 50, batchNorm=True)
self.conv5_Top = convBlock(250, 50, batchNorm=True)
self.conv6_Top = convBlock(350, 50, batchNorm=True)
self.conv7_Top = convBlock(450, 75, batchNorm=True)
self.conv8_Top = convBlock(600, 75, batchNorm=True)
self.conv9_Top = convBlock(750, 75, batchNorm=True)
# Path-Bottom
self.conv1_Bottom = convBlock(1, 25)
self.conv2_Bottom = convBlock(50, 25, batchNorm=True)
self.conv3_Bottom = convBlock(100, 25, batchNorm=True)
self.conv4_Bottom = convBlock(150, 50, batchNorm=True)
self.conv5_Bottom = convBlock(250, 50, batchNorm=True)
self.conv6_Bottom = convBlock(350, 50, batchNorm=True)
self.conv7_Bottom = convBlock(450, 75, batchNorm=True)
self.conv8_Bottom = convBlock(600, 75, batchNorm=True)
self.conv9_Bottom = convBlock(750, 75, batchNorm=True)
self.fully_1 = nn.Conv3d(1800, 400, kernel_size=1)
self.fully_2 = nn.Conv3d(400, 200, kernel_size=1)
self.fully_3 = nn.Conv3d(200, 150, kernel_size=1)
self.final = nn.Conv3d(150, classes, kernel_size=1)
def forward(self, input):
# ----- First layer ------ #
# get 2 of the channels as 5D tensors
# pdb.set_trace()
print("input shape ", input.shape)
y1t = self.conv1_Top(input[:, 0:1, :, :, :])
y1b = self.conv1_Bottom(input[:, 1:2, :, :, :])
# ----- Second layer ------ #
# concatenate
y2t_i = torch.cat((y1t, y1b), dim=1)
y2b_i = torch.cat((y1b, y1t), dim=1)
y2t_o = self.conv2_Top(y2t_i)
y2b_o = self.conv2_Bottom(y2b_i)
# ----- Third layer ------ #
y2t_i_cropped = croppCenter(y2t_i, y2t_o.shape)
y2b_i_cropped = croppCenter(y2b_i, y2t_o.shape)
# concatenate
y3t_i = torch.cat((y2t_i_cropped, y2t_o, y2b_o), dim=1)
y3b_i = torch.cat((y2b_i_cropped, y2b_o, y2t_o), dim=1)
y3t_o = self.conv3_Top(y3t_i)
y3b_o = self.conv3_Bottom(y3b_i)
# ------ Fourth layer ----- #
y3t_i_cropped = croppCenter(y3t_i, y3t_o.shape)
y3b_i_cropped = croppCenter(y3b_i, y3t_o.shape)
# concatenate
y4t_i = torch.cat((y3t_i_cropped, y3t_o, y3b_o), dim=1)
y4b_i = torch.cat((y3b_i_cropped, y3b_o, y3t_o), dim=1)
y4t_o = self.conv4_Top(y4t_i)
y4b_o = self.conv4_Bottom(y4b_i)
# ------ Fifth layer ----- #
y4t_i_cropped = croppCenter(y4t_i, y4t_o.shape)
y4b_i_cropped = croppCenter(y4b_i, y4t_o.shape)
# concatenate
y5t_i = torch.cat((y4t_i_cropped, y4t_o, y4b_o), dim=1)
y5b_i = torch.cat((y4b_i_cropped, y4b_o, y4t_o), dim=1)
y5t_o = self.conv5_Top(y5t_i)
y5b_o = self.conv5_Bottom(y5b_i)
# ------ Sixth layer ----- #
y5t_i_cropped = croppCenter(y5t_i, y5t_o.shape)
y5b_i_cropped = croppCenter(y5b_i, y5t_o.shape)
# concatenate
y6t_i = torch.cat((y5t_i_cropped, y5t_o, y5b_o), dim=1)
y6b_i = torch.cat((y5b_i_cropped, y5b_o, y5t_o), dim=1)
y6t_o = self.conv6_Top(y6t_i)
y6b_o = self.conv6_Bottom(y6b_i)
# ------ Seventh layer ----- #
y6t_i_cropped = croppCenter(y6t_i, y6t_o.shape)
y6b_i_cropped = croppCenter(y6b_i, y6t_o.shape)
# concatenate
y7t_i = torch.cat((y6t_i_cropped, y6t_o, y6b_o), dim=1)
y7b_i = torch.cat((y6b_i_cropped, y6b_o, y6t_o), dim=1)
y7t_o = self.conv7_Top(y7t_i)
y7b_o = self.conv7_Bottom(y7b_i)
# ------ Eight layer ----- #
y7t_i_cropped = croppCenter(y7t_i, y7t_o.shape)
y7b_i_cropped = croppCenter(y7b_i, y7t_o.shape)
# concatenate
y8t_i = torch.cat((y7t_i_cropped, y7t_o, y7b_o), dim=1)
y8b_i = torch.cat((y7b_i_cropped, y7b_o, y7t_o), dim=1)
y8t_o = self.conv8_Top(y8t_i)
y8b_o = self.conv8_Bottom(y8b_i)
# ------ Ninth layer ----- #
y8t_i_cropped = croppCenter(y8t_i, y8t_o.shape)
y8b_i_cropped = croppCenter(y8b_i, y8t_o.shape)
# concatenate
y9t_i = torch.cat((y8t_i_cropped, y8t_o, y8b_o), dim=1)
y9b_i = torch.cat((y8b_i_cropped, y8b_o, y8t_o), dim=1)
y9t_o = self.conv9_Top(y9t_i)
y9b_o = self.conv9_Bottom(y9b_i)
##### Fully connected layers
y9t_i_cropped = croppCenter(y9t_i, y9t_o.shape)
y9b_i_cropped = croppCenter(y9b_i, y9t_o.shape)
outputPath_top = torch.cat((y9t_i_cropped, y9t_o, y9b_o), dim=1)
outputPath_bottom = torch.cat((y9b_i_cropped, y9b_o, y9t_o), dim=1)
inputFully = torch.cat((outputPath_top, outputPath_bottom), dim=1)
y = self.fully_1(inputFully)
y = self.fully_2(y)
y = self.fully_3(y)
return self.final(y)
def test(self, device='cpu'):
input_tensor = torch.rand(1, 2, 22, 22, 22)
ideal_out = torch.rand(1, self.num_classes, 22, 22, 22)
out = self.forward(input_tensor)
# assert ideal_out.shape == out.shape
# summary(self.to(torch.device(device)), (2, 22, 22, 22),device=device)
# torchsummaryX.summary(self,input_tensor.to(device))
print("HyperDenseNet test is complete", out.shape)
class HyperDenseNet(BaseModel):
def __init__(self, in_channels=3, classes=4):
super(HyperDenseNet, self).__init__()
assert in_channels == 3, "HyperDensenet supports 3 in_channels. For 2 in_channels use HyperDenseNet_2Mod "
self.num_classes = classes
# Path-Top
self.conv1_Top = convBlock(1, 25)
self.conv2_Top = convBlock(75, 25, batchNorm=True)
self.conv3_Top = convBlock(150, 25, batchNorm=True)
self.conv4_Top = convBlock(225, 50, batchNorm=True)
self.conv5_Top = convBlock(375, 50, batchNorm=True)
self.conv6_Top = convBlock(525, 50, batchNorm=True)
self.conv7_Top = convBlock(675, 75, batchNorm=True)
self.conv8_Top = convBlock(900, 75, batchNorm=True)
self.conv9_Top = convBlock(1125, 75, batchNorm=True)
# Path-Middle
self.conv1_Middle = convBlock(1, 25)
self.conv2_Middle = convBlock(75, 25, batchNorm=True)
self.conv3_Middle = convBlock(150, 25, batchNorm=True)
self.conv4_Middle = convBlock(225, 50, batchNorm=True)
self.conv5_Middle = convBlock(375, 50, batchNorm=True)
self.conv6_Middle = convBlock(525, 50, batchNorm=True)
self.conv7_Middle = convBlock(675, 75, batchNorm=True)
self.conv8_Middle = convBlock(900, 75, batchNorm=True)
self.conv9_Middle = convBlock(1125, 75, batchNorm=True)
# Path-Bottom
self.conv1_Bottom = convBlock(1, 25)
self.conv2_Bottom = convBlock(75, 25, batchNorm=True)
self.conv3_Bottom = convBlock(150, 25, batchNorm=True)
self.conv4_Bottom = convBlock(225, 50, batchNorm=True)
self.conv5_Bottom = convBlock(375, 50, batchNorm=True)
self.conv6_Bottom = convBlock(525, 50, batchNorm=True)
self.conv7_Bottom = convBlock(675, 75, batchNorm=True)
self.conv8_Bottom = convBlock(900, 75, batchNorm=True)
self.conv9_Bottom = convBlock(1125, 75, batchNorm=True)
self.fully_1 = nn.Conv3d(4050, 400, kernel_size=1)
self.fully_2 = nn.Conv3d(400, 200, kernel_size=1)
self.fully_3 = nn.Conv3d(200, 150, kernel_size=1)
self.final = nn.Conv3d(150, classes, kernel_size=1)
def forward(self, input):
# ----- First layer ------ #
# get the 3 channels as 5D tensors
y1t = self.conv1_Top(input[:, 0:1, :, :, :])
y1m = self.conv1_Middle(input[:, 1:2, :, :, :])
y1b = self.conv1_Bottom(input[:, 2:3, :, :, :])
# ----- Second layer ------ #
# concatenate
y2t_i = torch.cat((y1t, y1m, y1b), dim=1)
y2m_i = torch.cat((y1m, y1t, y1b), dim=1)
y2b_i = torch.cat((y1b, y1t, y1m), dim=1)
y2t_o = self.conv2_Top(y2t_i)
y2m_o = self.conv2_Middle(y2m_i)
y2b_o = self.conv2_Bottom(y2b_i)
# ----- Third layer ------ #
y2t_i_cropped = croppCenter(y2t_i, y2t_o.shape)
y2m_i_cropped = croppCenter(y2m_i, y2t_o.shape)
y2b_i_cropped = croppCenter(y2b_i, y2t_o.shape)
# concatenate
y3t_i = torch.cat((y2t_i_cropped, y2t_o, y2m_o, y2b_o), dim=1)
y3m_i = torch.cat((y2m_i_cropped, y2m_o, y2t_o, y2b_o), dim=1)
y3b_i = torch.cat((y2b_i_cropped, y2b_o, y2t_o, y2m_o), dim=1)
y3t_o = self.conv3_Top(y3t_i)
y3m_o = self.conv3_Middle(y3m_i)
y3b_o = self.conv3_Bottom(y3b_i)
# ------ Fourth layer ----- #
y3t_i_cropped = croppCenter(y3t_i, y3t_o.shape)
y3m_i_cropped = croppCenter(y3m_i, y3t_o.shape)
y3b_i_cropped = croppCenter(y3b_i, y3t_o.shape)
# concatenate
y4t_i = torch.cat((y3t_i_cropped, y3t_o, y3m_o, y3b_o), dim=1)
y4m_i = torch.cat((y3m_i_cropped, y3m_o, y3t_o, y3b_o), dim=1)
y4b_i = torch.cat((y3b_i_cropped, y3b_o, y3t_o, y3m_o), dim=1)
y4t_o = self.conv4_Top(y4t_i)
y4m_o = self.conv4_Middle(y4m_i)
y4b_o = self.conv4_Bottom(y4b_i)
# ------ Fifth layer ----- #
y4t_i_cropped = croppCenter(y4t_i, y4t_o.shape)
y4m_i_cropped = croppCenter(y4m_i, y4t_o.shape)
y4b_i_cropped = croppCenter(y4b_i, y4t_o.shape)
# concatenate
y5t_i = torch.cat((y4t_i_cropped, y4t_o, y4m_o, y4b_o), dim=1)
y5m_i = torch.cat((y4m_i_cropped, y4m_o, y4t_o, y4b_o), dim=1)
y5b_i = torch.cat((y4b_i_cropped, y4b_o, y4t_o, y4m_o), dim=1)
y5t_o = self.conv5_Top(y5t_i)
y5m_o = self.conv5_Middle(y5m_i)
y5b_o = self.conv5_Bottom(y5b_i)
# ------ Sixth layer ----- #
y5t_i_cropped = croppCenter(y5t_i, y5t_o.shape)
y5m_i_cropped = croppCenter(y5m_i, y5t_o.shape)
y5b_i_cropped = croppCenter(y5b_i, y5t_o.shape)
# concatenate
y6t_i = torch.cat((y5t_i_cropped, y5t_o, y5m_o, y5b_o), dim=1)
y6m_i = torch.cat((y5m_i_cropped, y5m_o, y5t_o, y5b_o), dim=1)
y6b_i = torch.cat((y5b_i_cropped, y5b_o, y5t_o, y5m_o), dim=1)
y6t_o = self.conv6_Top(y6t_i)
y6m_o = self.conv6_Middle(y6m_i)
y6b_o = self.conv6_Bottom(y6b_i)
# ------ Seventh layer ----- #
y6t_i_cropped = croppCenter(y6t_i, y6t_o.shape)
y6m_i_cropped = croppCenter(y6m_i, y6t_o.shape)
y6b_i_cropped = croppCenter(y6b_i, y6t_o.shape)
# concatenate
y7t_i = torch.cat((y6t_i_cropped, y6t_o, y6m_o, y6b_o), dim=1)
y7m_i = torch.cat((y6m_i_cropped, y6m_o, y6t_o, y6b_o), dim=1)
y7b_i = torch.cat((y6b_i_cropped, y6b_o, y6t_o, y6m_o), dim=1)
y7t_o = self.conv7_Top(y7t_i)
y7m_o = self.conv7_Middle(y7m_i)
y7b_o = self.conv7_Bottom(y7b_i)
# ------ Eight layer ----- #
y7t_i_cropped = croppCenter(y7t_i, y7t_o.shape)
y7m_i_cropped = croppCenter(y7m_i, y7t_o.shape)
y7b_i_cropped = croppCenter(y7b_i, y7t_o.shape)
# concatenate
y8t_i = torch.cat((y7t_i_cropped, y7t_o, y7m_o, y7b_o), dim=1)
y8m_i = torch.cat((y7m_i_cropped, y7m_o, y7t_o, y7b_o), dim=1)
y8b_i = torch.cat((y7b_i_cropped, y7b_o, y7t_o, y7m_o), dim=1)
y8t_o = self.conv8_Top(y8t_i)
y8m_o = self.conv8_Middle(y8m_i)
y8b_o = self.conv8_Bottom(y8b_i)
# ------ Ninth layer ----- #
y8t_i_cropped = croppCenter(y8t_i, y8t_o.shape)
y8m_i_cropped = croppCenter(y8m_i, y8t_o.shape)
y8b_i_cropped = croppCenter(y8b_i, y8t_o.shape)
# concatenate
y9t_i = torch.cat((y8t_i_cropped, y8t_o, y8m_o, y8b_o), dim=1)
y9m_i = torch.cat((y8m_i_cropped, y8m_o, y8t_o, y8b_o), dim=1)
y9b_i = torch.cat((y8b_i_cropped, y8b_o, y8t_o, y8m_o), dim=1)
y9t_o = self.conv9_Top(y9t_i)
y9m_o = self.conv9_Middle(y9m_i)
y9b_o = self.conv9_Bottom(y9b_i)
##### Fully connected layers
y9t_i_cropped = croppCenter(y9t_i, y9t_o.shape)
y9m_i_cropped = croppCenter(y9m_i, y9t_o.shape)
y9b_i_cropped = croppCenter(y9b_i, y9t_o.shape)
outputPath_top = torch.cat((y9t_i_cropped, y9t_o, y9m_o, y9b_o), dim=1)
outputPath_middle = torch.cat((y9m_i_cropped, y9m_o, y9t_o, y9b_o), dim=1)
outputPath_bottom = torch.cat((y9b_i_cropped, y9b_o, y9t_o, y9m_o), dim=1)
inputFully = torch.cat((outputPath_top, outputPath_middle, outputPath_bottom), dim=1)
y = self.fully_1(inputFully)
y = self.fully_2(y)
y = self.fully_3(y)
return self.final(y)
def test(self, device='cpu'):
device = torch.device(device)
input_tensor = torch.rand(1, 3, 20, 20, 20)
ideal_out = torch.rand(1, self.num_classes, 20, 20, 20)
out = self.forward(input_tensor)
# assert ideal_out.shape == out.shape
summary(self, (3, 20, 20, 20))  # patch must be at least 19^3 to survive nine unpadded 3x3x3 convs
# torchsummaryX.summary(self, input_tensor.to(device))
print("HyperDenseNet test is complete!!!", out.shape)
# m = HyperDenseNet(1,4)
# m.test()
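# Hedged smoke-test sketch, mirroring the commented-out calls above; the 22^3 patch
# size matches HyperDenseNet_2Mod.test(), and nine unpadded 3x3x3 convolutions per
# path shrink it to 4^3.
if __name__ == "__main__":
    net = HyperDenseNet_2Mod(in_channels=2, classes=4)
    with torch.no_grad():
        out = net(torch.rand(1, 2, 22, 22, 22))
    print(out.shape)  # expected: torch.Size([1, 4, 4, 4, 4])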
|
python
|
from sht3x_raspberry_exporter.sht3x import _crc8
def test_crc8():
assert _crc8(0xBE, 0xEF, 0x92)
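# Hedged reference sketch, not part of sht3x_raspberry_exporter: the SHT3x datasheet
# specifies CRC-8 with polynomial 0x31, initial value 0xFF, no reflection and no
# final XOR, and gives 0xBE 0xEF -> 0x92 as its example vector. The helper below is
# an assumption about how _crc8 validates that checksum.
def _crc8_reference(msb, lsb):
    crc = 0xFF
    for byte in (msb, lsb):
        crc ^= byte
        for _ in range(8):
            crc = ((crc << 1) ^ 0x31) & 0xFF if crc & 0x80 else (crc << 1) & 0xFF
    return crc

def test_crc8_reference_vector():
    assert _crc8_reference(0xBE, 0xEF) == 0x92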
|
python
|
import choraconfig, os.path
tool = choraconfig.clone_tool("chora")
tool["displayname"] = "CHORA:sds"
tool["shortname"] = "chora:sds"
tool["cmd"] = [choraconfig.parent(2,choraconfig.testroot) + "/duet.native","-chora-debug-recs","-chora-summaries","-chora-debug-squeeze","-chora","{filename}"]
|
python
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from ...core.loop.candidate_point_calculators import RandomSampling
from ...core.loop.loop_state import create_loop_state
from ...core.loop.model_updaters import NoopModelUpdater
from ...core.loop.outer_loop import OuterLoop
from ...core.parameter_space import ParameterSpace
class RandomSearch(OuterLoop):
def __init__(
self, space: ParameterSpace, x_init: np.ndarray = None, y_init: np.ndarray = None, cost_init: np.ndarray = None
):
"""
Simple loop to perform random search where in each iteration points are sampled uniformly at random
over the input space.
:param space: Input space where the optimization is carried out.
:param x_init: 2d numpy array of shape (no. points x no. input features) of initial X data
:param y_init: 2d numpy array of shape (no. points x no. targets) of initial Y data
:param cost_init: 2d numpy array of shape (no. points x no. targets) of initial cost of each function evaluation
"""
model_updaters = NoopModelUpdater()
candidate_point_calculator = RandomSampling(parameter_space=space)
if x_init is not None and y_init is not None:
loop_state = create_loop_state(x_init, y_init, cost=cost_init)
else:
loop_state = None
super().__init__(candidate_point_calculator, model_updaters, loop_state=loop_state)
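# Hedged usage sketch: assumes this module is importable as part of the emukit
# package and that ParameterSpace/ContinuousParameter live in emukit.core; the
# objective function and iteration count are illustrative only.
if __name__ == "__main__":
    from emukit.core import ContinuousParameter, ParameterSpace

    def objective(x: np.ndarray) -> np.ndarray:
        # toy quadratic; emukit user functions must return a 2d (n_points x n_outputs) array
        return np.sum(np.square(x), axis=1, keepdims=True)

    space = ParameterSpace([ContinuousParameter("x1", -2, 2), ContinuousParameter("x2", -2, 2)])
    loop = RandomSearch(space)
    loop.run_loop(objective, 20)  # 20 uniformly random evaluations
    print(loop.loop_state.X.shape, loop.loop_state.Y.shape)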
|