| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content |
|---|---|---|---|---|
| stringlengths 4-245 | stringlengths 7-115 | int64 101-368k | stringlengths 2-8 | stringlengths 6-1.03M |
| examples/gto/10-atom_info.py | umamibeef/pyscf | 501 | 76529 |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Access the molecule geometry.

Mole.natm is the total number of atoms. It is initialized in the
Mole.build() function.
'''
from pyscf import gto
mol = gto.M(
atom = '''
O 0.000000 0.000000 0.117790
H 0.000000 0.755453 -0.471161
H 0.000000 -0.755453 -0.471161''',
basis = 'ccpvdz',
)
for i in range(mol.natm):
print('%s %s charge %f xyz %s' % (mol.atom_symbol(i),
mol.atom_pure_symbol(i),
mol.atom_charge(i),
mol.atom_coord(i)))
print("Atoms' charges in a vector\n%s" % mol.atom_charges())
print("Atoms' coordinates in an array\n%s" % mol.atom_coords())
| convert.py | andreasjansson/VQMIVC | 168 | 76534 |
import hydra
import hydra.utils as utils
from pathlib import Path
import torch
import numpy as np
from tqdm import tqdm
import soundfile as sf
from model_encoder import Encoder, Encoder_lf0
from model_decoder import Decoder_ac
from model_encoder import SpeakerEncoder as Encoder_spk
import os
import random
from glob import glob
import subprocess
from spectrogram import logmelspectrogram
import kaldiio
import resampy
import pyworld as pw
def select_wavs(paths, min_dur=2, max_dur=8):
    # Keep only utterances whose duration is within [min_dur, max_dur] seconds.
    pp = []
    for p in paths:
        x, fs = sf.read(p)
        if min_dur <= len(x) / fs <= max_dur:
            pp.append(p)
    return pp
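# Usage sketch (hypothetical paths): keep clips between 2 and 8 seconds long.
# wav_paths = select_wavs(glob('/data/wavs/*.wav'), min_dur=2, max_dur=8)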
def extract_logmel(wav_path, mean, std, sr=16000):
    # Load the audio and resample to the target rate if necessary.
    wav, fs = sf.read(wav_path)
    if fs != sr:
        wav = resampy.resample(wav, fs, sr, axis=0)
        fs = sr
    assert fs == 16000
    # Normalize the peak amplitude to at most 1.0.
    peak = np.abs(wav).max()
    if peak > 1.0:
        wav /= peak
    # 80-band log-mel spectrogram: 25 ms window, 10 ms hop at 16 kHz.
    mel = logmelspectrogram(
        x=wav,
        fs=fs,
        n_mels=80,
        n_fft=400,
        n_shift=160,
        win_length=400,
        window='hann',
        fmin=80,
        fmax=7600,
    )
    # Normalize with the precomputed corpus mel statistics.
    mel = (mel - mean) / (std + 1e-8)
    tlen = mel.shape[0]
    # Extract F0 with WORLD (DIO + StoneMask) at the same 10 ms frame period.
    frame_period = 160 / fs * 1000
    f0, timeaxis = pw.dio(wav.astype('float64'), fs, frame_period=frame_period)
    f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, fs)
    f0 = f0[:tlen].reshape(-1).astype('float32')
    # Log-F0, standardized over voiced frames only (f0 == 0 marks unvoiced).
    nonzeros_indices = np.nonzero(f0)
    lf0 = f0.copy()
    lf0[nonzeros_indices] = np.log(f0[nonzeros_indices])  # for f0 (Hz), lf0 > 0 when f0 != 0
    mean, std = np.mean(lf0[nonzeros_indices]), np.std(lf0[nonzeros_indices])
    lf0[nonzeros_indices] = (lf0[nonzeros_indices] - mean) / (std + 1e-8)
    return mel, lf0
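# Shape note, following from the parameters above: for 16 kHz input, mel has
# shape (T, 80) and lf0 has shape (T,), aligned frame by frame at a 10 ms hop.
# Usage sketch (hypothetical path):
#   mel, lf0 = extract_logmel('sample.wav', mean, std)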
@hydra.main(config_path="config/convert.yaml")
def convert(cfg):
src_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p225/*mic1.flac') # modified to absolute wavs path, can select any unseen speakers
src_wav_paths = select_wavs(src_wav_paths)
tar1_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p231/*mic1.flac') # can select any unseen speakers
tar2_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p243/*mic1.flac') # can select any unseen speakers
# tar1_wav_paths = select_wavs(tar1_wav_paths)
# tar2_wav_paths = select_wavs(tar2_wav_paths)
tar1_wav_paths = [sorted(tar1_wav_paths)[0]]
tar2_wav_paths = [sorted(tar2_wav_paths)[0]]
print('len(src):', len(src_wav_paths), 'len(tar1):', len(tar1_wav_paths), 'len(tar2):', len(tar2_wav_paths))
    # Name the output directory after the checkpoint's parent directories
    # and the training-step count parsed from the checkpoint filename.
    tmp = cfg.checkpoint.split('/')
    steps = tmp[-1].split('-')[-1].split('.')[0]
    out_dir = f'test/{tmp[-3]}-{tmp[-2]}-{steps}'
    out_dir = Path(utils.to_absolute_path(out_dir))
    out_dir.mkdir(exist_ok=True, parents=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder = Encoder(**cfg.model.encoder)
encoder_lf0 = Encoder_lf0()
encoder_spk = Encoder_spk()
decoder = Decoder_ac(dim_neck=64)
encoder.to(device)
encoder_lf0.to(device)
encoder_spk.to(device)
decoder.to(device)
print("Load checkpoint from: {}:".format(cfg.checkpoint))
checkpoint_path = utils.to_absolute_path(cfg.checkpoint)
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
encoder.load_state_dict(checkpoint["encoder"])
encoder_spk.load_state_dict(checkpoint["encoder_spk"])
decoder.load_state_dict(checkpoint["decoder"])
encoder.eval()
encoder_spk.eval()
decoder.eval()
mel_stats = np.load('./data/mel_stats.npy')
mean = mel_stats[0]
std = mel_stats[1]
feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=str(out_dir)+'/feats.1'))
    for i, src_wav_path in tqdm(enumerate(src_wav_paths, 1)):
        if i > 10:  # convert only the first 10 source utterances
            break
mel, lf0 = extract_logmel(src_wav_path, mean, std)
        # Alternate the reference speaker between the two target sets.
        if i % 2 == 1:
            ref_wav_path = random.choice(tar2_wav_paths)
            tar = 'tarMale_'
        else:
            ref_wav_path = random.choice(tar1_wav_paths)
            tar = 'tarFemale_'
ref_mel, _ = extract_logmel(ref_wav_path, mean, std)
mel = torch.FloatTensor(mel.T).unsqueeze(0).to(device)
lf0 = torch.FloatTensor(lf0).unsqueeze(0).to(device)
ref_mel = torch.FloatTensor(ref_mel.T).unsqueeze(0).to(device)
out_filename = os.path.basename(src_wav_path).split('.')[0]
with torch.no_grad():
z, _, _, _ = encoder.encode(mel)
lf0_embs = encoder_lf0(lf0)
spk_embs = encoder_spk(ref_mel)
output = decoder(z, lf0_embs, spk_embs)
logmel = output.squeeze(0).cpu().numpy()
feat_writer[out_filename] = logmel
feat_writer[out_filename+'_src'] = mel.squeeze(0).cpu().numpy().T
feat_writer[out_filename+'_ref'] = ref_mel.squeeze(0).cpu().numpy().T
subprocess.call(['cp', src_wav_path, out_dir])
feat_writer.close()
print('synthesize waveform...')
cmd = ['parallel-wavegan-decode', '--checkpoint', \
'/vocoder/checkpoint-3000000steps.pkl', \
'--feats-scp', f'{str(out_dir)}/feats.1.scp', '--outdir', str(out_dir)]
subprocess.call(cmd)
if __name__ == "__main__":
convert()
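# Run sketch (assumes a Hydra config at config/convert.yaml providing a
# `checkpoint` entry, plus the dataset and vocoder paths hard-coded above):
#   python convert.py checkpoint=<path/to/model.ckpt-XXXX.pt>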
| release/stubs.min/System/Windows/Forms/__init___parts/ListView.py | htlcnn/ironpython-stubs | 182 | 76538 |
class ListView(Control,IComponent,IDisposable,IOleControl,IOleObject,IOleInPlaceObject,IOleInPlaceActiveObject,IOleWindow,IViewObject,IViewObject2,IPersist,IPersistStreamInit,IPersistPropertyBag,IPersistStorage,IQuickActivate,ISupportOleDropSource,IDropTarget,ISynchronizeInvoke,IWin32Window,IArrangedElement,IBindableComponent):
"""
Represents a Windows list view control,which displays a collection of items that can be displayed using one of four different views.
ListView()
"""
def AccessibilityNotifyClients(self,*args):
"""
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,objectID: int,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control .
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
objectID: The identifier of the System.Windows.Forms.AccessibleObject.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control.
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
"""
pass
def ArrangeIcons(self,value=None):
"""
ArrangeIcons(self: ListView)
Arranges items in the control when they are displayed as icons based on the value of the
System.Windows.Forms.ListView.Alignment property.
ArrangeIcons(self: ListView,value: ListViewAlignment)
Arranges items in the control when they are displayed as icons with a specified alignment
setting.
value: One of the System.Windows.Forms.ListViewAlignment values.
"""
pass
def AutoResizeColumn(self,columnIndex,headerAutoResize):
"""
AutoResizeColumn(self: ListView,columnIndex: int,headerAutoResize: ColumnHeaderAutoResizeStyle)
Resizes the width of the given column as indicated by the resize style.
columnIndex: The zero-based index of the column to resize.
headerAutoResize: One of the System.Windows.Forms.ColumnHeaderAutoResizeStyle values.
"""
pass
def AutoResizeColumns(self,headerAutoResize):
"""
AutoResizeColumns(self: ListView,headerAutoResize: ColumnHeaderAutoResizeStyle)
Resizes the width of the columns as indicated by the resize style.
headerAutoResize: One of the System.Windows.Forms.ColumnHeaderAutoResizeStyle values.
"""
pass
def BeginUpdate(self):
"""
BeginUpdate(self: ListView)
Prevents the control from drawing until the System.Windows.Forms.ListView.EndUpdate method is
called.
"""
pass
def Clear(self):
"""
Clear(self: ListView)
Removes all items and columns from the control.
"""
pass
def CreateAccessibilityInstance(self,*args):
"""
CreateAccessibilityInstance(self: Control) -> AccessibleObject
Creates a new accessibility object for the control.
Returns: A new System.Windows.Forms.AccessibleObject for the control.
"""
pass
def CreateControlsInstance(self,*args):
"""
CreateControlsInstance(self: Control) -> ControlCollection
Creates a new instance of the control collection for the control.
Returns: A new instance of System.Windows.Forms.Control.ControlCollection assigned to the control.
"""
pass
def CreateHandle(self,*args):
""" CreateHandle(self: ListView) """
pass
def DefWndProc(self,*args):
"""
DefWndProc(self: Control,m: Message) -> Message
Sends the specified message to the default window procedure.
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def DestroyHandle(self,*args):
"""
DestroyHandle(self: Control)
Destroys the handle associated with the control.
"""
pass
def Dispose(self):
"""
Dispose(self: ListView,disposing: bool)
Releases the unmanaged resources used by the System.Windows.Forms.ListView and optionally
releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def EndUpdate(self):
"""
EndUpdate(self: ListView)
Resumes drawing of the list view control after drawing is suspended by the
System.Windows.Forms.ListView.BeginUpdate method.
"""
pass
def EnsureVisible(self,index):
"""
EnsureVisible(self: ListView,index: int)
Ensures that the specified item is visible within the control,scrolling the contents of the
control if necessary.
index: The zero-based index of the item to scroll into view.
"""
pass
def FindItemWithText(self,text,includeSubItemsInSearch=None,startIndex=None,isPrefixSearch=None):
"""
FindItemWithText(self: ListView,text: str,includeSubItemsInSearch: bool,startIndex: int,isPrefixSearch: bool) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem or
System.Windows.Forms.ListViewItem.ListViewSubItem,if indicated,that begins with the specified
text value. The search starts at the specified index.
text: The text to search for.
includeSubItemsInSearch: true to include subitems in the search; otherwise,false.
startIndex: The index of the item at which to start the search.
isPrefixSearch: true to allow partial matches; otherwise,false.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
FindItemWithText(self: ListView,text: str,includeSubItemsInSearch: bool,startIndex: int) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem or
System.Windows.Forms.ListViewItem.ListViewSubItem,if indicated,that begins with the specified
text value. The search starts at the specified index.
text: The text to search for.
includeSubItemsInSearch: true to include subitems in the search; otherwise,false.
startIndex: The index of the item at which to start the search.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
FindItemWithText(self: ListView,text: str) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem that begins with the specified text value.
text: The text to search for.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
"""
pass
def FindNearestItem(self,*__args):
"""
FindNearestItem(self: ListView,searchDirection: SearchDirectionHint,x: int,y: int) -> ListViewItem
Finds the next item from the given x- and y-coordinates,searching in the specified direction.
searchDirection: One of the System.Windows.Forms.SearchDirectionHint values.
x: The x-coordinate for the point at which to begin searching.
y: The y-coordinate for the point at which to begin searching.
Returns: The System.Windows.Forms.ListViewItem that is closest to the given coordinates,searching in the
specified direction.
FindNearestItem(self: ListView,dir: SearchDirectionHint,point: Point) -> ListViewItem
Finds the next item from the given point,searching in the specified direction
dir: One of the System.Windows.Forms.SearchDirectionHint values.
point: The point at which to begin searching.
Returns: The System.Windows.Forms.ListViewItem that is closest to the given point,searching in the
specified direction.
"""
pass
def GetAccessibilityObjectById(self,*args):
"""
GetAccessibilityObjectById(self: Control,objectId: int) -> AccessibleObject
Retrieves the specified System.Windows.Forms.AccessibleObject.
objectId: An Int32 that identifies the System.Windows.Forms.AccessibleObject to retrieve.
Returns: An System.Windows.Forms.AccessibleObject.
"""
pass
def GetAutoSizeMode(self,*args):
"""
GetAutoSizeMode(self: Control) -> AutoSizeMode
Retrieves a value indicating how a control will behave when its
System.Windows.Forms.Control.AutoSize property is enabled.
Returns: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def GetItemAt(self,x,y):
"""
GetItemAt(self: ListView,x: int,y: int) -> ListViewItem
Retrieves the item at the specified location.
x: The x-coordinate of the location to search for an item (expressed in client coordinates).
y: The y-coordinate of the location to search for an item (expressed in client coordinates).
Returns: A System.Windows.Forms.ListViewItem that represents the item at the specified position. If there
is no item at the specified location,the method returns null.
"""
pass
def GetItemRect(self,index,portion=None):
"""
GetItemRect(self: ListView,index: int,portion: ItemBoundsPortion) -> Rectangle
Retrieves the specified portion of the bounding rectangle for a specific item within the list
view control.
index: The zero-based index of the item within the System.Windows.Forms.ListView.ListViewItemCollection
whose bounding rectangle you want to return.
portion: One of the System.Windows.Forms.ItemBoundsPortion values that represents a portion of the
System.Windows.Forms.ListViewItem for which to retrieve the bounding rectangle.
Returns: A System.Drawing.Rectangle that represents the bounding rectangle for the specified portion of
the specified System.Windows.Forms.ListViewItem.
GetItemRect(self: ListView,index: int) -> Rectangle
Retrieves the bounding rectangle for a specific item within the list view control.
index: The zero-based index of the item within the System.Windows.Forms.ListView.ListViewItemCollection
whose bounding rectangle you want to return.
Returns: A System.Drawing.Rectangle that represents the bounding rectangle of the specified
System.Windows.Forms.ListViewItem.
"""
pass
def GetScaledBounds(self,*args):
"""
GetScaledBounds(self: Control,bounds: Rectangle,factor: SizeF,specified: BoundsSpecified) -> Rectangle
Retrieves the bounds within which the control is scaled.
bounds: A System.Drawing.Rectangle that specifies the area for which to retrieve the display bounds.
factor: The height and width of the control's bounds.
specified: One of the values of System.Windows.Forms.BoundsSpecified that specifies the bounds of the
control to use when defining its size and position.
Returns: A System.Drawing.Rectangle representing the bounds within which the control is scaled.
"""
pass
def GetService(self,*args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def GetStyle(self,*args):
"""
GetStyle(self: Control,flag: ControlStyles) -> bool
Retrieves the value of the specified control style bit for the control.
flag: The System.Windows.Forms.ControlStyles bit to return the value from.
Returns: true if the specified control style bit is set to true; otherwise,false.
"""
pass
def GetTopLevel(self,*args):
"""
GetTopLevel(self: Control) -> bool
Determines if the control is a top-level control.
Returns: true if the control is a top-level control; otherwise,false.
"""
pass
def HitTest(self,*__args):
"""
HitTest(self: ListView,x: int,y: int) -> ListViewHitTestInfo
Provides item information,given x- and y-coordinates.
x: The x-coordinate at which to retrieve the item information. The coordinate is relative to the
upper-left corner of the control.
y: The y-coordinate at which to retrieve the item information. The coordinate is relative to the
upper-left corner of the control.
Returns: A System.Windows.Forms.ListViewHitTestInfo.
HitTest(self: ListView,point: Point) -> ListViewHitTestInfo
Provides item information,given a point.
point: The System.Drawing.Point at which to retrieve the item information. The coordinates are relative
to the upper-left corner of the control.
Returns: A System.Windows.Forms.ListViewHitTestInfo.
"""
pass
def InitLayout(self,*args):
"""
InitLayout(self: Control)
Called after the control has been added to another container.
"""
pass
def InvokeGotFocus(self,*args):
"""
InvokeGotFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeLostFocus(self,*args):
"""
InvokeLostFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeOnClick(self,*args):
"""
InvokeOnClick(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Click event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokePaint(self,*args):
"""
InvokePaint(self: Control,c: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def InvokePaintBackground(self,*args):
"""
InvokePaintBackground(self: Control,c: Control,e: PaintEventArgs)
Raises the PaintBackground event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def IsInputChar(self,*args):
"""
IsInputChar(self: Control,charCode: Char) -> bool
Determines if a character is an input character that the control recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the control and not preprocessed; otherwise,
false.
"""
pass
def IsInputKey(self,*args):
"""
IsInputKey(self: ListView,keyData: Keys) -> bool
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""
pass
def MemberwiseClone(self,*args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def NotifyInvalidate(self,*args):
"""
NotifyInvalidate(self: Control,invalidatedArea: Rectangle)
Raises the System.Windows.Forms.Control.Invalidated event with a specified region of the control
to invalidate.
invalidatedArea: A System.Drawing.Rectangle representing the area to invalidate.
"""
pass
def OnAfterLabelEdit(self,*args):
"""
OnAfterLabelEdit(self: ListView,e: LabelEditEventArgs)
Raises the System.Windows.Forms.ListView.AfterLabelEdit event.
e: A System.Windows.Forms.LabelEditEventArgs that contains the event data.
"""
pass
def OnAutoSizeChanged(self,*args):
"""
OnAutoSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.AutoSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackColorChanged(self,*args):
"""
OnBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageChanged(self,*args):
"""
OnBackgroundImageChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageLayoutChanged(self,*args):
"""
OnBackgroundImageLayoutChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBeforeLabelEdit(self,*args):
"""
OnBeforeLabelEdit(self: ListView,e: LabelEditEventArgs)
Raises the System.Windows.Forms.ListView.BeforeLabelEdit event.
e: A System.Windows.Forms.LabelEditEventArgs that contains the event data.
"""
pass
def OnBindingContextChanged(self,*args):
"""
OnBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnCacheVirtualItems(self,*args):
"""
OnCacheVirtualItems(self: ListView,e: CacheVirtualItemsEventArgs)
Raises the System.Windows.Forms.ListView.CacheVirtualItems event.
e: A System.Windows.Forms.CacheVirtualItemsEventArgs that contains the event data.
"""
pass
def OnCausesValidationChanged(self,*args):
"""
OnCausesValidationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CausesValidationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnChangeUICues(self,*args):
"""
OnChangeUICues(self: Control,e: UICuesEventArgs)
Raises the System.Windows.Forms.Control.ChangeUICues event.
e: A System.Windows.Forms.UICuesEventArgs that contains the event data.
"""
pass
def OnClick(self,*args):
"""
OnClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnClientSizeChanged(self,*args):
"""
OnClientSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ClientSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnColumnClick(self,*args):
"""
OnColumnClick(self: ListView,e: ColumnClickEventArgs)
Raises the System.Windows.Forms.ListView.ColumnClick event.
e: A System.Windows.Forms.ColumnClickEventArgs that contains the event data.
"""
pass
def OnColumnReordered(self,*args):
"""
OnColumnReordered(self: ListView,e: ColumnReorderedEventArgs)
Raises the System.Windows.Forms.ListView.ColumnReordered event.
e: The System.Windows.Forms.ColumnReorderedEventArgs that contains the event data.
"""
pass
def OnColumnWidthChanged(self,*args):
"""
OnColumnWidthChanged(self: ListView,e: ColumnWidthChangedEventArgs)
Raises the System.Windows.Forms.ListView.ColumnWidthChanged event.
e: A System.Windows.Forms.ColumnWidthChangedEventArgs that contains the event data.
"""
pass
def OnColumnWidthChanging(self,*args):
"""
OnColumnWidthChanging(self: ListView,e: ColumnWidthChangingEventArgs)
Raises the System.Windows.Forms.ListView.ColumnWidthChanging event.
e: A System.Windows.Forms.ColumnWidthChangingEventArgs that contains the event data.
"""
pass
def OnContextMenuChanged(self,*args):
"""
OnContextMenuChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnContextMenuStripChanged(self,*args):
"""
OnContextMenuStripChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuStripChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnControlAdded(self,*args):
"""
OnControlAdded(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlAdded event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnControlRemoved(self,*args):
"""
OnControlRemoved(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlRemoved event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnCreateControl(self,*args):
"""
OnCreateControl(self: Control)
Raises the System.Windows.Forms.Control.CreateControl method.
"""
pass
def OnCursorChanged(self,*args):
"""
OnCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDockChanged(self,*args):
"""
OnDockChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DockChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDoubleClick(self,*args):
"""
OnDoubleClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDpiChangedAfterParent(self,*args):
""" OnDpiChangedAfterParent(self: Control,e: EventArgs) """
pass
def OnDpiChangedBeforeParent(self,*args):
""" OnDpiChangedBeforeParent(self: Control,e: EventArgs) """
pass
def OnDragDrop(self,*args):
"""
OnDragDrop(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragDrop event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragEnter(self,*args):
"""
OnDragEnter(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragEnter event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self,*args):
"""
OnDragLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DragLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragOver(self,*args):
"""
OnDragOver(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragOver event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDrawColumnHeader(self,*args):
"""
OnDrawColumnHeader(self: ListView,e: DrawListViewColumnHeaderEventArgs)
Raises the System.Windows.Forms.ListView.DrawColumnHeader event.
e: A System.Windows.Forms.DrawListViewColumnHeaderEventArgs that contains the event data.
"""
pass
def OnDrawItem(self,*args):
"""
OnDrawItem(self: ListView,e: DrawListViewItemEventArgs)
Raises the System.Windows.Forms.ListView.DrawItem event.
e: A System.Windows.Forms.DrawListViewItemEventArgs that contains the event data.
"""
pass
def OnDrawSubItem(self,*args):
"""
OnDrawSubItem(self: ListView,e: DrawListViewSubItemEventArgs)
Raises the System.Windows.Forms.ListView.DrawSubItem event.
e: A System.Windows.Forms.DrawListViewSubItemEventArgs that contains the event data.
"""
pass
def OnEnabledChanged(self,*args):
"""
OnEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnEnter(self,*args):
"""
OnEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Enter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFontChanged(self,*args):
"""
OnFontChanged(self: ListView,e: EventArgs)
Raises the FontChanged event.
e: The System.EventArgs that contains the event data.
"""
pass
def OnForeColorChanged(self,*args):
"""
OnForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self,*args):
"""
OnGiveFeedback(self: Control,gfbevent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.Control.GiveFeedback event.
gfbevent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self,*args):
"""
OnGotFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleCreated(self,*args):
"""
OnHandleCreated(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleDestroyed(self,*args):
"""
OnHandleDestroyed(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnHelpRequested(self,*args):
"""
OnHelpRequested(self: Control,hevent: HelpEventArgs)
Raises the System.Windows.Forms.Control.HelpRequested event.
hevent: A System.Windows.Forms.HelpEventArgs that contains the event data.
"""
pass
def OnImeModeChanged(self,*args):
"""
OnImeModeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ImeModeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnInvalidated(self,*args):
"""
OnInvalidated(self: Control,e: InvalidateEventArgs)
Raises the System.Windows.Forms.Control.Invalidated event.
e: An System.Windows.Forms.InvalidateEventArgs that contains the event data.
"""
pass
def OnItemActivate(self,*args):
"""
OnItemActivate(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.ItemActivate event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnItemCheck(self,*args):
"""
OnItemCheck(self: ListView,ice: ItemCheckEventArgs)
Raises the System.Windows.Forms.ListView.ItemCheck event.
ice: An System.Windows.Forms.ItemCheckEventArgs that contains the event data.
"""
pass
def OnItemChecked(self,*args):
"""
OnItemChecked(self: ListView,e: ItemCheckedEventArgs)
Raises the System.Windows.Forms.ListView.ItemChecked event.
e: An System.Windows.Forms.ItemCheckedEventArgs that contains the event data.
"""
pass
def OnItemDrag(self,*args):
"""
OnItemDrag(self: ListView,e: ItemDragEventArgs)
Raises the System.Windows.Forms.ListView.ItemDrag event.
e: An System.Windows.Forms.ItemDragEventArgs that contains the event data.
"""
pass
def OnItemMouseHover(self,*args):
"""
OnItemMouseHover(self: ListView,e: ListViewItemMouseHoverEventArgs)
Raises the System.Windows.Forms.ListView.ItemMouseHover event.
e: A System.Windows.Forms.ListViewItemMouseHoverEventArgs that contains the event data.
"""
pass
def OnItemSelectionChanged(self,*args):
"""
OnItemSelectionChanged(self: ListView,e: ListViewItemSelectionChangedEventArgs)
Raises the System.Windows.Forms.ListView.ItemSelectionChanged event.
e: A System.Windows.Forms.ListViewItemSelectionChangedEventArgs that contains the event data.
"""
pass
def OnKeyDown(self,*args):
"""
OnKeyDown(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyDown event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnKeyPress(self,*args):
"""
OnKeyPress(self: Control,e: KeyPressEventArgs)
Raises the System.Windows.Forms.Control.KeyPress event.
e: A System.Windows.Forms.KeyPressEventArgs that contains the event data.
"""
pass
def OnKeyUp(self,*args):
"""
OnKeyUp(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnLayout(self,*args):
"""
OnLayout(self: Control,levent: LayoutEventArgs)
Raises the System.Windows.Forms.Control.Layout event.
levent: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""
pass
def OnLeave(self,*args):
"""
OnLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Leave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLocationChanged(self,*args):
"""
OnLocationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLostFocus(self,*args):
"""
OnLostFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMarginChanged(self,*args):
"""
OnMarginChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MarginChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnMouseCaptureChanged(self,*args):
"""
OnMouseCaptureChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseCaptureChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseClick(self,*args):
"""
OnMouseClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDoubleClick(self,*args):
"""
OnMouseDoubleClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDoubleClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDown(self,*args):
"""
OnMouseDown(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseEnter(self,*args):
"""
OnMouseEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseHover(self,*args):
"""
OnMouseHover(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseHover event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseLeave(self,*args):
"""
OnMouseLeave(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseMove(self,*args):
"""
OnMouseMove(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseMove event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseUp(self,*args):
"""
OnMouseUp(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseWheel(self,*args):
"""
OnMouseWheel(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseWheel event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMove(self,*args):
"""
OnMove(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Move event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnNotifyMessage(self,*args):
"""
OnNotifyMessage(self: Control,m: Message)
Notifies the control of Windows messages.
m: A System.Windows.Forms.Message that represents the Windows message.
"""
pass
def OnPaddingChanged(self,*args):
"""
OnPaddingChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.PaddingChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnPaint(self,*args):
"""
OnPaint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnPaintBackground(self,*args):
"""
OnPaintBackground(self: Control,pevent: PaintEventArgs)
Paints the background of the control.
pevent: A System.Windows.Forms.PaintEventArgs that contains information about the control to paint.
"""
pass
def OnParentBackColorChanged(self,*args):
"""
OnParentBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event when the
System.Windows.Forms.Control.BackColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBackgroundImageChanged(self,*args):
"""
OnParentBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event when the
System.Windows.Forms.Control.BackgroundImage property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBindingContextChanged(self,*args):
"""
OnParentBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event when the
System.Windows.Forms.Control.BindingContext property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentChanged(self,*args):
"""
OnParentChanged(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentCursorChanged(self,*args):
"""
OnParentCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentEnabledChanged(self,*args):
"""
OnParentEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event when the
System.Windows.Forms.Control.Enabled property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentFontChanged(self,*args):
"""
OnParentFontChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the
System.Windows.Forms.Control.Font property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentForeColorChanged(self,*args):
"""
OnParentForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event when the
System.Windows.Forms.Control.ForeColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRightToLeftChanged(self,*args):
"""
OnParentRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event when the
System.Windows.Forms.Control.RightToLeft property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentVisibleChanged(self,*args):
"""
OnParentVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event when the
System.Windows.Forms.Control.Visible property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnPreviewKeyDown(self,*args):
"""
OnPreviewKeyDown(self: Control,e: PreviewKeyDownEventArgs)
Raises the System.Windows.Forms.Control.PreviewKeyDown event.
e: A System.Windows.Forms.PreviewKeyDownEventArgs that contains the event data.
"""
pass
def OnPrint(self,*args):
"""
OnPrint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnQueryContinueDrag(self,*args):
"""
OnQueryContinueDrag(self: Control,qcdevent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.Control.QueryContinueDrag event.
qcdevent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnRegionChanged(self,*args):
"""
OnRegionChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RegionChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnResize(self,*args):
"""
OnResize(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnRetrieveVirtualItem(self,*args):
"""
OnRetrieveVirtualItem(self: ListView,e: RetrieveVirtualItemEventArgs)
Raises the System.Windows.Forms.ListView.RetrieveVirtualItem event.
e: A System.Windows.Forms.RetrieveVirtualItemEventArgs that contains the event data.
"""
pass
def OnRightToLeftChanged(self,*args):
"""
OnRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRightToLeftLayoutChanged(self,*args):
"""
OnRightToLeftLayoutChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.RightToLeftLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSearchForVirtualItem(self,*args):
"""
OnSearchForVirtualItem(self: ListView,e: SearchForVirtualItemEventArgs)
Raises the System.Windows.Forms.ListView.SearchForVirtualItem event.
e: A System.Windows.Forms.SearchForVirtualItemEventArgs that contains the event data.
"""
pass
def OnSelectedIndexChanged(self,*args):
"""
OnSelectedIndexChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.SelectedIndexChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSizeChanged(self,*args):
"""
OnSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnStyleChanged(self,*args):
"""
OnStyleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.StyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSystemColorsChanged(self,*args):
"""
OnSystemColorsChanged(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabIndexChanged(self,*args):
"""
OnTabIndexChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabIndexChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabStopChanged(self,*args):
"""
OnTabStopChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabStopChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTextChanged(self,*args):
"""
OnTextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidated(self,*args):
"""
OnValidated(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Validated event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidating(self,*args):
"""
OnValidating(self: Control,e: CancelEventArgs)
Raises the System.Windows.Forms.Control.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""
pass
def OnVirtualItemsSelectionRangeChanged(self,*args):
"""
OnVirtualItemsSelectionRangeChanged(self: ListView,e: ListViewVirtualItemsSelectionRangeChangedEventArgs)
Raises the System.Windows.Forms.ListView.VirtualItemsSelectionRangeChanged event.
e: A System.Windows.Forms.ListViewVirtualItemsSelectionRangeChangedEventArgs that contains the
event data.
"""
pass
def OnVisibleChanged(self,*args):
"""
OnVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def ProcessCmdKey(self,*args):
"""
ProcessCmdKey(self: Control,msg: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
msg: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogChar(self,*args):
"""
ProcessDialogChar(self: Control,charCode: Char) -> bool
Processes a dialog character.
charCode: The character to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogKey(self,*args):
"""
ProcessDialogKey(self: Control,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the control; otherwise,false.
"""
pass
def ProcessKeyEventArgs(self,*args):
"""
ProcessKeyEventArgs(self: Control,m: Message) -> (bool,Message)
Processes a key message and generates the appropriate control events.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyMessage(self,*args):
"""
ProcessKeyMessage(self: Control,m: Message) -> (bool,Message)
Processes a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyPreview(self,*args):
"""
ProcessKeyPreview(self: Control,m: Message) -> (bool,Message)
Previews a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessMnemonic(self,*args):
"""
ProcessMnemonic(self: Control,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,false.
"""
pass
def RaiseDragEvent(self,*args):
"""
RaiseDragEvent(self: Control,key: object,e: DragEventArgs)
Raises the appropriate drag event.
key: The event to raise.
e: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def RaiseKeyEvent(self,*args):
"""
RaiseKeyEvent(self: Control,key: object,e: KeyEventArgs)
Raises the appropriate key event.
key: The event to raise.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def RaiseMouseEvent(self,*args):
"""
RaiseMouseEvent(self: Control,key: object,e: MouseEventArgs)
Raises the appropriate mouse event.
key: The event to raise.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def RaisePaintEvent(self,*args):
"""
RaisePaintEvent(self: Control,key: object,e: PaintEventArgs)
Raises the appropriate paint event.
key: The event to raise.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def RealizeProperties(self,*args):
"""
RealizeProperties(self: ListView)
Initializes the properties of the System.Windows.Forms.ListView control that manage the
appearance of the control.
"""
pass
def RecreateHandle(self,*args):
"""
RecreateHandle(self: Control)
Forces the re-creation of the handle for the control.
"""
pass
def RedrawItems(self,startIndex,endIndex,invalidateOnly):
"""
RedrawItems(self: ListView,startIndex: int,endIndex: int,invalidateOnly: bool)
Forces a range of System.Windows.Forms.ListViewItem objects to be redrawn.
startIndex: The index for the first item in the range to be redrawn.
endIndex: The index for the last item of the range to be redrawn.
invalidateOnly: true to invalidate the range of items; false to invalidate and repaint the items.
"""
pass
def RescaleConstantsForDpi(self,*args):
""" RescaleConstantsForDpi(self: Control,deviceDpiOld: int,deviceDpiNew: int) """
pass
def ResetMouseEventArgs(self,*args):
"""
ResetMouseEventArgs(self: Control)
Resets the control to handle the System.Windows.Forms.Control.MouseLeave event.
"""
pass
def RtlTranslateAlignment(self,*args):
"""
RtlTranslateAlignment(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
RtlTranslateAlignment(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
RtlTranslateAlignment(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateContent(self,*args):
"""
RtlTranslateContent(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
"""
pass
def RtlTranslateHorizontal(self,*args):
"""
RtlTranslateHorizontal(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateLeftRight(self,*args):
"""
RtlTranslateLeftRight(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
"""
pass
def ScaleControl(self,*args):
"""
ScaleControl(self: Control,factor: SizeF,specified: BoundsSpecified)
Scales a control's location,size,padding and margin.
factor: The factor by which the height and width of the control will be scaled.
specified: A System.Windows.Forms.BoundsSpecified value that specifies the bounds of the control to use
when defining its size and position.
"""
pass
def ScaleCore(self,*args):
"""
ScaleCore(self: Control,dx: Single,dy: Single)
This method is not relevant for this class.
dx: The horizontal scaling factor.
dy: The vertical scaling factor.
"""
pass
def Select(self):
"""
Select(self: Control,directed: bool,forward: bool)
Activates a child control. Optionally specifies the direction in the tab order to select the
control from.
directed: true to specify the direction of the control to select; otherwise,false.
forward: true to move forward in the tab order; false to move backward in the tab order.
"""
pass
def SetAutoSizeMode(self,*args):
"""
SetAutoSizeMode(self: Control,mode: AutoSizeMode)
Sets a value indicating how a control will behave when its System.Windows.Forms.Control.AutoSize
property is enabled.
mode: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def SetBoundsCore(self,*args):
"""
SetBoundsCore(self: Control,x: int,y: int,width: int,height: int,specified: BoundsSpecified)
Performs the work of setting the specified bounds of this control.
x: The new System.Windows.Forms.Control.Left property value of the control.
y: The new System.Windows.Forms.Control.Top property value of the control.
width: The new System.Windows.Forms.Control.Width property value of the control.
height: The new System.Windows.Forms.Control.Height property value of the control.
specified: A bitwise combination of the System.Windows.Forms.BoundsSpecified values.
"""
pass
def SetClientSizeCore(self,*args):
"""
SetClientSizeCore(self: Control,x: int,y: int)
Sets the size of the client area of the control.
x: The client area width,in pixels.
y: The client area height,in pixels.
"""
pass
def SetStyle(self,*args):
"""
SetStyle(self: Control,flag: ControlStyles,value: bool)
Sets a specified System.Windows.Forms.ControlStyles flag to either true or false.
flag: The System.Windows.Forms.ControlStyles bit to set.
value: true to apply the specified style to the control; otherwise,false.
"""
pass
def SetTopLevel(self,*args):
"""
SetTopLevel(self: Control,value: bool)
Sets the control as the top-level control.
value: true to set the control as the top-level control; otherwise,false.
"""
pass
def SetVisibleCore(self,*args):
"""
SetVisibleCore(self: Control,value: bool)
Sets the control to the specified visible state.
value: true to make the control visible; otherwise,false.
"""
pass
def SizeFromClientSize(self,*args):
"""
SizeFromClientSize(self: Control,clientSize: Size) -> Size
Determines the size of the entire control from the height and width of its client area.
clientSize: A System.Drawing.Size value representing the height and width of the control's client area.
Returns: A System.Drawing.Size value representing the height and width of the entire control.
"""
pass
def Sort(self):
"""
Sort(self: ListView)
Sorts the items of the list view.
"""
pass
def ToString(self):
"""
ToString(self: ListView) -> str
Returns a string representation of the System.Windows.Forms.ListView control.
Returns: A string that states the control type,the count of items in the System.Windows.Forms.ListView
control,and the type of the first item in the System.Windows.Forms.ListView,if the count is
not 0.
"""
pass
def UpdateBounds(self,*args):
"""
UpdateBounds(self: Control,x: int,y: int,width: int,height: int,clientWidth: int,clientHeight: int)
Updates the bounds of the control with the specified size,location,and client size.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
clientWidth: The client System.Drawing.Size.Width of the control.
clientHeight: The client System.Drawing.Size.Height of the control.
UpdateBounds(self: Control,x: int,y: int,width: int,height: int)
Updates the bounds of the control with the specified size and location.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
UpdateBounds(self: Control)
Updates the bounds of the control with the current size and location.
"""
pass
def UpdateExtendedStyles(self,*args):
"""
UpdateExtendedStyles(self: ListView)
Updates the extended styles applied to the list view control.
"""
pass
def UpdateStyles(self,*args):
"""
UpdateStyles(self: Control)
Forces the assigned styles to be reapplied to the control.
"""
pass
def UpdateZOrder(self,*args):
"""
UpdateZOrder(self: Control)
Updates the control in its parent's z-order.
"""
pass
def WndProc(self,*args):
"""
WndProc(self: ListView,m: Message) -> Message
Overrides System.Windows.Forms.Control.WndProc(System.Windows.Forms.Message@).
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __str__(self,*args):
pass
Activation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the type of action the user must take to activate an item.
Get: Activation(self: ListView) -> ItemActivation
Set: Activation(self: ListView)=value
"""
Alignment=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the alignment of items in the control.
Get: Alignment(self: ListView) -> ListViewAlignment
Set: Alignment(self: ListView)=value
"""
AllowColumnReorder=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the user can drag column headers to reorder columns in the control.
Get: AllowColumnReorder(self: ListView) -> bool
Set: AllowColumnReorder(self: ListView)=value
"""
AutoArrange=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets whether icons are automatically kept arranged.
Get: AutoArrange(self: ListView) -> bool
Set: AutoArrange(self: ListView)=value
"""
BackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color.
Get: BackColor(self: ListView) -> Color
Set: BackColor(self: ListView)=value
"""
BackgroundImageLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets an System.Windows.Forms.ImageLayout value.
Get: BackgroundImageLayout(self: ListView) -> ImageLayout
Set: BackgroundImageLayout(self: ListView)=value
"""
BackgroundImageTiled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the background image of the System.Windows.Forms.ListView should be tiled.
Get: BackgroundImageTiled(self: ListView) -> bool
Set: BackgroundImageTiled(self: ListView)=value
"""
BorderStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the border style of the control.
Get: BorderStyle(self: ListView) -> BorderStyle
Set: BorderStyle(self: ListView)=value
"""
CanEnableIme=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Windows.Forms.Control.ImeMode property can be set to an active value,to enable IME support.
"""
CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines if events can be raised on the control.
"""
CheckBoxes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether a check box appears next to each item in the control.
Get: CheckBoxes(self: ListView) -> bool
Set: CheckBoxes(self: ListView)=value
"""
CheckedIndices=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the indexes of the currently checked items in the control.
Get: CheckedIndices(self: ListView) -> CheckedIndexCollection
"""
CheckedItems=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the currently checked items in the control.
Get: CheckedItems(self: ListView) -> CheckedListViewItemCollection
"""
Columns=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of all column headers that appear in the control.
Get: Columns(self: ListView) -> ColumnHeaderCollection
"""
CreateParams=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is not relevant for this class.
"""
DefaultCursor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the default cursor for the control.
"""
DefaultImeMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the default Input Method Editor (IME) mode supported by the control.
"""
DefaultMargin=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the space,in pixels,that is specified by default between controls.
"""
DefaultMaximumSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default maximum size of a control.
"""
DefaultMinimumSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default minimum size of a control.
"""
DefaultPadding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the internal spacing,in pixels,of the contents of a control.
"""
DefaultSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DoubleBuffered=property(lambda self: object(),lambda self,v: None,lambda self: None)
Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
FocusedItem=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the item in the control that currently has focus.
Get: FocusedItem(self: ListView) -> ListViewItem
Set: FocusedItem(self: ListView)=value
"""
FontHeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the height of the font of the control.
"""
ForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the foreground color.
Get: ForeColor(self: ListView) -> Color
Set: ForeColor(self: ListView)=value
"""
FullRowSelect=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether clicking an item selects all its subitems.
Get: FullRowSelect(self: ListView) -> bool
Set: FullRowSelect(self: ListView)=value
"""
GridLines=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether grid lines appear between the rows and columns containing the items and subitems in the control.
Get: GridLines(self: ListView) -> bool
Set: GridLines(self: ListView)=value
"""
Groups=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of System.Windows.Forms.ListViewGroup objects assigned to the control.
Get: Groups(self: ListView) -> ListViewGroupCollection
"""
HeaderStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the column header style.
Get: HeaderStyle(self: ListView) -> ColumnHeaderStyle
Set: HeaderStyle(self: ListView)=value
"""
HideSelection=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the selected item in the control remains highlighted when the control loses focus.
Get: HideSelection(self: ListView) -> bool
Set: HideSelection(self: ListView)=value
"""
HotTracking=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the text of an item or subitem has the appearance of a hyperlink when the mouse pointer passes over it.
Get: HotTracking(self: ListView) -> bool
Set: HotTracking(self: ListView)=value
"""
HoverSelection=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether an item is automatically selected when the mouse pointer remains over the item for a few seconds.
Get: HoverSelection(self: ListView) -> bool
Set: HoverSelection(self: ListView)=value
"""
ImeModeBase=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the IME mode of a control.
"""
InsertionMark=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object used to indicate the expected drop location when an item is dragged within a System.Windows.Forms.ListView control.
Get: InsertionMark(self: ListView) -> ListViewInsertionMark
"""
Items=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a collection containing all items in the control.
Get: Items(self: ListView) -> ListViewItemCollection
"""
LabelEdit=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the user can edit the labels of items in the control.
Get: LabelEdit(self: ListView) -> bool
Set: LabelEdit(self: ListView)=value
"""
LabelWrap=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether item labels wrap when items are displayed in the control as icons.
Get: LabelWrap(self: ListView) -> bool
Set: LabelWrap(self: ListView)=value
"""
LargeImageList=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Forms.ImageList to use when displaying items as large icons in the control.
Get: LargeImageList(self: ListView) -> ImageList
Set: LargeImageList(self: ListView)=value
"""
ListViewItemSorter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the sorting comparer for the control.
Get: ListViewItemSorter(self: ListView) -> IComparer
Set: ListViewItemSorter(self: ListView)=value
"""
MultiSelect=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether multiple items can be selected.
Get: MultiSelect(self: ListView) -> bool
Set: MultiSelect(self: ListView)=value
"""
OwnerDraw=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the System.Windows.Forms.ListView control is drawn by the operating system or by code that you provide.
Get: OwnerDraw(self: ListView) -> bool
Set: OwnerDraw(self: ListView)=value
"""
Padding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the space between the System.Windows.Forms.ListView control and its contents.
Get: Padding(self: ListView) -> Padding
Set: Padding(self: ListView)=value
"""
RenderRightToLeft=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is now obsolete.
"""
ResizeRedraw=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the control redraws itself when resized.
"""
RightToLeftLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the control is laid out from right to left.
Get: RightToLeftLayout(self: ListView) -> bool
Set: RightToLeftLayout(self: ListView)=value
"""
ScaleChildren=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that determines the scaling of child controls.
"""
Scrollable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether a scroll bar is added to the control when there is not enough room to display all items.
Get: Scrollable(self: ListView) -> bool
Set: Scrollable(self: ListView)=value
"""
SelectedIndices=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the indexes of the selected items in the control.
Get: SelectedIndices(self: ListView) -> SelectedIndexCollection
"""
SelectedItems=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the items that are selected in the control.
Get: SelectedItems(self: ListView) -> SelectedListViewItemCollection
"""
ShowFocusCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the control should display focus rectangles.
"""
ShowGroups=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether items are displayed in groups.
Get: ShowGroups(self: ListView) -> bool
Set: ShowGroups(self: ListView)=value
"""
ShowItemToolTips=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether ToolTips are shown for the System.Windows.Forms.ListViewItem objects contained in the System.Windows.Forms.ListView.
Get: ShowItemToolTips(self: ListView) -> bool
Set: ShowItemToolTips(self: ListView)=value
"""
ShowKeyboardCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the user interface is in the appropriate state to show or hide keyboard accelerators.
"""
SmallImageList=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Forms.ImageList to use when displaying items as small icons in the control.
Get: SmallImageList(self: ListView) -> ImageList
Set: SmallImageList(self: ListView)=value
"""
Sorting=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the sort order for items in the control.
Get: Sorting(self: ListView) -> SortOrder
Set: Sorting(self: ListView)=value
"""
StateImageList=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Forms.ImageList associated with application-defined states in the control.
Get: StateImageList(self: ListView) -> ImageList
Set: StateImageList(self: ListView)=value
"""
Text=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is not relevant for this class.
Get: Text(self: ListView) -> str
Set: Text(self: ListView)=value
"""
TileSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the size of the tiles shown in tile view.
Get: TileSize(self: ListView) -> Size
Set: TileSize(self: ListView)=value
"""
TopItem=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the first visible item in the control.
Get: TopItem(self: ListView) -> ListViewItem
Set: TopItem(self: ListView)=value
"""
UseCompatibleStateImageBehavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the System.Windows.Forms.ListView uses state image behavior that is compatible with the .NET Framework 1.1 or the .NET Framework 2.0.
Get: UseCompatibleStateImageBehavior(self: ListView) -> bool
Set: UseCompatibleStateImageBehavior(self: ListView)=value
"""
View=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets how items are displayed in the control.
Get: View(self: ListView) -> View
Set: View(self: ListView)=value
"""
VirtualListSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the number of System.Windows.Forms.ListViewItem objects contained in the list when in virtual mode.
Get: VirtualListSize(self: ListView) -> int
Set: VirtualListSize(self: ListView)=value
"""
VirtualMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether you have provided your own data-management operations for the System.Windows.Forms.ListView control.
Get: VirtualMode(self: ListView) -> bool
Set: VirtualMode(self: ListView)=value
"""
AfterLabelEdit=None
BackgroundImageLayoutChanged=None
BeforeLabelEdit=None
CacheVirtualItems=None
CheckedIndexCollection=None
CheckedListViewItemCollection=None
ColumnClick=None
ColumnHeaderCollection=None
ColumnReordered=None
ColumnWidthChanged=None
ColumnWidthChanging=None
DrawColumnHeader=None
DrawItem=None
DrawSubItem=None
ItemActivate=None
ItemCheck=None
ItemChecked=None
ItemDrag=None
ItemMouseHover=None
ItemSelectionChanged=None
ListViewItemCollection=None
PaddingChanged=None
Paint=None
RetrieveVirtualItem=None
RightToLeftLayoutChanged=None
SearchForVirtualItem=None
SelectedIndexChanged=None
SelectedIndexCollection=None
SelectedListViewItemCollection=None
TextChanged=None
VirtualItemsSelectionRangeChanged=None
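# --- Usage sketch (not part of the generated stub) ----------------------
# A minimal, hedged example of driving the control documented above; it
# assumes a CLR bridge (IronPython, or CPython with pythonnet's `clr`
# module) and that System.Windows.Forms can be referenced:
#
#   import clr
#   clr.AddReference("System.Windows.Forms")
#   from System.Windows.Forms import ListView, ListViewItem, View, SortOrder
#
#   lv = ListView()
#   lv.View = View.Details            # report-style view (see View above)
#   lv.Columns.Add("Name")            # one column header (see Columns above)
#   lv.Items.Add(ListViewItem("a"))   # add a single item (see Items above)
#   lv.Sorting = SortOrder.Ascending  # see the Sorting property above
#   lv.Sort()                         # see the Sort method above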
|
models/fpcnn_s3dis.py
|
lyqun/FPConv
| 129 |
76555
|
import torch
import torch.nn as nn
from fpconv.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock
NPOINTS = [8192, 2048, 512, 128]
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]
NSAMPLE = [32, 32, 32, 32, 16]
MLPS = [[64,64], [128,128], [256,256], [512,512], [1024,1024]]
FP_MLPS = [[128,128], [256,128], [512,256], [1024,512]]
CLS_FC = [128]
DP_RATIO = 0.5
def get_model(num_class, input_channels=3):
return Pointnet2SSG(num_class, input_channels)
class Pointnet2SSG(nn.Module):
def __init__(self, num_class, input_channels=3, use_xyz=False):
        # input_channels: input feature channels (not including xyz)
super().__init__()
print(NPOINTS)
self.SA_modules = nn.ModuleList()
self.conv0 = AssemRes_BaseBlock(
CONV_BASE=FPConv6x6_BaseBlock,
npoint=None,
radius=RADIUS[0],
nsample=NSAMPLE[0],
channel_list=[input_channels] + MLPS[0],
use_xyz=use_xyz)
channel_in = MLPS[0][-1]
skip_channel_list = [channel_in]
for k in range(NPOINTS.__len__()):
mlps = [MLPS[k+1].copy()]
channel_out = 0
for idx in range(mlps.__len__()):
mlps[idx] = [channel_in] + mlps[idx]
channel_out += mlps[idx][-1]
print(mlps[0], RADIUS[k], RADIUS[k+1])
if k < 2:
self.SA_modules.append(
AssemRes_BaseBlock(
CONV_BASE=FPConv6x6_BaseBlock,
npoint=NPOINTS[k],
nsample=NSAMPLE[k],
radius=RADIUS[k],
channel_list=mlps[0],
nsample_ds=NSAMPLE[k+1],
radius_ds=RADIUS[k+1],
use_xyz=use_xyz))
else:
self.SA_modules.append(
AssemRes_BaseBlock(
CONV_BASE=FPConv4x4_BaseBlock,
npoint=NPOINTS[k],
nsample=NSAMPLE[k],
radius=RADIUS[k],
channel_list=mlps[0],
nsample_ds=NSAMPLE[k+1],
radius_ds=RADIUS[k+1],
use_xyz=use_xyz))
skip_channel_list.append(channel_out)
channel_in = channel_out
self.FP_modules = nn.ModuleList()
for k in range(FP_MLPS.__len__()):
pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
print(mlp)
self.FP_modules.append(PointnetFPModule(mlp=mlp))
cls_layers = []
pre_channel = FP_MLPS[0][-1]
for k in range(0, CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
pre_channel = CLS_FC[k]
cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
        cls_layers.insert(1, nn.Dropout(DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor):
xyz, features = self._break_up_pc(pointcloud)
_, features = self.conv0(xyz, features)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
fn_feats = l_features[0].unsqueeze(-1) # B, C, N, 1
pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous() # B, N, C
return pred_cls
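# --- Shape-check sketch (illustrative only) ------------------------------
# Assumes a CUDA device, since the fpconv/pointnet2 ops are GPU kernels.
# Input layout is (B, N, 3 + input_channels): xyz first, then features.
if __name__ == '__main__':
    net = get_model(num_class=13, input_channels=3).cuda()
    pc = torch.rand(2, NPOINTS[0], 6).cuda()  # 2 clouds, xyz + 3 feature dims
    logits = net(pc)
    print(logits.shape)  # expected: (2, NPOINTS[0], 13) per-point scores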
|
tests/custom_ops_attributes_test.py
|
gglin001/poptorch
| 128 |
76571
|
<reponame>gglin001/poptorch
#!/usr/bin/env python3
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import collections
import ctypes
import pathlib
import random
import sys
import pytest
import torch
import poptorch
import helpers
myso = list(pathlib.Path("tests").rglob("libcustom_*.*"))
assert myso, "Failed to find libcustom_* libraries"
for single_so in myso:
ctypes.cdll.LoadLibrary(single_so)
def test_float_attribute():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"scalar": 3.5})
return x
model = Model()
x = torch.tensor([5.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
expected = torch.tensor([8.5])
helpers.assert_allclose(actual=out[0], expected=expected)
def test_float_attribute_too_low():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"scalar": -sys.float_info.max})
return x
model = Model()
x = torch.tensor([5.0])
inference_model = poptorch.inferenceModel(model)
with pytest.raises(
poptorch.Error,
match=r"-1\.79769e\+308 is too low for a Popart float " +
r"attribute\."):
inference_model(x)
def test_float_attribute_too_high():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"scalar": sys.float_info.max})
return x
model = Model()
x = torch.tensor([5.0])
inference_model = poptorch.inferenceModel(model)
with pytest.raises(
poptorch.Error,
match=r"1\.79769e\+308 is too high for a Popart float " +
r"attribute\."):
inference_model(x)
def test_int_attribute():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarInt",
"test.poptorch",
1,
example_outputs=[x],
attributes={"scalar": 3})
return x
model = Model()
x = torch.tensor([5])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allequal(actual=out[0],
expected=torch.tensor([8], dtype=torch.int32))
def test_float_list_attribute():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarVecFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"vec": [1.0, 2.0, 3.0]})
return x
model = Model()
x = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allclose(actual=out[0],
expected=torch.tensor([4.0, 6.0, 8.0]))
def test_float_list_attribute_too_low():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op(
[x],
"AddScalarVecFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"vec": [1.0, 2.0, -sys.float_info.max]})
return x
model = Model()
x = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
with pytest.raises(
poptorch.Error,
match=r"-1\.79769e\+308 is too low for a Popart float " +
r"attribute\."):
inference_model(x)
def test_float_list_attribute_too_high():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op(
[x],
"AddScalarVecFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"vec": [sys.float_info.max, 2.0, 3.0]})
return x
model = Model()
x = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
with pytest.raises(
poptorch.Error,
match=r"1\.79769e\+308 is too high for a Popart float " +
r"attribute\."):
inference_model(x)
def test_float_tuple_attribute():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarVecFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={"vec": (1.0, 2.0, 3.0)})
return x
model = Model()
x = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allclose(expected=out[0],
actual=torch.tensor([4.0, 6.0, 8.0]))
def test_int_list_attribute():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarVecInt",
"test.poptorch",
1,
example_outputs=[x],
attributes={"vec": [1, 2, 3]})
return x
model = Model()
x = torch.tensor([3, 4, 5])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allequal(actual=out[0],
expected=torch.tensor([4, 6, 8],
dtype=torch.int32))
def test_float_combined_attributes():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddVecScalarMulFloat",
"test.poptorch",
1,
example_outputs=[x],
attributes={
"vec": [1.0, 2.0, 3.0],
"scalar": 2.0
})
return x
model = Model()
x = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allequal(actual=out[0],
expected=torch.tensor([8.0, 12.0, 16.0]))
def test_int_two_attributes():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"AddScalarInt",
"test.poptorch",
1,
example_outputs=[x],
attributes={"scalar": 3})
x = poptorch.custom_op(x,
"AddScalarInt",
"test.poptorch",
1,
example_outputs=x,
attributes={"scalar": 2})
return x
model = Model()
x = torch.tensor([5])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allequal(actual=out[0],
expected=torch.tensor([10], dtype=torch.int32))
@pytest.mark.parametrize("attr", ("sum", "mean"))
def test_string_attribute(attr):
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"ReduceOp",
"test.poptorch",
1,
example_outputs=[x],
attributes={"reduction": attr})
return x
model = Model()
x = torch.tensor([5.0, 6.0, 7.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
if attr == "mean":
helpers.assert_allclose(actual=out[0], expected=torch.tensor(6.0))
else:
helpers.assert_allclose(actual=out[0], expected=torch.tensor(18.0))
def test_non_ascii_string_attribute():
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"ReduceOp",
"test.poptorch",
1,
example_outputs=[x],
attributes={"reduction": "a\u1f00b"})
return x
model = Model()
x = torch.tensor([5.0, 6.0, 7.0])
inference_model = poptorch.inferenceModel(model)
with pytest.raises(ValueError,
match="a\u1f00b contains non-ASCII characters."):
inference_model(x)
def test_string_list_attribute():
class Model(torch.nn.Module):
def forward(self, x, y, z):
x = poptorch.custom_op(
[x, y, z],
"ThreeReduceOp",
"test.poptorch",
1,
example_outputs=[x, y, z],
attributes={"reductions": ["mean", "sum", "mean"]})
return x
model = Model()
x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([2.0, 3.0, 4.0])
z = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x, y, z)
helpers.assert_allequal(actual=out[0], expected=torch.tensor(2.0))
helpers.assert_allequal(actual=out[1], expected=torch.tensor(9.0))
helpers.assert_allequal(actual=out[2], expected=torch.tensor(4.0))
def test_non_asciistring_list_attribute():
class Model(torch.nn.Module):
def forward(self, x, y, z):
x = poptorch.custom_op(
[x, y, z],
"ThreeReduceOp",
"test.poptorch",
1,
example_outputs=[x, y, z],
attributes={"reductions": ["a\u1f00b", "sum", "mean"]})
return x
model = Model()
x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([2.0, 3.0, 4.0])
z = torch.tensor([3.0, 4.0, 5.0])
inference_model = poptorch.inferenceModel(model)
with pytest.raises(ValueError,
match="a\u1f00b contains non-ASCII characters."):
inference_model(x, y, z)
ALL_ATTRIBUTES = {
"float_one": 1.0,
"float_minus_two": -2.0,
"int_zero": 0,
"int_minus_five": -5,
"floats_one_two_three": [1.0, 2.0, 3.0],
"floats_minus_one_two_three": [-1.0, -2.0, -3.0],
"ints_one_two_three": [1, 2, 3],
"ints_minus_one_two_three": [-1, -2, -3],
"a_string": "string with quotes and slash \" ' \\ end",
"strs": ["\x01", "\x02", "\x03"]
}
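# The dictionary above exercises every attribute value type these tests
# expect the custom-op bridge to round-trip: float, int, list-of-float,
# list-of-int, str (including quotes and backslashes) and list-of-str.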
@pytest.mark.parametrize("seed", range(10))
def test_many_attributes(seed):
attr_keys = list(ALL_ATTRIBUTES.keys())
random.seed(seed)
random.shuffle(attr_keys)
attrs_shuff = collections.OrderedDict()
for key in attr_keys:
attrs_shuff[key] = ALL_ATTRIBUTES[key]
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"ManyAttributeOp",
"test.poptorch",
1,
example_outputs=[x],
attributes=attrs_shuff)
return x
model = Model()
x = torch.tensor([0.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allequal(actual=out[0],
expected=torch.tensor(1.0).reshape((1, )))
@pytest.mark.parametrize("seed", range(3))
def test_many_attributes_one_wrong(seed):
attr_keys = list(ALL_ATTRIBUTES.keys())
random.seed(seed)
random.shuffle(attr_keys)
attrs_shuff = collections.OrderedDict()
for key in attr_keys:
attrs_shuff[key] = ALL_ATTRIBUTES[key]
attrs_shuff["a_string"] = "Very wrong"
class Model(torch.nn.Module):
def forward(self, x):
x = poptorch.custom_op([x],
"ManyAttributeOp",
"test.poptorch",
1,
example_outputs=[x],
attributes=attrs_shuff)
return x
model = Model()
x = torch.tensor([0.0])
inference_model = poptorch.inferenceModel(model)
out = inference_model(x)
helpers.assert_allequal(actual=out[0],
expected=torch.tensor(0.0).reshape((1, )))
#many_attribtes_examples_start
def test_many_attributes_examples():
class Model(torch.nn.Module):
def forward(self, x):
attributes = {
"float_one": 1.0,
"float_minus_two": -2.0,
"int_zero": 0,
"int_minus_five": -5,
"floats_one_two_three": [1.0, 2.0, 3.0],
"floats_minus_one_two_three": [-1.0, -2.0, -3.0],
"ints_one_two_three": [1, 2, 3],
"ints_minus_one_two_three": [-1, -2, -3],
"a_string": "string with quotes and slash \" ' \\ end",
"strs": ["abc", "def", "ghi"]
}
x = poptorch.custom_op([x],
"ManyAttributeOp",
"test.poptorch",
1,
example_outputs=[x],
attributes=attributes)
#many_attribtes_examples_end
return x
model = Model()
x = torch.tensor([0.0])
inference_model = poptorch.inferenceModel(model)
inference_model(x)
|
tensorflow2/datasets/seg_dataset.py
|
naviocean/imgclsmob
| 2,649 |
76572
|
<reponame>naviocean/imgclsmob
import random
import threading
import numpy as np
from PIL import Image, ImageOps, ImageFilter
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
class SegDataset(object):
"""
Segmentation base dataset.
Parameters:
----------
root : str
Path to data folder.
mode : str
'train', 'val', 'test', or 'demo'.
transform : callable
A function that transforms the image.
"""
def __init__(self,
root,
mode,
transform,
base_size=520,
crop_size=480):
super(SegDataset, self).__init__()
assert (mode in ("train", "val", "test", "demo"))
assert (mode in ("test", "demo"))
self.root = root
self.mode = mode
self.transform = transform
self.base_size = base_size
self.crop_size = crop_size
def _val_sync_transform(self, image, mask):
outsize = self.crop_size
short_size = outsize
w, h = image.size
if w > h:
oh = short_size
ow = int(1.0 * w * oh / h)
else:
ow = short_size
oh = int(1.0 * h * ow / w)
image = image.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = image.size
x1 = int(round(0.5 * (w - outsize)))
y1 = int(round(0.5 * (h - outsize)))
image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
# final transform
image, mask = self._img_transform(image), self._mask_transform(mask)
return image, mask
def _sync_transform(self, image, mask):
# random mirror
if random.random() < 0.5:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = image.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
image = image.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = image.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# gaussian blur as in PSP
if random.random() < 0.5:
image = image.filter(ImageFilter.GaussianBlur(
radius=random.random()))
# final transform
image, mask = self._img_transform(image), self._mask_transform(mask)
return image, mask
@staticmethod
def _img_transform(image):
return np.array(image)
@staticmethod
def _mask_transform(mask):
return np.array(mask).astype(np.int32)
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
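# --- Minimal subclass sketch (illustrative only) --------------------------
# The file names below are made up; a concrete dataset yields PIL
# (image, mask) pairs and reuses the synchronized transforms defined above.
class _ToySegDataset(SegDataset):
    def __init__(self, root, mode="test", transform=None):
        super(_ToySegDataset, self).__init__(root, mode, transform)
        self.pairs = [("img0.jpg", "mask0.png")]  # (image, mask) file pairs
    def __getitem__(self, index):
        image_path, mask_path = self.pairs[index]
        image = Image.open(image_path).convert("RGB")
        mask = Image.open(mask_path)
        image, mask = self._val_sync_transform(image, mask)
        if self.transform is not None:
            image = self.transform(image)
        return image, mask
    def __len__(self):
        return len(self.pairs)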
class SegDirectoryIterator(DirectoryIterator):
allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
def __init__(self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format='channels_last',
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
dtype='float32',
dataset=None):
super(SegDirectoryIterator, self).set_processing_attrs(
image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation)
self.dataset = dataset
self.class_mode = class_mode
self.dtype = dtype
self.n = len(self.dataset)
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns:
A batch of transformed samples.
"""
# batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)
# batch_y = np.zeros((len(index_array),) + self.image_shape, dtype=np.int32)
batch_x = None
batch_y = None
for i, j in enumerate(index_array):
x, y = self.dataset[j]
if batch_x is None:
batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.int32)
# if self.data_format == "channel_first":
# print("*")
# print("batch_x.shape={}".format(batch_x.shape))
# print("batch_y.shape={}".format(batch_y.shape))
# print("x.shape={}".format(x.shape))
# print("y.shape={}".format(y.shape))
batch_x[i] = x
batch_y[i] = y
return batch_x, batch_y
class SegImageDataGenerator(ImageDataGenerator):
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
dataset=None):
return SegDirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation,
dataset=dataset)
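# --- Usage sketch (illustrative only) -------------------------------------
# Wires the toy dataset above into the Keras-style generator. Note the
# `directory` argument is kept for signature compatibility but is unused by
# SegDirectoryIterator, so an empty string suffices:
#   data_gen = SegImageDataGenerator()
#   it = data_gen.flow_from_directory("", batch_size=1, shuffle=False,
#                                     dataset=_ToySegDataset(root="."))
#   images, masks = next(it)  # float image batch, int32 mask batch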
|
tasks/R2R/follower.py
|
zhangybzbo/speaker_follower
| 117 |
76586
|
''' Agents: stop/random/shortest/seq2seq '''
import json
import sys
import numpy as np
import random
from collections import namedtuple
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.distributions as D
from utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda
#from env import FOLLOWER_MODEL_ACTIONS, FOLLOWER_ENV_ACTIONS, IGNORE_ACTION_INDEX, LEFT_ACTION_INDEX, RIGHT_ACTION_INDEX, START_ACTION_INDEX, END_ACTION_INDEX, FORWARD_ACTION_INDEX, index_action_tuple
InferenceState = namedtuple("InferenceState", "prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha")
Cons = namedtuple("Cons", "first, rest")
def cons_to_list(cons):
l = []
while True:
l.append(cons.first)
cons = cons.rest
if cons is None:
break
return l
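# (For reference: cons_to_list(Cons(1, Cons(2, None))) -> [1, 2].)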
def backchain_inference_states(last_inference_state):
states = []
observations = []
actions = []
inf_state = last_inference_state
scores = []
last_score = None
attentions = []
while inf_state is not None:
states.append(inf_state.world_state)
observations.append(inf_state.observation)
actions.append(inf_state.last_action)
attentions.append(inf_state.last_alpha)
if last_score is not None:
scores.append(last_score - inf_state.score)
last_score = inf_state.score
inf_state = inf_state.prev_inference_state
scores.append(last_score)
return list(reversed(states)), list(reversed(observations)), list(reversed(actions))[1:], list(reversed(scores))[1:], list(reversed(attentions))[1:] # exclude start action
def least_common_viewpoint_path(inf_state_a, inf_state_b):
# return inference states traversing from A to X, then from Y to B,
# where X and Y are the least common ancestors of A and B respectively that share a viewpointId
    path_to_b_by_viewpoint = {}
b = inf_state_b
b_stack = Cons(b, None)
while b is not None:
path_to_b_by_viewpoint[b.world_state.viewpointId] = b_stack
b = b.prev_inference_state
b_stack = Cons(b, b_stack)
a = inf_state_a
path_from_a = [a]
while a is not None:
vp = a.world_state.viewpointId
if vp in path_to_b_by_viewpoint:
path_to_b = cons_to_list(path_to_b_by_viewpoint[vp])
assert path_from_a[-1].world_state.viewpointId == path_to_b[0].world_state.viewpointId
return path_from_a + path_to_b[1:]
a = a.prev_inference_state
path_from_a.append(a)
raise AssertionError("no common ancestor found")
def batch_instructions_from_encoded(encoded_instructions, max_length, reverse=False, sort=False):
# encoded_instructions: list of lists of token indices (should not be padded, or contain BOS or EOS tokens)
#seq_tensor = np.array(encoded_instructions)
# make sure pad does not start any sentence
num_instructions = len(encoded_instructions)
seq_tensor = np.full((num_instructions, max_length), vocab_pad_idx)
seq_lengths = []
for i, inst in enumerate(encoded_instructions):
if len(inst) > 0:
assert inst[-1] != vocab_eos_idx
if reverse:
inst = inst[::-1]
inst = np.concatenate((inst, [vocab_eos_idx]))
inst = inst[:max_length]
seq_tensor[i,:len(inst)] = inst
seq_lengths.append(len(inst))
seq_tensor = torch.from_numpy(seq_tensor)
if sort:
seq_lengths, perm_idx = torch.from_numpy(np.array(seq_lengths)).sort(0, True)
seq_lengths = list(seq_lengths)
seq_tensor = seq_tensor[perm_idx]
mask = (seq_tensor == vocab_pad_idx)[:, :max(seq_lengths)]
ret_tp = try_cuda(Variable(seq_tensor, requires_grad=False).long()), \
try_cuda(mask.byte()), \
seq_lengths
if sort:
ret_tp = ret_tp + (list(perm_idx),)
return ret_tp
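# Illustrative example of the contract above (token ids are made up;
# vocab_pad_idx / vocab_eos_idx come from utils):
#   batch_instructions_from_encoded([[5, 9, 2], [7]], max_length=5)
#   pads to [[5, 9, 2, EOS, PAD], [7, EOS, PAD, PAD, PAD]] and returns the
#   cuda LongTensor, a byte mask over the PAD positions, and lengths [4, 2].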
class BaseAgent(object):
''' Base class for an R2R agent to generate and save trajectories. '''
def __init__(self, env, results_path):
self.env = env
self.results_path = results_path
random.seed(1)
self.results = {}
self.losses = [] # For learning agents
def write_results(self):
results = {}
for key, item in self.results.items():
results[key] = {
'instr_id': item['instr_id'],
'trajectory': item['trajectory'],
}
with open(self.results_path, 'w') as f:
json.dump(results, f)
def rollout(self):
''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self):
self.env.reset_epoch()
self.losses = []
self.results = {}
# We rely on env showing the entire batch before repeating anything
#print 'Testing %s' % self.__class__.__name__
looped = False
rollout_scores = []
beam_10_scores = []
while True:
rollout_results = self.rollout()
# if self.feedback == 'argmax':
# beam_results = self.beam_search(1, load_next_minibatch=False)
# assert len(rollout_results) == len(beam_results)
# for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
# assert rollout_traj['instr_id'] == beam_trajs[0]['instr_id']
# assert rollout_traj['trajectory'] == beam_trajs[0]['trajectory']
# assert np.allclose(rollout_traj['score'], beam_trajs[0]['score'])
# print("passed check: beam_search with beam_size=1")
#
# self.env.set_beam_size(10)
# beam_results = self.beam_search(10, load_next_minibatch=False)
# assert len(rollout_results) == len(beam_results)
# for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
# rollout_score = rollout_traj['score']
# rollout_scores.append(rollout_score)
# beam_score = beam_trajs[0]['score']
# beam_10_scores.append(beam_score)
# # assert rollout_score <= beam_score
# self.env.set_beam_size(1)
# # print("passed check: beam_search with beam_size=10")
# if self.feedback == 'teacher' and self.beam_size == 1:
# rollout_loss = self.loss
# path_obs, path_actions, encoded_instructions = self.env.gold_obs_actions_and_instructions(self.episode_len, load_next_minibatch=False)
# for i in range(len(rollout_results)):
# assert rollout_results[i]['actions'] == path_actions[i]
# assert [o1['viewpoint'] == o2['viewpoint']
# for o1, o2 in zip(rollout_results[i]['observations'], path_obs[i])]
# trajs, loss = self._score_obs_actions_and_instructions(path_obs, path_actions, encoded_instructions)
# for traj, rollout in zip(trajs, rollout_results):
# assert traj['instr_id'] == rollout['instr_id']
# assert traj['actions'] == rollout['actions']
# assert np.allclose(traj['score'], rollout['score'])
# assert np.allclose(rollout_loss.data[0], loss.data[0])
# print('passed score test')
for result in rollout_results:
if result['instr_id'] in self.results:
looped = True
else:
self.results[result['instr_id']] = result
if looped:
break
# if self.feedback == 'argmax':
# print("avg rollout score: ", np.mean(rollout_scores))
# print("avg beam 10 score: ", np.mean(beam_10_scores))
return self.results
def path_element_from_observation(ob):
return (ob['viewpoint'], ob['heading'], ob['elevation'])
class StopAgent(BaseAgent):
''' An agent that doesn't move! '''
def rollout(self):
world_states = self.env.reset()
obs = self.env.observe(world_states)
traj = [{
'instr_id': ob['instr_id'],
'trajectory': [path_element_from_observation(ob) ]
} for ob in obs]
return traj
class RandomAgent(BaseAgent):
''' An agent that picks a random direction then tries to go straight for
five viewpoint steps and then stops. '''
def rollout(self):
world_states = self.env.reset()
obs = self.env.observe(world_states)
traj = [{
'instr_id': ob['instr_id'],
'trajectory': [path_element_from_observation(ob)]
} for ob in obs]
ended = [False] * len(obs)
self.steps = [0] * len(obs)
for t in range(6):
actions = []
for i, ob in enumerate(obs):
if self.steps[i] >= 5:
actions.append(0) # do nothing, i.e. end
ended[i] = True
elif self.steps[i] == 0:
a = np.random.randint(len(ob['adj_loc_list']) - 1) + 1
actions.append(a) # choose a random adjacent loc
self.steps[i] += 1
else:
assert len(ob['adj_loc_list']) > 1
actions.append(1) # go forward
self.steps[i] += 1
world_states = self.env.step(world_states, actions, obs)
obs = self.env.observe(world_states)
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['trajectory'].append(path_element_from_observation(ob))
return traj
class ShortestAgent(BaseAgent):
''' An agent that always takes the shortest path to goal. '''
def rollout(self):
world_states = self.env.reset()
#obs = self.env.observe(world_states)
all_obs, all_actions = self.env.shortest_paths_to_goals(world_states, 20)
return [
{
'instr_id': obs[0]['instr_id'],
# end state will appear twice because stop action is a no-op, so exclude it
'trajectory': [path_element_from_observation(ob) for ob in obs[:-1]]
}
for obs in all_obs
]
class Seq2SeqAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
# For now, the agent can't pick which forward move to make - just the one in the middle
# env_actions = FOLLOWER_ENV_ACTIONS
# start_index = START_ACTION_INDEX
# ignore_index = IGNORE_ACTION_INDEX
# forward_index = FORWARD_ACTION_INDEX
# end_index = END_ACTION_INDEX
feedback_options = ['teacher', 'argmax', 'sample']
def __init__(self, env, results_path, encoder, decoder, episode_len=10, beam_size=1, reverse_instruction=True, max_instruction_length=80):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.encoder = encoder
self.decoder = decoder
self.episode_len = episode_len
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
self.beam_size = beam_size
self.reverse_instruction = reverse_instruction
self.max_instruction_length = max_instruction_length
# @staticmethod
# def n_inputs():
# return len(FOLLOWER_MODEL_ACTIONS)
#
# @staticmethod
# def n_outputs():
# return len(FOLLOWER_MODEL_ACTIONS)-2 # Model doesn't output start or ignore
def _feature_variables(self, obs, beamed=False):
''' Extract precomputed features into variable. '''
feature_lists = list(zip(*[ob['feature'] for ob in (flatten(obs) if beamed else obs)]))
assert len(feature_lists) == len(self.env.image_features_list)
batched = []
for featurizer, feature_list in zip(self.env.image_features_list, feature_lists):
batched.append(featurizer.batch_features(feature_list))
return batched
def _action_variable(self, obs):
# get the maximum number of actions of all sample in this batch
max_num_a = -1
for i, ob in enumerate(obs):
max_num_a = max(max_num_a, len(ob['adj_loc_list']))
is_valid = np.zeros((len(obs), max_num_a), np.float32)
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros(
(len(obs), max_num_a, action_embedding_dim),
dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
num_a = len(adj_loc_list)
is_valid[i, 0:num_a] = 1.
            # every row of this sample shares the same action-embedding
            # matrix, so a single slice assignment suffices here
            action_embeddings[i, :num_a, :] = ob['action_embedding']
return (
Variable(torch.from_numpy(action_embeddings), requires_grad=False).cuda(),
Variable(torch.from_numpy(is_valid), requires_grad=False).cuda(),
is_valid)
def _teacher_action(self, obs, ended):
''' Extract teacher actions into variable. '''
a = torch.LongTensor(len(obs))
for i,ob in enumerate(obs):
# Supervised teacher only moves one axis at a time
a[i] = ob['teacher'] if not ended[i] else -1
return try_cuda(Variable(a, requires_grad=False))
def _proc_batch(self, obs, beamed=False):
encoded_instructions = [ob['instr_encoding'] for ob in (flatten(obs) if beamed else obs)]
return batch_instructions_from_encoded(encoded_instructions, self.max_instruction_length, reverse=self.reverse_instruction)
def rollout(self):
if self.beam_size == 1:
return self._rollout_with_loss()
else:
assert self.beam_size >= 1
beams, _, _ = self.beam_search(self.beam_size)
return [beam[0] for beam in beams]
def _score_obs_actions_and_instructions(self, path_obs, path_actions, encoded_instructions):
batch_size = len(path_obs)
assert len(path_actions) == batch_size
assert len(encoded_instructions) == batch_size
for path_o, path_a in zip(path_obs, path_actions):
assert len(path_o) == len(path_a) + 1
seq, seq_mask, seq_lengths, perm_indices = \
batch_instructions_from_encoded(
encoded_instructions, self.max_instruction_length,
reverse=self.reverse_instruction, sort=True)
loss = 0
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
u_t_prev = self.decoder.u_begin.expand(batch_size, -1) # init action
ended = np.array([False] * batch_size)
sequence_scores = try_cuda(torch.zeros(batch_size))
traj = [{
'instr_id': path_o[0]['instr_id'],
'trajectory': [path_element_from_observation(path_o[0])],
'actions': [],
'scores': [],
'observations': [path_o[0]],
'instr_encoding': path_o[0]['instr_encoding']
} for path_o in path_obs]
obs = None
for t in range(self.episode_len):
next_obs = []
next_target_list = []
for perm_index, src_index in enumerate(perm_indices):
path_o = path_obs[src_index]
path_a = path_actions[src_index]
if t < len(path_a):
next_target_list.append(path_a[t])
next_obs.append(path_o[t])
else:
next_target_list.append(-1)
next_obs.append(obs[perm_index])
obs = next_obs
target = try_cuda(Variable(torch.LongTensor(next_target_list), requires_grad=False))
f_t_list = self._feature_variables(obs) # Image features from obs
all_u_t, is_valid, _ = self._action_variable(obs)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# Supervised training
loss += self.criterion(logit, target)
# Determine next model inputs
a_t = torch.clamp(target, min=0) # teacher forcing
# update the previous action
u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()
action_scores = -F.cross_entropy(logit, target, ignore_index=-1, reduce=False).data
sequence_scores += action_scores
# Save trajectory output
for perm_index, src_index in enumerate(perm_indices):
ob = obs[perm_index]
if not ended[perm_index]:
traj[src_index]['trajectory'].append(path_element_from_observation(ob))
traj[src_index]['score'] = float(sequence_scores[perm_index])
traj[src_index]['scores'].append(action_scores[perm_index])
traj[src_index]['actions'].append(a_t.data[perm_index])
# traj[src_index]['observations'].append(ob)
# Update ended list
for i in range(batch_size):
action_idx = a_t[i].data[0]
if action_idx == 0:
ended[i] = True
# Early exit if all ended
if ended.all():
break
return traj, loss
def _rollout_with_loss(self):
initial_world_states = self.env.reset(sort=True)
initial_obs = self.env.observe(initial_world_states)
initial_obs = np.array(initial_obs)
batch_size = len(initial_obs)
# get mask and lengths
seq, seq_mask, seq_lengths = self._proc_batch(initial_obs)
# Forward through encoder, giving initial hidden state and memory cell for decoder
# TODO consider not feeding this into the decoder, and just using attention
self.loss = 0
feedback = self.feedback
ctx,h_t,c_t = self.encoder(seq, seq_lengths)
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'trajectory': [path_element_from_observation(ob)],
'actions': [],
'scores': [],
'observations': [ob],
'instr_encoding': ob['instr_encoding']
} for ob in initial_obs]
obs = initial_obs
world_states = initial_world_states
# Initial action
u_t_prev = self.decoder.u_begin.expand(batch_size, -1) # init action
        ended = np.array([False] * batch_size) # Indices match permutation of the model, not env
# Do a sequence rollout and calculate the loss
env_action = [None] * batch_size
sequence_scores = try_cuda(torch.zeros(batch_size))
for t in range(self.episode_len):
f_t_list = self._feature_variables(obs) # Image features from obs
all_u_t, is_valid, _ = self._action_variable(obs)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# Supervised training
target = self._teacher_action(obs, ended)
self.loss += self.criterion(logit, target)
# Determine next model inputs
if feedback == 'teacher':
# turn -1 (ignore) to 0 (stop) so that the action is executable
a_t = torch.clamp(target, min=0)
elif feedback == 'argmax':
_,a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
elif feedback == 'sample':
probs = F.softmax(logit, dim=1) # sampling an action from model
# Further mask probs where agent can't move forward
# Note input to `D.Categorical` does not have to sum up to 1
# http://pytorch.org/docs/stable/torch.html#torch.multinomial
probs[is_valid == 0] = 0.
m = D.Categorical(probs)
a_t = m.sample()
else:
sys.exit('Invalid feedback option')
# update the previous action
u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()
action_scores = -F.cross_entropy(logit, a_t, ignore_index=-1, reduce=False).data
sequence_scores += action_scores
# dfried: I changed this so that the ended list is updated afterward; this causes <end> to be added as the last action, along with its score, and the final world state will be duplicated (to more closely match beam search)
# Make environment action
for i in range(batch_size):
action_idx = a_t[i].data[0]
env_action[i] = action_idx
world_states = self.env.step(world_states, env_action, obs)
obs = self.env.observe(world_states)
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, world_states[0], a_t.data[0], sequence_scores[0]))
# Save trajectory output
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['trajectory'].append(path_element_from_observation(ob))
traj[i]['score'] = sequence_scores[i]
traj[i]['scores'].append(action_scores[i])
traj[i]['actions'].append(a_t.data[i])
traj[i]['observations'].append(ob)
# Update ended list
for i in range(batch_size):
action_idx = a_t[i].data[0]
if action_idx == 0:
ended[i] = True
# Early exit if all ended
if ended.all():
break
#self.losses.append(self.loss.data[0] / self.episode_len)
# shouldn't divide by the episode length because of masking
self.losses.append(self.loss.data[0])
return traj
def beam_search(self, beam_size, load_next_minibatch=True, mask_undo=False):
assert self.env.beam_size >= beam_size
world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
obs = self.env.observe(world_states, beamed=True)
batch_size = len(world_states)
# get mask and lengths
seq, seq_mask, seq_lengths = self._proc_batch(obs, beamed=True)
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx,h_t,c_t = self.encoder(seq, seq_lengths)
completed = []
for _ in range(batch_size):
completed.append([])
beams = [
[InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=i,
last_action=-1,
last_action_embedding=self.decoder.u_begin.view(-1),
action_count=0,
score=0.0, h_t=None, c_t=None, last_alpha=None)]
for i, (ws, o) in enumerate(zip(world_states, obs))
]
# Do a sequence rollout and calculate the loss
for t in range(self.episode_len):
flat_indices = []
beam_indices = []
u_t_list = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
flat_indices.append(inf_state.flat_index)
u_t_list.append(inf_state.last_action_embedding)
u_t_prev = torch.stack(u_t_list, dim=0)
assert len(u_t_prev.shape) == 2
flat_obs = flatten(obs)
f_t_list = self._feature_variables(flat_obs) # Image features from obs
all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t[flat_indices], c_t[flat_indices], ctx[beam_indices], seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
if mask_undo:
masked_logit = logit.clone()
else:
masked_logit = logit
log_probs = F.log_softmax(logit, dim=1).data
# force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#action_scores, action_indices = log_probs.topk(min(beam_size, logit.size()[1]), dim=1)
_, action_indices = masked_logit.data.topk(min(beam_size, logit.size()[1]), dim=1)
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
new_beams = []
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states, beam_obs) in enumerate(zip(beams, world_states, obs)):
successors = []
end_index = start_index + len(beam)
assert len(beam_world_states) == len(beam)
assert len(beam_obs) == len(beam)
if beam:
for inf_index, (inf_state, world_state, ob, action_score_row, action_index_row) in \
enumerate(zip(beam, beam_world_states, beam_obs, action_scores[start_index:end_index], action_indices[start_index:end_index])):
flat_index = start_index + inf_index
for action_score, action_index in zip(action_score_row, action_index_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state,
world_state=world_state, # will be updated later after successors are pruned
observation=ob, # will be updated later after successors are pruned
flat_index=flat_index,
last_action=action_index,
last_action_embedding=all_u_t[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=float(inf_state.score + action_score), h_t=None, c_t=None,
last_alpha=alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)[:beam_size]
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
successor_obs = self.env.observe(successor_world_states, beamed=True)
all_successors = structured_map(lambda inf_state, world_state, obs: inf_state._replace(world_state=world_state, observation=obs),
all_successors, successor_world_states, successor_obs, nested=True)
# if all_successors[0]:
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
for beam_index, successors in enumerate(all_successors):
new_beam = []
for successor in successors:
if successor.last_action == 0 or t == self.episode_len - 1:
completed[beam_index].append(successor)
else:
new_beam.append(successor)
if len(completed[beam_index]) >= beam_size:
new_beam = []
new_beams.append(new_beam)
beams = new_beams
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
obs = [
[inf_state.observation for inf_state in beam]
for beam in beams
]
# Early exit if all ended
if not any(beam for beam in beams):
break
trajs = []
for this_completed in completed:
assert this_completed
this_trajs = []
for inf_state in sorted(this_completed, key=lambda t: t.score, reverse=True)[:beam_size]:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'trajectory': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
traversed_lists = None # todo
return trajs, completed, traversed_lists
def state_factored_search(self, completion_size, successor_size, load_next_minibatch=True, mask_undo=False, first_n_ws_key=4):
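        # State-factored graph search: unlike plain beam search above, at most
        # one (highest-scoring) inference state is kept per physical world
        # state, keyed by the first `first_n_ws_key` fields of the state tuple.
        # Up to `successor_size` states are expanded per step until
        # `completion_size` candidates have finished for each instance.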
assert self.env.beam_size >= successor_size
world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
initial_obs = self.env.observe(world_states, beamed=True)
batch_size = len(world_states)
# get mask and lengths
seq, seq_mask, seq_lengths = self._proc_batch(initial_obs, beamed=True)
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx,h_t,c_t = self.encoder(seq, seq_lengths)
completed = []
completed_holding = []
for _ in range(batch_size):
completed.append({})
completed_holding.append({})
state_cache = [
{ws[0][0:first_n_ws_key]: (InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=None,
last_action=-1,
last_action_embedding=self.decoder.u_begin.view(-1),
action_count=0,
score=0.0, h_t=h_t[i], c_t=c_t[i], last_alpha=None), True)}
for i, (ws, o) in enumerate(zip(world_states, initial_obs))
]
beams = [[inf_state for world_state, (inf_state, expanded) in sorted(instance_cache.items())]
                for instance_cache in state_cache] # sorting is a noop here since each instance_cache should only contain one entry
# traversed_lists = None
# list of inference states containing states in order of the states being expanded
last_expanded_list = []
traversed_lists = []
for beam in beams:
assert len(beam) == 1
first_state = beam[0]
last_expanded_list.append(first_state)
traversed_lists.append([first_state])
def update_traversed_lists(new_visited_inf_states):
assert len(new_visited_inf_states) == len(last_expanded_list)
assert len(new_visited_inf_states) == len(traversed_lists)
for instance_index, instance_states in enumerate(new_visited_inf_states):
last_expanded = last_expanded_list[instance_index]
# todo: if this passes, shouldn't need traversed_lists
assert last_expanded.world_state.viewpointId == traversed_lists[instance_index][-1].world_state.viewpointId
for inf_state in instance_states:
path_from_last_to_next = least_common_viewpoint_path(last_expanded, inf_state)
# path_from_last should include last_expanded's world state as the first element, so check and drop that
assert path_from_last_to_next[0].world_state.viewpointId == last_expanded.world_state.viewpointId
assert path_from_last_to_next[-1].world_state.viewpointId == inf_state.world_state.viewpointId
traversed_lists[instance_index].extend(path_from_last_to_next[1:])
last_expanded = inf_state
last_expanded_list[instance_index] = last_expanded
# Do a sequence rollout and calculate the loss
while any(len(comp) < completion_size for comp in completed):
beam_indices = []
u_t_list = []
h_t_list = []
c_t_list = []
flat_obs = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
u_t_list.append(inf_state.last_action_embedding)
h_t_list.append(inf_state.h_t.unsqueeze(0))
c_t_list.append(inf_state.c_t.unsqueeze(0))
flat_obs.append(inf_state.observation)
u_t_prev = torch.stack(u_t_list, dim=0)
assert len(u_t_prev.shape) == 2
f_t_list = self._feature_variables(flat_obs) # Image features from obs
all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
h_t = torch.cat(h_t_list, dim=0)
c_t = torch.cat(c_t_list, dim=0)
            assert len(f_t_list) == 1, 'for now, this only works with the MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx[beam_indices], seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
if mask_undo:
masked_logit = logit.clone()
else:
masked_logit = logit
log_probs = F.log_softmax(logit, dim=1).data
# force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
_, action_indices = masked_logit.data.topk(logit.size()[1], dim=1) # todo: fix this
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
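            # NOTE: `action_scores`/`action_indices` computed above are
            # currently unused; the successor loop below reads rows of
            # `log_probs` directly (see the `todo` above).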
start_index = 0
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states) in enumerate(zip(beams, world_states)):
successors = []
end_index = start_index + len(beam)
assert len(beam_world_states) == len(beam)
if beam:
for inf_index, (inf_state, world_state, action_score_row) in \
enumerate(zip(beam, beam_world_states, log_probs[start_index:end_index])):
flat_index = start_index + inf_index
for action_index, action_score in enumerate(action_score_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state,
world_state=world_state, # will be updated later after successors are pruned
observation=flat_obs[flat_index], # will be updated later after successors are pruned
flat_index=None,
last_action=action_index,
last_action_embedding=all_u_t[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=inf_state.score + action_score,
h_t=h_t[flat_index], c_t=c_t[flat_index],
last_alpha=alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
all_successors = structured_map(lambda inf_state, world_state: inf_state._replace(world_state=world_state),
all_successors, successor_world_states, nested=True)
# if all_successors[0]:
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
assert len(all_successors) == len(state_cache)
new_beams = []
for beam_index, (successors, instance_cache) in enumerate(zip(all_successors, state_cache)):
# early stop if we've already built a sizable completion list
instance_completed = completed[beam_index]
instance_completed_holding = completed_holding[beam_index]
if len(instance_completed) >= completion_size:
new_beams.append([])
continue
for successor in successors:
ws_keys = successor.world_state[0:first_n_ws_key]
if successor.last_action == 0 or successor.action_count == self.episode_len:
if ws_keys not in instance_completed_holding or instance_completed_holding[ws_keys][0].score < successor.score:
instance_completed_holding[ws_keys] = (successor, False)
else:
if ws_keys not in instance_cache or instance_cache[ws_keys][0].score < successor.score:
instance_cache[ws_keys] = (successor, False)
# third value: did this come from completed_holding?
uncompleted_to_consider = ((ws_keys, inf_state, False) for (ws_keys, (inf_state, expanded)) in instance_cache.items() if not expanded)
completed_to_consider = ((ws_keys, inf_state, True) for (ws_keys, (inf_state, expanded)) in instance_completed_holding.items() if not expanded)
import itertools
import heapq
to_consider = itertools.chain(uncompleted_to_consider, completed_to_consider)
ws_keys_and_inf_states = heapq.nlargest(successor_size, to_consider, key=lambda pair: pair[1].score)
new_beam = []
for ws_keys, inf_state, is_completed in ws_keys_and_inf_states:
if is_completed:
assert instance_completed_holding[ws_keys] == (inf_state, False)
instance_completed_holding[ws_keys] = (inf_state, True)
if ws_keys not in instance_completed or instance_completed[ws_keys].score < inf_state.score:
instance_completed[ws_keys] = inf_state
else:
instance_cache[ws_keys] = (inf_state, True)
new_beam.append(inf_state)
if len(instance_completed) >= completion_size:
new_beams.append([])
else:
new_beams.append(new_beam)
beams = new_beams
# Early exit if all ended
if not any(beam for beam in beams):
break
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
successor_obs = self.env.observe(world_states, beamed=True)
beams = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs),
beams, successor_obs, nested=True)
update_traversed_lists(beams)
completed_list = []
for this_completed in completed:
completed_list.append(sorted(this_completed.values(), key=lambda t: t.score, reverse=True)[:completion_size])
completed_ws = [
[inf_state.world_state for inf_state in comp_l]
for comp_l in completed_list
]
completed_obs = self.env.observe(completed_ws, beamed=True)
completed_list = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs),
completed_list, completed_obs, nested=True)
# TODO: consider moving observations and this update earlier so that we don't have to traverse as far back
update_traversed_lists(completed_list)
# TODO: sanity check the traversed lists here
trajs = []
for this_completed in completed_list:
assert this_completed
this_trajs = []
for inf_state in this_completed:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'trajectory': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
# completed_list: list of lists of final inference states corresponding to the candidates, one list per instance
# traversed_lists: list of "physical states" that the robot has explored, one per instance
return trajs, completed_list, traversed_lists
def set_beam_size(self, beam_size):
if self.env.beam_size < beam_size:
self.env.set_beam_size(beam_size)
self.beam_size = beam_size
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, beam_size=1):
''' Evaluate once on each instruction in the current environment '''
if not allow_cheat: # permitted for purpose of calculating validation loss only
assert feedback in ['argmax', 'sample'] # no cheating by using teacher at test time!
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
self.set_beam_size(beam_size)
return super(Seq2SeqAgent, self).test()
def train(self, encoder_optimizer, decoder_optimizer, n_iters, feedback='teacher'):
''' Train for a given number of iterations '''
assert all(f in self.feedback_options for f in feedback.split("+"))
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.losses = []
it = range(1, n_iters + 1)
try:
import tqdm
it = tqdm.tqdm(it)
except:
pass
for _ in it:
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
self._rollout_with_loss()
self.loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
def _encoder_and_decoder_paths(self, base_path):
return base_path + "_enc", base_path + "_dec"
def save(self, path):
''' Snapshot models '''
encoder_path, decoder_path = self._encoder_and_decoder_paths(path)
torch.save(self.encoder.state_dict(), encoder_path)
torch.save(self.decoder.state_dict(), decoder_path)
def load(self, path, **kwargs):
''' Loads parameters (but not training state) '''
encoder_path, decoder_path = self._encoder_and_decoder_paths(path)
self.encoder.load_state_dict(torch.load(encoder_path, **kwargs))
self.decoder.load_state_dict(torch.load(decoder_path, **kwargs))
|
twitch/twitch_models.py
|
Flame442/Trusty-cogs
| 148 |
76608
|
from dataclasses import dataclass
import discord
@dataclass(init=False)
class TwitchProfile:
def __init__(self, **kwargs):
self.id = kwargs.get("id")
self.login = kwargs.get("login")
self.display_name = kwargs.get("display_name")
self.acc_type = kwargs.get("acc_type")
self.broadcaster_type = kwargs.get("broadcaster_type")
self.description = kwargs.get("description")
self.profile_image_url = kwargs.get("profile_image_url")
self.offline_image_url = kwargs.get("offline_image_url")
self.view_count = kwargs.get("view_count")
@classmethod
def from_json(cls, data: dict):
data = data["data"][0]
return cls(**data)
def make_user_embed(self) -> discord.Embed:
        # builds the embed for a Twitch profile
em = discord.Embed(colour=int("6441A4", 16))
em.description = self.description
url = "https://twitch.tv/{}".format(self.login)
em.set_author(name=self.display_name, url=url, icon_url=self.profile_image_url)
em.set_image(url=self.offline_image_url)
em.set_thumbnail(url=self.profile_image_url)
footer_text = "{} Viewer count".format(self.view_count)
em.set_footer(text=footer_text, icon_url=self.profile_image_url)
return em
@dataclass(init=False)
class TwitchFollower:
def __init__(self, **kwargs):
self.from_id = kwargs.get("from_id")
self.to_id = kwargs.get("to_id")
self.followed_at = kwargs.get("followed_at")
@classmethod
def from_json(cls, data: dict):
return cls(**data)
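

# Usage sketch (illustrative, not part of the cog): build an embed from a
# Helix /users payload. The payload below is fabricated for demonstration.
if __name__ == "__main__":
    sample = {
        "data": [
            {
                "id": "1",
                "login": "example",
                "display_name": "Example",
                "description": "demo account",
                "view_count": 42,
                "profile_image_url": "https://example.invalid/p.png",
                "offline_image_url": "https://example.invalid/o.png",
            }
        ]
    }
    profile = TwitchProfile.from_json(sample)
    embed = profile.make_user_embed()
    print(embed.to_dict())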
|
libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_options.py
|
Fl4v/botbuilder-python
| 388 |
76626
|
<filename>libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_options.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
from botbuilder.schema import Activity
from botbuilder.dialogs.choices import Choice, ListStyle
class PromptOptions:
"""
Contains settings to pass to a :class:`Prompt` object when the prompt is started.
"""
def __init__(
self,
prompt: Activity = None,
retry_prompt: Activity = None,
choices: List[Choice] = None,
style: ListStyle = None,
validations: object = None,
number_of_attempts: int = 0,
):
"""
Sets the initial prompt to send to the user as an :class:`botbuilder.schema.Activity`.
:param prompt: The initial prompt to send to the user
:type prompt: :class:`botbuilder.schema.Activity`
:param retry_prompt: The retry prompt to send to the user
:type retry_prompt: :class:`botbuilder.schema.Activity`
:param choices: The choices to send to the user
:type choices: :class:`List`
:param style: The style of the list of choices to send to the user
:type style: :class:`ListStyle`
:param validations: The prompt validations
:type validations: :class:`Object`
:param number_of_attempts: The number of attempts allowed
:type number_of_attempts: :class:`int`
"""
self.prompt = prompt
self.retry_prompt = retry_prompt
self.choices = choices
self.style = style
self.validations = validations
self.number_of_attempts = number_of_attempts
|
Chapter2/stddev.py
|
buiksat/Learn-Algorithmic-Trading
| 449 |
76627
|
import pandas as pd
from pandas_datareader import data
start_date = '2014-01-01'
end_date = '2018-01-01'
SRC_DATA_FILENAME = 'goog_data.pkl'
try:
goog_data2 = pd.read_pickle(SRC_DATA_FILENAME)
except FileNotFoundError:
goog_data2 = data.DataReader('GOOG', 'yahoo', start_date, end_date)
goog_data2.to_pickle(SRC_DATA_FILENAME)
goog_data = goog_data2.tail(620)
close = goog_data['Close']
'''
Standard deviation is a statistical measure of variability. In trading
this value is known as volatility. A low standard deviation indicates
that the data points tend to be close to the mean, whereas a high
standard deviation indicates that the data points are spread out over a
large range of values.

For a look-back window of n periods, first calculate the moving average
(MA), then the mean squared deviation from it:

d = ((P1-MA)^2 + (P2-MA)^2 + ... + (Pn-MA)^2) / n

where Pn is the price observed in the nth interval. The standard
deviation is the square root of d:

stddev = sqrt(d)
'''
import statistics as stats
import math as math
time_period = 20 # look back period
history = [] # history of prices
sma_values = [] # to track moving average values for visualization purposes
stddev_values = [] # history of computed stdev values
for close_price in close:
history.append(close_price)
if len(history) > time_period: # we track at most 'time_period' number of prices
del (history[0])
sma = stats.mean(history)
sma_values.append(sma)
variance = 0 # variance is square of standard deviation
for hist_price in history:
variance = variance + ((hist_price - sma) ** 2)
stdev = math.sqrt(variance / len(history))
stddev_values.append(stdev)
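
# Sketch (added for clarity, not in the original script): pandas computes the
# same rolling population standard deviation in one call. ddof=0 matches the
# sqrt(variance / len(history)) formula above, and min_periods=1 reproduces
# the shorter warm-up windows the loop uses before `history` is full.
pandas_stddev = close.rolling(window=time_period, min_periods=1).std(ddof=0)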
goog_data = goog_data.assign(ClosePrice=pd.Series(close, index=goog_data.index))
goog_data = goog_data.assign(StandardDeviationOver20Days=pd.Series(stddev_values, index=goog_data.index))
close_price = goog_data['ClosePrice']
stddev = goog_data['StandardDeviationOver20Days']
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='Google price in $')
close_price.plot(ax=ax1, color='g', lw=2., legend=True)
ax2 = fig.add_subplot(212, ylabel='Stddev in $')
stddev.plot(ax=ax2, color='b', lw=2., legend=True)
ax2.axhline(y=stats.mean(stddev_values), color='k')
plt.show()
|
falcon/bench/nuts/nuts/controllers/root.py
|
RioAtHome/falcon
| 8,217 |
76641
|
import random
import pecan
from pecan import expose, response, request
_body = pecan.x_test_body
_headers = pecan.x_test_headers
class TestController:
def __init__(self, account_id):
self.account_id = account_id
@expose(content_type='text/plain')
def test(self):
user_agent = request.headers['User-Agent'] # NOQA
limit = request.params.get('limit', '10') # NOQA
response.headers.update(_headers)
return _body
class HelloController:
@expose()
def _lookup(self, account_id, *remainder):
return TestController(account_id), remainder
class RootController:
@expose(content_type='text/plain')
def index(self):
response.headers.update(_headers)
return _body
hello = HelloController()
|
fingerprint/feature_extractor.py
|
claraeyoon/FAST
| 126 |
76648
|
########################################################################################################
## pyFAST - Fingerprint and Similarity Thresholding in python
##
## <NAME>
## 11/14/2016
##
## (see Yoon et. al. 2015, Sci. Adv. for algorithm details)
##
########################################################################################################
##
## Feature Extraction (Fingerprinting)
##
########################################################################################################
import numpy as np
import pywt as wt
from sklearn.preprocessing import normalize
from scipy.signal import spectrogram
from scipy.misc import imresize
def init_feature_extractor(params, ntimes):
feats = FeatureExtractor(sampling_rate=params['fingerprint']['sampling_rate'],
window_length=params['fingerprint']['spec_length'],
window_lag=params['fingerprint']['spec_lag'],
fingerprint_length=params['fingerprint']['fp_length'],
fingerprint_lag=params['fingerprint']['fp_lag'],
min_freq=params['fingerprint']["min_freq"],
max_freq=params['fingerprint']["max_freq"],
nfreq=params['fingerprint']['nfreq'],
ntimes=ntimes)
return feats
class FeatureExtractor(object):
def __init__(self, sampling_rate, window_length, window_lag, fingerprint_length, fingerprint_lag,
min_freq = 0, max_freq = None, nfreq = 32, ntimes = 64):
self.sampling_rate = sampling_rate #/ sampling rate
self.window_len = window_length #/ length of window (seconds) used in spectrogram
self.window_lag = window_lag #/ window lag (seconds) used in spectrogram
self.fp_len = fingerprint_length #/ width of fingerprint (samples)
self.fp_lag = fingerprint_lag #/ lag between fingerprints (samples)
self.max_freq = self._initialize_frequencies(max_freq) #/ minimum and maximum frequencies for bandpass filter
self.min_freq = min_freq
self.new_d1 = int(nfreq) #/ number of frequency / time bins in fingerprints (must be power of 2) - TODO: error checking
self.new_d2 = int(ntimes)
self.d1 = None #/ dimension of spectral images prior to resizing
self.d2 = None
self.haar_means = None
self.haar_stddevs = None
self.haar_medians = None
self.haar_absdevs = None
def _initialize_frequencies(self, max_freq): #/ initializes data structure
if max_freq is None:
max_freq = self.sampling_rate/2.0
return max_freq
def update(self, field, value):
if hasattr(self, field):
setattr(self, field, value)
else:
print('WARNING: object has no attribute: ' + field)
            print('object has the following attributes: ' + str(list(self.__dict__.keys())))
return
def get_params(self):
mdict = dict()
for k in self.__dict__.keys():
if k not in ['haar_means','haar_stddevs','haar_absdevs','haar_medians']:
mdict[k] = self.__dict__[k]
return mdict
    #/ returns indices for overlapping windows
def get_window_params(self, N, L, dL):
idx0 = np.asarray(range(0, N+1, dL))
idx2 = np.asarray(range(L,N+1,dL))
nWindows = len(idx2)
idx1 = idx0[0:nWindows]
return nWindows, idx1, idx2
########################################################################
## FOR COMPUTING FINGERPRINTS ##
########################################################################
    #/ computes spectrogram from continuous timeseries data
def data_to_spectrogram(self, x_data, window_type = 'hanning'):
f, t, Sxx = spectrogram(x_data, fs=self.sampling_rate,
window=window_type, nperseg=int(self.sampling_rate*self.window_len),
noverlap = int(self.sampling_rate*(self.window_len - self.window_lag)))
# Truncate spectrogram, keep only passband frequencies
if self.min_freq > 0:
fidx_keep = (f >= self.min_freq)
Sxx = Sxx[fidx_keep, :]
f = f[fidx_keep]
if self.max_freq < f[-1]:
fidx_keep = (f <= self.max_freq)
Sxx = Sxx[fidx_keep, :]
f = f[fidx_keep]
self.frequencies = f
self.times = t
return f, t, Sxx
#/ breaks spectrogram into overlapping spectral images
def spectrogram_to_spectral_images(self, Sxx):
nFreq, nTimes = np.shape(Sxx)
nWindows, idx1, idx2 = self.get_window_params(nTimes, self.fp_len, self.fp_lag)
spectral_images = np.zeros([nWindows, nFreq, self.fp_len])
for i in range(nWindows):
spectral_images[i,:,:] = Sxx[:,idx1[i]:idx2[i]]
self.nwindows = nWindows
nWindows, self.d1, self.d2 = np.shape(spectral_images)
#self.new_d1, self.new_d2 = np.exp2(np.floor(np.log2([self.d1, self.d2])))
return spectral_images, nWindows, idx1, idx2
#/ resizes each spectral image to specified dimensions
def _resize_spectral_images(self, spectral_images, new_d1, new_d2):
new_spectral_images = np.zeros([self.nwindows,new_d1,new_d2])
for i in range(self.nwindows):
new_spectral_images[i,:,:] = imresize(spectral_images[i,:,:], (new_d1, new_d2), interp='bilinear', mode='F')
return new_spectral_images
#/ reshapes output from PyWavelets 2d wavelet transform into image
def _unwrap_wavelet_coeffs(self,coeffs):
L = len(coeffs)
cA = coeffs[0]
for i in range(1,L):
(cH, cV, cD) = coeffs[i]
cA = np.concatenate((np.concatenate((cA, cV),axis= 1),np.concatenate((cH, cD),axis = 1)),axis=0)
return cA
#/ computes wavelet transform for each spectral image
def spectral_images_to_wavelet(self, spectral_images, wavelet = wt.Wavelet('db1')):
if (int(self.new_d1)!=self.d1) or (int(self.new_d2)!=self.d2):
spectral_images = self._resize_spectral_images(spectral_images, self.new_d1, self.new_d2)
haar_images = np.zeros([self.nwindows,self.new_d1,self.new_d2])
for i in range(self.nwindows):
coeffs = wt.wavedec2(spectral_images[i,:,:], wavelet)
haar_images[i,:,:] = self._unwrap_wavelet_coeffs(coeffs)
return haar_images
    #/ computes (normalized) haar_images from continuous timeseries data
def data_to_haar_images(self, x_data):
f, t, Sxx = self.data_to_spectrogram(x_data)
spectral_images, nWindows, idx1, idx2 = self.spectrogram_to_spectral_images(Sxx)
haar_images = self.spectral_images_to_wavelet(spectral_images)
haar_images = normalize(self._images_to_vectors(haar_images), axis=1)
return haar_images, nWindows, idx1, idx2, Sxx, t
#/ converts set of images to array of vectors
def _images_to_vectors(self,images):
N,d1,d2 = np.shape(images)
vectors = np.zeros([N,d1*d2])
for i in range(N):
vectors[i,:] = np.reshape(images[i,:,:], (1,d1*d2))
return vectors
#/ converts set of vectors into set of images (of dimension d1 x d2)
def _vectors_to_images(self, vectors, d1, d2):
N,D = np.shape(vectors)
if D != d1*d2:
print('warning: invalid dimensions')
return vectors
else:
images = np.zeros([N,d1,d2])
for i in range(N):
images[i,:,:] = np.reshape(vectors[i,:], (d1,d2))
return images
    def compute_haar_stats(self, haar_images, type=None):
        if type == 'MAD':
shape = haar_images.shape
medians = []
for i in range(shape[1]):
medians.append(np.median(haar_images[:, i]))
self.haar_medians = np.array(medians)
mad = []
for i in range(shape[1]):
tmp = abs(haar_images[:, i] - medians[i])
mad.append(np.median(tmp))
self.haar_absdevs = np.array(mad)
return self.haar_medians, self.haar_absdevs
        if type == 'Zscore':
self.haar_means = np.mean(haar_images,axis=0)
self.haar_stddevs = np.std(haar_images,axis=0)
return self.haar_means, self.haar_stddevs
    def standardize_haar(self, haar_images, type='MAD'):
        if type == 'Zscore':
haar_images = (haar_images - self.haar_means)/self.haar_stddevs
return haar_images
        elif type == 'MAD':
haar_images = (haar_images - self.haar_medians)/self.haar_absdevs
return haar_images
else:
print('Warning: invalid type - select type MAD or Zscore')
return None
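    #/ keeps the K largest-magnitude coefficients of each vector and encodes
    #/ their signs in a doubled boolean vector: the first M entries flag
    #/ positive coefficients, the last M flag negative ones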
def binarize_vectors_topK_sign(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
binary_vectors = np.zeros((N,2*M), dtype=bool)
for i in range(N):
idx = np.argsort(abs(coeff_vectors[i,:]))[-K:]
binary_vectors[i,idx] = coeff_vectors[i,idx] > 0
binary_vectors[i,idx+M] = coeff_vectors[i,idx] < 0
return binary_vectors
def vectors_to_topK_sign(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
sign_vectors = np.zeros([N,M])
for i in range(N):
idx = np.argsort(abs(coeff_vectors[i,:]))[-K:]
sign_vectors[i,idx] = np.sign(coeff_vectors[i,idx])
return sign_vectors
def sign_to_binary(self, vector):
L = len(vector)
new_vec = np.zeros((L,2), dtype=bool)
new_vec[:,0] = vector > 0
new_vec[:,1] = vector < 0
return np.reshape(new_vec, (1,2*L))
def binarize_vectors_topK(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
sign_vectors = np.zeros((N,M),dtype=bool)
for i in range(N):
idx = np.argsort(coeff_vectors[i,:])[-K:]
sign_vectors[i,idx] = 1
return sign_vectors
def jaccard_sim(self, vec1, vec2):
return sum(vec1 & vec2)/ (1.0*sum(vec1 | vec2))
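

# --- usage sketch (illustrative, not part of pyFAST) ---
# Typical pipeline on a 1-D waveform: spectrogram -> spectral images ->
# Haar wavelet -> top-K sign fingerprints. Parameter values are placeholders;
# the resize step relies on scipy.misc.imresize from this module's imports
# (i.e., an old SciPy).
if __name__ == '__main__':
    feats = FeatureExtractor(sampling_rate=20, window_length=6.0,
                             window_lag=0.2, fingerprint_length=128,
                             fingerprint_lag=10, nfreq=32, ntimes=64)
    x_data = np.random.randn(20 * 3600)  # one hour of synthetic data
    haar_images, nWindows, idx1, idx2, Sxx, t = feats.data_to_haar_images(x_data)
    fingerprints = feats.binarize_vectors_topK_sign(haar_images, K=200)
    print(fingerprints.shape)  # (nWindows, 2 * nfreq * ntimes)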
|
hnsw/helper.py
|
Tumao727/covidex
| 128 |
76657
|
import csv
import os
def remove_if_exist(path):
if os.path.exists(path):
os.remove(path)
def load_metadata(path):
res = {}
headers = None
with open(path, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
if headers is None:
headers = row
continue
item = {}
uid = row[0]
for index, token in enumerate(row):
if index != 0:
item[headers[index]] = token
res[uid] = item
return res
def load_specter_embeddings(path):
res = {}
dim = None
with open(path, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
uid = row[0]
vector = row[1:]
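            # NOTE: embedding values stay as strings here; callers may need to
            # cast them to float before building an index.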
res[uid] = vector
if dim is None:
dim = len(vector)
else:
                assert dim == len(vector), "Embedding dimension mismatch"
return res, dim
def save_index_to_uid_file(index_to_uid, index, path):
    # NOTE: the `index` parameter is unused; the loop variable is named `i`
    # to avoid shadowing it.
    remove_if_exist(path)
    with open(path, 'w') as f:
        for i, uid in enumerate(index_to_uid):
            f.write(f"{i} {uid}\n")
|
urbansim/models/relocation.py
|
waddell/urbansim
| 351 |
76658
|
"""
Use the ``RelocationModel`` class to choose movers based on
relocation rates.
"""
import logging
import numpy as np
import pandas as pd
from . import util
logger = logging.getLogger(__name__)
def find_movers(choosers, rates, rate_column):
"""
Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
        rates. Columns that end with '_max' will be used to create
        "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""
logger.debug('start: find movers for relocation')
relocation_rates = pd.Series(
np.zeros(len(choosers)), index=choosers.index)
for _, row in rates.iterrows():
indexes = util.filter_table(choosers, row, ignore={rate_column}).index
relocation_rates.loc[indexes] = row[rate_column]
movers = relocation_rates.index[
relocation_rates > np.random.random(len(choosers))]
logger.debug('picked {} movers for relocation'.format(len(movers)))
logger.debug('finish: find movers for relocation')
return movers
class RelocationModel(object):
"""
Find movers within a population according to a table of
relocation rates.
Parameters
----------
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
        rates. Columns that end with '_max' will be used to create
        "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object, optional
Name of column in `rates` table that contains relocation rates.
If not given 'probability_of_relocating' is used.
"""
def __init__(self, rates, rate_column=None):
self.relocation_rates = rates
self.rate_column = rate_column or 'probability_of_relocating'
def find_movers(self, choosers):
"""
Select movers from among a table of `choosers` according to the
stored relocation rates.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""
return find_movers(choosers, self.relocation_rates, self.rate_column)
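

if __name__ == '__main__':
    # Usage sketch (illustrative; running this module directly will fail on
    # the relative `util` import above, so treat this as documentation):
    # households aged 40-64 relocate with probability 0.1, households 65 and
    # over with probability 0.05.
    example_rates = pd.DataFrame({
        'age_of_head_min': [40.0, 65.0],
        'age_of_head_max': [65.0, np.nan],
        'probability_of_relocating': [0.1, 0.05],
    })
    model = RelocationModel(example_rates)
    # movers = model.find_movers(households)  # `households` is hypothetical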
|
feudal_networks/policies/feudal_policy.py
|
zqcchris/feudal_networks
| 141 |
76698
|
<reponame>zqcchris/feudal_networks
import distutils.version
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import feudal_networks.policies.policy as policy
import feudal_networks.policies.policy_utils as policy_utils
from feudal_networks.models.models import SingleStepLSTM
from feudal_networks.policies.configs.feudal_config import config
from feudal_networks.policies.feudal_batch_processor import FeudalBatchProcessor
class FeudalPolicy(policy.Policy):
"""
Policy of the Feudal network architecture.
"""
def __init__(self, obs_space, act_space,global_step):
self.global_step = global_step
self.obs_space = obs_space
self.act_space = act_space
self.config = config
self.k = config.k #Dimensionality of w
self.g_dim = config.g_dim
self.c = config.c
self.batch_processor = FeudalBatchProcessor(self.c)
self._build_model()
def _build_model(self):
"""
Builds the manager and worker models.
"""
with tf.variable_scope('FeUdal'):
self._build_placeholders()
self._build_perception()
self._build_manager()
self._build_worker()
self._build_loss()
self.var_list = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
# for v in self.var_list:
# print v.name
self.state_in = [self.worker_lstm.state_in[0],\
self.worker_lstm.state_in[1],\
self.manager_lstm.state_in[0],\
self.manager_lstm.state_in[1]\
]
self.state_out = [self.worker_lstm.state_out[0],\
self.worker_lstm.state_out[1],\
self.manager_lstm.state_out[0],\
self.manager_lstm.state_out[1]\
]
# for v in self.var_list:
# print v
def _build_placeholders(self):
#standard for all policies
self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space))
self.r = tf.placeholder(tf.float32,(None,))
self.ac = tf.placeholder(tf.float32,(None,self.act_space))
self.adv = tf.placeholder(tf.float32, [None]) #unused
#specific to FeUdal
self.prev_g = tf.placeholder(tf.float32, (None,None,self.g_dim))
self.ri = tf.placeholder(tf.float32,(None,))
self.s_diff = tf.placeholder(tf.float32,(None,self.g_dim))
def _build_perception(self):
conv1 = tf.layers.conv2d(inputs=self.obs,
filters=16,
kernel_size=[8, 8],
activation=tf.nn.elu,
strides=4)
conv2 = tf.layers.conv2d(inputs=conv1,
filters=32,
kernel_size=[4,4],
activation=tf.nn.elu,
strides=2)
flattened_filters = policy_utils.flatten(conv2)
self.z = tf.layers.dense(inputs=flattened_filters,\
units=256,\
activation=tf.nn.elu)
def _build_manager(self):
with tf.variable_scope('manager'):
# Calculate manager internal state
self.s = tf.layers.dense(inputs=self.z,\
units=self.g_dim,\
activation=tf.nn.elu)
# Calculate manager output g
x = tf.expand_dims(self.s, [0])
self.manager_lstm = SingleStepLSTM(x,\
self.g_dim,\
step_size=tf.shape(self.obs)[:1])
g_hat = self.manager_lstm.output
self.g = tf.nn.l2_normalize(g_hat, dim=1)
self.manager_vf = self._build_value(g_hat)
# self.manager_vf = tf.Print(self.manager_vf,[self.manager_vf])
def _build_worker(self):
with tf.variable_scope('worker'):
num_acts = self.act_space
# Calculate U
self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]),\
size=num_acts * self.k,
step_size=tf.shape(self.obs)[:1])
flat_logits = self.worker_lstm.output
self.worker_vf = self._build_value(flat_logits)
U = tf.reshape(flat_logits,[-1,num_acts,self.k])
# Calculate w
cut_g = tf.stop_gradient(self.g)
cut_g = tf.expand_dims(cut_g, [1])
gstack = tf.concat([self.prev_g,cut_g], axis=1)
self.last_c_g = gstack[:,1:]
# print self.last_c_g
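            # Pool the current goal with the previous c goals and project the
            # sum through phi to obtain the worker's goal-embedding weight
            # vector w, as in the FeUdal Networks (FuN) paper.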
gsum = tf.reduce_sum(gstack, axis=1)
phi = tf.get_variable("phi", (self.g_dim, self.k))
w = tf.matmul(gsum,phi)
w = tf.expand_dims(w,[2])
# Calculate policy and sample
logits = tf.reshape(tf.matmul(U,w),[-1,num_acts])
self.pi = tf.nn.softmax(logits)
self.log_pi = tf.nn.log_softmax(logits)
self.sample = policy_utils.categorical_sample(
tf.reshape(logits,[-1,num_acts]), num_acts)[0, :]
def _build_value(self,input):
with tf.variable_scope('VF'):
hidden = tf.layers.dense(inputs=input,\
units=self.config.vf_hidden_size,\
activation=tf.nn.elu)
w = tf.get_variable("weights", (self.config.vf_hidden_size, 1))
return tf.matmul(hidden,w)
def _build_loss(self):
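        # The manager loss below follows the FuN transition policy gradient:
        # the advantage (R - V_manager) weighted by the cosine similarity
        # between the realized state change s_diff and the emitted goal g.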
cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf),[-1])
dot = tf.reduce_sum(tf.multiply(self.s_diff,self.g ),axis=1)
gcut = tf.stop_gradient(self.g)
mag = tf.norm(self.s_diff,axis=1)*tf.norm(gcut,axis=1)+.0001
dcos = dot/mag
manager_loss = -tf.reduce_sum((self.r-cutoff_vf_manager)*dcos)
cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf),[-1])
log_p = tf.reduce_sum(self.log_pi*self.ac,[1])
worker_loss = (self.r + self.config.alpha*self.ri - cutoff_vf_worker)*log_p
worker_loss = -tf.reduce_sum(worker_loss,axis=0)
Am = self.r-self.manager_vf
manager_vf_loss = .5*tf.reduce_sum(tf.square(Am))
Aw = (self.r + self.config.alpha*self.ri)-self.worker_vf
worker_vf_loss = .5*tf.reduce_sum(tf.square(Aw))
entropy = -tf.reduce_sum(self.pi * self.log_pi)
beta = tf.train.polynomial_decay(config.beta_start, self.global_step,
end_learning_rate=config.beta_end,
decay_steps=config.decay_steps,
power=1)
# worker_loss = tf.Print(worker_loss,[manager_loss,worker_loss,manager_vf_loss,worker_vf_loss,entropy])
self.loss = worker_loss+manager_loss+\
worker_vf_loss + manager_vf_loss-\
entropy*beta
bs = tf.to_float(tf.shape(self.obs)[0])
tf.summary.scalar("model/manager_loss", manager_loss / bs)
tf.summary.scalar("model/worker_loss", worker_loss / bs)
tf.summary.scalar("model/value_mean", tf.reduce_mean(self.manager_vf))
tf.summary.scalar("model/value_loss", manager_vf_loss / bs)
tf.summary.scalar("model/value_loss_scaled", manager_vf_loss / bs * .5)
tf.summary.scalar("model/entropy", entropy / bs)
tf.summary.scalar("model/entropy_loss_scaleed", -entropy / bs * beta)
# tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
tf.summary.scalar("model/var_global_norm", tf.global_norm(tf.get_collection(\
tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)))
tf.summary.scalar("model/beta", beta)
tf.summary.image("model/state", self.obs)
self.summary_op = tf.summary.merge_all()
def get_initial_features(self):
return np.zeros((1,1,self.g_dim),np.float32),self.worker_lstm.state_init+self.manager_lstm.state_init
def act(self, ob, g,cw,hw,cm,hm):
sess = tf.get_default_session()
return sess.run([self.sample, self.manager_vf, self.g, self.s, self.last_c_g] + self.state_out,
{self.obs: [ob], self.state_in[0]: cw, self.state_in[1]: hw,\
self.state_in[2]: cm, self.state_in[3]: hm,\
self.prev_g: g})
def value(self, ob, g, cw, hw, cm, hm):
sess = tf.get_default_session()
return sess.run(self.manager_vf,
{self.obs: [ob], self.state_in[0]: cw, self.state_in[1]: hw,\
self.state_in[2]: cm, self.state_in[3]: hm,\
self.prev_g: g})[0]
def update_batch(self,batch):
return self.batch_processor.process_batch(batch)
|
etl/parsers/etw/Microsoft_Windows_Sdstor.py
|
IMULMUL/etl-parser
| 104 |
76699
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Sdstor
GUID : afe654eb-0a83-4eb4-948f-d4510ec39c30
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30"), event_id=100, version=0)
class Microsoft_Windows_Sdstor_100_0(Etw):
pattern = Struct(
"Port" / Int8ul,
"Bus" / Int8ul,
"Target" / Int8ul,
"LUN" / Int8ul,
"RequestDuration" / Int64ul,
"CDBLength" / Int32ul,
"CDB" / Bytes(lambda this: this.CDBLength),
"SrbStatus" / Int8ul,
"Irp" / Int64ul,
"OriginalIrp" / Int64ul
)
@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30"), event_id=101, version=0)
class Microsoft_Windows_Sdstor_101_0(Etw):
pattern = Struct(
"PackedCommandCount" / Int32ul,
"NumIrpsPacked" / Int32ul
)
@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30"), event_id=102, version=0)
class Microsoft_Windows_Sdstor_102_0(Etw):
pattern = Struct(
"PackedCommandCount" / Int32ul,
"NumIrpsPacked" / Int32ul
)
@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30"), event_id=105, version=0)
class Microsoft_Windows_Sdstor_105_0(Etw):
pattern = Struct(
"ResultCode" / Int32ul
)
@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30"), event_id=107, version=0)
class Microsoft_Windows_Sdstor_107_0(Etw):
pattern = Struct(
"LBA" / Int64ul,
"Length" / Int32ul
)
|
Algo and DSA/LeetCode-Solutions-master/Python/maximum-vacation-days.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
76718
|
<gh_stars>1000+
# Time: O(n^2 * k)
# Space: O(k)
class Solution(object):
def maxVacationDays(self, flights, days):
"""
:type flights: List[List[int]]
:type days: List[List[int]]
:rtype: int
"""
if not days or not flights:
return 0
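        # dp[week % 2][city]: max vacation days from `week` onward, given you
        # start that week in `city` (you may still fly out that morning).
        # Weeks are processed backwards and only two rows are kept (rolling
        # array), which gives the O(k) space bound.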
dp = [[0] * len(days) for _ in xrange(2)]
for week in reversed(xrange(len(days[0]))):
for cur_city in xrange(len(days)):
dp[week % 2][cur_city] = days[cur_city][week] + dp[(week+1) % 2][cur_city]
for dest_city in xrange(len(days)):
if flights[cur_city][dest_city] == 1:
dp[week % 2][cur_city] = max(dp[week % 2][cur_city], \
days[dest_city][week] + dp[(week+1) % 2][dest_city])
return dp[0][0]
|
py_entitymatching/matcherselector/mlmatchercombinerselection.py
|
kvpradap/py_entitymatching
| 165 |
76719
|
<gh_stars>100-1000
"""
This module contains functions for ML-matcher combiner selection.
Note: This is not going to be there for the first release of py_entitymatching.
"""
import itertools
import six
from py_entitymatching.matcherselector.mlmatcherselection import select_matcher
from py_entitymatching.matcher.ensemblematcher import EnsembleMatcher
def selector_matcher_combiner(matchers, combiners, x=None, y=None, table=None, exclude_attrs=None, target_attr=None,
weights=None, threshold=None, k=5):
if not isinstance(matchers, list):
matchers = [matchers]
if not isinstance(combiners, list):
combiners = [combiners]
matcher_list = get_matcher_list(matchers, combiners, weights, threshold)
return select_matcher(matcher_list, x=x, y=y, table=table, exclude_attrs=exclude_attrs, target_attr=target_attr,
k=k)
def get_matcher_list(matchers, combiners, weights, threshold):
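    # Enumerates every combination of two or more matchers and wraps each in
    # an EnsembleMatcher, once per voting scheme in `combiners`; the singleton
    # matchers themselves are also included as-is.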
ensemble_len = range(2, len(matchers) + 1)
matcher_list = []
matcher_list.extend(matchers)
for l in ensemble_len:
iter_combns = itertools.combinations(six.moves.xrange(0,
len(matchers)), l)
for ic in iter_combns:
for c in combiners:
m = [matchers[i] for i in ic]
                if c == 'Weighted':
em = EnsembleMatcher(m, voting=c, weights=weights, threshold=threshold)
else:
em = EnsembleMatcher(m, voting=c)
matcher_list.append(em)
return matcher_list
|
xdfile/tests/test_utils.py
|
jmviz/xd
| 179 |
76721
|
"""unit tests for utils.py"""
import os
from xdfile import utils
TEST_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
def test_find_files():
mygen = utils.find_files(TEST_DIRECTORY)
for fullfn, contents in mygen:
# It should throw out anything starting with '.'
assert not fullfn.startswith('.')
|
DiffAugment-biggan-imagenet/compare_gan/gans/abstract_gan.py
|
Rian-T/data-efficient-gans
| 1,902 |
76743
|
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for GAN models that can be trained using the Estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow as tf
@six.add_metaclass(abc.ABCMeta)
class AbstractGAN(object):
"""Interface for GAN models that can be training using the Estimator API."""
def __init__(self,
dataset,
parameters,
model_dir):
super(AbstractGAN, self).__init__()
self._dataset = dataset
self._parameters = parameters
self._model_dir = model_dir
def as_estimator(self, run_config, batch_size, use_tpu):
"""Returns a TPUEstimator for this GAN."""
return tf.contrib.tpu.TPUEstimator(
config=run_config,
use_tpu=use_tpu,
model_fn=self.model_fn,
train_batch_size=batch_size)
@abc.abstractmethod
def as_module_spec(self, params, mode):
"""Returns the generator network as TFHub module spec."""
@abc.abstractmethod
def input_fn(self, params, mode):
"""Input function that retuns a `tf.data.Dataset` object.
This function will be called once for each host machine.
Args:
params: Python dictionary with parameters given to TPUEstimator.
        Additionally, TPUEstimator will set the key `batch_size` with the
        batch size for this host machine and `tpu_context` with a TPUContext
        object.
      mode: `tf.estimator.ModeKeys` value.
Returns:
A `tf.data.Dataset` object with batched features and labels.
"""
@abc.abstractmethod
def model_fn(self, features, labels, params, mode):
"""Constructs the model for the given features and mode.
This interface only requires implementing the TRAIN mode.
On TPUs the model_fn should construct a graph for a single TPU core.
Wrap the optimizer with a `tf.contrib.tpu.CrossShardOptimizer` to do
    synchronous training with all TPU cores.
Args:
features: A dictionary with the feature tensors.
      labels: Tensor with labels. Will be None if mode is PREDICT.
params: Dictionary with hyperparameters passed to TPUEstimator.
        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
`tpu_context`. `batch_size` is the batch size for this core.
mode: `tf.estimator.ModeKeys` value (TRAIN, EVAL, PREDICT). The mode
should be passed to the TPUEstimatorSpec and your model should be
        built for this mode.
Returns:
A `tf.contrib.tpu.TPUEstimatorSpec`.
"""
|
RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWpNoTtbar.py
|
ckamtsikis/cmssw
| 852 |
76767
|
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *
PoolDBESSourcebtagMuJetsWpNoTtbar = cms.ESSource("PoolDBESSource",
CondDBCommon,
toGet = cms.VPSet(
#
# working points
#
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARTCHPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARTCHPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_WP')
),
))
PoolDBESSourcebtagMuJetsWpNoTtbar.connect = 'frontier://FrontierProd/CMS_COND_PAT_000'
|
tests/test_labels.py
|
long-long-float/py-videocore
| 783 |
76805
|
'Test of label scope and label exporter'
import numpy as np
from videocore.assembler import qpu, get_label_positions
from videocore.driver import Driver
@qpu
def given_jmp(asm):
mov(ra0, uniform)
mov(r0, 0)
L.entry
jmp(reg=ra0)
nop()
nop()
nop()
iadd(r0, r0, 1)
L.test
iadd(r0, r0, 4)
setup_vpm_write()
mov(vpm, r0)
setup_dma_store(nrows=1)
start_dma_store(uniform)
wait_dma_store()
exit()
def test_given_jump():
lbls = get_label_positions(given_jmp)
entry_pc = 0
test_pc = 0
for lbl, pc in lbls:
if lbl.name == 'entry':
entry_pc = pc
if lbl.name == 'test':
test_pc = pc
with Driver() as drv:
X = drv.alloc((1, 16), 'int32')
X[:] = 1234
drv.execute(
n_threads=1,
program=drv.program(given_jmp),
uniforms=[test_pc-entry_pc-32, X.address]
)
assert np.all(X == 4)
@qpu
def with_namespace(asm):
mov(r0, 0)
with namespace('ns1'):
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
with namespace('nested'):
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
with namespace('ns2'):
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
setup_vpm_write()
mov(vpm, r0)
setup_dma_store(nrows=1)
start_dma_store(uniform)
wait_dma_store()
exit()
def test_with_namespace():
with Driver() as drv:
X = drv.alloc((1, 16), 'int32')
X[:] = 1234
drv.execute(
n_threads=1,
program=drv.program(with_namespace),
uniforms=[X.address]
)
assert np.all(X == 4)
|
kratos/tests/test_gid_io_gauss_points.py
|
lkusch/Kratos
| 778 |
76808
|
<reponame>lkusch/Kratos
from KratosMultiphysics import *
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.kratos_utilities as kratos_utils
try:
from KratosMultiphysics.FluidDynamicsApplication import *
have_fluid_dynamics = True
except ImportError:
have_fluid_dynamics = False
import filecmp
import os
class WorkFolderScope(object):
'''Auxiliary class to define a work folder for the tests.'''
def __init__(self, work_folder):
self.currentPath = os.getcwd()
self.scope = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),work_folder))
def __enter__(self):
os.chdir(self.scope)
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.currentPath)
@UnitTest.skipUnless(have_fluid_dynamics,"Missing required application: FluidDynamicsApplication")
class TestGiDIOGaussPoints(UnitTest.TestCase):
'''Tests related to GiD I/O Gauss point results printing.'''
def setUp(self):
self.setModelPart()
self.workFolder = "auxiliar_files_for_python_unittest/gid_io"
def tearDown(self):
with WorkFolderScope(self.workFolder):
for suffix in ['_0.post.res', '_0.post.msh']:
kratos_utils.DeleteFileIfExisting(self.output_file_name+suffix)
def setModelPart(self):
self.model = Model()
modelPart = self.model.CreateModelPart("Test ModelPart")
modelPart.AddNodalSolutionStepVariable(DISTANCE)
modelPart.AddNodalSolutionStepVariable(VELOCITY)
nodes = list()
nodes.append( modelPart.CreateNewNode(1, 0.0, 0.0, 0.0) )
nodes.append( modelPart.CreateNewNode(2, 1.0, 0.0, 0.0) )
nodes.append( modelPart.CreateNewNode(3, 1.0, 1.0, 0.0) )
nodes.append( modelPart.CreateNewNode(4, 0.0, 1.0, 0.0) )
nodes.append( modelPart.CreateNewNode(5, 0.5, 0.5, 1.0) )
for node in nodes:
rx = node.X - 0.5
rz = node.Z - 0.5
r = (rx**2 + rz**2)**0.5
vel = Array3()
vel[0] = - rz/r
vel[1] = 0.0
vel[2] = rx/r
node.SetSolutionStepValue(VELOCITY,0,vel)
node.SetSolutionStepValue(DISTANCE,0,r)
properties = modelPart.GetProperties()[0]
modelPart.CreateNewElement("VMS3D",1,[1,2,4,5],properties)
modelPart.CreateNewElement("VMS3D",2,[2,3,4,5],properties)
modelPart.CreateNewCondition("MonolithicWallCondition3D",1,[1,5,4],properties)
modelPart.CreateNewCondition("MonolithicWallCondition3D",2,[1,2,5],properties)
modelPart.CreateNewCondition("MonolithicWallCondition3D",3,[2,3,5],properties)
modelPart.CreateNewCondition("MonolithicWallCondition3D",4,[3,4,5],properties)
modelPart.SetBufferSize(2)
self.modelPart = modelPart
def deactivateSome(self):
for elem in self.modelPart.Elements:
if elem.Id % 2 == 0:
elem.Set(ACTIVE,False)
for cond in self.modelPart.Conditions:
if cond.Id % 2 == 0:
cond.Set(ACTIVE,False)
def initializeOutputFile(self):
self.gid_io = GidIO(
self.output_file_name,
self.post_mode,
MultiFileFlag.SingleFile,
WriteDeformedMeshFlag.WriteUndeformed,
WriteConditionsFlag.WriteConditions)
self.gid_io.InitializeMesh(0)
self.gid_io.WriteMesh(self.modelPart.GetMesh())
self.gid_io.FinalizeMesh()
self.gid_io.InitializeResults(0.0, self.modelPart.GetMesh())
def writeResults(self,label):
self.gid_io.WriteNodalResults(VELOCITY, self.modelPart.Nodes, label, 0)
self.gid_io.PrintOnGaussPoints(VORTICITY, self.modelPart, label)
self.gid_io.PrintOnGaussPoints(NORMAL, self.modelPart, label)
self.gid_io.PrintFlagsOnGaussPoints(ACTIVE, "ACTIVE", self.modelPart, label)
def finalizeOutputFile(self):
self.gid_io.FinalizeResults()
def outputMatchesReferenceSolution(self):
msh_file_matches = filecmp.cmp(self.reference_file_name+'_0.post.msh',self.output_file_name+'_0.post.msh')
res_file_matches = filecmp.cmp(self.reference_file_name+'_0.post.res',self.output_file_name+'_0.post.res')
return msh_file_matches and res_file_matches
def test_write_active_only(self):
self.post_mode = GiDPostMode.GiD_PostAscii
self.output_file_name = "test_gid_io_gp_active_only"
self.reference_file_name = "ref_gid_io_gp_active_only"
self.deactivateSome()
with WorkFolderScope(self.workFolder):
self.initializeOutputFile()
self.writeResults(0.0)
self.finalizeOutputFile()
self.assertTrue(self.outputMatchesReferenceSolution())
def test_write_dynamic_deactivation(self):
self.post_mode = GiDPostMode.GiD_PostAscii
self.output_file_name = "test_gid_io_gp_dynamic_deactivation"
self.reference_file_name = "ref_gid_io_gp_dynamic_deactivation"
with WorkFolderScope(self.workFolder):
self.initializeOutputFile()
self.writeResults(0.0)
self.deactivateSome()
self.writeResults(1.0)
self.finalizeOutputFile()
self.assertTrue(self.outputMatchesReferenceSolution())
if __name__ == '__main__':
test = TestGiDIOGaussPoints()
test.setUp()
test.test_write_active_only()
test.tearDown()
test.setUp()
test.test_write_dynamic_deactivation()
test.tearDown()
|
btrack/__init__.py
|
dstansby/BayesianTracker
| 196 |
76869
|
# __all__ = ['core','utils','constants','render']
from .core import BayesianTracker, __version__
|
tests/hubstorage/test_jobsmeta.py
|
pardo/python-scrapinghub
| 163 |
76888
|
"""
Test job metadata
System tests for operations on stored job metadata
"""
from ..conftest import TEST_SPIDER_NAME
from .conftest import start_job
def _assertMetadata(meta1, meta2):
def _clean(m):
return dict((k, v) for k, v in m.items() if k != 'updated_time')
meta1 = _clean(meta1)
meta2 = _clean(meta2)
assert meta1 == meta2
def test_basic(hsclient, hsproject):
job = hsproject.push_job(TEST_SPIDER_NAME)
assert 'auth' not in job.metadata
assert 'state' in job.metadata
assert job.metadata['spider'] == TEST_SPIDER_NAME
# set some metadata and forget it
job.metadata['foo'] = 'bar'
assert job.metadata['foo'] == 'bar'
job.metadata.expire()
assert 'foo' not in job.metadata
# set it again and persist it
job.metadata['foo'] = 'bar'
assert job.metadata['foo'] == 'bar'
job.metadata.save()
assert job.metadata['foo'] == 'bar'
job.metadata.expire()
assert job.metadata['foo'] == 'bar'
# refetch the job and compare its metadata
job2 = hsclient.get_job(job.key)
_assertMetadata(job2.metadata, job.metadata)
# delete foo but do not persist it
del job.metadata['foo']
assert 'foo' not in job.metadata
job.metadata.expire()
assert job.metadata.get('foo') == 'bar'
# persist it to be sure it is not removed
job.metadata.save()
assert job.metadata.get('foo') == 'bar'
# and finally delete again and persist it
del job.metadata['foo']
assert 'foo' not in job.metadata
job.metadata.save()
assert 'foo' not in job.metadata
job.metadata.expire()
assert 'foo' not in job.metadata
job2 = hsclient.get_job(job.key)
_assertMetadata(job.metadata, job2.metadata)
def test_updating(hsproject):
job = hsproject.push_job(TEST_SPIDER_NAME)
assert job.metadata.get('foo') is None
job.update_metadata({'foo': 'bar'})
# metadata attr should change
assert job.metadata.get('foo') == 'bar'
# as well as actual metadata
job = hsproject.get_job(job.key)
assert job.metadata.get('foo') == 'bar'
job.update_metadata({'foo': None})
assert not job.metadata.get('foo', False)
# there are ignored fields like: auth, _key, state
state = job.metadata['state']
job.update_metadata({'state': 'running'})
assert job.metadata['state'] == state
def test_representation(hsproject):
job = hsproject.push_job(TEST_SPIDER_NAME)
meta = job.metadata
assert str(meta) != repr(meta)
assert meta == eval(str(meta))
assert meta.__class__.__name__ in repr(meta)
assert meta.__class__.__name__ not in str(meta)
def test_jobauth(hsclient, hsproject):
job = hsproject.push_job(TEST_SPIDER_NAME)
assert job.jobauth is None
assert job.auth == hsproject.auth
assert job.items.auth == hsproject.auth
samejob = hsclient.get_job(job.key)
assert samejob.auth is None
assert samejob.jobauth is None
assert samejob.items.auth == hsproject.auth
def test_authtoken(hsproject):
pendingjob = hsproject.push_job(TEST_SPIDER_NAME)
runningjob = start_job(hsproject)
assert pendingjob.key == runningjob.key
assert runningjob.jobauth
assert runningjob.jobauth == runningjob.auth
assert runningjob.auth[0] == runningjob.key
assert runningjob.auth[1]
|
platypush/message/response/printer/cups.py
|
RichardChiang/platypush
| 228 |
76893
|
from typing import Optional, List
from platypush.message.response import Response
class PrinterResponse(Response):
def __init__(self,
*args,
name: str,
printer_type: int,
info: str,
uri: str,
state: int,
is_shared: bool,
state_message: Optional[str] = None,
state_reasons: Optional[List[str]] = None,
location: Optional[str] = None,
uri_supported: Optional[str] = None,
make_and_model: Optional[str] = None,
**kwargs):
super().__init__(*args, output={
'name': name,
'printer_type': printer_type,
'info': info,
'uri': uri,
'state': state,
'is_shared': is_shared,
'state_message': state_message,
'state_reasons': state_reasons,
'location': location,
'uri_supported': uri_supported,
'make_and_model': make_and_model,
}, **kwargs)
class PrintersResponse(Response):
def __init__(self,
*args,
printers: List[PrinterResponse],
**kwargs):
super().__init__(*args, output={p.output['name']: p.output for p in printers}, **kwargs)
class PrinterJobAddedResponse(Response):
def __init__(self,
*args,
printer: str,
job_id: int,
**kwargs):
super().__init__(*args, output={
'printer': printer,
'job_id': job_id,
}, **kwargs)
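# Construction sketch (values are illustrative):
#   PrinterJobAddedResponse(printer='Office_Laser', job_id=42)
# yields a Response whose ``output`` is {'printer': 'Office_Laser', 'job_id': 42}.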
# vim:sw=4:ts=4:et:
|
bert-quantization/bert-tf-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/layers/utils.py
|
dujiangsu/FasterTransformer
| 777 |
76897
|
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Some helper functions for implementing quantized layers"""
import copy
from ft_tensorflow_quantization.python.layers.tensor_quantizer import QuantDescriptor
class QuantMixin():
"""Mixin class for adding basic quantization logic to quantized modules"""
default_quant_desc_input = QuantDescriptor('input')
default_quant_desc_kernel = QuantDescriptor('kernel', axis=-1)
@classmethod
def set_default_quant_desc_input(cls, value):
"""
Args:
value: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!".format(value))
cls.default_quant_desc_input = copy.deepcopy(value)
@classmethod
def set_default_quant_desc_kernel(cls, value):
"""
Args:
value: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!".format(value))
cls.default_quant_desc_kernel = copy.deepcopy(value)
def pop_quant_desc_in_kwargs(quant_cls, **kwargs):
"""Pop quant descriptors in kwargs
If there is no descriptor in kwargs, the default one in quant_cls will be used
Arguments:
quant_cls: A class that has default quantization descriptors
Keyword Arguments:
quant_desc_input: An instance of QuantDescriptor. Quantization descriptor of input.
quant_desc_kernel: An instance of QuantDescriptor. Quantization descriptor of kernel.
"""
quant_desc_input = kwargs.pop('quant_desc_input', quant_cls.default_quant_desc_input)
quant_desc_kernel = kwargs.pop('quant_desc_kernel', quant_cls.default_quant_desc_kernel)
# base layers may use kwargs, so do not check if anything is left in **kwargs
return quant_desc_input, quant_desc_kernel
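# Illustrative sketch (not part of the upstream API): a toy class showing the
# intended pattern -- class-level defaults from QuantMixin, with per-instance
# overrides popped out of **kwargs before a base layer ever sees them.
class _ToyQuantLayer(QuantMixin):
    def __init__(self, **kwargs):
        self.quant_desc_input, self.quant_desc_kernel = \
            pop_quant_desc_in_kwargs(_ToyQuantLayer, **kwargs)
# With no overrides, the class defaults are returned unchanged:
assert _ToyQuantLayer().quant_desc_kernel is _ToyQuantLayer.default_quant_desc_kernel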
|
api/collections/nodeman.py
|
brookylin/bk-sops
| 881 |
76910
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf import settings
import env
from api.client import BKComponentClient
NODEMAN_API_ENTRY = env.BK_NODEMAN_API_ENTRY or "{}/{}".format(settings.BK_PAAS_ESB_HOST, "api/c/compapi/v2/nodeman")
NODEMAN_API_ENTRY_V2 = env.BK_NODEMAN_API_ENTRY or "{}/{}".format(
settings.BK_PAAS_ESB_HOST, "api/c/compapi/{bk_api_ver}/nodeman/api".format(bk_api_ver=settings.DEFAULT_BK_API_VER),
)
def _get_nodeman_api(api_name):
return "{}/{}/".format(NODEMAN_API_ENTRY, api_name)
def _get_nodeman_api_v2(api_name):
return "{}/{}/".format(NODEMAN_API_ENTRY_V2, api_name)
class BKNodeManClient(BKComponentClient):
def create_task(self, bk_biz_id, bk_cloud_id, node_type, op_type, creator, hosts):
return self._request(
method="post",
url=_get_nodeman_api("create_task"),
data={
"bk_biz_id": bk_biz_id,
"bk_cloud_id": bk_cloud_id,
"node_type": node_type,
"op_type": op_type,
"creator": creator,
"hosts": hosts,
},
)
def get_task_info(self, bk_biz_id, job_id):
return self._request(
method="get", url=_get_nodeman_api("get_task_info"), data={"bk_biz_id": bk_biz_id, "job_id": job_id},
)
def get_log(self, host_id, bk_biz_id):
return self._request(
method="get", url=_get_nodeman_api("get_log"), data={"host_id": host_id, "bk_biz_id": bk_biz_id},
)
def search_host_plugin(self, bk_biz_id, pagesize, conditions):
return self._request(
method="post",
url=_get_nodeman_api_v2("plugin/search"),
data={"bk_biz_id": bk_biz_id, "pagesize": pagesize, "conditions": conditions},
)
def job_install(self, job_type, hosts, **kwargs):
data = {"job_type": job_type, "hosts": hosts}
data.update(kwargs)
return self._request(method="post", url=_get_nodeman_api_v2("job/install"), data=data)
def remove_host(self, bk_biz_id, bk_host_id, is_proxy):
return self._request(
method="post",
url=_get_nodeman_api_v2("remove_host"),
data={"bk_biz_id": bk_biz_id, "bk_host_id": bk_host_id, "is_proxy": is_proxy}, # 是否移除PROXY
)
def job_operate(self, job_type, bk_biz_id, bk_host_id):
return self._request(
method="post",
url=_get_nodeman_api_v2("job/operate"),
data={"job_type": job_type, "bk_biz_id": bk_biz_id, "bk_host_id": bk_host_id},
)
def job_details(self, job_id):
return self._request(method="post", url=_get_nodeman_api_v2("job/details"), data={"job_id": job_id})
def get_job_log(self, job_id, instance_id):
return self._request(
method="post", url=_get_nodeman_api_v2("job/log"), data={"job_id": job_id, "instance_id": instance_id},
)
def cloud_list(self):
return self._request(method="get", url=_get_nodeman_api_v2("cloud"), data={})
def ap_list(self):
return self._request(method="get", url=_get_nodeman_api_v2("ap"), data={})
def plugin_operate(self, params: dict):
return self._request(method="post", url=_get_nodeman_api_v2("plugin/operate"), data=params)
def plugin_process(self, category):
return self._request(method="post", url=_get_nodeman_api_v2("plugin/process"), data={"category": category})
def plugin_package(self, name, os):
return self._request(method="post", url=_get_nodeman_api_v2("plugin/package"), data={"name": name, "os": os})
def get_rsa_public_key(self, executor):
return self._request(
method="post",
url=_get_nodeman_api("core/api/encrypt_rsa/fetch_public_keys"),
data={
"bk_app_code": settings.APP_CODE,
"bk_app_secret": settings.SECRET_KEY,
"bk_username": executor,
"names": ["DEFAULT"],
},
)
|
example/RunModel/Abaqus_Example/abaqus_input.py
|
marrov/UQpy
| 132 |
76911
|
<gh_stars>100-1000
# -*- coding: mbcs -*-
# Do not delete the following import lines
from abaqus import *
from abaqusConstants import *
import __main__
import numpy as np
def time_temperature_curve(qtd=None):
# Define the other parameters of the curve
O = 0.14
b = 1500
typ = 'medium'
# Heating phase
t_lim = 20 / 60.0
if typ.lower() == 'slow':
t_lim = 25 / 60.0
elif typ.lower() == 'medium':
t_lim = 20 / 60.0
elif typ.lower() == 'fast':
t_lim = 15 / 60.0
gamma = ((O / b) ** 2) / ((0.04 / 1160) ** 2)
t_max = max((0.2e-3 * qtd / O, t_lim))
if t_max == t_lim:
O_lim = 0.1e-3 * qtd / t_lim
gamma_lim = ((O_lim / b) ** 2) / ((0.04 / 1160) ** 2)
gamma = gamma_lim
t_max_star = t_max * gamma
n_points = 50
t = np.linspace(0, t_max_star, n_points)
t_star = t
theta_room = 0
theta_g = theta_room + 1325 * (1 - 0.324 * np.exp(-0.2 * t_star) -
0.204 * np.exp(-1.7 * t_star) - 0.472 * np.exp(-19 * t_star))
# Cooling phase
theta_max = max(theta_g)
t_max_star = 0.2e-3 * qtd / O * gamma
# TODO: Check the cooling phase when qtd = 125
x = 1.0
if t_max == t_lim:
x = t_lim * gamma / t_max_star
if t_max_star <= 0.5:
t_star_room = (theta_max - theta_room) / 625 + t_max_star * x
# t_star_cooling = np.linspace(t_max_star, t_star_room, n_points + 1)
# theta_cooling = theta_max - 625 * (t_star_cooling - t_max_star * x)
elif t_max_star < 2.0:
t_star_room = (theta_max - theta_room) / (250 * (3 - t_max_star)) + t_max_star * x
# t_star_cooling = np.linspace(t_max_star, t_star_room, n_points + 1)
# theta_cooling = theta_max - 250 * (3 - t_max_star) * (t_star_cooling - t_max_star * x)
elif t_max_star >= 2.0:
t_star_room = (theta_max - theta_room) / 250 + t_max_star * x
# t_star_cooling = np.linspace(t_max_star, t_star_room, n_points + 1)
# theta_cooling = theta_max - 250 * (t_star_cooling - t_max_star * x)
# theta_g = np.append(theta_g, theta_cooling)
theta_g = np.append(theta_g, theta_room)
t_star = np.append(t_star, t_star_room)
t = t_star / gamma * 60.0
t, idx = np.unique(t, return_index=True)
theta_g = theta_g[idx]
max_time = max(t)
t /= max_time
time_temp_curve = []
for i in range(len(t)):
time_temp_curve.append((t[i], theta_g[i],))
time_temp_curve = tuple(time_temp_curve)
return time_temp_curve, max_time
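# Standalone sanity check of the curve builder (illustrative: qtd=300 is an
# assumed fire load density; in the UQpy workflow <qtd> is templated into
# FireProblem below).
_demo_curve, _demo_max_time = time_temperature_curve(qtd=300.0)
assert _demo_curve[0][0] == 0.0 and _demo_curve[-1][0] == 1.0  # time is normalised to [0, 1]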
def FireProblem():
# Import Abaqus modules
import section
import regionToolset
import displayGroupMdbToolset as dgm
import part
import material
import assembly
import step
import interaction
import load
import mesh
import optimization
import job
import sketch
import visualization
import xyPlot
import displayGroupOdbToolset as dgo
import connectorBehavior
# Create new model
mdb.Model(name='FireBenchmark', modelType=STANDARD_EXPLICIT)
session.viewports['Viewport: 1'].setValues(displayedObject=None)
# Create part
s = mdb.models['FireBenchmark'].ConstrainedSketch(name='__profile__',
sheetSize=5.0)
g, v, d, c = s.geometry, s.vertices, s.dimensions, s.constraints
s.setPrimaryObject(option=STANDALONE)
s.rectangle(point1=(0.0, 0.0), point2=(1.0, 0.035))
p = mdb.models['FireBenchmark'].Part(name='Beam', dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p = mdb.models['FireBenchmark'].parts['Beam']
p.BaseShell(sketch=s)
s.unsetPrimaryObject()
p = mdb.models['FireBenchmark'].parts['Beam']
session.viewports['Viewport: 1'].setValues(displayedObject=p)
del mdb.models['FireBenchmark'].sketches['__profile__']
session.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=ON,
engineeringFeatures=ON)
session.viewports['Viewport: 1'].partDisplay.geometryOptions.setValues(
referenceRepresentation=OFF)
# Create material
mdb.models['FireBenchmark'].Material(name='Steel')
mdb.models['FireBenchmark'].materials['Steel'].Elastic(
temperatureDependency=ON, table=((207000000000.0, 0.33, 0.0), (1.0,
0.33, 1200.0)))
mdb.models['FireBenchmark'].materials['Steel'].Plastic(
temperatureDependency=ON, table=((<fy>, 0.0, 0.0), (1.0, 0.0,
1200.0)))
mdb.models['FireBenchmark'].materials['Steel'].Expansion(table=((1.2e-05,
),))
# Create section
mdb.models['FireBenchmark'].HomogeneousShellSection(name='ShellSection',
preIntegrate=OFF, material='Steel', thicknessType=UNIFORM,
thickness=0.035, thicknessField='',
idealization=NO_IDEALIZATION,
poissonDefinition=DEFAULT, thicknessModulus=None,
temperature=GRADIENT,
useDensity=OFF, integrationRule=SIMPSON, numIntPts=5)
# Assign section
p = mdb.models['FireBenchmark'].parts['Beam']
f = p.faces
faces = f.getSequenceFromMask(mask=('[#1 ]',), )
region = p.Set(faces=faces, name='BeamWhole')
p = mdb.models['FireBenchmark'].parts['Beam']
p.SectionAssignment(region=region, sectionName='ShellSection', offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='',
thicknessAssignment=FROM_SECTION)
# Assembly
a = mdb.models['FireBenchmark'].rootAssembly
session.viewports['Viewport: 1'].setValues(displayedObject=a)
session.viewports['Viewport: 1'].assemblyDisplay.setValues(
optimizationTasks=OFF, geometricRestrictions=OFF, stopConditions=OFF)
a = mdb.models['FireBenchmark'].rootAssembly
a.DatumCsysByDefault(CARTESIAN)
p = mdb.models['FireBenchmark'].parts['Beam']
# Create instance of part in assembly
a.Instance(name='Beam-1', part=p, dependent=OFF)
a = mdb.models['FireBenchmark'].rootAssembly
a.translate(instanceList=('Beam-1',), vector=(0.0, -0.0175, 0.0))
a = mdb.models['FireBenchmark'].rootAssembly
# Create partition to define midpoint
f1 = a.instances['Beam-1'].faces
pickedFaces = f1.getSequenceFromMask(mask=('[#1 ]',), )
e1 = a.instances['Beam-1'].edges
a.PartitionFaceByShortestPath(faces=pickedFaces,
point1=a.instances['Beam-1'].InterestingPoint(edge=e1[2], rule=MIDDLE),
point2=a.instances['Beam-1'].InterestingPoint(edge=e1[0], rule=MIDDLE))
a = mdb.models['FireBenchmark'].rootAssembly
v1 = a.instances['Beam-1'].vertices
verts1 = v1.getSequenceFromMask(mask=('[#2 ]',), )
a.Set(vertices=verts1, name='Midpoint')
# Create reference points
session.viewports['Viewport: 1'].assemblyDisplay.setValues(interactions=ON,
constraints=ON, connectors=ON, engineeringFeatures=ON)
a = mdb.models['FireBenchmark'].rootAssembly
e11 = a.instances['Beam-1'].edges
a.ReferencePoint(point=a.instances['Beam-1'].InterestingPoint(edge=e11[5],
rule=MIDDLE))
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
a.ReferencePoint(point=a.instances['Beam-1'].InterestingPoint(edge=e1[2],
rule=MIDDLE))
mdb.models['FireBenchmark'].rootAssembly.features.changeKey(fromName='RP-1',
toName='RP_LHS')
mdb.models['FireBenchmark'].rootAssembly.features.changeKey(fromName='RP-2',
toName='RP_RHS')
# Create constraints
a = mdb.models['FireBenchmark'].rootAssembly
r1 = a.referencePoints
refPoints1 = (r1[6],)
region1 = a.Set(referencePoints=refPoints1, name='RP_LHS')
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
edges1 = e1.getSequenceFromMask(mask=('[#20 ]',), )
region2 = a.Set(edges=edges1, name='LHS')
mdb.models['FireBenchmark'].Coupling(name='LHS', controlPoint=region1,
surface=region2, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
localCsys=None, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
a = mdb.models['FireBenchmark'].rootAssembly
r1 = a.referencePoints
refPoints1 = (r1[7],)
region1 = a.Set(referencePoints=refPoints1, name='RP_RHS')
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
edges1 = e1.getSequenceFromMask(mask=('[#4 ]',), )
region2 = a.Set(edges=edges1, name='RHS')
mdb.models['FireBenchmark'].Coupling(name='RHS', controlPoint=region1,
surface=region2, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
localCsys=None, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
# # Create axial spring
# a = mdb.models['FireBenchmark'].rootAssembly
# region = a.sets['RP_RHS']
# mdb.models['FireBenchmark'].rootAssembly.engineeringFeatures.SpringDashpotToGround(
# name='AxialSpring', region=region, orientation=None, dof=1,
# springBehavior=ON, springStiffness=0.1901812500, dashpotBehavior=OFF,
# dashpotCoefficient=0.0)
# session.viewports['Viewport: 1'].assemblyDisplay.setValues(interactions=OFF,
# constraints=OFF, connectors=OFF, engineeringFeatures=OFF,
# adaptiveMeshConstraints=ON)
# Create steps
mdb.models['FireBenchmark'].StaticStep(name='MechStep', previous='Initial',
initialInc=0.1, maxInc=0.1, nlgeom=ON)
session.viewports['Viewport: 1'].assemblyDisplay.setValues(step='MechStep')
mdb.models['FireBenchmark'].StaticStep(name='ThermalStep', previous='MechStep',
maxNumInc=1000, initialInc=0.0001, maxInc=0.02)
session.viewports['Viewport: 1'].assemblyDisplay.setValues(step='ThermalStep')
# Create field output request
mdb.models['FireBenchmark'].fieldOutputRequests['F-Output-1'].setValues(
variables=('power_spectrum', 'PE', 'PEEQ', 'PEMAG', 'LE', 'U'))
# Create history output requests
mdb.models['FireBenchmark'].historyOutputRequests['H-Output-1'].setValues(
variables=('ALLAE', 'ALLIE', 'ALLKE', 'ALLPD', 'ALLSE', 'ALLWK',
'ETOTAL'))
regionDef = mdb.models['FireBenchmark'].rootAssembly.sets['Midpoint']
mdb.models['FireBenchmark'].HistoryOutputRequest(name='Midpoint',
createStepName='ThermalStep', variables=('U2', 'NT'),
region=regionDef,
sectionPoints=DEFAULT, rebar=EXCLUDE)
regionDef = mdb.models['FireBenchmark'].rootAssembly.sets['RP_LHS']
mdb.models['FireBenchmark'].HistoryOutputRequest(name='RP_LHS',
createStepName='ThermalStep', variables=('RF1',), region=regionDef,
sectionPoints=DEFAULT, rebar=EXCLUDE)
# Create amplitude of the time-temperature curve
time_temp_curve, max_time = time_temperature_curve(qtd=<qtd>)
mdb.models['FireBenchmark'].TabularAmplitude(name='TimeTempCurve',
timeSpan=STEP, smooth=SOLVER_DEFAULT, data=time_temp_curve)
# Apply mechanical load
session.viewports['Viewport: 1'].assemblyDisplay.setValues(loads=ON, bcs=ON, predefinedFields=ON, connectors=ON,
adaptiveMeshConstraints=OFF)
session.viewports['Viewport: 1'].assemblyDisplay.setValues(step='MechStep')
a = mdb.models['FireBenchmark'].rootAssembly
s1 = a.instances['Beam-1'].faces
side1Faces1 = s1.getSequenceFromMask(mask=('[#3 ]',), )
region = a.Surface(side1Faces=side1Faces1, name='BeamWeb')
mdb.models['FireBenchmark'].SurfaceTraction(name='Traction',
createStepName='MechStep', region=region, magnitude=94444.44,
directionVector=((0.0, 0.0, 0.0), (0.0, -1.0, 0.0)),
distributionType=UNIFORM, field='', localCsys=None, traction=GENERAL,
follower=OFF, resultant=ON)
# Apply displacement boundary condition to reference point at LHS
session.viewports['Viewport: 1'].assemblyDisplay.setValues(step='Initial')
a = mdb.models['FireBenchmark'].rootAssembly
region = a.sets['RP_LHS']
mdb.models['FireBenchmark'].DisplacementBC(name='LHS',
createStepName='Initial', region=region, u1=SET, u2=SET, u3=SET,
ur1=SET, ur2=SET, ur3=UNSET, amplitude=UNSET, distributionType=UNIFORM,
fieldName='', localCsys=None)
# Apply displacement boundary condition to reference point at RHS
a = mdb.models['FireBenchmark'].rootAssembly
region = a.sets['RP_RHS']
mdb.models['FireBenchmark'].DisplacementBC(name='RP_RHS',
createStepName='Initial', region=region, u1=UNSET, u2=SET, u3=SET,
ur1=SET, ur2=SET, ur3=UNSET, amplitude=UNSET, distributionType=UNIFORM,
fieldName='', localCsys=None)
# Apply temperature field during the thermal step
session.viewports['Viewport: 1'].assemblyDisplay.setValues(step='ThermalStep')
a = mdb.models['FireBenchmark'].rootAssembly
region = a.instances['Beam-1'].sets['BeamWhole']
mdb.models['FireBenchmark'].Temperature(name='TempField',
createStepName='ThermalStep', region=region, distributionType=UNIFORM,
crossSectionDistribution=CONSTANT_THROUGH_THICKNESS, magnitudes=(1.0,),
amplitude='TimeTempCurve')
# Define edge seeds for mesh
session.viewports['Viewport: 1'].assemblyDisplay.setValues(mesh=ON, loads=OFF,
bcs=OFF, predefinedFields=OFF, connectors=OFF)
session.viewports['Viewport: 1'].assemblyDisplay.meshOptions.setValues(meshTechnique=ON)
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
pickedEdges = e1.getSequenceFromMask(mask=('[#25 ]',), )
a.seedEdgeByNumber(edges=pickedEdges, number=6, constraint=FINER)
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
edges1 = e1.getSequenceFromMask(mask=('[#25 ]',), )
a.Set(edges=edges1, name='VerticalEdges')
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
pickedEdges = e1.getSequenceFromMask(mask=('[#5a ]',), )
a.seedEdgeBySize(edges=pickedEdges, size=0.02, deviationFactor=0.1,
constraint=FINER)
a = mdb.models['FireBenchmark'].rootAssembly
e1 = a.instances['Beam-1'].edges
edges1 = e1.getSequenceFromMask(mask=('[#5a ]',), )
a.Set(edges=edges1, name='HorizontalEdges')
# Assign element type
elemType1 = mesh.ElemType(elemCode=S4, elemLibrary=STANDARD,
secondOrderAccuracy=OFF)
elemType2 = mesh.ElemType(elemCode=S3, elemLibrary=STANDARD)
a = mdb.models['FireBenchmark'].rootAssembly
f1 = a.instances['Beam-1'].faces
faces1 = f1.getSequenceFromMask(mask=('[#3 ]',), )
pickedRegions = (faces1,)
a.setElementType(regions=pickedRegions, elemTypes=(elemType1, elemType2))
# Generate mesh
a = mdb.models['FireBenchmark'].rootAssembly
partInstances = (a.instances['Beam-1'],)
a.generateMesh(regions=partInstances)
# Create the job
session.viewports['Viewport: 1'].assemblyDisplay.setValues(mesh=OFF)
session.viewports['Viewport: 1'].assemblyDisplay.meshOptions.setValues(
meshTechnique=OFF)
jobname = 'fire_analysis'
mdb.Job(name=jobname, model='FireBenchmark', description='',
type=ANALYSIS, atTime=None, waitMinutes=0, waitHours=0, queue=None,
memory=90, memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True,
explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF,
modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='',
scratch='', multiprocessingMode=DEFAULT, numCpus=1, numGPUs=0)
# Submit the job
mdb.jobs[jobname].submit(consistencyChecking=OFF)
if __name__ == "__main__":
FireProblem()
|
setup.py
|
Andrej1A/underwear
| 112 |
76918
|
<reponame>Andrej1A/underwear<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='underwear',
version='0.5.0',
description='Dead simple LAMP-stack deployments for Python-' + \
'powered web applications',
long_description=readme + '\n\n' + history,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/makaimc/underwear',
packages=[
'underwear',
],
scripts=[
'bin/underwear',
],
package_dir={'underwear': 'underwear'},
include_package_data=True,
install_requires=[
'ansible',
],
license="MIT",
zip_safe=False,
keywords=['underwear', 'deployment', 'django', 'LAMP',
'WSGI', 'ansible'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
)
|
days/97-99-online-game-api/demo_app/web/game_logic/game.py
|
aarogyaswamy/100daysofpython
| 1,858 |
76925
|
<reponame>aarogyaswamy/100daysofpython
from collections import defaultdict
from game_logic import game_service, game_decider
from game_logic.game_decider import Decision
from game_logic.models.player import Player
from game_logic.models.roll import Roll
class GameRound:
def __init__(self, game_id: str, player1: Player, player2: Player,
p1_roll: Roll, p2_roll: Roll):
self.p2_roll = p2_roll
self.p1_roll = p1_roll
self.game_id = game_id
self.player1 = player1
self.player2 = player2
self.decision_p1_to_p2 = None
history = game_service.get_game_history(game_id)
self.round = len(history) // 2 + 1
self.player1_wins = GameRound.count_wins(self.player1, history)
self.player2_wins = GameRound.count_wins(self.player2, history)
self.WIN_COUNT_MIN = 3
self.PLAY_COUNT_MIN = 5
self.is_over = game_service.is_game_over(game_id)
def play(self):
if self.is_over:
raise Exception("Game is already over, cannot play further.")
d = game_decider.decide(self.p1_roll, self.p2_roll)
self.decision_p1_to_p2 = d
self.record_roll(d, self.player1, self.p1_roll, self.player1_wins)
self.record_roll(d.reversed(), self.player2, self.p2_roll, self.player2_wins)
print("RECORDING ROUND")
print("Player 1: {}, prior wins {}, outcome: {}".format(self.p1_roll.name, self.player1_wins, d))
print("Player 2: {}, prior wins {}, outcome: {}".format(self.p2_roll.name, self.player2_wins, d.reversed()))
print()
self.is_over = game_service.is_game_over(self.game_id)
def record_roll(self, decision: Decision, player: Player, roll: Roll, win_count: int):
final_round_candidate = self.round >= self.PLAY_COUNT_MIN and win_count + 1 >= self.WIN_COUNT_MIN
wins_game = final_round_candidate and decision == Decision.win
game_service.record_roll(player, roll, self.game_id, wins_game, self.round)
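# Worked example of the rule above (numbers are the constants set in __init__):
# with PLAY_COUNT_MIN = 5 and WIN_COUNT_MIN = 3, a player holding two prior
# wins can only close out the game from round 5 onward, and then only when
# this roll's decision is Decision.win.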
@staticmethod
def count_wins(player, history):
grouped_moves = defaultdict(list)
for h in history:
grouped_moves[h.roll_number].append(h)
win_count = 0
for rnd_data in grouped_moves.values():
if len(rnd_data) != 2:
continue
player_move = [m for m in rnd_data if m.player_id == player.id][0]
opponent_move = [m for m in rnd_data if m.player_id != player.id][0]
player_roll = game_service.find_roll_by_id(player_move.roll_id)
opponent_roll = game_service.find_roll_by_id(opponent_move.roll_id)
if game_decider.decide(player_roll, opponent_roll) == Decision.win:
win_count += 1
return win_count
|
io/swig/io/vexport.py
|
ljktest/siconos
| 137 |
76932
|
<reponame>ljktest/siconos
#!/usr/bin/env @PYTHON_EXECUTABLE@
"""
Description: Export a Siconos mechanics-IO HDF5 file in VTK format.
"""
# Lighter imports before command line parsing
from __future__ import print_function
import sys
import os
import getopt
#
# a replacement for vview --vtk-export
#
def usage(long=False):
print(__doc__); print()
print('Usage: {0} [--help] [--version] [--ascii] <HDF5>'
.format(os.path.split(sys.argv[0])[1]))
if long:
print()
print("""Options:
--help display this message
--version display version information
--ascii export file in ascii format
""")
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['help','version','ascii'])
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
usage()
exit(2)
ascii_mode = False
for o, a in opts:
if o == '--help':
usage(long=True)
exit(0)
if o == '--version':
print('{0} @SICONOS_VERSION@'.format(os.path.split(sys.argv[0])[1]))
exit(0)
if o in ('--ascii'):
ascii_mode = True
min_time = None
max_time = None
cf_scale_factor = 1
normalcone_ratio = 1
time_scale_factor = 1
vtk_export_mode = True
if len(args) > 0:
io_filename = args[0]
else:
usage()
exit(1)
# Heavier imports after command line parsing
import vtk
from vtk.util import numpy_support
from math import atan2, pi
import bisect
from numpy.linalg import norm
import numpy
import random
from siconos.io.mechanics_hdf5 import MechanicsHdf5
# attach velocity
# contact points and associated forces are embedded in a PolyData source
class UnstructuredGridSource(vtk.vtkProgrammableSource):
def GetOutputPort(self):
# 3: UnstructuredGridOutput for vtkProgrammableSource
return vtk.vtkProgrammableSource.GetOutputPort(self, 3)
class ConvexSource(UnstructuredGridSource):
def __init__(self, convex, points):
self._convex = convex
self._points = points
self.SetExecuteMethod(self.method)
def method(self):
output = self.GetUnstructuredGridOutput()
output.Allocate(1, 1)
output.InsertNextCell(
self._convex.GetCellType(), self._convex.GetPointIds())
output.SetPoints(self._points)
def add_compatiblity_methods(obj):
"""
Add missing methods in previous VTK versions.
"""
if hasattr(obj, 'SetInput'):
obj.SetInputData = obj.SetInput
if hasattr(obj, 'AddInput'):
obj.AddInputData = obj.AddInput
transforms = dict()
transformers = dict()
data_connectors_v = dict()
data_connectors_t = dict()
data_connectors_d = dict()
big_data_source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(big_data_source)
big_data_writer = vtk.vtkXMLMultiBlockDataWriter()
add_compatiblity_methods(big_data_writer)
contactors = dict()
offsets = dict()
vtkmath = vtk.vtkMath()
class Quaternion():
def __init__(self, *args):
self._data = vtk.vtkQuaternion[float](*args)
def __mul__(self, q):
r = Quaternion()
vtkmath.MultiplyQuaternion(self._data, q._data, r._data)
return r
def __getitem__(self, i):
return self._data[i]
def conjugate(self):
r = Quaternion((self[0], self[1], self[2], self[3]))
r._data.Conjugate()
return r
def rotate(self, v):
pv = Quaternion((0, v[0], v[1], v[2]))
rv = self * pv * self.conjugate()
# assert(rv[0] == 0)
return [rv[1], rv[2], rv[3]]
def axisAngle(self):
r = [0, 0, 0]
a = self._data.GetRotationAngleAndAxis(r)
return r, a
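# Quick self-check of the wrapper (illustrative, not part of the export flow):
# a 180-degree turn about z, written (w, x, y, z) with w = 0, sends x to -x.
_q = Quaternion((0., 0., 0., 1.))
assert abs(_q.rotate([1., 0., 0.])[0] + 1.) < 1e-12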
def set_position(instance, q0, q1, q2, q3, q4, q5, q6):
q = Quaternion((q3, q4, q5, q6))
for transform, offset in zip(transforms[instance], offsets[instance]):
p = q.rotate(offset[0])
r = q * Quaternion(offset[1])
transform.Identity()
transform.Translate(q0 + p[0], q1 + p[1], q2 + p[2])
axis, angle = r.axisAngle()
transform.RotateWXYZ(angle * 180. / pi,
axis[0],
axis[1],
axis[2])
set_positionv = numpy.vectorize(set_position)
def build_set_velocity(dico):
def set_velocity(instance, v0, v1, v2, v3, v4, v5):
if instance in dico:
dico[instance]._data[:] = [v0, v1, v2, v3, v4, v5]
dico[instance]._connector.Update()
set_velocityv = numpy.vectorize(set_velocity)
return set_velocityv
def build_set_translation(dico):
def set_translation(instance, x0, x1, x2 ):
if instance in dico:
dico[instance]._data[:] = [x0, x1, x2]
dico[instance]._connector.Update()
set_translationv = numpy.vectorize(set_translation)
return set_translationv
def build_set_displacement(dico):
def set_displacement(instance, x0, x1, x2 ):
if instance in dico:
dico[instance]._data[:] = [x0, x1, x2]
dico[instance]._connector.Update()
set_displacementv = numpy.vectorize(set_displacement)
return set_displacementv
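# The three builders above share one pattern: close over a dict of
# DataConnector objects keyed by instance id, then numpy.vectorize the setter
# so whole columns of the HDF5 tables can be pushed per time step (sketch):
#   set_velocityv = build_set_velocity(data_connectors_v)
#   set_velocityv(ids, v0, v1, v2, v3, v4, v5)  # each argument is a column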
def step_reader(step_string):
from OCC.StlAPI import StlAPI_Writer
from OCC.STEPControl import STEPControl_Reader
from OCC.BRep import BRep_Builder
from OCC.TopoDS import TopoDS_Compound
from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
builder = BRep_Builder()
comp = TopoDS_Compound()
builder.MakeCompound(comp)
stl_writer = StlAPI_Writer()
stl_writer.SetASCIIMode(True)
with io.tmpfile(contents=step_string) as tmpfile:
step_reader = STEPControl_Reader()
status = step_reader.ReadFile(tmpfile[1])
if status == IFSelect_RetDone: # check status
failsonly = False
step_reader.PrintCheckLoad(failsonly, IFSelect_ItemsByEntity)
step_reader.PrintCheckTransfer(failsonly, IFSelect_ItemsByEntity)
ok = step_reader.TransferRoot(1)
nbs = step_reader.NbShapes()
l = []
for i in range(1, nbs + 1):
shape = step_reader.Shape(i)
builder.Add(comp, shape)
with io.tmpfile(suffix='.stl') as tmpf:
stl_writer.Write(comp, tmpf[1])
tmpf[0].flush()
reader = vtk.vtkSTLReader()
reader.SetFileName(tmpf[1])
reader.Update()
return reader
def brep_reader(brep_string, indx):
from OCC.StlAPI import StlAPI_Writer
from OCC.BRepTools import BRepTools_ShapeSet
shape_set = BRepTools_ShapeSet()
shape_set.ReadFromString(brep_string)
shape = shape_set.Shape(shape_set.NbShapes())
location = shape_set.Locations().Location(indx)
shape.Location(location)
stl_writer = StlAPI_Writer()
with io.tmpfile(suffix='.stl') as tmpf:
stl_writer.Write(shape, tmpf[1])
tmpf[0].flush()
reader = vtk.vtkSTLReader()
reader.SetFileName(tmpf[1])
reader.Update()
return reader
refs = []
refs_attrs = []
shape = dict()
pos = dict()
instances = dict()
with MechanicsHdf5(io_filename=io_filename, mode='r') as io:
def load():
ispos_data = io.static_data()
idpos_data = io.dynamic_data()
ivelo_data = io.velocities_data()
icf_data = io.contact_forces_data()[:]
isolv_data = io.solver_data()
return ispos_data, idpos_data, ivelo_data, icf_data, isolv_data
spos_data, dpos_data, velo_data, cf_data, solv_data = load()
class DataConnector():
def __init__(self, instance, data_name='velocity', data_size=6):
self._instance = instance
self._data_name = data_name
self._data_size = data_size
self._connector = vtk.vtkProgrammableFilter()
self._connector.SetExecuteMethod(self.method)
self._data = numpy.zeros(data_size)
self._vtk_data = vtk.vtkFloatArray()
self._vtk_data.SetName(data_name)
self._vtk_data.SetNumberOfComponents(data_size)
self._vtk_data.SetNumberOfTuples(1)
def method(self):
input = self._connector.GetInput()
output = self._connector.GetOutput()
output.ShallowCopy(input)
if output.GetFieldData().GetArray(self._data_name) is None:
output.GetFieldData().AddArray(self._vtk_data)
data = self._data
data_t = tuple(data[0:self._data_size])
output.GetFieldData().GetArray(self._data_name).SetTuple(
0, data_t)
# contact forces provider
class ContactInfoSource():
def __init__(self, data):
self._data = None
if data is not None:
if len(data) > 0:
self._data = data
else:
self._data = None
if self._data is not None:
self._time = min(self._data[:, 0])
else:
self._time = 0
self._contact_source_a = vtk.vtkProgrammableSource()
self._contact_source_b = vtk.vtkProgrammableSource()
self._contact_source_a.SetExecuteMethod(self.method)
self._contact_source_b.SetExecuteMethod(self.method)
def method(self):
# multiblock += contact points
output_a = self._contact_source_a.GetPolyDataOutput()
output_b = self._contact_source_b.GetPolyDataOutput()
id_f = numpy.where(
abs(self._data[:, 0] - self._time) < 1e-15)[0]
self.cpa_export = self._data[
id_f, 2:5].copy()
self.cpb_export = self._data[
id_f, 5:8].copy()
self.cn_export = self._data[
id_f, 8:11].copy()
self.cf_export = self._data[
id_f, 11:14].copy()
self.cpa_ = numpy_support.numpy_to_vtk(
self.cpa_export)
self.cpa_.SetName('contact_positions_A')
self.cpb_ = numpy_support.numpy_to_vtk(
self.cpb_export)
self.cpb_.SetName('contact_positions_B')
self.cn_ = numpy_support.numpy_to_vtk(
self.cn_export)
self.cn_.SetName('contact_normals')
self.cf_ = numpy_support.numpy_to_vtk(
self.cf_export)
self.cf_.SetName('contact_forces')
output_a.Allocate(len(self.cpa_export), 1)
cpa_points = vtk.vtkPoints()
cpa_points.SetNumberOfPoints(len(self.cpa_export))
cpa_points.SetData(self.cpa_)
output_a.SetPoints(cpa_points)
# normal and forces are attached to A points
output_a.GetPointData().AddArray(self.cn_)
output_a.GetPointData().AddArray(self.cf_)
output_b.Allocate(len(self.cpb_export), 1)
cpb_points = vtk.vtkPoints()
cpb_points.SetNumberOfPoints(len(self.cpb_export))
cpb_points.SetData(self.cpb_)
output_b.SetPoints(cpb_points)
# Step 2: build a VTK reader or source for every shape stored in the file
readers = dict()
vtk_reader = {'vtp': vtk.vtkXMLPolyDataReader,
'stl': vtk.vtkSTLReader}
for shape_name in io.shapes():
shape_type = io.shapes()[shape_name].attrs['type']
if shape_type in ['vtp', 'stl']:
with io.tmpfile() as tmpf:
tmpf[0].write(str(io.shapes()[shape_name][:][0]))
tmpf[0].flush()
reader = vtk_reader[shape_type]()
reader.SetFileName(tmpf[1])
reader.Update()
readers[shape_name] = reader
elif shape_type in ['brep']:
# try to find an associated shape
if 'associated_shape' in io.shapes()[shape_name].attrs:
associated_shape = \
io.shapes()[shape_name].\
attrs['associated_shape']
else:
if 'brep' in io.shapes()[shape_name].attrs:
brep = io.shapes()[shape_name].attrs['brep']
else:
brep = shape_name
reader = brep_reader(str(io.shapes()[brep][:][0]),
io.shapes()[brep].attrs['occ_indx'])
readers[shape_name] = reader
elif shape_type in ['stp', 'step']:
# try to find an associated shape
if 'associated_shape' in io.shapes()[shape_name].attrs:
associated_shape = \
io.shapes()[shape_name].\
attrs['associated_shape']
# delayed
else:
reader = step_reader(str(io.shapes()[shape_name][:][0]))
readers[shape_name] = reader
elif shape_type == 'convex':
# a convex shape
points = vtk.vtkPoints()
convex = vtk.vtkConvexPointSet()
data = io.shapes()[shape_name][:]
convex.GetPointIds().SetNumberOfIds(data.shape[0])
for id_, vertice in enumerate(io.shapes()[shape_name][:]):
points.InsertNextPoint(vertice[0], vertice[1], vertice[2])
convex.GetPointIds().SetId(id_, id_)
readers[shape_name] = ConvexSource(convex, points)
else:
assert shape_type == 'primitive'
primitive = io.shapes()[shape_name].attrs['primitive']
attrs = io.shapes()[shape_name][:][0]
if primitive == 'Sphere':
source = vtk.vtkSphereSource()
source.SetRadius(attrs[0])
elif primitive == 'Cone':
source = vtk.vtkConeSource()
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
source.SetResolution(15)
source.SetDirection(0, 1, 0) # needed
elif primitive == 'Cylinder':
source = vtk.vtkCylinderSource()
source.SetResolution(15)
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
# source.SetDirection(0,1,0)
elif primitive == 'Box':
source = vtk.vtkCubeSource()
source.SetXLength(attrs[0])
source.SetYLength(attrs[1])
source.SetZLength(attrs[2])
elif primitive == 'Capsule':
sphere1 = vtk.vtkSphereSource()
sphere1.SetRadius(attrs[0])
sphere1.SetCenter(0, attrs[1] / 2, 0)
sphere1.SetThetaResolution(15)
sphere1.SetPhiResolution(15)
sphere1.Update()
sphere2 = vtk.vtkSphereSource()
sphere2.SetRadius(attrs[0])
sphere2.SetCenter(0, -attrs[1] / 2, 0)
sphere2.SetThetaResolution(15)
sphere2.SetPhiResolution(15)
sphere2.Update()
cylinder = vtk.vtkCylinderSource()
cylinder.SetRadius(attrs[0])
cylinder.SetHeight(attrs[1])
cylinder.SetResolution(15)
cylinder.Update()
data = vtk.vtkMultiBlockDataSet()
data.SetNumberOfBlocks(3)
data.SetBlock(0, sphere1.GetOutput())
data.SetBlock(1, sphere2.GetOutput())
data.SetBlock(2, cylinder.GetOutput())
source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(source)
source.AddInputData(data)
readers[shape_name] = source
for instance_name in io.instances():
instance = int(io.instances()[instance_name].attrs['id'])
contactors[instance] = []
transforms[instance] = []
offsets[instance] = []
for contactor_instance_name in io.instances()[instance_name]:
contactor_name = io.instances()[instance_name][
contactor_instance_name].attrs['name']
contactors[instance].append(contactor_name)
transform = vtk.vtkTransform()
transformer = vtk.vtkTransformFilter()
if contactor_name in readers:
transformer.SetInputConnection(
readers[contactor_name].GetOutputPort())
else:
print('WARNING: cannot find a shape source for instance:',
instance)
transformer.SetTransform(transform)
transformers[contactor_name] = transformer
data_connectors_v[instance] = DataConnector(instance)
data_connectors_v[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_v[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_v[instance]._connector.GetOutputPort())
data_connectors_t[instance] = DataConnector(instance, data_name='translation', data_size=3)
data_connectors_t[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_t[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_t[instance]._connector.GetOutputPort())
data_connectors_d[instance] = DataConnector(instance, data_name='displacement', data_size=3)
data_connectors_d[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_d[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_d[instance]._connector.GetOutputPort())
transforms[instance].append(transform)
offsets[instance].append(
(io.instances()[
instance_name][
contactor_instance_name].attrs['translation'],
io.instances()[instance_name][contactor_instance_name].attrs['orientation']))
pos_data = dpos_data[:].copy()
spos_data = spos_data[:].copy()
velo_data = velo_data[:].copy()
set_velocityv = build_set_velocity(data_connectors_v)
set_translationv = build_set_translation(data_connectors_t)
set_displacementv = build_set_displacement(data_connectors_d)
times = list(set(dpos_data[:, 0]))
times.sort()
contact_info_source = ContactInfoSource(cf_data)
pveloa = DataConnector(0)
pvelob = DataConnector(0)
pveloa._connector.SetInputConnection(
contact_info_source._contact_source_a.GetOutputPort())
pvelob._connector.SetInputConnection(
contact_info_source._contact_source_b.GetOutputPort())
big_data_source.AddInputConnection(
pveloa._connector.GetOutputPort())
big_data_source.AddInputConnection(
pvelob._connector.GetOutputPort())
big_data_writer.SetInputConnection(big_data_source.GetOutputPort())
ntime = len(times)
k = 0
packet = int(ntime / 100) + 1
for time in times:
k=k+1
if (k%packet == 0):
sys.stdout.write('.')
index = bisect.bisect_left(times, time)
index = max(0, index)
index = min(index, len(times) - 1)
contact_info_source._time = times[index]
# fix: should be called by contact_source?
contact_info_source.method()
id_t = numpy.where(pos_data[:, 0] == times[index])
if numpy.shape(spos_data)[0] > 0:
set_positionv(spos_data[:, 1], spos_data[:, 2],
spos_data[:, 3],
spos_data[:, 4], spos_data[:, 5],
spos_data[:, 6],
spos_data[:, 7], spos_data[:, 8])
set_positionv(
pos_data[id_t, 1], pos_data[id_t, 2], pos_data[id_t, 3],
pos_data[id_t, 4], pos_data[id_t, 5], pos_data[id_t, 6],
pos_data[id_t, 7], pos_data[id_t, 8])
id_tv = numpy.where(velo_data[:, 0] == times[index])
set_velocityv(
velo_data[id_tv, 1],
velo_data[id_tv, 2],
velo_data[id_tv, 3],
velo_data[id_tv, 4],
velo_data[id_tv, 5],
velo_data[id_tv, 6],
velo_data[id_tv, 7])
set_translationv(
pos_data[id_t, 1],
pos_data[id_t, 2],
pos_data[id_t, 3],
pos_data[id_t, 4],
)
# set_displacementv(
# pos_data[id_t, 1],
# pos_data[id_t, 2]- pos_data[0, 2],
# pos_data[id_t, 3]- pos_data[0, 3],
# pos_data[id_t, 4]- pos_data[0, 4]
# ) # should be w.r.t initial position
big_data_writer.SetFileName('{0}-{1}.{2}'.format(os.path.splitext(
os.path.basename(io_filename))[0],
index, big_data_writer.GetDefaultFileExtension()))
big_data_writer.SetTimeStep(times[index])
big_data_source.Update()
if ascii_mode:
big_data_writer.SetDataModeToAscii()
big_data_writer.Write()
print(' ')
|
panel/tests/io/test_state.py
|
datalayer-contrib/holoviz-panel
| 1,130 |
76935
|
<filename>panel/tests/io/test_state.py
from panel.io.state import state
def test_as_cached_key_only():
global i
i = 0
def test_fn():
global i
i += 1
return i
assert state.as_cached('test', test_fn) == 1
assert state.as_cached('test', test_fn) == 1
state.cache.clear()
def test_as_cached_key_and_kwarg():
global i
i = 0
def test_fn(a):
global i
i += 1
return i
assert state.as_cached('test', test_fn, a=1) == 1
assert state.as_cached('test', test_fn, a=1) == 1
assert state.as_cached('test', test_fn, a=2) == 2
assert state.as_cached('test', test_fn, a=1) == 1
assert state.as_cached('test', test_fn, a=2) == 2
state.cache.clear()
|
criterion.py
|
ash368/FaceParsing
| 138 |
76936
|
# -*- coding: utf-8 -*-
# @Author: luoling
# @Date: 2019-12-06 10:41:34
# @Last Modified by: luoling
# @Last Modified time: 2019-12-18 17:52:49
import torch
import torch.nn.functional as F
import torch.nn as nn
def cross_entropy2d(input, target, weight=None, reduction='none'):
n, c, h, w = input.size()
nt, ht, wt = target.size()
# Handle inconsistent size between input and target
if h != ht or w != wt:
input = F.interpolate(input, size=(
ht, wt), mode="bilinear", align_corners=True)
# https://zhuanlan.zhihu.com/p/76583143
input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
target = target.view(-1)
# https://www.cnblogs.com/marsggbo/p/10401215.html
loss = F.cross_entropy(
input, target, weight=weight, reduction=reduction, ignore_index=250
)
return loss
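# Shape sketch (illustrative): logits at a lower resolution than the labels
# are upsampled internally, and with reduction='none' the loss is per-pixel:
#   logits = torch.randn(2, 19, 32, 32)         # (N, C, H, W)
#   labels = torch.randint(0, 19, (2, 64, 64))  # (N, Ht, Wt)
#   cross_entropy2d(logits, labels).shape       # -> torch.Size([8192]), i.e. 2*64*64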
def bootstrapped_cross_entropy2d(input, target, K=100000, weight=None, size_average=True):
"""High-performance semantic segmentation using very deep fully convolutional networks"""
batch_size = input.size()[0]
def _bootstrap_xentropy_single(input, target, K, weight=None, size_average=True):
n, c, h, w = input.size()
input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
target = target.view(-1)
loss = F.cross_entropy(
input, target, weight=weight, reduction='none', ignore_index=250
)
topk_loss, _ = loss.topk(K)
reduced_topk_loss = topk_loss.sum() / K
return reduced_topk_loss
loss = 0.0
# Bootstrap from each image not entire batch
for i in range(batch_size):
loss += _bootstrap_xentropy_single(
input=torch.unsqueeze(input[i], 0),
target=torch.unsqueeze(target[i], 0),
K=K,
weight=weight,
size_average=size_average,
)
return loss / float(batch_size)
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=255):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
@staticmethod
def make_one_hot(labels, classes):
one_hot = torch.cuda.FloatTensor(labels.size()[0], classes, labels.size()[
2], labels.size()[3]).zero_()
target = one_hot.scatter_(1, labels.data, 1)
return target
def forward(self, output, target):
if self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = self.make_one_hot(
target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
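# Usage sketch (illustrative; make_one_hot above allocates with
# torch.cuda.FloatTensor, so a CUDA device is assumed):
#   dice = DiceLoss()
#   loss = dice(logits.cuda(), labels.cuda())  # logits (N, C, H, W), labels (N, H, W) long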
class CriterionAll(nn.Module):
"""Segmentation aware and Edge aware loss."""
def __init__(self, alpha=50, ignore_index=255):
super(CriterionAll, self).__init__()
self.ignore_index = ignore_index
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
self.weighted_criterion = nn.CrossEntropyLoss(
ignore_index=ignore_index, reduction='none')
self.alpha = alpha
def parsing_loss(self, preds, target):
h, w = target[0].size(1), target[0].size(2)
pos_num = torch.sum(target[1] == 1, dtype=torch.float)
neg_num = torch.sum(target[1] == 0, dtype=torch.float)
weight_pos = neg_num / (pos_num + neg_num)
weight_neg = pos_num / (pos_num + neg_num)
weights = torch.tensor([weight_neg, weight_pos])
loss = 0
# Edge-aware branch
preds_edge = preds[1][0]
scale_pred = F.interpolate(input=preds_edge, size=(h, w),
mode='bilinear', align_corners=True)
loss += F.cross_entropy(scale_pred, target[1],
weights.cuda(), ignore_index=self.ignore_index)
# Segmentation-aware branch
preds_parsing = preds[0]
if isinstance(preds_parsing, list):
for idx, pred_parsing in enumerate(preds_parsing):
scale_pred = F.interpolate(input=pred_parsing, size=(h, w),
mode='bilinear', align_corners=True)
if idx == len(preds_parsing) - 1:  # only the final prediction gets the edge-aware weighting
loss += (torch.mul(self.weighted_criterion(scale_pred, target[0]), torch.where(
target[1] == 0, torch.Tensor([1]).cuda(), torch.Tensor([1 + self.alpha]).cuda()))).mean()
else:
loss += self.criterion(scale_pred, target[0])
else:
scale_pred = F.interpolate(input=preds_parsing, size=(h, w),
mode='bilinear', align_corners=True)
loss += self.criterion(scale_pred, target[0])
return loss
def forward(self, preds, target):
loss = self.parsing_loss(preds, target)
return loss
|
pydis_site/apps/api/migrations/0056_allow_blank_user_roles.py
|
Transfusion/site
| 700 |
76949
|
<filename>pydis_site/apps/api/migrations/0056_allow_blank_user_roles.py
# Generated by Django 3.0.8 on 2020-07-14 20:35
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
import pydis_site.apps.api.models.bot.user
class Migration(migrations.Migration):
dependencies = [
('api', '0055_merge_20200714_2027'),
]
operations = [
migrations.AlterField(
model_name='user',
name='roles',
field=django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(validators=[django.core.validators.MinValueValidator(limit_value=0, message='Role IDs cannot be negative.'), pydis_site.apps.api.models.bot.user._validate_existing_role]), blank=True, default=list, help_text='IDs of roles the user has on the server', size=None),
),
]
|
tools/perf/core/results_merger_unittest.py
|
zealoussnow/chromium
| 14,668 |
76965
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from core import results_merger
class ResultMergerTest(unittest.TestCase):
def setUp(self):
self.sample_json_string = '''
{
"interrupted": false,
"num_failures_by_type": {},
"seconds_since_epoch": 10.0,
"tests": {},
"version": 3
}
'''
def test_json_version_check_exception(self):
json_string = '{"seconds_since_epoch": 1.0, "version": 2}'
result = json.loads(json_string)
with self.assertRaises(results_merger.MergeException) as c:
results_merger.merge_test_results([result])
self.assertTrue(
'Unsupported version' in str(c.exception),
'Version check failure message is not in exception. Exception: %s' %
c.exception)
def test_json_required_field_check_exception(self):
json_string = '{"seconds_since_epoch": 1.0, "version": 3}'
result = json.loads(json_string)
with self.assertRaises(results_merger.MergeException) as c:
results_merger.merge_test_results([result])
self.assertTrue(
'Invalid json test results' in str(c.exception),
'Required key check failure message is not in exception. Exception: %s'
% c.exception)
def test_json_merge_tests(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_3 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS"
},
"Story-2": {
"actual": "SKIP"
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-3": {
"actual": "FAIL"
}
},
"Benchmark-2": {
"Story-1": {
"actual": "SKIP"
}
}
}
''')
result_3['tests'] = json.loads('''
{
"Benchmark-2": {
"Story-2": {
"actual": "PASS"
}
},
"Benchmark-3": {
"Story-1": {
"actual": "PASS"
}
}
}
''')
merged_results = results_merger.merge_test_results(
[result_1, result_2, result_3])
self.assertEqual(len(merged_results['tests']), 3)
self.assertEqual(len(merged_results['tests']['Benchmark-1']), 3)
self.assertEqual(len(merged_results['tests']['Benchmark-2']), 2)
self.assertEqual(len(merged_results['tests']['Benchmark-3']), 1)
def test_json_merge_tests_non_dict_exception(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS"
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "FAIL"
}
}
}
''')
with self.assertRaises(results_merger.MergeException) as c:
results_merger.merge_test_results([result_1, result_2])
self.assertTrue(
'not mergable' in str(c.exception),
'Merge failure message is not in exception. Exception: %s' %
c.exception)
def test_json_merge_interrupted(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_2['interrupted'] = True
merged_results = results_merger.merge_test_results([result_1, result_2])
self.assertEqual(merged_results['interrupted'], True)
def test_json_merge_seconds_since_epoch(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_2['seconds_since_epoch'] = 5.0
merged_results = results_merger.merge_test_results([result_1, result_2])
self.assertEqual(merged_results['seconds_since_epoch'], 5.0)
def test_json_merge_nums(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['num_failures_by_type'] = json.loads('''
{
"PASS": 1,
"SKIP": 5
}
''')
result_2['num_failures_by_type'] = json.loads('''
{
"PASS": 3,
"FAIL": 2
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2])
self.assertEqual(merged_results['num_failures_by_type']['PASS'], 4)
self.assertEqual(merged_results['num_failures_by_type']['SKIP'], 5)
self.assertEqual(merged_results['num_failures_by_type']['FAIL'], 2)
def test_json_merge_tests_cross_device(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS PASS",
"artifacts": {
"logs.txt": [
"123/1/logs.txt",
"123/2/logs.txt"
],
"trace.html": [
"123/1/trace.html",
"123/2/trace.html"
]
},
"expected": "PASS",
"is_unexpected": false,
"shard": 0,
"time": 1.0,
"times": [
1.0,
1.1
]
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "FAIL PASS",
"artifacts": {
"logs.txt": [
"456/1/logs.txt",
"456/2/logs.txt"
],
"screenshot.png": [
"456/1/screenshot.png"
]
},
"expected": "PASS",
"is_unexpected": true,
"shard": 1,
"time": 1.0,
"times": [
1.0,
1.2
]
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertEqual(len(merged_results['tests']), 1)
self.assertEqual(len(merged_results['tests']['Benchmark-1']), 1)
self.assertIn(
'FAIL',
merged_results['tests']['Benchmark-1']['Story-1']['actual'].split())
self.assertIn(
'PASS',
merged_results['tests']['Benchmark-1']['Story-1']['actual'].split())
self.assertEqual(
4,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['logs.txt']))
self.assertEqual(
2,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['trace.html']))
self.assertEqual(
1,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['screenshot.png']))
self.assertEqual(
4, len(merged_results['tests']['Benchmark-1']['Story-1']['times']))
self.assertNotIn('shard', merged_results['tests']['Benchmark-1']['Story-1'])
self.assertEqual(
True,
merged_results['tests']['Benchmark-1']['Story-1']['is_unexpected'])
def test_json_merge_tests_cross_device_actual_pass(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"is_unexpected": false
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"is_unexpected": false
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertEqual(
'PASS PASS',
merged_results['tests']['Benchmark-1']['Story-1']['actual'])
self.assertEqual(
False,
merged_results['tests']['Benchmark-1']['Story-1']['is_unexpected'])
def test_json_merge_tests_cross_device_actual_fail(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "FAIL PASS PASS",
"expected": "PASS",
"is_unexpected": true
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"is_unexpected": false
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertIn('PASS',
merged_results['tests']['Benchmark-1']['Story-1']['actual'])
self.assertIn('FAIL',
merged_results['tests']['Benchmark-1']['Story-1']['actual'])
self.assertEqual(
True,
merged_results['tests']['Benchmark-1']['Story-1']['is_unexpected'])
def test_json_merge_tests_cross_device_artifacts(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"artifacts": {
"logs.txt": [
"123/1/logs.txt"
]
}
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"artifacts": {
"logs.txt": [
"456/1/logs.txt"
],
"trace.html": [
"123/1/trace.html"
]
}
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertEqual(
2,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['logs.txt']))
self.assertEqual(
1,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['trace.html']))
def test_json_merge_tests_cross_device_artifacts_missing(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS"
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"artifacts": {
"logs.txt": [
"456/1/logs.txt"
],
"trace.html": [
"123/1/trace.html"
]
}
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertEqual(
1,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['logs.txt']))
self.assertEqual(
1,
len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']
['trace.html']))
def test_json_merge_tests_cross_device_times(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"time": 10.0,
"times": [10.0, 15.0, 25.0]
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"time": 20.0,
"times": [20.0, 30.0]
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertEqual(
5, len(merged_results['tests']['Benchmark-1']['Story-1']['times']))
self.assertEqual(10.0,
merged_results['tests']['Benchmark-1']['Story-1']['time'])
def test_json_merge_tests_cross_device_times_missing(self):
result_1 = json.loads(self.sample_json_string)
result_2 = json.loads(self.sample_json_string)
result_1['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS"
}
}
}
''')
result_2['tests'] = json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"time": 20.0,
"times": [20.0, 30.0]
}
}
}
''')
merged_results = results_merger.merge_test_results([result_1, result_2],
True)
self.assertEqual(
2, len(merged_results['tests']['Benchmark-1']['Story-1']['times']))
self.assertEqual(20.0,
merged_results['tests']['Benchmark-1']['Story-1']['time'])
|
examples/gallery/lines/linestyles.py
|
jbusecke/pygmt
| 326 |
76967
|
<gh_stars>100-1000
"""
Line styles
-----------
The :meth:`pygmt.Figure.plot` method can plot lines in different styles.
The default line style is a 0.25-point wide, black, solid line, and can be
customized with the ``pen`` parameter.
A *pen* in GMT has three attributes: *width*, *color*, and *style*.
The *style* attribute controls the appearance of the line.
Giving "dotted" or "." yields a dotted line, whereas a dashed pen is requested
with "dashed" or "-". Also combinations of dots and dashes, like ".-" for a
dot-dashed line, are allowed.
For more advanced *pen* attributes, see the GMT cookbook
:gmt-docs:`cookbook/features.html#wpen-attrib`.
"""
import numpy as np
import pygmt
# Generate a two-point line for plotting
x = np.array([0, 7])
y = np.array([9, 9])
fig = pygmt.Figure()
fig.basemap(region=[0, 10, 0, 10], projection="X15c/8c", frame='+t"Line Styles"')
# Plot the line using the default line style
fig.plot(x=x, y=y)
fig.text(x=x[-1], y=y[-1], text="solid (default)", justify="ML", offset="0.2c/0c")
# Plot the line using different line styles
for linestyle in [
"1p,red,-", # dashed line
"1p,blue,.", # dotted line
"1p,lightblue,-.", # dash-dotted line
"2p,blue,..-", # dot-dot-dashed line
"2p,tomato,--.", # dash-dash-dotted line
# A pattern of 4-point-long line segments and 2-point-long gaps between
# segments, with pattern offset by 2 points from the origin
"2p,tomato,4_2:2p",
]:
y -= 1 # Move the current line down
fig.plot(x=x, y=y, pen=linestyle)
fig.text(x=x[-1], y=y[-1], text=linestyle, justify="ML", offset="0.2c/0c")
# Plot the line like a railway track (black/white).
# The trick here is plotting the same line twice but with different line styles
y -= 1 # move the current line down
fig.plot(x=x, y=y, pen="5p,black")
fig.plot(x=x, y=y, pen="4p,white,20p_20p")
fig.text(x=x[-1], y=y[-1], text="5p,black", justify="ML", offset="0.2c/0.2c")
fig.text(x=x[-1], y=y[-1], text="4p,white,20p_20p", justify="ML", offset="0.2c/-0.2c")
fig.show()
|
build/util/lib/results/result_types.py
|
zealoussnow/chromium
| 14,668 |
76985
|
<gh_stars>1000+
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing base test results classes."""
# The test passed.
PASS = 'SUCCESS'
# The test was intentionally skipped.
SKIP = 'SKIPPED'
# The test failed.
FAIL = 'FAILURE'
# The test caused the containing process to crash.
CRASH = 'CRASH'
# The test timed out.
TIMEOUT = 'TIMEOUT'
# The test ran, but we couldn't determine what happened.
UNKNOWN = 'UNKNOWN'
# The test did not run.
NOTRUN = 'NOTRUN'
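# Illustrative sketch (not part of this module's API): callers commonly treat
# FAIL, CRASH and TIMEOUT as failing outcomes, e.g. by checking
# `result in (FAIL, CRASH, TIMEOUT)`.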
|
analyze.py
|
rachmadaniHaryono/zsh-history-analysis
| 179 |
76986
|
<reponame>rachmadaniHaryono/zsh-history-analysis
#!/usr/bin/env python3
from collections import Counter, defaultdict
from itertools import groupby
import argparse
import os
import shutil
import statistics
import sys
import time
import warnings
try:
from termgraph.termgraph import chart
except ImportError:
chart = None
def groupByKey(m):
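    """Group (key, value) pairs into a dict mapping each key to the list of
    its values, e.g. [("a", 1), ("b", 2), ("a", 3)] -> {"a": [1, 3], "b": [2]}.
    """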
groupedM = defaultdict(list)
for k, v in m:
groupedM[k].append(v)
return groupedM
class Command:
def __init__(self, raw):
tup = raw.split(";")
# TODO: Should this be hard-coded?
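        # zsh extended-history lines look like ": <epoch>:<elapsed>;<command>";
        # the [2:-2] slice below strips the leading ": " and the trailing
        # ":<elapsed>", assuming a single-digit elapsed time (see TODO above).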
self.timestamp_epoch = int(tup[0][2:-2])
self.timestamp_struct = time.gmtime(self.timestamp_epoch)
self.full_command = tup[1]
self.base_command = tup[1].split()[0]
class HistoryData:
def __init__(self, filenames):
if isinstance(filenames, str):
filenames = [filenames]
commands = []
for filename in filenames:
with open(filename, 'rb') as f:
it = iter(f)
for line in it:
try:
full_line = line.decode()
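                        # Multi-line commands end each physical line with a
                        # backslash; keep appending lines until complete.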
while full_line.strip()[-1] == '\\':
full_line += next(it).decode()
commands.append(Command(full_line))
except Exception as e:
# print("Warning: Exception parsing.")i
# print(e)
pass
self.commands = commands
def get_hourly_breakdowns(self):
days = self.group_by_day()
all_freqs = [[] for x in range(24)]
for day, cmds in sorted(days.items()):
day_times = [cmd.timestamp_struct.tm_hour for cmd in cmds]
freq_counter = Counter(day_times)
freqs = [0 for x in range(24)]
for hour, num in freq_counter.items():
freqs[hour] = num
for hour, num in enumerate(freqs):
all_freqs[hour].append(num)
return all_freqs
def get_weekday_breakdowns(self):
days = self.group_by_day()
all_freqs = [[] for x in range(7)]
for day, cmds in sorted(days.items()):
all_freqs[cmds[0].timestamp_struct.tm_wday].append(len(cmds))
return all_freqs
def get_command_lengths(self):
lengths = [(len(cmd.base_command), cmd) for cmd in self.commands]
sortedLengths = sorted(lengths, key=lambda x: x[0], reverse=True)
for c_len, cmd in sortedLengths[0:5]:
print(" {}: {}".format(c_len, cmd.base_command))
return [len(cmd.base_command) for cmd in self.commands]
def group_by_day(self):
ts = [(cmd.timestamp_struct, cmd) for cmd in self.commands]
kv = groupByKey(
[("{}-{}-{}".format(t.tm_year, t.tm_mon, t.tm_mday), cmd)
for t, cmd in ts])
return kv
def get_base_commands(self):
return [cmd.base_command for cmd in self.commands]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--history-dir', type=str, default="data")
parser.add_argument('--analysis-dir', type=str, default="analysis")
parser.add_argument('--plots-dir', type=str, default="plots")
home_dir = os.environ.get("HOME","~")
parser.add_argument('--history-file', type=str,
default="%s/.zsh_history" % home_dir)
subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')
subparsers.required = True
parser_timeFrequencies = subparsers.add_parser('timeFrequencies')
parser_topCommands = subparsers.add_parser('topCommands')
parser_topCommands.add_argument("--num", type=int, default=15)
parser_commandLengths = subparsers.add_parser('commandLengths')
args = parser.parse_args()
def mkdir_p(path):
try:
os.makedirs(path)
        except OSError:
pass
mkdir_p(args.analysis_dir)
mkdir_p(args.plots_dir)
mkdir_p(args.history_dir)
shutil.copyfile(args.history_file, os.path.join(args.history_dir, 'history'))
hist_files = [args.history_dir+"/"+x for x in os.listdir(args.history_dir)]
all_hist = HistoryData(hist_files)
if args.cmd == 'timeFrequencies':
hourly_freqs = all_hist.get_hourly_breakdowns()
means = []
stdevs = []
for hour_freqs in hourly_freqs:
means.append(statistics.mean(hour_freqs))
stdevs.append(statistics.stdev(hour_freqs))
with open(args.analysis_dir+"/time-hours-stats.csv", "w") as f:
f.write(", ".join([str(h) for h in means])+"\n")
f.write(", ".join([str(h) for h in stdevs])+"\n")
with open(args.analysis_dir+"/time-hours-full.csv", "w") as f:
for hour in map(list, zip(*hourly_freqs)):
f.write(", ".join([str(h) for h in hour])+"\n")
if chart:
# draw using termgraph
print('y: Hour of Day, x: Average Commands Executed')
labels = list(map(str, range(24)))
data = [[x] for x in means]
chart_args = {
'stacked': False, 'width': 50, 'no_labels': False, 'format': '{:<5.2f}',
'suffix': '', "vertical": False
}
chart(colors=[], data=data, args=chart_args, labels=labels)
else:
warnings.warn('Termgraph package is not installed, no graph will be drawn')
wdays_freqs = all_hist.get_weekday_breakdowns()
means = []
stdevs = []
for day_freqs in wdays_freqs:
means.append(statistics.mean(day_freqs))
stdevs.append(statistics.stdev(day_freqs))
with open(args.analysis_dir+"/time-wdays-stats.csv", "w") as f:
f.write(", ".join([str(h) for h in means])+"\n")
f.write(", ".join([str(h) for h in stdevs])+"\n")
with open(args.analysis_dir+"/time-wdays-full.csv", "w") as f:
for wday in map(list, zip(*wdays_freqs)):
f.write(", ".join([str(h) for h in wday])+"\n")
if chart:
# draw using termgraph
print('y: Week Day, x: Average Commands Executed')
labels = ("Mon","Tues","Weds","Thurs","Fri","Sat","Sun")
data = [[x] for x in means]
chart_args = {
'stacked': False, 'width': 50, 'no_labels': False, 'format': '{:<5.2f}',
'suffix': '', "vertical": False
}
chart(colors=[], data=data, args=chart_args, labels=labels)
elif args.cmd == 'topCommands':
cmds = all_hist.get_base_commands()
with open(args.analysis_dir+"/top-cmds.csv", "w") as f:
print("Frequency | Command")
print("---|---")
f.write("{},{}\n".format("Frequency", "Command"))
mc_cmds_counter = Counter(cmds).most_common(args.num)
for tup in mc_cmds_counter:
print("{} | {}".format(tup[1], tup[0]))
f.write("{},{}\n".format(tup[1], tup[0]))
if chart:
# draw using termgraph
print('y: Command, x: Frequency')
labels = [x[0] for x in mc_cmds_counter]
data = [[x[1]] for x in mc_cmds_counter]
chart_args = {
'stacked': False, 'width': 50, 'no_labels': False, 'format': '{:<5.2f}',
'suffix': '', "vertical": False
}
chart(colors=[], data=data, args=chart_args, labels=labels)
else:
warnings.warn('Termgraph package is not installed, no graph will be drawn')
elif args.cmd == 'commandLengths':
cmd_lengths = all_hist.get_command_lengths()
with open(args.analysis_dir+"/cmd-lengths.csv", "w") as f:
f.write(", ".join([str(h) for h in cmd_lengths])+"\n")
|
sample/sample-python/sample-filter-tables-log.py
|
George-/tsduck
| 542 |
77028
|
#!/usr/bin/env python
#----------------------------------------------------------------------------
#
# TSDuck sample Python application running a chain of plugins:
# Filter tables using --log-hexa-line to get binary tables in a Python class.
#
# See sample-filter-tables-event.py for an equivalent example using plugin
# events to get a binary content of the tables.
#
#----------------------------------------------------------------------------
import tsduck
# This string is a user-defined marker to locate the hexa line in the log.
LOG_PREFIX = "#TABLE#"
# A Python class which handles TSDuck log messages.
class Logger(tsduck.AbstractAsyncReport):
# This method is invoked each time a message is logged by TSDuck.
def log(self, severity, message):
# Filter, locate, extract and parse the hexa output from plugin "tables".
pos = message.find(LOG_PREFIX)
if pos >= 0:
hexa = message[pos+len(LOG_PREFIX):]
print("Table: %s" % (hexa))
# Create an asynchronous report to log multi-threaded messages.
rep = Logger()
# Create a TS processor, set plugin chain.
tsp = tsduck.TSProcessor(rep)
tsp.input = ['http', 'https://github.com/tsduck/tsduck-test/raw/master/input/test-001.ts']
tsp.plugins = [ ['tables', '--pid', '0', '--log-hexa-line=' + LOG_PREFIX] ]
tsp.output = ['drop']
# Run the TS processing and wait until completion.
tsp.start()
tsp.waitForTermination()
tsp.delete()
# Terminate the asynchronous report.
rep.terminate()
rep.delete()
|
python/ray/util/collective/tests/distributed_multigpu_tests/test_distributed_multigpu_reducescatter.py
|
77loopin/ray
| 21,382 |
77065
|
"""Test the collective reducescatter API on a distributed Ray cluster."""
import pytest
import ray
import cupy as cp
import torch
from ray.util.collective.tests.util import \
create_collective_multigpu_workers, \
init_tensors_for_gather_scatter_multigpu
@pytest.mark.parametrize("tensor_backend", ["cupy", "torch"])
@pytest.mark.parametrize("array_size",
[2, 2**5, 2**10, 2**15, 2**20, [2, 2], [5, 5, 5]])
def test_reducescatter_different_array_size(
ray_start_distributed_multigpu_2_nodes_4_gpus, array_size,
tensor_backend):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
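    # 2 workers x 2 GPUs each -> 4 ranks join the collective, so each reduced
    # shard equals the all-ones input scaled by actual_world_size.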
actors, _ = create_collective_multigpu_workers(world_size)
init_tensors_for_gather_scatter_multigpu(
actors, array_size=array_size, tensor_backend=tensor_backend)
results = ray.get([a.do_reducescatter_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
if tensor_backend == "cupy":
assert (results[i][j] == cp.ones(array_size, dtype=cp.float32)
* actual_world_size).all()
else:
assert (results[i][j] == torch.ones(
array_size, dtype=torch.float32).cuda(j) *
actual_world_size).all()
def test_reducescatter_torch_cupy(
ray_start_distributed_multigpu_2_nodes_4_gpus):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
shape = [10, 10]
actors, _ = create_collective_multigpu_workers(world_size)
# tensor is pytorch, list is cupy
for i, a in enumerate(actors):
ray.get([
a.set_buffer.remote(
shape, tensor_type0="torch", tensor_type1="torch")
])
ray.get([
a.set_list_buffer.remote(
shape, tensor_type0="cupy", tensor_type1="cupy")
])
results = ray.get([a.do_reducescatter_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
assert (results[i][j] == torch.ones(
shape, dtype=torch.float32).cuda(j) * actual_world_size).all()
# tensor is cupy, list is pytorch
for i, a in enumerate(actors):
ray.get([
a.set_buffer.remote(
shape, tensor_type0="cupy", tensor_type1="cupy")
])
ray.get([
a.set_list_buffer.remote(
shape, tensor_type0="torch", tensor_type1="torch")
])
results = ray.get([a.do_reducescatter_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
assert (results[i][j] == cp.ones(shape, dtype=cp.float32) *
actual_world_size).all()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
|
tests/test_efs/test_file_system.py
|
gtourkas/moto
| 5,460 |
77067
|
import re
from os import environ
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_efs
from tests.test_efs.junk_drawer import has_status_code
ARN_PATT = r"^arn:(?P<Partition>[^:\n]*):(?P<Service>[^:\n]*):(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$"
STRICT_ARN_PATT = r"^arn:aws:[a-z]+:[a-z]{2}-[a-z]+-[0-9]:[0-9]+:[a-z-]+\/[a-z0-9-]+$"
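# ARN_PATT loosely decomposes any ARN into named groups (Partition, Service,
# Region, AccountID, Resource); STRICT_ARN_PATT additionally pins the
# partition to "aws" and the expected format of each field.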
SAMPLE_1_PARAMS = {
"CreationToken": "myFileSystem1",
"PerformanceMode": "generalPurpose",
"Backup": True,
"Encrypted": True,
"Tags": [{"Key": "Name", "Value": "Test Group1"}],
}
SAMPLE_2_PARAMS = {
"CreationToken": "myFileSystem2",
"PerformanceMode": "generalPurpose",
"Backup": True,
"AvailabilityZoneName": "us-west-2b",
"Encrypted": True,
"ThroughputMode": "provisioned",
"ProvisionedThroughputInMibps": 60,
"Tags": [{"Key": "Name", "Value": "Test Group1"}],
}
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
environ["AWS_ACCESS_KEY_ID"] = "testing"
environ["AWS_SECRET_ACCESS_KEY"] = "testing"
environ["AWS_SECURITY_TOKEN"] = "testing"
environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture(scope="function")
def efs(aws_credentials):
with mock_efs():
yield boto3.client("efs", region_name="us-east-1")
# Testing Create
# ==============
def test_create_file_system_correct_use(efs):
from datetime import datetime
creation_token = "<PASSWORD>"
create_fs_resp = efs.create_file_system(
CreationToken=creation_token,
Tags=[{"Key": "Name", "Value": "Test EFS Container"}],
)
# Check the response.
assert has_status_code(create_fs_resp, 201)
assert create_fs_resp["CreationToken"] == creation_token
assert "fs-" in create_fs_resp["FileSystemId"]
assert isinstance(create_fs_resp["CreationTime"], datetime)
assert create_fs_resp["LifeCycleState"] == "available"
assert create_fs_resp["Tags"][0] == {"Key": "Name", "Value": "Test EFS Container"}
assert create_fs_resp["ThroughputMode"] == "bursting"
assert create_fs_resp["PerformanceMode"] == "generalPurpose"
assert create_fs_resp["Encrypted"] == False
assert create_fs_resp["NumberOfMountTargets"] == 0
for key_name in ["Value", "ValueInIA", "ValueInStandard"]:
assert key_name in create_fs_resp["SizeInBytes"]
assert create_fs_resp["SizeInBytes"][key_name] == 0
assert re.match(STRICT_ARN_PATT, create_fs_resp["FileSystemArn"])
# Check the (lack of the) backup policy.
with pytest.raises(ClientError) as exc_info:
efs.describe_backup_policy(FileSystemId=create_fs_resp["FileSystemId"])
resp = exc_info.value.response
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 404
assert "PolicyNotFound" in resp["Error"]["Message"]
# Check the arn in detail
match_obj = re.match(ARN_PATT, create_fs_resp["FileSystemArn"])
arn_parts = match_obj.groupdict()
assert arn_parts["ResourceType"] == "file-system"
assert arn_parts["Resource"] == create_fs_resp["FileSystemId"]
assert arn_parts["Service"] == "elasticfilesystem"
assert arn_parts["AccountID"] == create_fs_resp["OwnerId"]
def test_create_file_system_aws_sample_1(efs):
resp = efs.create_file_system(**SAMPLE_1_PARAMS)
resp_metadata = resp.pop("ResponseMetadata")
assert resp_metadata["HTTPStatusCode"] == 201
assert set(resp.keys()) == {
"OwnerId",
"CreationToken",
"Encrypted",
"PerformanceMode",
"FileSystemId",
"FileSystemArn",
"CreationTime",
"LifeCycleState",
"NumberOfMountTargets",
"SizeInBytes",
"Tags",
"ThroughputMode",
}
assert resp["Tags"] == [{"Key": "Name", "Value": "Test Group1"}]
assert resp["PerformanceMode"] == "generalPurpose"
assert resp["Encrypted"]
policy_resp = efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])
assert policy_resp["BackupPolicy"]["Status"] == "ENABLED"
def test_create_file_system_aws_sample_2(efs):
resp = efs.create_file_system(**SAMPLE_2_PARAMS)
resp_metadata = resp.pop("ResponseMetadata")
assert resp_metadata["HTTPStatusCode"] == 201
assert set(resp.keys()) == {
"AvailabilityZoneId",
"AvailabilityZoneName",
"PerformanceMode",
"ProvisionedThroughputInMibps",
"SizeInBytes",
"Tags",
"ThroughputMode",
"CreationTime",
"CreationToken",
"Encrypted",
"LifeCycleState",
"FileSystemId",
"FileSystemArn",
"NumberOfMountTargets",
"OwnerId",
}
assert resp["ProvisionedThroughputInMibps"] == 60
assert resp["AvailabilityZoneId"] == "usw2-az1"
assert resp["AvailabilityZoneName"] == "us-west-2b"
assert resp["ThroughputMode"] == "provisioned"
policy_resp = efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])
assert policy_resp["BackupPolicy"]["Status"] == "ENABLED"
def test_create_file_system_az_name_given_backup_default(efs):
resp = efs.create_file_system(AvailabilityZoneName="us-east-1e")
policy_resp = efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])
assert policy_resp["BackupPolicy"]["Status"] == "ENABLED"
def test_create_file_system_no_creation_token_given(efs):
# Note that from the API docs, it would seem this should create an error. However it
# turns out that botocore just automatically assigns a UUID.
resp = efs.create_file_system()
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 201
assert "CreationToken" in resp
def test_create_file_system_file_system_already_exists(efs):
efs.create_file_system(CreationToken="foo")
with pytest.raises(ClientError) as exc_info:
efs.create_file_system(CreationToken="foo")
resp = exc_info.value.response
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 409
assert "FileSystemAlreadyExists" in resp["Error"]["Message"]
# Testing Describe
# ================
def test_describe_file_systems_minimal_case(efs):
# Create the file system.
create_fs_resp = efs.create_file_system(CreationToken="foobar")
create_fs_resp.pop("ResponseMetadata")
# Describe the file systems.
desc_fs_resp = efs.describe_file_systems()
desc_fs_resp_metadata = desc_fs_resp.pop("ResponseMetadata")
assert desc_fs_resp_metadata["HTTPStatusCode"] == 200
# Check the list results.
fs_list = desc_fs_resp["FileSystems"]
assert len(fs_list) == 1
file_system = fs_list[0]
assert set(file_system.keys()) == {
"CreationTime",
"CreationToken",
"Encrypted",
"LifeCycleState",
"PerformanceMode",
"SizeInBytes",
"Tags",
"ThroughputMode",
"FileSystemId",
"FileSystemArn",
"NumberOfMountTargets",
"OwnerId",
}
assert file_system["FileSystemId"] == create_fs_resp["FileSystemId"]
# Pop out the timestamps and see if the rest of the description is the same.
create_fs_resp["SizeInBytes"].pop("Timestamp")
file_system["SizeInBytes"].pop("Timestamp")
assert file_system == create_fs_resp
def test_describe_file_systems_aws_create_sample_2(efs):
efs.create_file_system(**SAMPLE_2_PARAMS)
# Describe the file systems.
desc_resp = efs.describe_file_systems()
desc_fs_resp_metadata = desc_resp.pop("ResponseMetadata")
assert desc_fs_resp_metadata["HTTPStatusCode"] == 200
# Check the list results.
fs_list = desc_resp["FileSystems"]
assert len(fs_list) == 1
file_system = fs_list[0]
assert set(file_system.keys()) == {
"AvailabilityZoneId",
"AvailabilityZoneName",
"CreationTime",
"CreationToken",
"Encrypted",
"LifeCycleState",
"PerformanceMode",
"ProvisionedThroughputInMibps",
"SizeInBytes",
"Tags",
"ThroughputMode",
"FileSystemId",
"FileSystemArn",
"NumberOfMountTargets",
"OwnerId",
}
assert file_system["ProvisionedThroughputInMibps"] == 60
assert file_system["AvailabilityZoneId"] == "usw2-az1"
assert file_system["AvailabilityZoneName"] == "us-west-2b"
assert file_system["ThroughputMode"] == "provisioned"
def test_describe_file_systems_paging(efs):
# Create several file systems.
for i in range(10):
efs.create_file_system(CreationToken="foobar_{}".format(i))
# First call (Start)
# ------------------
# Call the tested function
resp1 = efs.describe_file_systems(MaxItems=4)
# Check the response status
assert has_status_code(resp1, 200)
# Check content of the result.
resp1.pop("ResponseMetadata")
assert set(resp1.keys()) == {"NextMarker", "FileSystems"}
assert len(resp1["FileSystems"]) == 4
fs_id_set_1 = {fs["FileSystemId"] for fs in resp1["FileSystems"]}
# Second call (Middle)
# --------------------
# Get the next marker.
resp2 = efs.describe_file_systems(MaxItems=4, Marker=resp1["NextMarker"])
# Check the response status
resp2_metadata = resp2.pop("ResponseMetadata")
assert resp2_metadata["HTTPStatusCode"] == 200
# Check the response contents.
assert set(resp2.keys()) == {"NextMarker", "FileSystems", "Marker"}
assert len(resp2["FileSystems"]) == 4
assert resp2["Marker"] == resp1["NextMarker"]
fs_id_set_2 = {fs["FileSystemId"] for fs in resp2["FileSystems"]}
assert fs_id_set_1 & fs_id_set_2 == set()
# Third call (End)
# ----------------
# Get the last marker results
resp3 = efs.describe_file_systems(MaxItems=4, Marker=resp2["NextMarker"])
# Check the response status
resp3_metadata = resp3.pop("ResponseMetadata")
assert resp3_metadata["HTTPStatusCode"] == 200
# Check the response contents.
assert set(resp3.keys()) == {"FileSystems", "Marker"}
assert len(resp3["FileSystems"]) == 2
assert resp3["Marker"] == resp2["NextMarker"]
fs_id_set_3 = {fs["FileSystemId"] for fs in resp3["FileSystems"]}
assert fs_id_set_3 & (fs_id_set_1 | fs_id_set_2) == set()
def test_describe_file_systems_invalid_marker(efs):
with pytest.raises(ClientError) as exc_info:
efs.describe_file_systems(Marker="fiddlesticks")
resp = exc_info.value.response
assert has_status_code(resp, 400)
assert "BadRequest" in resp["Error"]["Message"]
def test_describe_file_systems_invalid_creation_token(efs):
resp = efs.describe_file_systems(CreationToken="fizzle")
assert has_status_code(resp, 200)
assert len(resp["FileSystems"]) == 0
def test_describe_file_systems_invalid_file_system_id(efs):
with pytest.raises(ClientError) as exc_info:
efs.describe_file_systems(FileSystemId="fs-29879313")
resp = exc_info.value.response
assert has_status_code(resp, 404)
assert "FileSystemNotFound" in resp["Error"]["Message"]
def test_describe_file_system_creation_token_and_file_system_id(efs):
with pytest.raises(ClientError) as exc_info:
efs.describe_file_systems(CreationToken="<PASSWORD>", FileSystemId="fs-07987987")
resp = exc_info.value.response
assert has_status_code(resp, 400)
assert "BadRequest" in resp["Error"]["Message"]
# Testing Delete
# ==============
def test_delete_file_system_minimal_case(efs):
# Create the file system
resp = efs.create_file_system()
# Describe the file system, prove it shows up.
desc1 = efs.describe_file_systems()
assert len(desc1["FileSystems"]) == 1
assert resp["FileSystemId"] in {fs["FileSystemId"] for fs in desc1["FileSystems"]}
# Delete the file system.
del_resp = efs.delete_file_system(FileSystemId=resp["FileSystemId"])
assert has_status_code(del_resp, 204)
# Check that the file system is no longer there.
desc2 = efs.describe_file_systems()
assert len(desc2["FileSystems"]) == 0
def test_delete_file_system_invalid_file_system_id(efs):
with pytest.raises(ClientError) as exc_info:
efs.delete_file_system(FileSystemId="fs-2394287")
resp = exc_info.value.response
assert has_status_code(resp, 404)
assert "FileSystemNotFound" in resp["Error"]["Message"]
|
MuonAnalysis/MomentumScaleCalibration/test/MuScleFitMuonProducer_cfg.py
|
ckamtsikis/cmssw
| 852 |
77101
|
# -*- coding: utf-8 -*-
import FWCore.ParameterSet.Config as cms
process = cms.Process("MUSCLEFITMUONPRODUCER")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
"file:/home/demattia/3C83C26B-8B91-DF11-9CE6-90E6BAE8CC13.root"
)
)
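# Conditions source providing the MuScleFit scale object (MuScleFitDBobjectRcd)
# from the offline conditions database.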
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
# connect = cms.string('sqlite_file:dummyScale.db'),
connect = cms.string('oracle://cms_orcoff_prod/CMS_COND_31X_PHYSICSTOOLS'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitDBobjectRcd'),
tag = cms.string('MuScleFit_Scale_JPsi_1_3_invNb_innerTrack')
))
)
process.MuScleFitMuonProducer = cms.EDProducer(
'MuScleFitMuonProducer',
MuonLabel = cms.InputTag("muons"),
DbObjectLabel = cms.untracked.string(""),
PatMuons = cms.bool(False)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('myOutputFile.root')
)
process.p = cms.Path(process.MuScleFitMuonProducer)
process.e = cms.EndPath(process.out)
|
opytimizer/spaces/search.py
|
anukaal/opytimizer
| 528 |
77151
|
"""Traditional-based search space.
"""
import copy
import opytimizer.utils.logging as l
from opytimizer.core import Space
logger = l.get_logger(__name__)
class SearchSpace(Space):
"""A SearchSpace class for agents, variables and methods
related to the search space.
"""
def __init__(self, n_agents, n_variables, lower_bound, upper_bound):
"""Initialization method.
Args:
n_agents (int): Number of agents.
n_variables (int): Number of decision variables.
lower_bound (float, list, tuple, np.array): Minimum possible values.
upper_bound (float, list, tuple, np.array): Maximum possible values.
"""
logger.info('Overriding class: Space -> SearchSpace.')
# Defines missing override arguments
n_dimensions = 1
super(SearchSpace, self).__init__(n_agents, n_variables, n_dimensions,
lower_bound, upper_bound)
self.build()
        logger.info('Class overridden.')
def _initialize_agents(self):
"""Initializes agents with their positions and defines a best agent.
"""
for agent in self.agents:
agent.fill_with_uniform()
self.best_agent = copy.deepcopy(self.agents[0])
|
src/datasets/get_dataset.py
|
Immocat/ACTOR
| 164 |
77185
|
<filename>src/datasets/get_dataset.py
def get_dataset(name="ntu13"):
if name == "ntu13":
from .ntu13 import NTU13
return NTU13
elif name == "uestc":
from .uestc import UESTC
return UESTC
elif name == "humanact12":
from .humanact12poses import HumanAct12Poses
return HumanAct12Poses
def get_datasets(parameters):
name = parameters["dataset"]
DATA = get_dataset(name)
dataset = DATA(split="train", **parameters)
train = dataset
    # test: shallow copy (shares the underlying data) but uses the test split
    from copy import copy
    test = copy(train)
    test.split = "test"
datasets = {"train": train,
"test": test}
# add specific parameters from the dataset loading
dataset.update_parameters(parameters)
return datasets
|
Autocoders/Python/src/fprime_ac/utils/ArrayGenerator.py
|
SSteve/fprime
| 9,182 |
77197
|
#!/usr/bin/env python3
# ===============================================================================
# NAME: ArrayGenerator.py
#
# DESCRIPTION: A generator to produce serializable arrays
#
# AUTHOR: jishii
# EMAIL: <EMAIL>
# DATE CREATED : May 28, 2020
#
# Copyright 2020, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
import sys
import os
from fprime_ac.parsers import XmlParser
from fprime_ac.parsers import XmlArrayParser
from fprime_ac.generators.templates.arrays import array_cpp
from fprime_ac.generators.templates.arrays import array_hpp
def open_file(name, type):
"""
Open the file for writing
"""
#
gse_serializable_install_dir = "DefaultDict" + os.sep + "serializable"
if type == "py":
filename = name + ".py"
#
        # Put Gse serializable in the correct place for the make system
#
if not os.path.exists(gse_serializable_install_dir):
os.makedirs(gse_serializable_install_dir)
os.chdir(gse_serializable_install_dir)
else:
filename = name + "ArrayAc." + type
#
fp = open(filename, "w")
return fp
def write_template(
fp,
c,
name,
namespace,
arr_type,
arr_typeinfo,
arr_size,
format_string,
default_values,
type_id,
string_size,
include_path,
comment,
include_headers,
import_serializables,
import_enums,
import_arrays,
):
"""
Set up and write out templates here
"""
c.name = name
c.namespace = namespace
c.type = arr_type
c.typeinfo = arr_typeinfo
c.size = arr_size
c.format = format_string
c.default = default_values
c.uuid = type_id # uuid = type_id
c.string_size = string_size
c.comment = comment
c.include_headers = include_headers
c.import_serializables = import_serializables
c.import_enums = import_enums
c.import_arrays = import_arrays
fp.writelines(c.__str__())
def generate_array(xml_file):
"""
Produce a *Ac.hpp, *Ac.cpp, and *.py files for serializable arrays.
Return True if xml file was an array, otherwise return False and
generate nothing.
"""
xml = XmlParser.XmlParser(xml_file)
if xml() == "array":
#
# Parse array xml here
#
array_xml = XmlArrayParser.XmlArrayParser(xml_file)
name = array_xml.get_name()
namespace = array_xml.get_namespace()
arr_type = array_xml.get_type()
arr_typeinfo = array_xml.get_typeinfo()
arr_size = int(array_xml.get_size())
format_string = array_xml.get_format()
default_values = array_xml.get_default()
type_id = array_xml.get_type_id()
string_size = array_xml.get_string_size()
if string_size:
string_size = int(string_size)
comment = array_xml.get_comment()
include_headers = array_xml.get_include_header_files()
import_serializables = array_xml.get_includes()
import_enums = array_xml.get_include_enum_files()
import_arrays = array_xml.get_include_array_files()
include_path = array_xml.get_include_path()
# Set up imports
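        # Each included Ai.xml definition maps to its generated Ac.hpp header.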
headers = []
for h in include_headers:
headers.append(h.replace("Ai.xml", "Ac.hpp"))
serials = []
for s in import_serializables:
serials.append(s.replace("Ai.xml", "Ac.hpp"))
enums = []
for e in import_enums:
enums.append(e.replace("Ai.xml", "Ac.hpp"))
arrays = []
for a in import_arrays:
arrays.append(a.replace("Ai.xml", "Ac.hpp"))
#
# Generate the hpp file
#
fp = open_file(name, "hpp")
c = array_hpp.array_hpp()
write_template(
fp,
c,
name,
namespace,
arr_type,
arr_typeinfo,
arr_size,
format_string,
default_values,
type_id,
string_size,
include_path,
comment,
headers,
serials,
enums,
arrays,
)
fp.close()
#
# Generate the cpp file
#
fp = open_file(name, "cpp")
c = array_cpp.array_cpp()
write_template(
fp,
c,
name,
namespace,
arr_type,
arr_typeinfo,
arr_size,
format_string,
default_values,
type_id,
string_size,
include_path,
comment,
headers,
serials,
enums,
arrays,
)
fp.close()
return True
else:
return False
if __name__ == "__main__":
xmlfile = sys.argv[1]
print(generate_array(xmlfile))
|
tests/test_metrics/test_accuracy.py
|
Naoki-Wake/mmaction2
| 648 |
77222
|
import os.path as osp
import random
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.core import (ActivityNetLocalization,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, mean_average_precision,
mean_class_accuracy, mmit_mean_average_precision,
pairwise_temporal_iou, top_k_accuracy)
from mmaction.core.evaluation.ava_utils import ava_eval
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
"""Calculate the ground truth confusion matrix."""
max_index = max(max(gt_labels), max(pred_labels))
confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
for gt, pred in zip(gt_labels, pred_labels):
confusion_mat[gt][pred] += 1
del_index = []
for i in range(max_index):
if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
del_index.append(i)
confusion_mat = np.delete(confusion_mat, del_index, axis=0)
confusion_mat = np.delete(confusion_mat, del_index, axis=1)
if normalize is not None:
        confusion_mat = np.array(confusion_mat, dtype=np.float64)
m, n = confusion_mat.shape
if normalize == 'true':
for i in range(m):
s = np.sum(confusion_mat[i], dtype=float)
if s == 0:
continue
confusion_mat[i, :] = confusion_mat[i, :] / s
print(confusion_mat[i, :])
elif normalize == 'pred':
for i in range(n):
s = sum(confusion_mat[:, i])
if s == 0:
continue
confusion_mat[:, i] = confusion_mat[:, i] / s
elif normalize == 'all':
s = np.sum(confusion_mat)
if s != 0:
confusion_mat /= s
return confusion_mat
def test_activitynet_localization():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../data/eval_localization'))
gt_path = osp.join(data_prefix, 'gt.json')
result_path = osp.join(data_prefix, 'result.json')
localization = ActivityNetLocalization(gt_path, result_path)
results = localization.evaluate()
mAP = np.array([
0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222,
0.52083333, 0.52083333, 0.52083333, 0.5
])
average_mAP = 0.6177579365079365
assert_array_almost_equal(results[0], mAP)
assert_array_almost_equal(results[1], average_mAP)
def test_ava_detection():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../data/eval_detection'))
gt_path = osp.join(data_prefix, 'gt.csv')
result_path = osp.join(data_prefix, 'pred.csv')
label_map = osp.join(data_prefix, 'action_list.txt')
# eval bbox
detection = ava_eval(result_path, 'mAP', label_map, gt_path, None)
assert_array_almost_equal(detection['[email protected]'], 0.09385522)
def test_confusion_matrix():
# custom confusion_matrix
gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)]
pred_labels = np.random.randint(10, size=100, dtype=np.int64)
for normalize in [None, 'true', 'pred', 'all']:
cf_mat = confusion_matrix(pred_labels, gt_labels, normalize)
gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize)
assert_array_equal(cf_mat, gt_cf_mat)
with pytest.raises(ValueError):
# normalize must be in ['true', 'pred', 'all', None]
confusion_matrix([1], [1], 'unsupport')
with pytest.raises(TypeError):
# y_pred must be list or np.ndarray
confusion_matrix(0.5, [1])
with pytest.raises(TypeError):
# y_real must be list or np.ndarray
confusion_matrix([1], 0.5)
with pytest.raises(TypeError):
# y_pred dtype must be np.int64
confusion_matrix([0.5], [1])
with pytest.raises(TypeError):
# y_real dtype must be np.int64
confusion_matrix([1], [0.5])
def test_topk():
scores = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
# top1 acc
k = (1, )
top1_labels_0 = [3, 1, 1, 1]
top1_labels_25 = [2, 0, 4, 3]
top1_labels_50 = [2, 2, 3, 1]
top1_labels_75 = [2, 2, 2, 3]
top1_labels_100 = [2, 2, 2, 4]
res = top_k_accuracy(scores, top1_labels_0, k)
assert res == [0]
res = top_k_accuracy(scores, top1_labels_25, k)
assert res == [0.25]
res = top_k_accuracy(scores, top1_labels_50, k)
assert res == [0.5]
res = top_k_accuracy(scores, top1_labels_75, k)
assert res == [0.75]
res = top_k_accuracy(scores, top1_labels_100, k)
assert res == [1.0]
# top1 acc, top2 acc
k = (1, 2)
top2_labels_0_100 = [3, 1, 1, 1]
top2_labels_25_75 = [3, 1, 2, 3]
res = top_k_accuracy(scores, top2_labels_0_100, k)
assert res == [0, 1.0]
res = top_k_accuracy(scores, top2_labels_25_75, k)
assert res == [0.25, 0.75]
# top1 acc, top3 acc, top5 acc
k = (1, 3, 5)
top5_labels_0_0_100 = [1, 0, 3, 2]
top5_labels_0_50_100 = [1, 3, 4, 0]
top5_labels_25_75_100 = [2, 3, 0, 2]
res = top_k_accuracy(scores, top5_labels_0_0_100, k)
assert res == [0, 0, 1.0]
res = top_k_accuracy(scores, top5_labels_0_50_100, k)
assert res == [0, 0.5, 1.0]
res = top_k_accuracy(scores, top5_labels_25_75_100, k)
assert res == [0.25, 0.75, 1.0]
def test_mean_class_accuracy():
scores = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
# test mean class accuracy in [0, 0.25, 1/3, 0.75, 1.0]
mean_cls_acc_0 = np.int64([1, 4, 0, 2])
mean_cls_acc_25 = np.int64([2, 0, 4, 3])
mean_cls_acc_33 = np.int64([2, 2, 2, 3])
mean_cls_acc_75 = np.int64([4, 2, 2, 4])
mean_cls_acc_100 = np.int64([2, 2, 2, 4])
assert mean_class_accuracy(scores, mean_cls_acc_0) == 0
assert mean_class_accuracy(scores, mean_cls_acc_25) == 0.25
assert mean_class_accuracy(scores, mean_cls_acc_33) == 1 / 3
assert mean_class_accuracy(scores, mean_cls_acc_75) == 0.75
assert mean_class_accuracy(scores, mean_cls_acc_100) == 1.0
def test_mmit_mean_average_precision():
# One sample
y_true = [np.array([0, 0, 1, 1])]
y_scores = [np.array([0.1, 0.4, 0.35, 0.8])]
    mean_ap = mmit_mean_average_precision(y_scores, y_true)
precision = [2.0 / 3.0, 0.5, 1., 1.]
recall = [1., 0.5, 0.5, 0.]
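    # AP is the area under the precision-recall curve; recall is listed in
    # decreasing order here, so the recall differences are negated in the sum.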
target = -np.sum(np.diff(recall) * np.array(precision)[:-1])
    assert target == mean_ap
def test_pairwise_temporal_iou():
target_segments = np.array([])
candidate_segments = np.array([])
with pytest.raises(ValueError):
pairwise_temporal_iou(target_segments, candidate_segments)
# test temporal iou
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([[2, 3], [2.5, 3]])
temporal_iou = pairwise_temporal_iou(candidate_segments, target_segments)
assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]])
# test temporal overlap_self
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([[2, 3], [2.5, 3]])
temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
candidate_segments, target_segments, calculate_overlap_self=True)
assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]])
# test temporal overlap_self when candidate_segments is 1d
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([2.5, 3])
temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
candidate_segments, target_segments, calculate_overlap_self=True)
assert_array_equal(temporal_overlap_self, [0, 1])
def test_average_recall_at_avg_proposals():
ground_truth1 = {
'v_test1': np.array([[0, 1], [1, 2]]),
'v_test2': np.array([[0, 1], [1, 2]])
}
ground_truth2 = {'v_test1': np.array([[0, 1]])}
proposals1 = {
'v_test1': np.array([[0, 1, 1], [1, 2, 1]]),
'v_test2': np.array([[0, 1, 1], [1, 2, 1]])
}
proposals2 = {
'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]),
'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]])
}
proposals3 = {
'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)])
}
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth1, proposals1, 4))
assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10)
assert_array_equal(avg_recall, [0.] * 49 + [0.5] * 50 + [1.])
assert_array_almost_equal(
proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
assert auc == 25.5
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth1, proposals2, 4))
assert_array_equal(recall, [[0.] * 100] * 10)
assert_array_equal(avg_recall, [0.] * 100)
assert_array_almost_equal(
proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
assert auc == 0
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth2, proposals3, 100))
assert_array_equal(recall, [[1.] * 100] * 10)
assert_array_equal(avg_recall, ([1.] * 100))
assert_array_almost_equal(
proposals_per_video, np.arange(1, 101, 1), decimal=10)
assert auc == 99.0
def test_get_weighted_score():
score_a = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
score_b = [
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]),
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])
]
weighted_score = get_weighted_score([score_a], [1])
assert np.all(np.isclose(np.array(score_a), np.array(weighted_score)))
coeff_a, coeff_b = 2., 1.
weighted_score = get_weighted_score([score_a, score_b], [coeff_a, coeff_b])
ground_truth = [
x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b)
]
assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score)))
def test_mean_average_precision():
def content_for_unittest(scores, labels, result):
gt = mean_average_precision(scores, labels)
assert gt == result
scores = [
np.array([0.1, 0.2, 0.3, 0.4]),
np.array([0.2, 0.3, 0.4, 0.1]),
np.array([0.3, 0.4, 0.1, 0.2]),
np.array([0.4, 0.1, 0.2, 0.3])
]
label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]])
result1 = 2 / 3
label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]])
result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0])
content_for_unittest(scores, label1, result1)
content_for_unittest(scores, label2, result2)
|
training/layers.py
|
BrandoZhang/alis
| 176 |
77261
|
<reponame>BrandoZhang/alis
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
import numpy as np
from omegaconf import OmegaConf, DictConfig
from torch_utils import persistence
from torch_utils.ops import bias_act
from torch_utils import misc
from torch_utils.ops.fast_bilinear_mult import fast_manual_bilinear_mult_row
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 1, # Learning rate multiplier.
bias_init = 0, # Initial value for the additive bias.
):
super().__init__()
self.activation = activation
self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.bias_gain = lr_multiplier
def forward(self, x):
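        # Equalized learning rate: weights are stored unscaled and multiplied
        # by weight_gain on every forward pass.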
w = self.weight.to(x.dtype) * self.weight_gain
b = self.bias
if b is not None:
b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
x = torch.addmm(b.unsqueeze(0), x, w.t())
else:
x = x.matmul(w.t())
x = bias_act.bias_act(x, b, act=self.activation)
return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class GenInput(nn.Module):
def __init__(self, cfg: DictConfig, channel_dim: int, w_dim: int):
super().__init__()
self.cfg = cfg
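        # Dispatch on the configured input type; every branch also sets
        # self.channel_dim accordingly.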
if self.cfg.type == 'multi_modal':
self.input = MultiModalInput(channel_dim, self.cfg.resolution, w_dim, **self.cfg.kwargs)
self.channel_dim = channel_dim
elif self.cfg.type == 'const':
self.input = torch.nn.Parameter(torch.randn([channel_dim, self.cfg.resolution, self.cfg.resolution]))
self.channel_dim = channel_dim
elif self.cfg.type == 'periodic_const':
self.input = PeriodicConstInput(channel_dim, self.cfg.resolution)
self.channel_dim = channel_dim
elif self.cfg.type == 'grid':
self.input = GridInput(channel_dim, self.cfg.resolution, w_dim, **self.cfg.kwargs)
self.channel_dim = self.input.get_channel_dim()
elif self.cfg.type == 'coords':
self.input = CoordsInput(self.cfg.resolution, **self.cfg.kwargs)
self.channel_dim = self.input.get_channel_dim()
elif self.cfg.type == 'modulated':
self.input = ModulatedInput(channel_dim, self.cfg.resolution, w_dim)
self.channel_dim = channel_dim
elif self.cfg.type == 'coord_noise':
self.input = CoordNoiseInput(channel_dim, self.cfg.resolution, **self.cfg.kwargs)
self.channel_dim = self.input.get_channel_dim()
else:
raise NotImplementedError
self.is_modulated = self.cfg.type in ('multi_modal', 'modulated', 'grid')
def forward(self, batch_size: int, w: Tensor=None, dtype=None, memory_format=None, w_context=None, left_borders_idx=None) -> Tensor:
if self.cfg.type == 'multi_modal':
x = self.input(w).to(dtype=dtype, memory_format=memory_format)
elif self.cfg.type == 'const':
x = self.input.to(dtype=dtype, memory_format=memory_format)
x = x.unsqueeze(0).repeat([batch_size, 1, 1, 1])
elif self.cfg.type == 'periodic_const':
x = self.input(batch_size)
x = x.to(dtype=dtype, memory_format=memory_format)
elif self.cfg.type == 'grid':
x = self.input(batch_size, w, w_context, left_borders_idx)
x = x.to(dtype=dtype, memory_format=memory_format)
elif self.cfg.type == 'coords':
x = self.input(batch_size).to(dtype=dtype, memory_format=memory_format)
elif self.cfg.type == 'modulated':
x = self.input(w).to(dtype=dtype, memory_format=memory_format)
elif self.cfg.type == 'coord_noise':
x = self.input(batch_size, left_borders_idx)
x = x.to(dtype=dtype, memory_format=memory_format)
else:
raise NotImplementedError
return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class MultiModalInput(nn.Module):
def __init__(self,
channel_dim: int,
resolution: int,
w_dim: int,
num_groups: int,
num_modes: int,
demodulate: bool,
temperature: float):
super().__init__()
assert channel_dim % num_groups == 0
self.num_groups = num_groups
self.num_modes = num_modes
self.num_params = num_groups * num_modes
self.resolution = resolution
self.channel_dim = channel_dim
self.demodulate = demodulate
self.temperature = temperature
self.inputs = nn.Parameter(torch.randn(1, num_groups, num_modes, channel_dim // num_groups, resolution, resolution))
self.affine = FullyConnectedLayer(w_dim, self.num_params, bias_init=0)
    def forward(self, w: Tensor) -> Tensor:
        batch_size = w.shape[0]
        styles = self.affine(w) # [batch_size, num_groups * num_modes]
probs = (styles.view(batch_size, self.num_groups, self.num_modes) / self.temperature).softmax(dim=1)
probs = probs.view(batch_size, self.num_groups, self.num_modes, 1, 1, 1)
inputs = (self.inputs * probs).sum(dim=2) # [batch_size, num_groups, channel_dim // num_groups, resolution, resolution]
inputs = inputs.view(batch_size, self.channel_dim, self.resolution, self.resolution)
if self.demodulate:
inputs = inputs / inputs.norm(float('inf'), dim=[2, 3], keepdim=True)
return inputs
@persistence.persistent_class
class ModulatedInput(nn.Module):
def __init__(self, channel_dim: int, resolution: int, w_dim: int, demodulate: bool=True):
super().__init__()
self.const_input = torch.nn.Parameter(torch.randn([channel_dim, resolution, resolution]))
self.affine = FullyConnectedLayer(w_dim, channel_dim, bias_init=1)
self.channel_dim = channel_dim
self.demodulate = demodulate
def forward(self, w: Tensor) -> Tensor:
styles = self.affine(w) # [batch_size, channel_dim]
        x = self.const_input * styles.view(w.size(0), self.channel_dim, 1, 1)
        if self.demodulate:
            x = x * (x.square().sum(dim=[1, 2, 3], keepdim=True) + 1e-8).rsqrt()
return x
@persistence.persistent_class
class CoordsInput(nn.Module):
def __init__(self, resolution: int, **basis_gen_kwargs):
super().__init__()
batch_size = 1
raw_coords = generate_coords(1, resolution)
basis = generate_logarithmic_basis(resolution, **basis_gen_kwargs) # [dim, 2]
basis = basis.unsqueeze(0) # [1, dim, 2]
coord_embs = torch.einsum('bdc,bcxy->bdxy', basis, raw_coords).sin() # [batch_size, dim, img_size, img_size]
self.register_buffer('coord_embs', coord_embs) # [batch_size, dim, img_size, img_size]
self.coord_embs_cache = None
def get_channel_dim(self) -> int:
return self.coord_embs.shape[1]
def forward(self, batch_size: int) -> Tensor:
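        # Cache the batch-expanded embeddings so repeated calls with the same
        # batch size reuse the tensor instead of re-allocating it.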
if (self.coord_embs_cache is None) or batch_size != self.coord_embs_cache.shape[0]:
self.coord_embs_cache = self.coord_embs.repeat(batch_size, 1, 1, 1)
self.coord_embs_cache = self.coord_embs_cache.contiguous()
return self.coord_embs_cache
@persistence.persistent_class
class CoordNoiseInput(nn.Module):
def __init__(self, channel_dim: int, resolution: int, coords_cfg={}):
super().__init__()
self.channel_dim = channel_dim
self.resolution = resolution
self.coord_fuser = ModulatedCoordFuser(OmegaConf.create(coords_cfg), w_dim=None, resolution=resolution, use_fp16=False)
def get_channel_dim(self) -> int:
return self.channel_dim + self.coord_fuser.compute_total_dim()
def forward(self, batch_size: int, left_borders_idx: Tensor) -> Tensor:
misc.assert_shape(left_borders_idx, [batch_size])
noise = torch.randn(batch_size, self.channel_dim, self.resolution, self.resolution, device=left_borders_idx.device)
out = self.coord_fuser(noise, left_borders_idx=left_borders_idx, memory_format=torch.contiguous_format)
return out
@persistence.persistent_class
class PeriodicConstInput(nn.Module):
"""
It is like constant input, but periodic
"""
def __init__(self, channel_dim: int, resolution: int):
super().__init__()
self.resolution = resolution
self.const_input = torch.nn.Parameter(torch.randn([channel_dim, resolution, resolution]))
def forward(self, batch_size: int, shifts: Optional[Tensor]=None) -> Tensor:
x = self.const_input.unsqueeze(0).repeat([batch_size, 1, 1, 1]) # [b, c, h, w]
if shifts is not None:
misc.assert_shape(shifts, [batch_size, 2])
assert shifts.max().item() <= 1.0
assert shifts.min().item() >= -1.0
coords = generate_coords(batch_size, self.const_input.shape[1], device=x.device, align_corners=True) # [b, 2, h, w]
# # Applying the shift
# coords = coords + shifts.view(batch_size, 2, 1, 1) # [b, 2, h, w]
# # Converting into F.grid_sample coords:
# # 1. Convert the range
# coords = coords + 1 # [-1, 1] => [0, 2]
# # 2. Perform padding_mode=replicate
# # coords[coords > 0] = coords[coords > 0] % (2 + 1e-12)
# # coords[coords < 0] = -(-coords[coords < 0] % 2) + 2 + (1e-12)
# # 3. Convert back to [-1, 1] range
# coords = coords - 1 # [0, 2] => [-1, 1]
# # 4. F.grid_sample uses flipped coordinates (TODO: should we too?)
# coords[:, 1] = coords[:, 1] * -1.0
# # 5. It also uses different shape
# coords = coords.permute(0, 2, 3, 1) # [b, h, w, 2]
# Performing a slower, but less error-prone approach
# (convert shifts from [-1, 1] to [-2, 2], so we are now [-3, 3])
coords = coords + 2 * shifts.view(batch_size, 2, 1, 1) # [b, 2, h, w]
coords = coords / 3 # [-3, 3] => [-1, 1] range
coords = coords.permute(0, 2, 3, 1)
assert coords.min().item() >= -1
assert coords.max().item() <= 1
x = torch.cat([x, x, x], dim=3) # [b, c, h, w * 3]
x = F.grid_sample(x, coords, mode='bilinear', align_corners=True) # [b, c, h, w]
# torch.save(coords.detach().cpu(), '/tmp/trash/coords')
# torch.save(x.detach().cpu(), '/tmp/trash/x')
# torch.save(self.const_input.detach().cpu(), '/tmp/trash/const_input')
# assert torch.allclose(x[0], self.const_input, atol=1e-4)
return x
@persistence.persistent_class
class GridInput(nn.Module):
"""
    For COCO-GAN, our input is grid-like and consists of 2 things:
- learnable coordinates
- high-frequency (up to 1 image for the whole period)
"""
def __init__(self, channel_dim: int, resolution: int, w_dim: int, grid_size: int, w_coord_dist: int, w_lerp_multiplier: bool):
super().__init__()
self.resolution = resolution
self.grid_size = grid_size
self.channel_dim = channel_dim
self.w_dim = w_dim
self.w_lerp_multiplier = w_lerp_multiplier
        # Distance between `w` and `w_after`, measured in number of steps.
        # By default it equals grid_size, but it likely should be increased.
self.w_coord_dist = w_coord_dist
# # Learnable patch coordinate embeddings
# self.const_input = torch.nn.Parameter(torch.randn([channel_dim // 2, resolution, resolution]))
self.input_column = torch.nn.Parameter(torch.randn([channel_dim, resolution]))
# Predictable patch embeddings
self.affine = FullyConnectedLayer(w_dim, channel_dim, bias_init=0)
# Fixed coordinate patch embeddings
self.register_buffer('basis', generate_logarithmic_basis(
resolution,
self.channel_dim,
remove_lowest_freq=True,
use_diagonal=True)) # [dim, 2]
def get_channel_dim(self) -> int:
return self.basis.shape[0] * 2 + self.channel_dim
def forward(self, batch_size: int, w: Tensor, w_context: Tensor, left_borders_idx: Tensor) -> Tensor:
misc.assert_shape(w, [batch_size, self.w_dim])
misc.assert_shape(w_context, [batch_size, 2, self.w_dim])
misc.assert_shape(left_borders_idx, [batch_size])
# Computing the global features
w_all = torch.stack([w_context[:, 0], w, w_context[:, 1]], dim=1) # [b, 3, w_dim]
        styles = self.affine(w_all.view(-1, self.w_dim)).view(batch_size, 3, self.channel_dim) # [b, 3, c]
raw_const_inputs = self.input_column.unsqueeze(0).unsqueeze(3).repeat(batch_size, 1, 1, self.resolution) # [b, c, h, w]
latents = fast_manual_bilinear_mult_row(raw_const_inputs, styles, left_borders_idx, self.grid_size, self.w_coord_dist, self.w_lerp_multiplier)
# Ok, now for each cell in the grid we need to compute its high-frequency coordinates
# Otherwise, it will be too difficult for the model to understand the relative positions
coords = generate_shifted_coords(left_borders_idx, self.resolution, self.grid_size, self.w_coord_dist, device=w.device)
bases = self.basis.unsqueeze(0).repeat(batch_size, 1, 1) # [batch_size, dim, 2]
raw_coord_embs = torch.einsum('bdc,bcxy->bdxy', bases, coords) # [batch_size, dim, img_size, img_size]
coord_embs = torch.cat([raw_coord_embs.sin(), raw_coord_embs.cos()], dim=1) # [batch_size, dim * 2, img_size, img_size]
# Computing final inputs
inputs = torch.cat([latents, coord_embs], dim=1) # [b, c, grid_size, grid_size]
return inputs
@persistence.persistent_class
class ModulatedCoordFuser(nn.Module):
"""
CoordFuser which concatenates coordinates across dim=1 (we assume channel_first format)
"""
def __init__(self, cfg: DictConfig, w_dim: int, resolution: int, use_fp16: bool):
super().__init__()
self.cfg = cfg
self.dim = self.cfg.channels_dict[str(resolution)].dim
self.use_cosine = self.cfg.channels_dict[str(resolution)].use_cosine
self.W_size = self.dim * self.cfg.coord_dim
self.b_size = self.dim
self.resolution = resolution
if self.cfg.logarithmic:
self.register_buffer('basis', generate_logarithmic_basis(
resolution,
self.dim,
remove_lowest_freq=self.cfg.remove_lowest_freq,
use_diagonal=self.cfg.channels_dict[str(resolution)].use_diagonal)) # [dim, 2]
self.coord_embs_cache = None
if self.cfg.growth_schedule.enabled:
self.final_img_resolution = self.cfg.growth_schedule['final_img_resolution'] # TODO(universome): do not hardcode...
assert resolution <= self.final_img_resolution
self.num_freqs = int(np.ceil(np.log2(resolution)))
self.register_buffer('growth_weights', torch.zeros(self.num_freqs)) # [num_freqs]
self.progressive_growing_update(0)
# Here we use "* 4" because we have 1) vertical, 2) horizontal,
# 3) main-diagonal, 4) anti-diagonal wavefronts
assert self.num_freqs * 4 <= self.dim
else:
self.growth_weights = None
else:
assert not self.cfg.growth_schedule.enabled
if not self.cfg.no_fourier_fallback:
self.affine = FullyConnectedLayer(w_dim, self.W_size + self.b_size, bias_init=0)
if self.cfg.use_cips_embs > 0:
            dtype = torch.float16 if use_fp16 else torch.float32
self.cips_embs = nn.Parameter(torch.randn(1, self.dim, resolution, resolution).to(dtype).contiguous())
self.total_dim = self.compute_total_dim()
self.is_modulated = False if (self.cfg.fallback or self.cfg.no_fourier_fallback) else True
def compute_total_dim(self) -> int:
if self.cfg.fallback: return 0
total_dim = 0
if self.cfg.no_fourier_fallback:
total_dim += self.cfg.coord_dim
elif self.cfg.logarithmic:
if self.use_cosine:
total_dim += self.basis.shape[0] * 2
else:
total_dim += self.basis.shape[0]
else:
if self.use_cosine:
total_dim += self.dim * 2
else:
total_dim += self.dim
if self.cfg.use_cips_embs:
total_dim += self.dim
return total_dim
def progressive_growing_update(self, current_iteration: int):
if self.cfg.growth_schedule.enabled:
# TODO(universome): fix this
num_freqs = np.ceil(np.log2(self.final_img_resolution)).astype(int)
common_args = (self.cfg.growth_schedule.time_to_reach_all, num_freqs)
# We use (j-1) here instead of j as in the nerfies paper
# because they use (x,y) linear input for their coordinate embeddings and we don't
# Actually, maybe we should...
# So, in this way we want our lowest frequency to be always enabled
growth_weights = [compute_freq_weight(current_iteration, j - 1, *common_args) for j in range(self.growth_weights.shape[0])] # [num_freqs]
self.growth_weights.copy_(torch.tensor(growth_weights))
def forward(self, x: Tensor, w: Tensor=None, left_borders_idx: Tensor=None, dtype=None, memory_format=None) -> Tensor:
"""
Dims:
@arg x is [batch_size, in_channels, img_size, img_size]
@arg w is [batch_size, w_dim]
@return out is [batch_size, in_channels + fourier_dim + cips_dim, img_size, img_size]
"""
assert memory_format is torch.contiguous_format
if self.cfg.fallback:
return x
batch_size, in_channels, img_size = x.shape[:3]
if left_borders_idx is not None:
raw_coords = generate_shifted_coords(left_borders_idx, img_size, self.cfg.grid_size, self.cfg.w_coord_dist, device=x.device)
else:
raw_coords = generate_coords(batch_size, img_size, x.device) # [batch_size, coord_dim, img_size, img_size]
if self.cfg.no_fourier_fallback:
coord_embs = raw_coords
elif self.cfg.logarithmic:
if (not self.cfg.growth_schedule.enabled) \
or (self.coord_embs_cache is None) \
or (self.coord_embs_cache.shape != (batch_size, 2, self.basis.shape[0])) \
or (self.coord_embs_cache.device != x.device):
if self.cfg.growth_schedule.enabled:
growth_weights = self.growth_weights.repeat(4) # [0,1,2] => [0,1,2,0,1,2,...]
basis = self.basis * growth_weights.unsqueeze(1) # [dim, 2]
else:
basis = self.basis # [dim, 2]
bases = basis.unsqueeze(0).repeat(batch_size, 1, 1) # [batch_size, dim, 2]
raw_coord_embs = torch.einsum('bdc,bcxy->bdxy', bases, raw_coords) # [batch_size, dim, img_size, img_size]
if self.use_cosine:
self.coord_embs_cache = torch.cat([raw_coord_embs.sin(), raw_coord_embs.cos()], dim=1) # [batch_size, dim * 2, img_size, img_size]
else:
self.coord_embs_cache = raw_coord_embs.sin()
self.coord_embs_cache = self.coord_embs_cache.contiguous()
coord_embs = self.coord_embs_cache
else:
mod = self.affine(w) # [batch_size, W_size + b_size]
W = self.cfg.fourier_scale * mod[:, :self.W_size] # [batch_size, W_size]
W = W.view(batch_size, self.dim, self.cfg.coord_dim) # [batch_size, fourier_dim, coord_dim]
bias = mod[:, self.W_size:].view(batch_size, self.dim, 1, 1) # [batch_size, fourier_dim, 1]
raw_coord_embs = (torch.einsum('bdc,bcxy->bdxy', W, raw_coords) + bias) # [batch_size, coord_dim, img_size, img_size]
if self.use_cosine:
coord_embs = torch.cat([raw_coord_embs.sin(), raw_coord_embs.cos()], dim=1) # [batch_size, dim * 2, img_size, img_size]
else:
coord_embs = raw_coord_embs.sin()
coord_embs = coord_embs.to(dtype=dtype, memory_format=memory_format)
out = torch.cat([x, coord_embs], dim=1) # [batch_size, in_channels + fourier_dim, img_size, img_size]
if self.cfg.use_cips_embs > 0:
cips_embs = self.cips_embs.repeat([batch_size, 1, 1, 1])
cips_embs = cips_embs.to(dtype=dtype, memory_format=memory_format)
out = torch.cat([out, cips_embs], dim=1) # [batch_size, in_channels + fourier_dim + cips_emb, img_size, img_size]
return out
def fmm_modulate(
conv_weight: Tensor,
        fmm_weights: Tensor,
fmm_mod_type: str='mult',
demodulate: bool=False,
fmm_add_weight: float=1.0,
activation: Optional[str]=None) -> Tensor:
"""
    Applies low-rank FMM modulation weights to a given conv weight tensor
"""
batch_size, out_channels, in_channels, kh, kw = conv_weight.shape
assert fmm_weights.shape[1] % (in_channels + out_channels) == 0
rank = fmm_weights.shape[1] // (in_channels + out_channels)
lhs = fmm_weights[:, : rank * out_channels].view(batch_size, out_channels, rank)
rhs = fmm_weights[:, rank * out_channels :].view(batch_size, rank, in_channels)
modulation = lhs @ rhs # [batch_size, out_channels, in_channels]
modulation = modulation / np.sqrt(rank)
misc.assert_shape(modulation, [batch_size, out_channels, in_channels])
modulation = modulation.unsqueeze(3).unsqueeze(4) # [batch_size, out_channels, in_channels, 1, 1]
if activation == "tanh":
modulation = modulation.tanh()
elif activation in ['linear', None]:
pass
elif activation == 'sigmoid':
modulation = modulation.sigmoid() - 0.5
else:
raise NotImplementedError
if fmm_mod_type == 'mult':
out = conv_weight * (modulation + 1.0)
elif fmm_mod_type == 'add':
out = conv_weight + fmm_add_weight * modulation
else:
raise NotImplementedError
if demodulate:
out = out / out.norm(dim=[2, 3, 4], keepdim=True)
return out
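# Minimal shape-check sketch for fmm_modulate (illustrative only): the
# modulation vector packs a rank-r factorization of an out_channels x
# in_channels matrix, so its length must be rank * (out_channels + in_channels).
def _fmm_modulate_sketch():
    batch_size, out_channels, in_channels, k, rank = 2, 8, 4, 3, 2
    conv_weight = torch.randn(batch_size, out_channels, in_channels, k, k)
    fmm_weights = torch.randn(batch_size, rank * (out_channels + in_channels))
    out = fmm_modulate(conv_weight, fmm_weights, demodulate=True)
    assert out.shape == conv_weight.shape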
def generate_coords(batch_size: int, img_size: int, device='cpu', align_corners: bool=False) -> Tensor:
"""
    Generates the coordinates in [-1, 1] range for a square image
    of size (img_size x img_size), with the y-axis pointing up, so that
    - upper left corner: coords[0, 0] = (-1, 1)
    - lower right corner: coords[img_size - 1, img_size - 1] = (1, -1)
"""
if align_corners:
row = torch.linspace(-1, 1, img_size, device=device).float() # [img_size]
else:
row = (torch.arange(0, img_size, device=device).float() / img_size) * 2 - 1 # [img_size]
x_coords = row.view(1, -1).repeat(img_size, 1) # [img_size, img_size]
y_coords = x_coords.t().flip(dims=(0,)) # [img_size, img_size]
coords = torch.stack([x_coords, y_coords], dim=2) # [img_size, img_size, 2]
coords = coords.view(-1, 2) # [img_size ** 2, 2]
coords = coords.t().view(1, 2, img_size, img_size).repeat(batch_size, 1, 1, 1) # [batch_size, 2, img_size, img_size]
return coords
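# Corner-convention sketch for generate_coords (illustrative only): with
# align_corners=True the grid spans [-1, 1] exactly, with y pointing up.
def _generate_coords_sketch():
    coords = generate_coords(batch_size=1, img_size=4, align_corners=True)
    assert coords.shape == (1, 2, 4, 4)
    assert coords[0, :, 0, 0].tolist() == [-1.0, 1.0]    # upper-left (x, y)
    assert coords[0, :, -1, -1].tolist() == [1.0, -1.0]  # lower-right (x, y)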
def generate_logarithmic_basis(
resolution: int,
        max_num_feats: float = float('inf'),
remove_lowest_freq: bool=False,
use_diagonal: bool=True) -> Tensor:
"""
Generates a directional logarithmic basis with the following directions:
- horizontal
- vertical
- main diagonal
- anti-diagonal
"""
max_num_feats_per_direction = np.ceil(np.log2(resolution)).astype(int)
bases = [
generate_horizontal_basis(max_num_feats_per_direction),
generate_vertical_basis(max_num_feats_per_direction),
]
if use_diagonal:
bases.extend([
generate_diag_main_basis(max_num_feats_per_direction),
generate_anti_diag_basis(max_num_feats_per_direction),
])
if remove_lowest_freq:
bases = [b[1:] for b in bases]
# If we do not fit into `max_num_feats`, then trying to remove the features in the order:
# 1) anti-diagonal 2) main-diagonal
while (max_num_feats_per_direction * len(bases) > max_num_feats) and (len(bases) > 2):
bases = bases[:-1]
basis = torch.cat(bases, dim=0)
    # If we still do not fit, then let's remove every second feature,
    # then every third, every fourth and so on.
    # We cannot drop the whole horizontal or vertical direction since otherwise
    # the model won't be able to locate the position
# (unless the previously computed embeddings encode the position)
# while basis.shape[0] > max_num_feats:
# num_exceeding_feats = basis.shape[0] - max_num_feats
# basis = basis[::2]
assert basis.shape[0] <= max_num_feats, \
f"num_coord_feats > max_num_fixed_coord_feats: {basis.shape, max_num_feats}."
return basis
def generate_horizontal_basis(num_feats: int) -> Tensor:
return generate_wavefront_basis(num_feats, [0.0, 1.0], 4.0)
def generate_vertical_basis(num_feats: int) -> Tensor:
return generate_wavefront_basis(num_feats, [1.0, 0.0], 4.0)
def generate_diag_main_basis(num_feats: int) -> Tensor:
return generate_wavefront_basis(num_feats, [-1.0 / np.sqrt(2), 1.0 / np.sqrt(2)], 4.0 * np.sqrt(2))
def generate_anti_diag_basis(num_feats: int) -> Tensor:
return generate_wavefront_basis(num_feats, [1.0 / np.sqrt(2), 1.0 / np.sqrt(2)], 4.0 * np.sqrt(2))
def generate_wavefront_basis(num_feats: int, basis_block: List[float], period_length: float) -> Tensor:
period_coef = 2.0 * np.pi / period_length
basis = torch.tensor([basis_block]).repeat(num_feats, 1) # [num_feats, 2]
powers = torch.tensor([2]).repeat(num_feats).pow(torch.arange(num_feats)).unsqueeze(1) # [num_feats, 1]
result = basis * powers * period_coef # [num_feats, 2]
return result.float()
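# Size sketch for the logarithmic basis (illustrative only): a 64x64 resolution
# yields ceil(log2(64)) = 6 frequencies per direction across 4 directions.
def _logarithmic_basis_sketch():
    basis = generate_logarithmic_basis(64)
    assert basis.shape == (24, 2)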
@torch.no_grad()
def generate_random_coords_shifts(batch_size: int, period_length: float=4.0, device: str='cpu') -> Tensor:
"""
    Generates random horizontal shifts of the coordinates of up to one
    half-period in either direction. Our half-period occupies the range
    [-1, 1], so we add U[-1, 1] * 0.5 * period_length to the horizontal
    coordinates and leave the vertical coordinates unshifted.
"""
horizontal_shifts = (torch.rand(batch_size, device=device) - 0.5) * 2.0 * (0.5 * period_length) # [batch_size]
vertical_shifts = torch.zeros(batch_size, device=device) # Do not shift vertical coordinates
shifts = torch.cat([
horizontal_shifts.unsqueeze(1),
vertical_shifts.unsqueeze(1),
], dim=1).unsqueeze(2).unsqueeze(3).contiguous() # [batch_size, 2, 1, 1]
return shifts
@torch.no_grad()
def compute_freq_weight(iteration: int, freq_idx: int, time_to_reach_all: int, total_num_freqs: int) -> float:
progress_alpha: float = total_num_freqs * iteration / time_to_reach_all
weight = (1.0 - np.cos(np.pi * np.clip(progress_alpha - freq_idx, 0, 1))) * 0.5
return weight
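# Schedule sketch for compute_freq_weight (illustrative only): with 3
# frequencies and time_to_reach_all=300, frequency 0 eases in over the first
# 100 iterations following the cosine ramp above.
def _freq_weight_schedule_sketch():
    assert compute_freq_weight(0, 0, 300, 3) == 0.0
    assert abs(compute_freq_weight(50, 0, 300, 3) - 0.5) < 1e-9
    assert abs(compute_freq_weight(100, 0, 300, 3) - 1.0) < 1e-9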
def generate_shifted_coords(left_borders_idx: Tensor, img_size: int, grid_size: int, w_coord_dist: float, device='cpu') -> Tensor:
coords = generate_coords(len(left_borders_idx), img_size, device=device) # [b, 2, grid_size, grid_size]
# We need to convert left_borders_idx to coordinates shifts, knowing that
# the relative unshifted coordinate position of the left border is -1.0 (i.e. at w_left)
patch_size = img_size // grid_size
w_dist = int(0.5 * w_coord_dist * img_size) # distance in pixels
left_border_rel_pos = (left_borders_idx.to(torch.float32) * patch_size - w_dist) / w_dist # in [-1, 1 - grid_size/w_dist] range
shifts = left_border_rel_pos * w_coord_dist # [batch_size]
# Add +1 to account for left_border => center
shifts = shifts + 1.0
# Finally, shift the x-axis
coords[:, 0] = coords[:, 0] + shifts.unsqueeze(1).unsqueeze(2) # [b, grid_size, grid_size]
return coords
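# Shape sketch for generate_shifted_coords (illustrative only): shifts are
# applied along the x-axis only, so the output keeps the [b, 2, h, w] layout.
def _generate_shifted_coords_sketch():
    left_borders_idx = torch.tensor([0])
    coords = generate_shifted_coords(left_borders_idx, img_size=16, grid_size=4, w_coord_dist=2)
    assert coords.shape == (1, 2, 16, 16)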
|
examples/pybullet/gym/pybullet_envs/minitaur/robots/quadruped_base.py
|
felipeek/bullet3
| 9,136 |
77265
|
<reponame>felipeek/bullet3
# Lint as: python3
"""The base class for all quadrupeds."""
from typing import Any, Callable, Dict, Sequence, Tuple, Text, Union
import gin
import gym
import numpy as np
from pybullet_utils import bullet_client
from pybullet_envs.minitaur.envs_v2.sensors import sensor as sensor_lib
from pybullet_envs.minitaur.robots import hybrid_motor_model
from pybullet_envs.minitaur.robots import robot_base
from pybullet_envs.minitaur.robots import robot_config
from pybullet_envs.minitaur.robots import robot_urdf_loader
from pybullet_envs.minitaur.robots.safety import data_types as safety_data_types
from pybullet_envs.minitaur.robots.utilities import kinematics_utils
_UNIT_QUATERNION = (0, 0, 0, 1)
_GRAVITY_ACCELERATION_OFFSET = (0, 0, 10)
@gin.configurable
class QuadrupedBase(robot_base.RobotBase):
"""The basic quadruped class for both sim and real robots."""
def __init__(
self,
pybullet_client: bullet_client.BulletClient,
clock: Callable[..., float],
motor_control_mode: robot_config.MotorControlMode,
motor_limits: robot_config.MotorLimits,
motor_model_class: Any = hybrid_motor_model.HybridMotorModel,
action_filter: Any = None,
sensors: Sequence[sensor_lib.Sensor] = (),
safety_config: safety_data_types.SafetyConfig = None,
**kwargs,
):
"""Initializes the class.
Args:
pybullet_client: The PyBullet client.
clock: The sim or real clock. The clock function is typically provided by
the gym environment.
motor_control_mode: Specifies in which mode the motor operates.
motor_limits: The motor limits of the robot. Used by the motor_model_class
and action space building.
motor_model_class: The motor model to use. Not needed for real robots.
action_filter: The filter to smooth and/or regulate the actions.
sensors: All sensors mounted on the robot.
safety_config: The safety setting for the robot.
**kwargs: Additional args.
"""
self._pybullet_client = pybullet_client
self._clock = clock
self._motor_control_mode = motor_control_mode
self._motor_model_class = motor_model_class
self._motor_limits = motor_limits
self._action_space = None
self._action_names = None
self._action_filter = action_filter
self._sensors = sensors
self._safety_config = safety_config
self._urdf_loader = None
self._last_base_velocity = np.zeros(3)
self._last_observation_time = self._clock()
self._last_base_acceleration_world = np.zeros(3)
self._last_base_acceleration_accelerometer = np.zeros(3)
self.load()
def load(
self,
base_position: Tuple[float] = None,
base_orientation_quaternion: Tuple[float] = None,
joint_angles: Union[Dict[Text, float], Tuple[float]] = None,
):
"""Loads the URDF with the configured pose.
Args:
base_position: The base position after URDF loading. Will use the
configured pose in gin if None.
base_orientation_quaternion: The base orientation after URDF loading. Will
use the configured values in gin if not specified.
joint_angles: The desired joint angles after loading. Will use the
configured values if None.
"""
    # A robot specific pre-loading routine.
self._pre_load()
if not self._urdf_loader:
self._urdf_loader = robot_urdf_loader.RobotUrdfLoader(
pybullet_client=self._pybullet_client)
# Record the urdf pose at loading, which will be used as the rotation
# reference for base rotation computation.
self._init_urdf_position, self._init_orientation_quat = (
self._pybullet_client.getBasePositionAndOrientation(
self._urdf_loader.robot_id))
unused_position, self._init_orientation_inv_quat = (
self._pybullet_client.invertTransform(
position=(0, 0, 0), orientation=self._init_orientation_quat))
# Joint ids may be different from the motor ids.
self._joint_id_dict = self._urdf_loader.get_joint_id_dict()
for joint_id in self._joint_id_dict.values():
# Disables the default motors in PyBullet.
self._pybullet_client.setJointMotorControl2(
bodyIndex=self._urdf_loader.robot_id,
jointIndex=joint_id,
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=0)
# Removes the default joint damping in PyBullet.
self._pybullet_client.changeDynamics(
self._urdf_loader.robot_id,
joint_id,
linearDamping=0,
angularDamping=0)
    # We expect that this is non-empty for all quadrupeds, and should be an
# OrderedDict.
self._motor_id_dict = self._urdf_loader.get_motor_id_dict()
if not self._motor_id_dict:
raise ValueError("Motor id dict cannot be empty for quadrupeds.")
self._motor_ids = self._motor_id_dict.values()
self._num_motors = len(self._motor_id_dict)
self._build_action_space()
# Not needed for real robots.
if self._motor_model_class:
# TODO(b/151664871): Also supports position/velocity limits in the motor
# model.
self._motor_model = self._motor_model_class(
num_motors=self._num_motors,
motor_control_mode=self._motor_control_mode,
torque_lower_limits=self._motor_limits.torque_lower_limits,
torque_upper_limits=self._motor_limits.torque_upper_limits,
)
# Caches the variable for faster computation during stepping.
self._motor_direction_dict = self._urdf_loader.get_joint_direction_dict(
self._motor_id_dict.keys())
self._motor_directions = np.array(list(self._motor_direction_dict.values()))
self._motor_offset_dict = self._urdf_loader.get_joint_offset_dict(
self._motor_id_dict.keys())
self._motor_offsets = np.array(list(self._motor_offset_dict.values()))
# A robot specific routine post loading.
self._on_load()
# Robot sensors may use information from the class. So we initialize them
# after the loading is done.
for sensor in self._sensors:
sensor.set_robot(self)
def _build_action_space(self):
"""Builds the action space of the robot using the motor limits."""
if self._motor_control_mode == robot_config.MotorControlMode.POSITION:
self._action_space = gym.spaces.Box(
low=self._motor_limits.angle_lower_limits,
high=self._motor_limits.angle_upper_limits,
shape=(self._num_motors,),
dtype=np.float32) # TODO(b/159160184) Make dtype configurable.
self._action_names = tuple(
"POSITION_{}".format(motor) for motor in self._motor_id_dict.keys())
elif self._motor_control_mode == robot_config.MotorControlMode.TORQUE:
self._action_space = gym.spaces.Box(
low=self._motor_limits.torque_lower_limits,
high=self._motor_limits.torque_upper_limits,
shape=(self._num_motors,),
dtype=np.float32)
self._action_names = tuple(
"TORQUE_{}".format(motor) for motor in self._motor_id_dict.keys())
elif self._motor_control_mode == robot_config.MotorControlMode.HYBRID:
      hybrid_action_limits_low = [
          self._motor_limits.angle_lower_limits,     # q
          self._motor_limits.velocity_lower_limits,  # q_dot
          0,                                         # kp
          0,                                         # kd
          self._motor_limits.torque_lower_limits,    # tau
      ]
hybrid_action_limits_high = [
self._motor_limits.angle_upper_limits,
self._motor_limits.velocity_upper_limits, np.inf, np.inf,
self._motor_limits.torque_upper_limits
]
space_low = np.full(
(self._num_motors, robot_config.HYBRID_ACTION_DIMENSION),
hybrid_action_limits_low).ravel()
space_high = np.full(
(self._num_motors, robot_config.HYBRID_ACTION_DIMENSION),
hybrid_action_limits_high).ravel()
self._action_space = gym.spaces.Box(
low=space_low, high=space_high, dtype=np.float32)
self._action_names = tuple(
"HYBRID_{}".format(motor) for motor in self._motor_id_dict.keys())
else:
raise NotImplementedError("Not yet implemented!")
def _pre_load(self):
"""Robot specific pre load routine.
For example, this allows configuration of the URDF loader.
"""
pass
def _on_load(self):
"""Robot specific post load routine.
    For example, we need to add additional hinge constraints to the leg
components of Minitaur after loading.
"""
pass
@gin.configurable
def reset(
self,
base_position: Tuple[float] = None,
base_orientation_quaternion: Tuple[float] = None,
joint_angles: Union[Dict[Text, float], Tuple[float]] = None,
save_base_pose: bool = False,
**kwargs,
):
"""Resets the robot base and joint pose without reloading the URDF.
Base pose resetting only works for simulated robots or visualization of real
robots. This routine also updates the initial observation dict.
Args:
base_position: The desired base position. Will use the configured pose in
gin if None. Does not affect the position of the real robots in general.
base_orientation_quaternion: The base orientation after resetting. Will
use the configured values in gin if not specified.
joint_angles: The desired joint angles after resetting. Will use the
configured values if None.
save_base_pose: Save the base position and orientation as the default pose
after resetting.
**kwargs: Other args for backward compatibility. TODO(b/151975607): Remove
after migration.
"""
# Reset the robot's motor model.
self._motor_model.reset()
# Reset the quantities for computing base acceleration.
self._last_base_velocity = np.zeros(3)
self._last_observation_time = self._clock()
self._last_base_acceleration_world = np.zeros(3)
self._last_base_acceleration_accelerometer = np.zeros(3)
# Solves chicken and egg problem. We need to run a control step to obtain
# the first motor torques.
self._motor_torques = np.zeros(self._num_motors)
# Receives a set of observation from the robot in case the reset function
# needs to use them.
self.receive_observation()
self._reset_base_pose(base_position, base_orientation_quaternion)
self._reset_joint_angles(joint_angles)
if save_base_pose:
# Records the base pose at resetting again, in case Reset is called with a
# different base orientation. This base pose will be used as zero
# rotation reference for base rotation computation.
self._init_urdf_position, self._init_orientation_quat = (
self._pybullet_client.getBasePositionAndOrientation(
self._urdf_loader.robot_id))
unused_position, self._init_orientation_inv_quat = (
self._pybullet_client.invertTransform(
position=(0, 0, 0), orientation=self._init_orientation_quat))
# Updates the observation at the end of resetting.
self.receive_observation()
self._time_at_reset = self._clock()
def GetTimeSinceReset(self):
return self._clock() - self._time_at_reset
def _reset_base_pose(self, position=None, orientation_quat=None):
"""Resets the pose of the robot's base.
Base pose resetting only works for simulated robots or visualization of real
robots.
Args:
position: The desired base position. Will use the configured pose in gin
if None.
orientation_quat: The desired base rotation. Will use the configured
default pose in None.
"""
self._urdf_loader.reset_base_pose(position, orientation_quat)
def _reset_joint_angles(self,
joint_angles: Union[Tuple[float],
Dict[Text, float]] = None):
"""Resets the joint pose.
Real robots need to specify their routine to send joint angles. Simulated
    Minitaur robots also need to use dynamics to drive the motor joints, due to
the additional hinge joints not present in the URDF.
Args:
      joint_angles: The desired joint pose. If not provided, the robot's
        default pose from the configuration is used.
"""
# TODO(b/148897311): Supports tuple as the input.
self._urdf_loader.reset_joint_angles(joint_angles)
def terminate(self):
"""The safe exit routine for the robot.
Only implemented for real robots.
"""
pass
def step(self, action: Any, num_sub_steps: int = 1):
"""Steps the simulation.
This is maintained for backward compatibility with the old robot class.
Args:
action: The control command to be executed by the robot.
num_sub_steps: Each action can be applied (possibly with interpolation)
multiple timesteps, to simulate the elapsed time between two consecutive
commands on real robots.
"""
action = self.pre_control_step(action)
for _ in range(num_sub_steps):
# TODO(b/149252003): Add sub sampling.
self.apply_action(action)
# Timestep is pre-determined at simulation setup.
self._pybullet_client.stepSimulation()
self.receive_observation()
self.post_control_step()
def pre_control_step(self, action: Any, control_timestep: float = None):
"""Processes the action and updates per control step quantities.
Args:
action: The input control command.
control_timestep: The control time step in the environment.
TODO(b/153835005), we can remove this once we pass env to the robot.
Returns:
The filtered action.
"""
if self._action_filter:
# We assume the filter will create a set of interpolated results.
action = self._action_filter.filter(action)
return action
def apply_action(self, motor_commands, motor_control_mode=None):
# TODO(b/148897311): Supports dict in the future.
motor_commands = np.asarray(motor_commands)
# We always use torque based control at the lowest level for quadrupeds.
unused_observed_torques, actual_torques = (
self._motor_model.get_motor_torques(motor_commands, motor_control_mode))
self._motor_torques = actual_torques
# Converts the motor torques to URDF joint space, which may have different
# directions.
applied_motor_torques = np.multiply(actual_torques, self._motor_directions)
self._pybullet_client.setJointMotorControlArray(
bodyIndex=self._urdf_loader.robot_id,
jointIndices=self._motor_ids,
controlMode=self._pybullet_client.TORQUE_CONTROL,
forces=applied_motor_torques)
def _get_base_roll_pitch_yaw_rate(self):
_, angular_velocity = self._pybullet_client.getBaseVelocity(
self._urdf_loader.robot_id)
return kinematics_utils.rotate_to_base_frame(
self._pybullet_client, self.urdf_loader.robot_id, angular_velocity,
self._init_orientation_inv_quat)
def _get_base_velocity(self):
base_velocity, _ = self._pybullet_client.getBaseVelocity(
self._urdf_loader.robot_id)
return base_velocity
def _update_base_acceleration(self):
"""Update the base acceleration using finite difference."""
if self._last_observation_time < self.timestamp:
self._last_base_acceleration_world = (
np.array(self._base_velocity) - self._last_base_velocity) / (
self.timestamp - self._last_observation_time)
_, inv_base_orientation = self.pybullet_client.invertTransform(
np.zeros(3), np.array(self.base_orientation_quaternion))
# An offset is added to the acceleration measured in the world frame
# because the accelerometer reading is in the frame of free-falling robot.
base_acceleration_accelerometer = self.pybullet_client.multiplyTransforms(
np.zeros(3), inv_base_orientation,
self._last_base_acceleration_world + _GRAVITY_ACCELERATION_OFFSET,
_UNIT_QUATERNION)[0]
self._last_base_acceleration_accelerometer = np.array(
base_acceleration_accelerometer)
def receive_observation(self):
"""Receives the observations for all sensors."""
# Update the intrinsic values including the joint angles, joint
# velocities, and imu readings.
self._base_position, base_orientation_quat = (
self._pybullet_client.getBasePositionAndOrientation(
self._urdf_loader.robot_id))
_, self._base_orientation_quat = self._pybullet_client.multiplyTransforms(
positionA=(0, 0, 0),
orientationA=self._init_orientation_inv_quat,
positionB=(0, 0, 0),
orientationB=base_orientation_quat)
self._base_velocity = self._get_base_velocity()
self._base_roll_pitch_yaw = self._pybullet_client.getEulerFromQuaternion(
self._base_orientation_quat)
self._base_roll_pitch_yaw_rate = self._get_base_roll_pitch_yaw_rate()
self._joint_states = self._pybullet_client.getJointStates(
self._urdf_loader.robot_id, self._motor_ids)
self._motor_angles = np.array(
[joint_state[0] for joint_state in self._joint_states])
self._motor_angles = (self._motor_angles -
self._motor_offsets) * self._motor_directions
self._motor_velocities = np.array(
[joint_state[1] for joint_state in self._joint_states])
self._motor_velocities = self._motor_velocities * self._motor_directions
# We use motor models to track the delayed motor positions and velocities
# buffer.
if self._motor_model:
self._motor_model.update(self._clock(), self._motor_angles,
self._motor_velocities)
self._update_base_acceleration()
# Update the latest base velocity and timestamp at the end of the API.
self._last_base_velocity = np.array(self._base_velocity)
self._last_observation_time = self.timestamp
def post_control_step(self):
"""Called at the end of a control step outside the action repeat loop."""
pass
# TODO(tingnan): Change from "foot_positions" to "feet_positions".
def motor_angles_from_foot_positions(self,
foot_positions,
position_in_world_frame=False):
"""Use IK to compute the motor angles, given the feet links' positions.
Args:
foot_positions: The foot links' positions in frame specified by the next
parameter. The input is a numpy array of size (4, 3).
position_in_world_frame: Whether the foot_positions are specified in the
world frame.
Returns:
A tuple. The position indices and the angles for all joints along the
      leg. The position indices are consistent with the joint order as returned
      by the GetMotorAngles API.
"""
joint_position_idxs = np.arange(self.num_motors)
foot_link_ids = tuple(self._urdf_loader.get_end_effector_id_dict().values())
joint_angles = kinematics_utils.joint_angles_from_link_positions(
pybullet_client=self.pybullet_client,
urdf_id=self.robot_id,
link_positions=foot_positions,
link_ids=foot_link_ids,
joint_dof_ids=joint_position_idxs,
positions_are_in_world_frame=position_in_world_frame)
joint_angles = np.multiply(
np.asarray(joint_angles) - np.asarray(self._motor_offsets),
self._motor_directions)
return joint_position_idxs, joint_angles
# TODO(tingnan): Change from "foot_positions" to "feet_positions".
def foot_positions(self, position_in_world_frame=False):
"""Returns the robot's foot positions in the base/world frame."""
foot_positions = []
foot_link_ids = tuple(self._urdf_loader.get_end_effector_id_dict().values())
for foot_id in foot_link_ids:
if not position_in_world_frame:
foot_positions.append(
kinematics_utils.link_position_in_base_frame(
pybullet_client=self.pybullet_client,
urdf_id=self.robot_id,
link_id=foot_id,
))
else:
foot_positions.append(
kinematics_utils.link_position_in_world_frame(
pybullet_client=self.pybullet_client,
urdf_id=self.robot_id,
link_id=foot_id,
))
return np.array(foot_positions)
def feet_contact_forces(self) -> Sequence[np.ndarray]:
"""Gets the contact forces on all feet.
    Real robots may use a robot specific implementation. For example, the
Laikago will measure each contact force in the corresponding foot's local
frame, and this force will not be the total contact force due to the sensor
limitation.
    For simulated robots, we will always report the force in the base frame.
Returns:
A list of foot contact forces.
"""
foot_link_ids = tuple(self._urdf_loader.get_end_effector_id_dict().values())
contact_forces = [np.zeros(3) for _ in range(len(foot_link_ids))]
all_contacts = self._pybullet_client.getContactPoints(
bodyA=self._urdf_loader.robot_id)
for contact in all_contacts:
(unused_flag, body_a_id, body_b_id, link_a_id, unused_link_b_id,
unused_pos_on_a, unused_pos_on_b, contact_normal_b_to_a, unused_distance,
normal_force, friction_1, friction_direction_1, friction_2,
friction_direction_2) = contact
# Ignore self contacts
if body_b_id == body_a_id:
continue
if link_a_id in foot_link_ids:
normal_force = np.array(contact_normal_b_to_a) * normal_force
friction_force = np.array(friction_direction_1) * friction_1 + np.array(
friction_direction_2) * friction_2
force = normal_force + friction_force
local_force = kinematics_utils.rotate_to_base_frame(
self._pybullet_client, self.urdf_loader.robot_id, force,
self._init_orientation_inv_quat)
local_force_norm = np.linalg.norm(local_force)
toe_link_order = foot_link_ids.index(link_a_id)
if local_force_norm > 0:
contact_forces[toe_link_order] += local_force
else:
continue
return contact_forces
def compute_jacobian_for_one_leg(self, leg_id: int) -> np.ndarray:
"""Compute the Jacobian for a given leg.
Args:
leg_id: Index of the leg for which the jacobian is computed.
Returns:
      The 3 x N transposed Jacobian matrix, where N is the total DoFs of the
      robot. For a quadruped, the first 6 columns of the matrix correspond to
      the CoM translation and rotation. The columns corresponding to a leg can
      be extracted with indices [6 + leg_id * 3: 6 + leg_id * 3 + 3]. Note that
      the Jacobian is calculated for motors, which takes motor directions into
      consideration.
"""
com_dof = self._urdf_loader.com_dof
foot_link_ids = tuple(self._urdf_loader.get_end_effector_id_dict().values())
return kinematics_utils.compute_jacobian(
pybullet_client=self.pybullet_client,
urdf_id=self.robot_id,
link_id=foot_link_ids[leg_id],
all_joint_positions=[
state[0] for state in self._joint_states
]) * np.concatenate([np.ones(com_dof), self._motor_directions])
def map_contact_force_to_joint_torques(
self, leg_id: int, contact_force: np.ndarray) -> Dict[int, float]:
"""Maps the foot contact force to the leg joint torques.
Args:
leg_id: Index of the leg for which the jacobian is computed.
      contact_force: Desired contact force exerted by the leg.
Returns:
A dict containing the torques for each motor on the leg.
"""
foot_link_ids = tuple(self._urdf_loader.get_end_effector_id_dict().values())
jv = self.compute_jacobian_for_one_leg(leg_id)
all_motor_torques = np.matmul(contact_force, jv)
motor_torques = {}
motors_per_leg = self.num_motors // len(foot_link_ids)
com_dof = self._urdf_loader.com_dof
for joint_id in range(leg_id * motors_per_leg,
(leg_id + 1) * motors_per_leg):
motor_torques[joint_id] = all_motor_torques[com_dof + joint_id]
return motor_torques
@classmethod
def get_constants(cls):
raise NotImplementedError("Not yet implemented!")
@property
def timestamp(self):
return self._clock()
@property
def action_space(self):
return self._action_space
@property
def action_names(self):
return self._action_names
@property
def base_orientation_quaternion(self):
"""Gets the base orientation as a quaternion.
The base orientation is always relative to the init_orientation, which
can be updated by Reset function. This is necessary as many URDF can have an
internal frame that is not z-up, so if we don't provide an init_orientation
(through Reset), the loaded robot can have its belly facing the horizontal
direction.
Returns:
The base orientation in quaternion.
"""
return self._base_orientation_quat
@property
def base_orientation_quaternion_default_frame(self):
"""Gets the base orientation in the robot's default frame.
This is the base orientation in whatever frame the robot specifies. For
simulated robot this is the URDF's internal frame. For real robot this can
be based on the rpy reading determined by the IMU.
Returns:
The base orientation in quaternion in a robot default frame.
"""
_, base_orientation_quat = (
self._pybullet_client.getBasePositionAndOrientation(
self._urdf_loader.robot_id))
return base_orientation_quat
@property
def sensors(self):
return self._sensors
@property
def base_roll_pitch_yaw(self):
return self._base_roll_pitch_yaw
@property
def base_roll_pitch_yaw_rate(self):
return self._base_roll_pitch_yaw_rate
@property
def base_position(self):
return self._base_position
@property
def base_velocity(self):
return self._base_velocity
@property
def is_safe(self):
return True
@property
def num_motors(self):
return self._num_motors
@property
def motor_model(self):
return self._motor_model
@property
def motor_limits(self) -> robot_config.MotorLimits:
return self._motor_limits
@property
def motor_angles(self):
return self._motor_angles
@property
def motor_velocities(self):
return self._motor_velocities
@property
def motor_torques(self):
return self._motor_torques
@property
def pybullet_client(self):
return self._pybullet_client
@property
def urdf_loader(self):
return self._urdf_loader
@property
def robot_id(self):
return self._urdf_loader.robot_id
@property
def initital_orientation_inverse_quaternion(self):
return self._init_orientation_inv_quat
@property
def base_acceleration_accelerometer(self):
"""Get the base acceleration measured by an accelerometer.
The acceleration is measured in the local frame of a free-falling robot,
which is consistent with the robot's IMU measurements. Here the
gravitational acceleration is first added to the acceleration in the world
frame, which is then converted to the local frame of the robot.
"""
return np.array(self._last_base_acceleration_accelerometer)
@property
def base_acceleration(self):
"""Get the base acceleration in the world frame."""
return np.array(self._last_base_acceleration_world)
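# Layout sketch (illustrative only, not part of the original class): mirrors
# how _build_action_space tiles per-motor [q, q_dot, kp, kd, tau] limits into
# a flat HYBRID action space, using toy scalar limits and 2 motors.
def _hybrid_limits_layout_sketch():
  low = [-1.0, -10.0, 0.0, 0.0, -5.0]  # q, q_dot, kp, kd, tau
  space_low = np.full((2, 5), low).ravel()
  assert space_low.tolist() == low + low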
|
skeleton/OUTPUT-skel.py
|
tt1379/mastiff
| 164 |
77268
|
#!/usr/bin/env python
"""
Copyright 2012-2013 The MASTIFF Project, All Rights Reserved.
This software, having been partly or wholly developed and/or
sponsored by KoreLogic, Inc., is hereby released under the terms
and conditions set forth in the project's "README.LICENSE" file.
For a list of all contributors and sponsors, please refer to the
project's "README.CREDITS" file.
"""
__doc__ = """
Output plugin skeleton code
Purpose:
This file provides the skeleton code for a plugin that formats the data
generated by the analysis plug-ins. This is an example that shows all
functions defined.
__init__(): MANDATORY: Any initialization code the plugin requires. It must
also call the __init__ for masOutput.MastiffOutputPlugin.
activate(): OPTIONAL: Activation code called by Yapsy to activate the plugin.
deactivate(): OPTIONAL: Deactivation code called by Yapsy to deactivate the plugin.
output(config, data): MANDATORY: Function that formats the data from analysis
plug-ins into a specific format. Receives the MASTIFF configuration
as the config parameter, and the pages of data in the data
parameter.
"""
__version__ = "$Id: 960d687e79158fbba349a472f85ff2b75d8c9bb1 $"
import logging
import mastiff.plugins.output as masOutput
class OUTPUTSkeleton(masOutput.MastiffOutputPlugin):
"""Raw output plugin.."""
def __init__(self):
"""Initialize the plugin."""
masOutput.MastiffOutputPlugin.__init__(self)
def activate(self):
"""Activate the plugin."""
masOutput.MastiffOutputPlugin.activate(self)
def deactivate(self):
"""Deactivate the plugin."""
masOutput.MastiffOutputPlugin.deactivate(self)
def output(self, config, data):
log = logging.getLogger('Mastiff.Plugins.Output.' + self.name)
# see if we are enabled
if config.get_bvar(self.name, 'enabled') is False:
log.debug('Disabled. Exiting.')
return True
log.info('Writing FORMAT output.')
# loop through category data
for cats, catdata in data[data.keys()[0]].iteritems():
catstr = '{} Category Analysis Results'.format(cats)
log.debug('Writing {} results.'.format(cats))
# loop through plugin data and generate the output text
for plugin, pages in catdata.iteritems():
# process the page data into the specific format and
# output it to the appropriate file/files
# loop through each table in the page
                for tabledata in sorted(pages, key=lambda page: page[2]):
(title, mytable, index) = tabledata
# process table data here
for row in mytable:
# act on row data
# (REMOVE THE NEXT LINE)
pass
return True
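# Illustrative sketch (not part of the skeleton): the nesting that output()
# iterates over is file -> category -> plugin -> pages, where each page is a
# (title, table, index) tuple; the sample values below are hypothetical.
def _example_data():
    return {
        'sample.exe': {
            'Generic': {
                'file-info': [('File Info', [['size', 1024]], 0)],
            },
        },
    }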
|
tests/views/test_admin_statistics.py
|
priyanshu-kumar02/personfinder
| 561 |
77278
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import model
import view_tests_base
class AdminStatisticsViewTests(view_tests_base.ViewTestsBase):
def setUp(self):
super(AdminStatisticsViewTests, self).setUp()
self.data_generator.repo()
self.counter = model.UsageCounter.create('haiti')
self.login_as_manager()
def get_page_doc(self):
return self.to_doc(self.client.get('/global/admin/statistics/',
secure=True))
def test_person_counter(self):
self.counter.person = 3
self.counter.put()
doc = self.get_page_doc()
assert 'haiti' in doc.text
assert '# Persons' in doc.text
assert doc.cssselect_one('#haiti-persons').text == '3'
def test_note_counter(self):
self.counter.note = 5
self.counter.unspecified = 5
self.counter.put()
doc = self.get_page_doc()
assert 'haiti' in doc.text
assert '# Note' in doc.text
assert doc.cssselect_one('#haiti-notes').text == '5'
assert doc.cssselect_one('#haiti-num_notes_unspecified').text == '5'
def test_is_note_author_counter(self):
self.counter.note = 1
self.counter.is_note_author = 1
self.counter.put()
doc = self.get_page_doc()
assert doc.cssselect_one('#haiti-num_notes_is_note_author').text == '1'
def test_status_counter(self):
def set_counter_and_check(status_name, num):
setattr(self.counter, status_name, num)
self.counter.put()
doc = self.get_page_doc()
assert 'haiti' in doc.text
assert status_name in doc.text
assert doc.cssselect_one(
'#haiti-num_notes_%s' % status_name).text == str(num)
set_counter_and_check('is_note_author', 3)
set_counter_and_check('believed_alive', 5)
set_counter_and_check('believed_dead', 2)
set_counter_and_check('believed_missing', 4)
set_counter_and_check('information_sought', 6)
|
submit/base_submission.py
|
ABCDa102030/GoBigger-Challenge-2021
| 121 |
77292
|
class BaseSubmission:
def __init__(self, team_name, player_names):
self.team_name = team_name
self.player_names = player_names
def get_actions(self, obs):
'''
Overview:
You must implement this function.
'''
raise NotImplementedError
|
pyEX/timeseries/__init__.py
|
brandonhawi/pyEX
| 335 |
77326
|
# *****************************************************************************
#
# Copyright (c) 2021, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from .timeseries import (
timeSeries,
timeSeriesAsync,
timeSeriesDF,
timeSeriesInventory,
timeSeriesInventoryAsync,
timeSeriesInventoryDF,
)
|
hendrix/contrib/concurrency/signals.py
|
bliedblad/hendrix
| 309 |
77347
|
<reponame>bliedblad/hendrix
"""
Signals for easy use in django projects
"""
try:
from django import dispatch
short_task = dispatch.Signal(providing_args=["args", "kwargs"])
long_task = dispatch.Signal(providing_args=["args", "kwargs"])
message_signal = dispatch.Signal(providing_args=["data", "dispatcher"])
USE_DJANGO_SIGNALS = True
except ImportError:
USE_DJANGO_SIGNALS = False
|
bin/plist.py
|
NaturalHistoryMuseum/inselect
| 128 |
77361
|
<filename>bin/plist.py
#!/usr/bin/env python3
"""Alter Inselect's existing plist file
"""
# https://developer.apple.com/library/mac/documentation/Carbon/Conceptual/LaunchServicesConcepts/LSCConcepts/LSCConcepts.html#//apple_ref/doc/uid/TP30000999-CH202-CIHHEGGE
import plistlib
import sys
import inselect
plist = plistlib.readPlist(sys.argv[1])
plist['CFBundleShortVersionString'] = inselect.__version__
plist['CFBundleDisplayName'] = 'Inselect'
plist['CFBundleDocumentTypes'] = [{
'CFBundleTypeName': 'Inselect document',
'CFBundleTypeIconFile': 'inselect.icns',
'CFBundleTypeExtensions': ['inselect'],
'CFBundleTypeRole': 'Editor',
'LSTypeIsPackage': 'False',
}]
plistlib.writePlist(plist, sys.argv[1])
|
dapper/mods/Lorenz63/__init__.py
|
yoavfreund/DAPPER
| 225 |
77362
|
"""The classic exhibitor of chaos, consisting of 3 coupled ODEs.
The ODEs are derived by modelling, with many simplifications,
the fluid convection between horizontal plates with different temperatures.
Its phase-plot (with typical param settings) looks like a butterfly.
See demo.py for more info.
"""
import numpy as np
import dapper.mods as modelling
from .extras import LPs, d2x_dtdx, dstep_dx
# Constants
sig = 10.0
rho = 28.0
beta = 8.0/3
# Suggested values
x0 = np.array([1.509, -1.531, 25.46])
Tplot = 4.0
@modelling.ens_compatible
def dxdt(x):
"""Evolution equation (coupled ODEs) specifying the dynamics."""
x, y, z = x
dx = sig*(y - x)
dy = rho*x - y - x*z
dz = x*y - beta*z
return np.array([dx, dy, dz])
step = modelling.with_rk4(dxdt, autonom=True)
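# Illustrative sketch (not part of the original module): a hand-rolled RK4
# loop over dxdt, mirroring what modelling.with_rk4 provides as `step` above;
# assumes dxdt accepts a single (3,) state vector as documented.
def _rk4_integrate_sketch(T=1.0, dt=0.01):
    x = x0.copy()
    for _ in range(int(T / dt)):
        k1 = dxdt(x)
        k2 = dxdt(x + 0.5 * dt * k1)
        k3 = dxdt(x + 0.5 * dt * k2)
        k4 = dxdt(x + dt * k3)
        x = x + (dt / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
    return x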
|
python/shopping/content/accounts/add_user.py
|
akgarchi/googleads-shopping-samples
| 149 |
77372
|
<filename>python/shopping/content/accounts/add_user.py
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds a user to the primary account."""
from __future__ import print_function
import sys
from shopping.content import common
def main(argv):
# Authenticate and construct service.
service, config, _ = common.init(argv, __doc__)
merchant_id = config['merchantId']
email = config.get('accountSampleUser')
if not email:
print('Must specify the user email to add in the samples configuration.')
sys.exit(1)
# First we need to retrieve the existing set of users.
response = service.accounts().get(
merchantId=merchant_id, accountId=merchant_id).execute()
account = response
# Add new user to existing user list.
new_user = {'emailAddress': email, 'admin': False}
account['users'].append(new_user)
# Update account with new user list.
response = service.accounts().update(
merchantId=merchant_id, accountId=merchant_id, body=account).execute()
print('Account %s was added to merchant ID %d' % (email, merchant_id))
if __name__ == '__main__':
main(sys.argv)
|
vec4ir/base.py
|
bees4ever/vec4ir
| 211 |
77401
|
<reponame>bees4ever/vec4ir<gh_stars>100-1000
#!/usr/bin/env python
# coding: utf-8
"""
File: base.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/lgalke
Description: Base classed for (embedding-based) retrieval.
"""
from timeit import default_timer as timer
from abc import abstractmethod
from collections import defaultdict
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.exceptions import NotFittedError
from joblib import Parallel, delayed
import scipy.sparse as sp
import numpy as np
import pandas as pd
try:
from . import rank_metrics as rm
from .utils import argtopk
from .combination import CombinatorMixin
except (SystemError, ValueError):
from combination import CombinatorMixin
from utils import argtopk
import rank_metrics as rm
def f1_score(precision, recall):
"""
Computes the harmonic mean of precision and recall (f1 score)
"""
if precision == 0 and recall == 0:
return 0
return 2 * precision * recall / (precision + recall)
def harvest(source, query_id, doc_id=None, default=0):
""" harvest source for either a sorted list of relevancy scores for a given
query id or a relevance score for a queryid, docid pair)
Arguments:
source -- {pandas data frame, list of dicts, ndarray}
query_id -- the query id to harvest the answers for
Keyword Arguments:
doc_id -- if None, return sorted relevancy scores for query with
query_id
default -- default value if no relevance score is available in source
>>> ll = [[2,3,4,5],[22,33,42,55]]
>>> harvest(ll, 1, 2)
42
>>> harvest(ll, 1, -42, 1337)
1337
>>> sorted(harvest(ll, 1), reverse=True)
[55, 42, 33, 22]
>>> nda = np.array(ll)
>>> harvest(nda, 1, 2)
42
>>> harvest(nda, 1, -42, 1337)
1337
>>> ld = [{"d1":2,"d2":3,"d3":4,"d4":5},{"d1":22,"d2":33,"d3":42,"d4":55}]
>>> harvest(ld, 1, "d3")
42
>>> harvest(ld, 1, "fail", 1337)
1337
>>> sorted(harvest(ld, 0))
[2, 3, 4, 5]
"""
is_pd = isinstance(source, pd.Series)
is_dict = isinstance(source, dict)
if doc_id is None:
# Return sorted list of relevance scores for that query
if is_pd or is_dict:
# source is pandas series or dict
scores = source.get(query_id)
else:
# source is ndarray or list
scores = source[query_id]
if isinstance(scores, dict):
scores = np.asarray(list(scores.values()))
else:
scores = np.asarray(scores)
return scores
else:
# Return relevance score for the respective (query, document) pair
# try: # pandas multi index df
if is_pd:
score = source.get((query_id, doc_id), default)
else:
# default dict or ndarray
scores = source[query_id]
# no special treatment for ndarray since we want to raise exception
# when query id is out of bounds
try:
# ok now if scores provides a get
# (pandas or dict), lets use it:
score = scores.get(doc_id, default)
except AttributeError:
# try the brute force access
try:
score = scores[doc_id]
                    # note: with numpy indexing, only integer
                    # doc ids are allowed, of course.
except IndexError:
score = default
return score
def filter_none(some_list):
""" Just filters None elements out of a list """
old_len = len(some_list)
new_list = [l for l in some_list if l is not None]
diff = old_len - len(new_list)
return new_list, diff
def pad(r, k, padding=0):
""" pads relevance scores with zeros to given length """
r += [padding] * (k - len(r)) # python magic for padding
return r
def match_bool_or(X, q):
"""
Arguments
---------
X : ndarray of shape (documents, terms)
q : ndarray of shape (1, terms)
Returns
-------
An indicator array with matching indices
>>> X = np.array([[0,0,1], [0,1,0], [0,1,1], [1,0,0], [1,0,1], [1,1,0]])
>>> match_bool_or(X, np.array([[0,0,0]]))
array([], dtype=int64)
>>> match_bool_or(X, np.array([[0,0,1]]))
array([0, 2, 4])
>>> match_bool_or(X, np.array([[0,1,0]]))
array([1, 2, 5])
>>> match_bool_or(X, np.array([[0,1,1]]))
array([0, 1, 2, 4, 5])
>>> match_bool_or(X, np.array([[1,0,0]]))
array([3, 4, 5])
>>> match_bool_or(X, np.array([[1,0,1]]))
array([0, 2, 3, 4, 5])
>>> match_bool_or(X, np.array([[1,1,0]]))
array([1, 2, 3, 4, 5])
>>> match_bool_or(X, np.array([[1,1,1]]))
array([0, 1, 2, 3, 4, 5])
"""
    inverted_index = X.T
    query_terms = q.nonzero()[1]
    matching_terms = inverted_index[query_terms, :]
    matching_doc_indices = np.unique(matching_terms.nonzero()[1])
    return matching_doc_indices
def _checkXy(X, y):
if y is not None and len(X) != len(y):
raise ValueError("Shapes of X and y do not match.")
class Matching(BaseEstimator):
"""Typical Matching Operation of Retrieval Systems"""
def __init__(self, match_fn=match_bool_or, binary=True, dtype=np.bool_,
**cv_params):
"""initializes a Matching object
:match_fn: A matching function of signature `docs, query`
-> indices of matching docs
:binary: Store only binary term occurrences.
:dtype: Data type of internal feature matrix
:cv_params: Parameter for the count vectorizer such as lowercase=True
"""
self._match_fn = match_fn
self._vect = CountVectorizer(binary=binary, dtype=dtype,
**cv_params)
def fit(self, X):
cv = self._vect
self._fit_X = cv.fit_transform(X) # fit internal countvectorizer
return self
def predict(self, query):
cv, match_fn, fit_X = self._vect, self._match_fn, self._fit_X
        q = cv.transform([query])
ind = match_fn(fit_X, q)
return ind
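# Illustrative usage sketch (not part of the original module): boolean-OR
# matching returns the indices of all documents sharing a term with the query.
def _matching_sketch():
    docs = ["the quick brown fox", "jumps over the lazy dog"]
    matching = Matching().fit(docs)
    assert matching.predict("fox").tolist() == [0]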
class RetrievalBase(BaseEstimator):
"""
Provides:
_fit_X : the source documents
_inv_X : the (pseudo-) inverted index
_y: the document ids
such that _fit_X[i] ~ _inv_X[i] ~ _y[i] corresponds to each other.
_matching(Xquery) : returns the matching subset of _fit_X
For subclassing, the query method should return doc ids which are stored in
_y.
>>> retrieval = RetrievalBase()
>>> retrieval._init_params()
>>> docs = ["the quick brown fox", "jumps over the lazy dog"]
>>> _ = retrieval._fit(docs, [0,1])
>>> retrieval._inv_X.dtype
dtype('bool')
>>> retrieval.n_docs
2
>>> retrieval._inv_X.shape
(2, 8)
>>> retrieval._y.shape
(2,)
>>> ind = retrieval._matching( "fox" )
>>> print(ind.shape)
(1,)
>>> str(docs[ind[0]])
'the quick brown fox'
>>> ind
array([0], dtype=int32)
>>> len(retrieval._matching( "brown dog" ))
2
"""
@abstractmethod
def __init__(self, **kwargs):
self._init_params(**kwargs)
def _init_params(self,
name=None,
match_fn='term',
binary=True,
dtype=np.bool_,
**kwargs):
# reasonable defaults for indexing use case
self._match_fn = match_bool_or if match_fn == 'term' else match_fn
self._cv = CountVectorizer(binary=binary, dtype=dtype, **kwargs)
self.name = name
def _fit(self, X, y=None):
"""
learn vocab and construct (pseudo-inverted) index
"""
_checkXy(X, y)
cv = self._cv
self._inv_X = cv.fit_transform(X)
# self._fit_X = np.asarray(X)
n_docs = len(X)
self._y = np.arange(n_docs) if y is None else np.asarray(y)
self.n_docs = n_docs
return self
def fit(self, X, y=None):
return self._fit(X, y)
def _partial_fit(self, X, y=None):
_checkXy(X, y)
# update index
self._inv_X = sp.vstack([self._inv_X, self._cv.transform(X)])
# update source
# self._fit_X = np.hstack([self._fit_X, np.asarray(X)])
# try to infer viable doc ids
if y is None:
next_id = np.amax(self._y) + 1
y = np.arange(next_id, next_id + len(X))
else:
y = np.asarray(y)
self._y = np.hstack([self._y, y])
self.n_docs += len(X)
return self
def partial_fit(self, X, y=None):
self._partial_fit(X, y)
def _matching(self, query):
match_fn = self._match_fn
_X = self._inv_X
q = self._cv.transform(np.asarray([query]))
# q = self._cv.transform(query)
ind = match_fn(_X, q)
return ind
def process_and_evaluate(model, X, Y, k, n_jobs=1):
"""
Arguments:
X : query_id, query pairs
Y : dict of dicts (harvestable)
k : int how many to retrieve
"""
print("Starting query time with %d jobs" % n_jobs)
    # TODO: can we unzip Y and only pass the chunk of y that each job
    # needs to harvest?
qids_rs = Parallel(n_jobs=n_jobs)(delayed(process_query)(model, x, Y, k)
for x in X)
print("Evaluating the results:")
scores = evaluate_results(qids_rs, Y, k)
return scores
def process_query(model, x, Y, k):
""" Processes one query, good for parallelization
:x: pair of query_id, query
:Y: dict of dict - like of relevance with query_id's as key in outer dict
and doc_ids as keys of inner dict
"""
qid, query = x
# t0 = timer()
print("{} : {}", qid, query)
result = model.query(query, k=k)
if k is not None:
result = result[:k]
# values["time_per_query"].append(timer() - t0)
relevance = [harvest(Y, qid, docid) for docid in result]
return (qid, np.asarray(relevance))
def evaluate_results(qids_rs, Y, k):
values = defaultdict(list)
for qid, r in qids_rs:
gold = harvest(Y, qid)
gold_topk = gold[argtopk(gold, k)]
R = np.count_nonzero(gold_topk)
# real ndcg
idcg = rm.dcg_at_k(gold_topk, k)
ndcg = rm.dcg_at_k(r, k) / idcg
values["ndcg"].append(ndcg)
# Verified
# MAP@k
ap = rm.average_precision(r)
values["MAP"].append(ap)
# MRR - compute by hand
ind = np.asarray(r).nonzero()[0]
mrr = (1. / (ind[0] + 1)) if ind.size else 0.
values["MRR"].append(mrr)
# R precision
        # R = min(R, k)  # fair: you can't get more than k relevant in the top k
        # no longer needed, since we chop off the remainder before computing R
recall = rm.recall(r, R)
values["recall"].append(recall)
# precision = rm.precision_at_k(pad(scored_result, k), k)
precision = rm.precision(r)
values["precision"].append(precision)
f1 = f1_score(precision, recall)
values["f1_score"].append(f1)
# Safe variant does not fail if len(r) < k
p_at_5 = rm.safe_precision_at_k(r, 5)
values["precision@5"].append(p_at_5)
p_at_10 = rm.safe_precision_at_k(r, 10)
values["precision@10"].append(p_at_10)
return values
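# Hedged sanity-check sketch for evaluate_results (not in the original
# module; the query id and judgments are made up, and `rm` is assumed to be
# the ranking-metrics module already imported above):
#
#   qids_rs = [("q1", np.asarray([1, 0, 1]))]
#   Y = {"q1": {"d1": 1, "d2": 0, "d3": 1}}
#   scores = evaluate_results(qids_rs, Y, k=3)
#   print(np.mean(scores["ndcg"]), np.mean(scores["MAP"]))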
class RetriEvalMixin():
@abstractmethod
def __init__(self, **kwargs):
pass
@abstractmethod
    def query(self, X, k=None):
pass
def evaluate(self, X, Y, k=20, verbose=0, replacement=0, n_jobs=1):
"""
:X: [(qid, str)] query id, query string pairs
:Y: pandas dataseries with qid,docid index or [dict]
:k: Limit the result for all metrics to this value, the models are also
given a hint of how many they should return.
        :replacement: 0 means that (query, doc) pairs not present in Y are
        considered non-relevant; None means that those pairs are skipped
        entirely.
"""
# rs = []
# if n_jobs > 1:
# return process_and_evaluate(self, X, Y, k, n_jobs)
values = defaultdict(list)
for qid, query in X:
# execute query
if verbose > 0:
print(qid, ":", query)
t0 = timer()
# if replacement is None, we need to drop after querying
result = self.query(query, k=(None if replacement is None else k))
values["time_per_query"].append(timer() - t0)
# if verbose > 0:
# print(result[:k])
# result = result[:k] # TRIM HERE
# soak the generator
scored_result = [harvest(Y, qid, docid, replacement)
for docid in result]
if replacement is None:
scored_result, notfound = filter_none(scored_result)
values["gold_not_found"].append(notfound)
if k is not None:
# dont let the models cheat by returning more than k
r = scored_result[:k]
else:
# if k is None, consider all
r = scored_result
# if verbose > 0:
# print(r)
# gold = np.array(list(Y[qid].values()))
gold = harvest(Y, qid)
            import sys
            topk_indices = argtopk(gold, k)
            if verbose > 0:
                print(topk_indices, file=sys.stderr)
gold_topk = gold[topk_indices]
# print('Top k in gold standard:', gold_topk, file=sys.stderr)
R = np.count_nonzero(gold_topk)
if verbose > 0:
print("Retrieved {} relevant out of {} possible."
.format(np.count_nonzero(r), R))
# real ndcg
idcg = rm.dcg_at_k(gold_topk, k)
ndcg = rm.dcg_at_k(scored_result, k) / idcg
values["ndcg"].append(ndcg)
# Verified
# MAP@k
ap = rm.average_precision(r)
values["MAP"].append(ap)
# MRR - compute by hand
ind = np.asarray(r).nonzero()[0]
mrr = (1. / (ind[0] + 1)) if ind.size else 0.
values["MRR"].append(mrr)
# R precision
            # R = min(R, k)  # fair: you can't get more than k relevant in the top k
            # no longer needed, since we chop off the remainder before computing R
recall = rm.recall(r, R)
values["recall"].append(recall)
# precision = rm.precision_at_k(pad(scored_result, k), k)
precision = rm.precision(r)
values["precision"].append(precision)
f1 = f1_score(precision, recall)
values["f1_score"].append(f1)
# Safe variant does not fail if len(r) < k
p_at_5 = rm.safe_precision_at_k(r, 5)
values["precision@5"].append(p_at_5)
p_at_10 = rm.safe_precision_at_k(r, 10)
values["precision@10"].append(p_at_10)
# rs.append(r)
if verbose > 0:
# print("Precision: {:.4f}".format(precision))
# print("Recall: {:.4f}".format(recall))
# print("F1-Score: {:.4f}".format(f1))
print("AP: {:.4f}".format(ap))
print("RR: {:.4f}".format(mrr))
print("NDCG: {:.4f}".format(ndcg))
return values
class Tfidf(TfidfVectorizer, CombinatorMixin):
def __init__(self, analyzer='word', use_idf=True):
TfidfVectorizer.__init__(self, analyzer=analyzer, use_idf=use_idf,
norm='l2')
self._fit_X = None
def fit(self, X):
Xt = super().fit_transform(X)
self._fit_X = Xt
return self
def query(self, query, k=None, indices=None, return_scores=False, sort=True):
if self._fit_X is None:
raise NotFittedError
q = super().transform([query])
if indices is not None:
fit_X = self._fit_X[indices]
else:
fit_X = self._fit_X
# both fit_X and q are l2-normalized
D = linear_kernel(q, fit_X)
ind = argtopk(D[0], k) if sort else np.arange(D.shape[1])
if return_scores:
return ind, D[0,ind]
else:
return ind
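# Hedged usage sketch for Tfidf (toy corpus; the exact scores depend on the
# vectorizer vocabulary):
#
#   tfidf = Tfidf().fit(["the quick brown fox", "jumps over the lazy dog"])
#   ind, scores = tfidf.query("brown fox", k=1, return_scores=True)
#   # ind -> array([0]); scores[0] is the cosine similarity to document 0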
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Trakttv.bundle/Contents/Libraries/Shared/plex_database/models/media_part.py
|
disrupted/Trakttv.bundle
| 1,346 |
77404
|
from plex_database.core import db
from plex_database.models.directory import Directory
from plex_database.models.media_item import MediaItem
from peewee import *
class MediaPart(Model):
class Meta:
database = db
db_table = 'media_parts'
media_item = ForeignKeyField(MediaItem, null=True, related_name='media_parts')
directory = ForeignKeyField(Directory, null=True, related_name='media_parts')
hash = CharField(null=True)
open_subtitle_hash = CharField(null=True)
file = CharField(null=True)
index = IntegerField(null=True)
size = BigIntegerField(null=True)
duration = IntegerField(null=True)
created_at = DateTimeField(null=True)
updated_at = DateTimeField(null=True)
deleted_at = DateTimeField(null=True)
extra_data = CharField(null=True)
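# Hedged usage sketch (assumes the connection in plex_database.core.db has
# been initialized by the host application; field choices are illustrative):
#
#   parts = (MediaPart
#            .select(MediaPart.file, MediaPart.size)
#            .where(MediaPart.deleted_at.is_null(True)))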
|
python/task_worker.py
|
renato145/zmq.rs
| 748 |
77407
|
# encoding: utf-8
#
# Task worker - design 2
# Adds pub-sub flow to receive and respond to kill signal
#
# Author: <NAME> (brainsik) <spork(dash)zmq(at)theory(dot)org>
#
import sys
import time
import zmq
context = zmq.Context()
# Socket to receive messages on
receiver = context.socket(zmq.PULL)
receiver.connect("tcp://localhost:5557")
# Socket to send messages to
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:5558")
# Socket for control input
controller = context.socket(zmq.SUB)
controller.connect("tcp://localhost:5559")
controller.setsockopt(zmq.SUBSCRIBE, b"")
# Process messages from receiver and controller
poller = zmq.Poller()
poller.register(receiver, zmq.POLLIN)
poller.register(controller, zmq.POLLIN)
# Process messages from both sockets
while True:
socks = dict(poller.poll())
if socks.get(receiver) == zmq.POLLIN:
message = receiver.recv_string()
# Process task
workload = int(message) # Workload in msecs
# Do the work
time.sleep(workload / 1000.0)
# Send results to sink
sender.send_string(message)
# Simple progress indicator for the viewer
sys.stdout.write(".")
sys.stdout.flush()
# Any waiting controller command acts as 'KILL'
if socks.get(controller) == zmq.POLLIN:
break
print("Done")
# Finished
receiver.close()
sender.close()
controller.close()
context.term()
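# Hedged companion sketch (not part of this file): the controller side that
# stops the workers above -- any message published on port 5559 acts as 'KILL'.
#
#   import zmq
#   ctx = zmq.Context()
#   controller = ctx.socket(zmq.PUB)
#   controller.bind("tcp://*:5559")
#   controller.send(b"")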
|
arviz/data/io_json.py
|
sudojarvis/arviz
| 1,159 |
77413
|
<filename>arviz/data/io_json.py
"""Input and output support for data."""
from .io_dict import from_dict
try:
import ujson as json
except ImportError:
    # ujson not available, fall back to the standard library json
# mypy struggles with conditional imports expressed as catching ImportError:
# https://github.com/python/mypy/issues/1153
import json # type: ignore
def from_json(filename):
"""Initialize object from a json file.
Will use the faster `ujson` (https://github.com/ultrajson/ultrajson) if it is available.
Parameters
----------
filename : str
location of json file
Returns
-------
InferenceData object
"""
with open(filename, "rb") as file:
idata_dict = json.load(file)
return from_dict(**idata_dict, save_warmup=True)
def to_json(idata, filename):
"""Save dataset as a json file.
Will use the faster `ujson` (https://github.com/ultrajson/ultrajson) if it is available.
    WARNING: Only idempotent when `idata` is an InferenceData object.
Parameters
----------
idata : InferenceData
Object to be saved
filename : str
        name or path of the file to save the trace to
Returns
-------
str
filename saved to
"""
file_name = idata.to_json(filename)
return file_name
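# Hedged round-trip sketch (file names illustrative):
#
#   idata = from_json("posterior.json")
#   to_json(idata, "posterior_copy.json")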
|
terminus/event_listeners.py
|
timfjord/Terminus
| 1,216 |
77425
|
<gh_stars>1000+
import sublime
import sublime_plugin
import logging
import difflib
from random import random
from .clipboard import g_clipboard_history
from .recency import RecencyManager
from .terminal import Terminal
logger = logging.getLogger('Terminus')
class TerminusCoreEventListener(sublime_plugin.EventListener):
def on_activated_async(self, view):
recency_manager = RecencyManager.from_view(view)
if not view.settings().get("terminus_view", False):
recency_manager.cycling_panels = False
return
if random() > 0.7:
            # occasionally cull zombie terminals
Terminal.cull_terminals()
# clear undo stack
view.run_command("terminus_clear_undo_stack")
terminal = Terminal.from_id(view.id())
if terminal:
recency_manager.set_recent_terminal(view)
return
settings = view.settings()
if not settings.has("terminus_view.args"):
return
if settings.get("terminus_view.finished", False):
return
kwargs = settings.get("terminus_view.args")
if "cmd" not in kwargs:
return
sublime.set_timeout(lambda: view.run_command("terminus_activate", kwargs), 100)
def on_pre_close(self, view):
# panel doesn't trigger on_pre_close
terminal = Terminal.from_id(view.id())
if terminal:
terminal.kill()
def on_modified(self, view):
# to catch unicode input
terminal = Terminal.from_id(view.id())
if not terminal or not terminal.process.isalive():
return
command, args, _ = view.command_history(0)
if command.startswith("terminus"):
return
elif command == "insert" and "characters" in args and \
len(view.sel()) == 1 and view.sel()[0].empty():
chars = args["characters"]
current_cursor = view.sel()[0].end()
region = sublime.Region(
max(current_cursor - len(chars), self._cursor), current_cursor)
text = view.substr(region)
self._cursor = current_cursor
logger.debug("text {} detected".format(text))
view.run_command("terminus_paste_text", {"text": text, "bracketed": False})
elif command:
logger.debug("undo {}".format(command))
view.run_command("soft_undo")
def on_selection_modified(self, view):
terminal = Terminal.from_id(view.id())
if not terminal or not terminal.process.isalive():
return
if len(view.sel()) != 1 or not view.sel()[0].empty():
return
self._cursor = view.sel()[0].end()
def on_text_command(self, view, name, args):
if not view.settings().get('terminus_view'):
return
if name == "copy":
return ("terminus_copy", None)
elif name == "paste":
return ("terminus_paste", None)
elif name == "paste_and_indent":
return ("terminus_paste", None)
elif name == "paste_from_history":
return ("terminus_paste_from_history", None)
elif name == "paste_selection_clipboard":
self._pre_paste = view.substr(view.visible_region())
elif name == "undo":
return ("noop", None)
def on_post_text_command(self, view, name, args):
if not view.settings().get('terminus_view'):
return
if name == 'terminus_copy':
g_clipboard_history.push_text(sublime.get_clipboard())
elif name == "paste_selection_clipboard":
added = [
df[2:] for df in difflib.ndiff(self._pre_paste, view.substr(view.visible_region()))
if df[0] == '+']
view.run_command("terminus_paste_text", {"text": "".join(added)})
def on_window_command(self, window, command_name, args):
if command_name == "show_panel":
panel = args["panel"].replace("output.", "")
view = window.find_output_panel(panel)
if view:
terminal = Terminal.from_id(view.id())
if terminal and terminal.show_in_panel:
recency_manager = RecencyManager.from_view(view)
recency_manager.set_recent_terminal(view)
|
rl/rl_utils/optimization.py
|
luanagbmartins/cavia
| 122 |
77430
|
<gh_stars>100-1000
import torch
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
p = b.clone().detach()
r = b.clone().detach()
x = torch.zeros_like(b).float()
rdotr = torch.dot(r, r)
for i in range(cg_iters):
z = f_Ax(p).detach()
v = rdotr / torch.dot(p, z)
x += v * p
r -= v * z
newrdotr = torch.dot(r, r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr.item() < residual_tol:
break
return x.detach()
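# Hedged self-check (not in the original file): solve a small symmetric
# positive-definite system A x = b, passing f_Ax as a closure over A.
if __name__ == "__main__":
    A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
    b = torch.tensor([1.0, 2.0])
    x = conjugate_gradient(lambda v: A @ v, b)
    print(torch.allclose(A @ x, b, atol=1e-4))  # expected: True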
|
experimental/dang/esp32/script_test_nmpc_qpoases/test_nmpc_qpoases.py
|
mindThomas/acados
| 322 |
77444
|
#!/usr/bin/env python
# Tested with both Python 2.7.6 and Python 3.4.3
#
# This Python code collects the source code for testing acados
# on microcontrollers, putting all the necessary C files in
# one directory, and header files in the sub-directory include.
#
# The idea is that when compiling the testing code of acados for
# embedded platforms, when "make" does not fully function like
# on standard Linux platform, all the source code available in
# one directory would allow the compiler to process the code
# easier.
#
# To use for ESP32:
#
# Example usage:
# Assume the source directory of acados is: ~/acados
# The target folder to be created is: chen_nmpc_qpoases
# This command should be used:
# python test_nmpc_qpoases.py ~/acados chen_nmpc_qpoases
#
# Author: <NAME>
# Date: 2017.04.03
import sys
import os
import glob
from subprocess import call
from os.path import join
print('Running python script to grab chen_nmpc_qpoases...')
print(sys.version) # get python version, for debugging
if len(sys.argv) != 3:
raise SyntaxError('This script needs exactly 2 arguments: \n \
test_nmpc_qpoases.py <acados_top_dir> <new_target_dir>\n \
Example:\n \
test_nmpc_qpoases.py ~/acados chen_nmpc_qpoases')
# 1. Bring all necessary files to one directory.
top_dir = str(sys.argv[1]).rstrip('/') # no trailing / in top_dir
target_dir = str(sys.argv[2]).rstrip('/') # no trailing / in target_dir
# List of file to collect
# Note: these hard-coded paths do not work on Windows
workingcodefiles = [\
'examples/c/chen_nmpc_qpoases.c', \
'examples/c/Chen_model/chen_model.c', \
'acados/utils/print.c', \
'acados/utils/timing.c', \
'acados/ocp_qp/condensing.c', \
'acados/ocp_qp/condensing_helper_functions.c', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.c', \
'acados/sim/sim_erk_integrator.c', \
'external/hpmpc/auxiliary/d_aux_extern_depend_lib4.c', \
'external/blasfeo/auxiliary/i_aux_extern_depend_lib.c', \
'external/qpOASES/src/Constraints.c', \
'external/qpOASES/src/Bounds.c', \
'external/qpOASES/src/Flipper.c', \
'external/qpOASES/src/Indexlist.c', \
'external/qpOASES/src/Matrices.c', \
'external/qpOASES/src/MessageHandling.c', \
'external/qpOASES/src/Options.c', \
'external/qpOASES/src/QProblem.c', \
'external/qpOASES/src/QProblemB.c', \
'external/qpOASES/src/Utils.c' \
]
workingheaderfiles = [\
'examples/c/Chen_model/chen_model.h', \
'acados/ocp_qp/ocp_qp_common.h', \
'acados/ocp_qp/condensing.h', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.h', \
'acados/sim/sim_common.h', \
'acados/sim/sim_erk_integrator.h', \
'acados/sim/sim_collocation.h', \
'acados/sim/sim_rk_common.h', \
'acados/utils/print.h', \
'acados/utils/types.h', \
'acados/utils/timing.h', \
'external/hpmpc/include/aux_d.h', \
'external/hpmpc/include/block_size.h', \
'external/hpmpc/include/kernel_d_lib4.h', \
'external/blasfeo/include/blasfeo_i_aux.h', \
'external/qpOASES/include/qpOASES_e/Bounds.h', \
'external/qpOASES/include/qpOASES_e/Constants.h', \
'external/qpOASES/include/qpOASES_e/ConstraintProduct.h', \
'external/qpOASES/include/qpOASES_e/Constraints.h', \
'external/qpOASES/include/qpOASES_e/Flipper.h', \
'external/qpOASES/include/qpOASES_e/Indexlist.h', \
'external/qpOASES/include/qpOASES_e/Matrices.h', \
'external/qpOASES/include/qpOASES_e/MessageHandling.h', \
'external/qpOASES/include/qpOASES_e/Options.h', \
'external/qpOASES/include/qpOASES_e/QProblem.h', \
'external/qpOASES/include/qpOASES_e/QProblemB.h', \
'external/qpOASES/include/qpOASES_e/Utils.h' \
]
# Files that should be renamed to avoid conflicts
oldfiles = ['external/qpOASES/include/qpOASES_e/Types.h']
newfiles = ['include/qpOASES_e_Types.h']
# Create directory structure and copy files
if not os.path.exists(target_dir):
os.system('mkdir '+target_dir)
for filename in workingcodefiles:
os.system('cp '+top_dir+'/'+filename+' '+target_dir)
if not os.path.exists(target_dir+'/include'):
os.system('mkdir '+target_dir+'/include')
for filename in workingheaderfiles:
os.system('cp '+top_dir+'/'+filename+' '+target_dir+'/include/')
for kk in range(len(oldfiles)):
os.system('cp '+top_dir+'/'+oldfiles[kk]+' '+target_dir+'/'+newfiles[kk])
print('Step 1: Necessary files copied.')
# 2. Modify .h and .c files to adapt to the new code structure:
# List of texts to be replaced:
old_text = [\
'examples/c/Chen_model/chen_model.h', \
'acados/ocp_qp/condensing.h', \
'acados/ocp_qp/condensing_helper_functions.c', \
'acados/ocp_qp/ocp_qp_common.h', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.h', \
'acados/ocp_qp/ocp_qp_hpmpc.h', \
'acados/sim/sim_common.h', \
'acados/sim/sim_erk_integrator.h', \
'acados/sim/sim_collocation.h', \
'acados/sim/sim_rk_common.h', \
'acados/utils/print.h', \
'acados/utils/timing.h', \
'acados/utils/types.h', \
'hpmpc/include/aux_d.h', \
'../include/block_size.h', \
'../include/kernel_d_lib4.h', \
'blasfeo/include/blasfeo_common.h', \
'blasfeo/include/blasfeo_i_aux.h', \
'qpOASES_e/Bounds.h', \
'qpOASES_e/Constants.h', \
'qpOASES_e/Constraints.h', \
'qpOASES_e/ConstraintProduct.h', \
'qpOASES_e/Flipper.h', \
'qpOASES_e/Indexlist.h', \
'qpOASES_e/Matrices.h', \
'qpOASES_e/MessageHandling.h', \
'qpOASES_e/Options.h', \
'qpOASES_e/QProblem.h', \
'qpOASES_e/QProblemB.h', \
'qpOASES_e/Types.h', \
'qpOASES_e/Utils.h' \
]
# List of new texts to replace old ones,
# in corresponding order to old_text:
new_text = [\
'chen_model.h', \
'condensing.h', \
'condensing_helper_functions.c', \
'ocp_qp_common.h', \
'ocp_qp_condensing_qpoases.h', \
'ocp_qp_hpmpc.h', \
'sim_common.h', \
'sim_erk_integrator.h', \
'sim_collocation.h', \
'sim_rk_common.h', \
'print.h', \
'timing.h', \
'types.h', \
'aux_d.h', \
'block_size.h', \
'kernel_d_lib4.h', \
'blasfeo_common.h', \
'blasfeo_i_aux.h', \
'Bounds.h', \
'Constants.h', \
'Constraints.h', \
'ConstraintProduct.h', \
'Flipper.h', \
'Indexlist.h', \
'Matrices.h', \
'MessageHandling.h', \
'Options.h', \
'QProblem.h', \
'QProblemB.h', \
'qpOASES_e_Types.h', \
'Utils.h' \
]
len_old_text = len(old_text)
len_new_text = len(new_text)
if len_old_text != len_new_text:
    raise ValueError('Number of old and new texts does not match')
files = glob.glob(target_dir+"/*.c")
for file in files:
objFile = open(file, "r")
txtFile = objFile.read()
objFile.close()
for replacetext in range(len_old_text):
txtFile = txtFile.replace(old_text[replacetext],new_text[replacetext])
objFile = open(file, "w")
objFile.write(txtFile)
objFile.close()
files = glob.glob(target_dir+"/include/*.h")
for file in files:
objFile = open(file, "r")
txtFile = objFile.read()
objFile.close()
for replacetext in range(len_old_text):
txtFile = txtFile.replace(old_text[replacetext],new_text[replacetext])
objFile = open(file, "w")
objFile.write(txtFile)
objFile.close()
print('Step 2: Path information in files modified to the new structure.')
# 3. Add specific code to HPMPC and BLASFEO files:
# List of files to be modified:
files = ['include/block_size.h']
# List of lines to be added in the beginning of files,
# in corresponding order with the list files:
lines = ['#include "target.h"\n']
if len(files) != len(lines):
    raise ValueError('Number of files and added lines do not match')
for kk in range(len(files)):
objFile = open(target_dir+'/'+files[kk], "r")
txtFile = objFile.read()
objFile.close()
objFile = open(target_dir+'/'+files[kk], "w")
objFile.write(lines[kk]) # write the line to the beginning
objFile.write(txtFile)
objFile.close()
print('Step 3: Common header file included in specific files.')
# 4. Copy Makefile and specific setting files
os.system('cp '+top_dir+'/experimental/dang/esp32/script_test_nmpc_qpoases/Makefile '+target_dir)
os.system('cp '+top_dir+'/experimental/dang/esp32/script_test_nmpc_qpoases/target.h '+target_dir+'/include/')
print('Step 4: Makefile, and HPMPC target.h replaced.')
# 5. Display further instructions
print('Please do next steps in terminal:')
print(' cd '+target_dir)
print(' make')
print('Then run the binary file in '+target_dir+'/bin')
print('To remove binary objects: make clean\n')
|
lib-src/lv2/lv2/waflib/extras/objcopy.py
|
joshrose/audacity
| 7,892 |
77459
|
#!/usr/bin/python
# <NAME> 2010
"""
Support for converting linked targets to ihex, srec or binary files using
objcopy. Use the 'objcopy' feature in conjunction with the 'cc' or 'cxx'
feature. The 'objcopy' feature uses the following attributes:
objcopy_bfdname Target object format name (eg. ihex, srec, binary).
Defaults to ihex.
objcopy_target File name used for objcopy output. This defaults to the
target name with objcopy_bfdname as extension.
objcopy_install_path Install path for objcopy_target file. Defaults to ${PREFIX}/fw.
objcopy_flags Additional flags passed to objcopy.
"""
from waflib.Utils import def_attrs
from waflib import Task
from waflib.TaskGen import feature, after_method
class objcopy(Task.Task):
run_str = '${OBJCOPY} -O ${TARGET_BFDNAME} ${OBJCOPYFLAGS} ${SRC} ${TGT}'
color = 'CYAN'
@feature('objcopy')
@after_method('apply_link')
def map_objcopy(self):
def_attrs(self,
objcopy_bfdname = 'ihex',
objcopy_target = None,
objcopy_install_path = "${PREFIX}/firmware",
objcopy_flags = '')
link_output = self.link_task.outputs[0]
if not self.objcopy_target:
self.objcopy_target = link_output.change_ext('.' + self.objcopy_bfdname).name
task = self.create_task('objcopy', src=link_output, tgt=self.path.find_or_declare(self.objcopy_target))
task.env.append_unique('TARGET_BFDNAME', self.objcopy_bfdname)
try:
task.env.append_unique('OBJCOPYFLAGS', getattr(self, 'objcopy_flags'))
except AttributeError:
pass
if self.objcopy_install_path:
self.add_install_files(install_to=self.objcopy_install_path, install_from=task.outputs[0])
def configure(ctx):
ctx.find_program('objcopy', var='OBJCOPY', mandatory=True)
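# Hedged wscript usage sketch (target and source names are illustrative):
#
#   def build(bld):
#       bld.program(features='c objcopy',
#                   source='main.c',
#                   target='firmware',
#                   objcopy_bfdname='binary')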
|
Hinting/Add TTF Autohint Control Instructions for Current Glyph.py
|
justanotherfoundry/Glyphs-Scripts
| 283 |
77471
|
<reponame>justanotherfoundry/Glyphs-Scripts
#MenuTitle: Add TTF Autohint Control Instructions for Current Glyph
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
try:
from builtins import str
except Exception as e:
print("Warning: 'future' module not installed. Run 'sudo pip install future' in Terminal.")
__doc__="""
Adds a touch line for a given up/down amount to the Control Instructions of the current instance.
"""
from AppKit import NSPasteboard, NSStringPboardType
from Foundation import NSPoint, NSNotificationCenter  # NSNotificationCenter is used in addToInstructions()
import math, vanilla
def sizeStringIsOK(sizeString):
	"""
	Checks if the size string adheres to the syntax.
	Everything from a '#' on is treated as a comment and accepted.
	"""
	for character in sizeString:
		if character == "#":
			# '#' starts a comment, so the string is valid up to here
			return True
		elif not character in "1234567890-, ":
			return False
	return True
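# Hedged examples (illustrative inputs):
#   sizeStringIsOK("8-12, 20")  -> True
#   sizeStringIsOK("8pt")       -> False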
def italic( yOffset, italicAngle=0.0, pivotalY=0.0 ):
	"""
	Returns the horizontal offset produced by slanting a point at vertical
	offset 'yOffset' (its distance from the pivotal height 'pivotalY',
	usually half x-height) by the angle 'italicAngle'.
	Usage: xOffset = italic(yOffset, italicAngle=10)
	"""
	#yOffset = thisPoint.y - pivotalY # calculate vertical offset
	italicAngle = math.radians( italicAngle ) # convert to radians
	tangens = math.tan( italicAngle ) # math.tan needs radians
	horizontalDeviance = tangens * yOffset # horizontal distance from the pivotal point
	return horizontalDeviance
def addToInstructions(instructionLine, currentInstance):
parameterName = "TTFAutohint control instructions"
currentInstanceName = currentInstance.name
commentHeadline = "# %s" % currentInstanceName.upper()
# determine existing instructions:
instructions = currentInstance.customParameters[parameterName]
# normalize single space after comma:
instructionLine = instructionLine.replace(",",", ").replace(" "," ").replace(" "," ")
# add to custom parameter:
if instructions:
if not instructions.startswith(commentHeadline):
instructions = "%s\n%s" % (commentHeadline,instructions)
currentInstance.customParameters[parameterName] = "%s\n%s" % (instructions,instructionLine)
else:
currentInstance.customParameters[parameterName] = "%s\n%s" % (commentHeadline,instructionLine)
# trigger redraw for TTF Control Instructions Palette:
thisFont = currentInstance.font
if thisFont:
NSNotificationCenter.defaultCenter().postNotificationName_object_("GSUpdateInterface", thisFont)
def setClipboard( myText ):
"""
Sets the contents of the clipboard to myText.
Returns True if successful, False if unsuccessful.
"""
try:
myClipboard = NSPasteboard.generalPasteboard()
myClipboard.declareTypes_owner_( [NSStringPboardType], None )
myClipboard.setString_forType_( myText, NSStringPboardType )
return True
except Exception as e:
return False
def numberIndexStringFromNumbers(indexes):
	"""
	Turns sequence 1,2,3,4,7,8,9,10,14,19,21,22,23,27,30,31,32
	into "1-4, 7-10, 14, 19, 21-23, 27, 30-32"
	"""
	indexes = sorted( indexes )
	outputString = ""
	previousNumber = -100
	for i, thisNumber in enumerate(indexes):
		if not outputString:
			outputString += str(thisNumber)
		else:
			if previousNumber == thisNumber-1:
				if outputString[-1] != "-":
					outputString += "-"
				if i == len(indexes)-1:
					# also close a run that ends on the last index, e.g. "31-32"
					outputString += "%i" % thisNumber
			else:
				if outputString[-1] == "-":
					outputString += "%i" % previousNumber
				outputString += ", %i" % thisNumber
		previousNumber = thisNumber
	return outputString
class AddTTFAutohintControlInstructionsForCurrentGlyph( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 155
windowHeight = 410
windowWidthResize = 400 # user can resize width by this value
windowHeightResize = 100 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Add ttfAutohint Control Instructions for Current Glyph", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.mainwindow" # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 24
self.w.explanatoryText = vanilla.TextBox( (inset, linePos+2, -inset, 60), "Touch instruction with px offset for active glyph & instance, respects italic angle.", sizeStyle='small', selectable=True )
linePos += 3*lineHeight
sectionOptions = (
"All Points",
"Upper Half",
"Upper Third",
"Upper Quarter",
"Lower Half",
"Lower Third",
"Lower Quarter",
)
self.w.sectionToMoveText = vanilla.TextBox( (inset, linePos+2, 38, 14), u"Touch", sizeStyle='small', selectable=True )
self.w.sectionToMove = vanilla.PopUpButton( (inset+38, linePos, -inset, 17), sectionOptions, sizeStyle='small', callback=self.SavePreferences )
linePos += lineHeight
self.w.runButtonAdd100 = vanilla.Button( (inset, linePos, -inset, 20), "+1.00", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonAdd075 = vanilla.Button( (inset, linePos, -inset, 20), "+0.75", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonAdd050 = vanilla.Button( (inset, linePos, -inset, 20), "+0.50", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonAdd025 = vanilla.Button( (inset, linePos, -inset, 20), "+0.25", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonSub025 = vanilla.Button( (inset, linePos, -inset, 20), "-0.25", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonSub050 = vanilla.Button( (inset, linePos, -inset, 20), "-0.50", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonSub075 = vanilla.Button( (inset, linePos, -inset, 20), "-0.75", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.runButtonSub100 = vanilla.Button( (inset, linePos, -inset, 20), "-1.00", sizeStyle='regular', callback=self.AddTTFAutohintControlInstructionsForCurrentGlyphMain )
linePos += lineHeight
self.w.ppmText = vanilla.TextBox( (inset, linePos+2, 14, 14), "@", sizeStyle='small', selectable=True )
self.w.ppm = vanilla.EditText( (inset+14, linePos, -inset, 19), "8-12,20", sizeStyle='small', callback=self.SavePreferences )
linePos += lineHeight *1.5
# self.w.upperHalf = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Upper half (one)", value=False, callback=self.SavePreferences, sizeStyle='small' )
# linePos += lineHeight
self.w.rightAtTop = vanilla.Button( (inset, linePos, -inset, 20), "right at top", sizeStyle='regular', callback=self.InsertRightAtTop )
linePos += lineHeight
self.w.leftAtTop = vanilla.Button( (inset, linePos, -inset, 20), "left at top", sizeStyle='regular', callback=self.InsertLeftAtTop )
linePos += lineHeight
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Add ttfAutohint Control Instructions for Current Glyph' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.ppm"] = self.w.ppm.get()
Glyphs.defaults["com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.sectionToMove"] = self.w.sectionToMove.get()
except:
return False
return True
def LoadPreferences( self ):
try:
Glyphs.registerDefault("com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.ppm", "8-12,20")
Glyphs.registerDefault("com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.sectionToMove", 0)
self.w.ppm.set( Glyphs.defaults["com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.ppm"] )
self.w.sectionToMove.set( Glyphs.defaults["com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.sectionToMove"] )
except:
return False
return True
def fontInstanceToolGlyphLayer(self):
Font = Glyphs.font
# determine current instance:
currentInstance = Font.instances[ Font.currentTab.selectedInstance() ]
# switch to Instructor tool:
ttInstructorClass = NSClassFromString("GlyphsToolTrueTypeInstructor")
Font.parent.windowController().setToolForClass_(ttInstructorClass)
tool = Font.parent.windowController().toolForClass_(ttInstructorClass)
# double check Instructor tool is on:
if not tool.className() == "GlyphsToolTrueTypeInstructor":
Message(title="Tool Error", message="TT Instructor tool (I) must be active", OKButton=None)
else:
# determine glyph name:
layer = Font.currentTab.activeLayer()
if not layer and tool.activeLayers():
# fallback attempt if line above fails:
layer = tool.activeLayers()[0]
if not layer:
Message(title="ttfAutohint Error", message="Cannot determine current glyph. Perhaps try closing and reopening the tab. Sorry.", OKButton=None)
else:
glyph = layer.glyph()
glyphName = glyph.name
# prefix comment with glyph name:
addToInstructions( "# %s" % glyphName, currentInstance )
# overwrite glyph name with production name, if any:
if glyph.productionName:
glyphName = glyph.productionName
# tt outline:
glyf = tool.valueForKey_("fontOutlineGlyf")
glyfBounds = glyf.bounds()
# tt points:
coords = glyf.coordinates()
pointCount = coords.count()
return Font, currentInstance, tool, glyphName, layer, glyf, glyfBounds, coords, pointCount
return None, None, None, None, None, None, None, None, None
def InsertRightAtTop( self, sender ):
Font, currentInstance, tool, glyphName, layer, glyf, glyfBounds, coords, pointCount = self.fontInstanceToolGlyphLayer()
if not Font:
print("ERROR: Could not determine font.")
else:
# add right instruction for topmost point if desired:
highestPointIndex = -1
highestY = -1000
for i in range(pointCount):
thisPoint = coords.pointAtIndex_(i)
if thisPoint.y > highestY:
highestPointIndex = i
highestY = thisPoint.y
if highestPointIndex > -1:
instructionLine = "%s right %i" % (glyphName,highestPointIndex)
addToInstructions(instructionLine, currentInstance)
else:
print("ERROR: Could not determine highest point in %s." % glyphName)
def InsertLeftAtTop( self, sender ):
Font, currentInstance, tool, glyphName, layer, glyf, glyfBounds, coords, pointCount = self.fontInstanceToolGlyphLayer()
if not Font:
print("ERROR: Could not determine font.")
else:
# add left instruction for topmost point if desired:
highestPointIndex = -1
highestY = -1000
topBound = glyfBounds.origin.y + glyfBounds.size.height
for i in range(pointCount):
thisPoint = coords.pointAtIndex_(i)
prevPoint = coords.pointAtIndex_((i-1)%pointCount)
nextPoint = coords.pointAtIndex_((i+1)%pointCount)
if thisPoint.y < topBound and thisPoint.y > highestY and thisPoint.y > prevPoint.y and thisPoint.y >= nextPoint.y and (thisPoint.x < prevPoint.x or nextPoint.x < thisPoint.x):
highestPointIndex = i
highestY = thisPoint.y
if highestPointIndex > -1:
instructionLine = "%s left %i" % (glyphName,highestPointIndex)
addToInstructions(instructionLine, currentInstance)
else:
print("ERROR: Could not determine highest point in %s." % glyphName)
def AddTTFAutohintControlInstructionsForCurrentGlyphMain( self, sender ):
try:
if not self.SavePreferences( self ):
print("Note: 'Add ttfAutohint Control Instructions for Current Glyph' could not write preferences.")
shift = float(sender.getTitle())
print(shift)
if shift:
Font, currentInstance, tool, glyphName, layer, glyf, glyfBounds, coords, pointCount = self.fontInstanceToolGlyphLayer()
if Font:
# determine x/y move based on italic angle:
currentMaster = Font.selectedFontMaster
italicAngle = currentMaster.italicAngle
if italicAngle:
moveString = "x %1.2f y %1.2f" % ( italic(shift, italicAngle), shift )
else:
moveString = "y %1.2f" % shift
# determine PPMs
sizeString = Glyphs.defaults["com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.ppm"]
if not sizeString:
print("ERROR: Could not determine PPMs, will use a default. Did you enter any?")
sizeString = "17"
elif not sizeStringIsOK(sizeString):
print("ERROR: Illegal character found in PPM specification (%s), will use default instead." % sizeString)
sizeString = "17"
# build point indexes to be moved:
sectionChoice = Glyphs.defaults["com.mekkablue.AddTTFAutohintControlInstructionsForCurrentGlyph.sectionToMove"]
pointIndexString = None
if sectionChoice > 0:
pointIndexes = []
# ranges:
halfHeight = glyfBounds.origin.y + 0.5 * glyfBounds.size.height
upperThird = glyfBounds.origin.y + 0.666667 * glyfBounds.size.height
lowerThird = glyfBounds.origin.y + 0.333333 * glyfBounds.size.height
upperQuarter = glyfBounds.origin.y + 0.75 * glyfBounds.size.height
lowerQuarter = glyfBounds.origin.y + 0.25 * glyfBounds.size.height
for i in range(pointCount):
thisPoint = coords.pointAtIndex_(i)
if sectionChoice == 1 and thisPoint.y > halfHeight:
# Upper Half
pointIndexes.append(i)
elif sectionChoice == 2 and thisPoint.y > upperThird:
# Upper Third
pointIndexes.append(i)
elif sectionChoice == 3 and thisPoint.y > upperQuarter:
# Upper Quarter
pointIndexes.append(i)
elif sectionChoice == 4 and thisPoint.y < halfHeight:
# Lower Half
pointIndexes.append(i)
elif sectionChoice == 5 and thisPoint.y < lowerThird:
# Lower Third
pointIndexes.append(i)
elif sectionChoice == 6 and thisPoint.y < lowerQuarter:
# Lower Quarter
pointIndexes.append(i)
if pointIndexes:
pointIndexString = numberIndexStringFromNumbers(pointIndexes)
else:
# all points, choice = 0
# count of tt paths:
endPoints = glyf.endPtsOfContours()
pathCount = len(glyf.endPtsOfContours())
pointIndexStrings = []
# all points, in ranges, separated by path:
j = 0
for i in range(pathCount):
k = endPoints.elementAtIndex_(i)
pointIndexStrings.append( "%i-%i"%(j,k) )
j = k+1
pointIndexString = ", ".join(pointIndexStrings)
if not pointIndexString:
print("ERROR: no point indexes matching your criteria could be found.")
else:
# build the instruction line:
instructionLine = "%s touch %s %s @ %s" % (
glyphName,
pointIndexString,
moveString,
sizeString,
)
# add the instruction line to the parameter:
if instructionLine:
addToInstructions(instructionLine,currentInstance)
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Add ttfAutohint Control Instructions for Current Glyph Error: %s" % e)
import traceback
print(traceback.format_exc())
Glyphs.defaults["TTPreviewAlsoShowOffCurveIndexes"] = True
AddTTFAutohintControlInstructionsForCurrentGlyph()
|
tests/r/test_grunfeld1.py
|
hajime9652/observations
| 199 |
77479
|
<reponame>hajime9652/observations
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.grunfeld1 import grunfeld1
def test_grunfeld1():
  """Test module grunfeld1.py by downloading
  grunfeld1.csv and testing shape of
  extracted data has 200 rows and 5 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = grunfeld1(test_path)
  try:
    assert x_train.shape == (200, 5)
  except:
    shutil.rmtree(test_path)
    raise
|
tests/unit2/test_physics_engine_platformer.py
|
yegarti/arcade
| 824 |
77502
|
<filename>tests/unit2/test_physics_engine_platformer.py
import arcade
CHARACTER_SCALING = 0.5
GRAVITY = 0.5
def test_physics_engine(window):
arcade.set_background_color(arcade.color.AMAZON)
character_list = arcade.SpriteList()
character_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png", CHARACTER_SCALING)
character_sprite.center_x = 150
character_sprite.center_y = 110
character_list.append(character_sprite)
wall_list = arcade.SpriteList()
for x in range(0, 1200, 64):
sprite = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", CHARACTER_SCALING)
sprite.center_x = x
sprite.center_y = 32
wall_list.append(sprite)
physics_engine = arcade.PhysicsEnginePlatformer(
character_sprite,
wall_list,
gravity_constant=GRAVITY,
)
def on_draw():
arcade.start_render()
wall_list.draw()
character_list.draw()
def update(td):
physics_engine.update()
window.on_draw = on_draw
window.on_update = update
physics_engine.enable_multi_jump(2)
physics_engine.jumps_since_ground = 0
assert physics_engine.can_jump() is True
character_sprite.change_y = 15
physics_engine.increment_jump_counter()
window.test()
assert physics_engine.can_jump() is True
character_sprite.change_y = 15
physics_engine.increment_jump_counter()
window.test()
assert physics_engine.can_jump() is False
physics_engine.disable_multi_jump()
|
python/cuml/test/dask/test_coordinate_descent.py
|
Nicholas-7/cuml
| 2,743 |
77519
|
<reponame>Nicholas-7/cuml
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.dask.datasets import make_regression
from cuml.dask.linear_model import ElasticNet
from cuml.dask.linear_model import Lasso
from cuml.metrics import r2_score
from cuml.test.utils import unit_param, quality_param, stress_param
import numpy as np
@pytest.mark.mg
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('alpha', [0.001])
@pytest.mark.parametrize('algorithm', ['cyclic', 'random'])
@pytest.mark.parametrize('nrows', [unit_param(50),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(4),
quality_param(32),
stress_param(64)])
@pytest.mark.parametrize("delayed", [True, False])
def test_lasso(dtype, alpha, algorithm,
nrows, column_info, n_parts, delayed, client):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
n_parts=n_parts,
client=client,
dtype=dtype)
lasso = Lasso(alpha=np.array([alpha]), fit_intercept=True,
normalize=False, max_iter=1000,
selection=algorithm, tol=1e-10,
client=client)
lasso.fit(X, y)
y_hat = lasso.predict(X, delayed=delayed)
assert r2_score(y.compute(), y_hat.compute()) >= 0.99
@pytest.mark.mg
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('nrows', [unit_param(50),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(16),
quality_param(32),
stress_param(64)])
def test_lasso_default(dtype, nrows, column_info, n_parts, client):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
client=client,
dtype=dtype)
lasso = Lasso(client=client)
lasso.fit(X, y)
y_hat = lasso.predict(X)
assert r2_score(y.compute(), y_hat.compute()) >= 0.99
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('alpha', [0.5])
@pytest.mark.parametrize('algorithm', ['cyclic', 'random'])
@pytest.mark.parametrize('nrows', [unit_param(500),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(16),
quality_param(32),
stress_param(64)])
@pytest.mark.parametrize("delayed", [True, False])
def test_elastic_net(dtype, alpha, algorithm,
nrows, column_info, n_parts, client, delayed):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
n_parts=n_parts,
client=client,
dtype=dtype)
elasticnet = ElasticNet(alpha=np.array([alpha]), fit_intercept=True,
normalize=False, max_iter=1000,
selection=algorithm, tol=1e-10,
client=client)
elasticnet.fit(X, y)
y_hat = elasticnet.predict(X, delayed=delayed)
# based on differences with scikit-learn 0.22
if alpha == 0.2:
assert r2_score(y.compute(), y_hat.compute()) >= 0.96
else:
assert r2_score(y.compute(), y_hat.compute()) >= 0.80
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('nrows', [unit_param(500),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(16),
quality_param(32),
stress_param(64)])
def test_elastic_net_default(dtype, nrows, column_info, n_parts, client):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
n_parts=n_parts,
client=client,
dtype=dtype)
elasticnet = ElasticNet(client=client)
elasticnet.fit(X, y)
y_hat = elasticnet.predict(X)
assert r2_score(y.compute(), y_hat.compute()) >= 0.96
|
examples/rel_client.py
|
mrkeuz/websocket-client
| 2,372 |
77523
|
<filename>examples/rel_client.py
import websocket, rel
addr = "wss://api.gemini.com/v1/marketdata/%s"
if __name__ == "__main__":
rel.safe_read()
for symbol in ["BTCUSD", "ETHUSD", "ETHBTC"]:
ws = websocket.WebSocketApp(addr % (symbol,), on_message=lambda w, m : print(m))
ws.run_forever(dispatcher=rel)
rel.signal(2, rel.abort) # Keyboard Interrupt
rel.dispatch()
|
tests/test_visitors/test_ast/test_complexity/test_access/test_access.py
|
cdhiraj40/wemake-python-styleguide
| 1,931 |
77540
|
import pytest
from wemake_python_styleguide.violations.complexity import (
TooDeepAccessViolation,
)
from wemake_python_styleguide.visitors.ast.complexity.access import (
AccessVisitor,
)
# boundary expressions
subscript_access = 'my_matrix[0][0][0][0]'
attribute_access = 'self.attr.inner.wrapper.value'
mixed_access = 'self.attr[0].wrapper[0]'
mixed_with_calls_access = 'self.attr[0]().wrapper[0][0].bar().foo[0]()'
# correct expressions
call_chain = 'manager.filter().exclude().annotate().values().first()'
# incorrect expressions
deep_access = 'self.some.other.attr().first.second.third.fourth.boom'
@pytest.mark.parametrize('code', [
subscript_access,
attribute_access,
mixed_access,
mixed_with_calls_access,
call_chain,
])
def test_correct_access(
assert_errors,
parse_ast_tree,
code,
options,
mode,
):
"""Testing that expressions with correct access level work well."""
tree = parse_ast_tree(mode(code))
option_values = options(max_access_level=4)
visitor = AccessVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize(('code', 'access_level'), [
(subscript_access, 4),
(attribute_access, 4),
(mixed_access, 4),
(mixed_with_calls_access, 4),
(deep_access, 5),
])
def test_incorrect_access(
assert_errors,
assert_error_text,
parse_ast_tree,
code,
access_level,
options,
mode,
):
"""Testing that violations are raised when reaching too deep access."""
tree = parse_ast_tree(mode(code))
option_values = options(max_access_level=3)
visitor = AccessVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [TooDeepAccessViolation])
assert_error_text(
visitor,
access_level,
option_values.max_access_level,
)
|
src/quantum/azext_quantum/vendored_sdks/azure_quantum/models/_quantum_client_enums.py
|
ravithanneeru/azure-cli-extensions
| 207 |
77542
|
<gh_stars>100-1000
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class DimensionScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The scope at which the quota is applied.
"""
WORKSPACE = "Workspace"
SUBSCRIPTION = "Subscription"
class JobStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The job status.
"""
WAITING = "Waiting"
EXECUTING = "Executing"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELLED = "Cancelled"
class MeterPeriod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The time period in which the quota's underlying meter is accumulated. Based on calendar year.
'None' is used for concurrent quotas.
"""
NONE = "None"
MONTHLY = "Monthly"
class ProviderAvailability(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Provider availability.
"""
AVAILABLE = "Available"
DEGRADED = "Degraded"
UNAVAILABLE = "Unavailable"
class TargetAvailability(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Target availability.
"""
AVAILABLE = "Available"
DEGRADED = "Degraded"
UNAVAILABLE = "Unavailable"
|
guillotina/json/__init__.py
|
rboixaderg/guillotina
| 173 |
77544
|
# make sure configure registrations are executed
from . import deserialize_content # noqa
from . import deserialize_value # noqa
from . import serialize_content # noqa
from . import serialize_schema # noqa
from . import serialize_schema_field # noqa
from . import serialize_value # noqa
|
PyFlow/Packages/PyFlowBase/Tools/LoggerTool.py
|
luzpaz/PyFlow
| 1,463 |
77547
|
<filename>PyFlow/Packages/PyFlowBase/Tools/LoggerTool.py
## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
from Qt import QtCore
from Qt import QtGui
from Qt.QtWidgets import QAction, QTextBrowser
from PyFlow.UI.Tool.Tool import DockTool
from PyFlow.UI.Views.NodeBox import NodesBox
from PyFlow.UI.Utils.stylesheet import editableStyleSheet
from PyFlow.Core.GraphManager import GraphManagerSingleton
from PyFlow.Core.Common import SingletonDecorator
from PyFlow.ConfigManager import ConfigManager
import sys
import logging
import json
import os
import subprocess
REDIRECT = ConfigManager().shouldRedirectOutput()
logger = logging.getLogger(None)
def addLoggingLevel(levelName, levelNum, methodName=None):
"""
Comprehensively adds a new logging level to the `logging` module and the
currently configured logging class.
`levelName` becomes an attribute of the `logging` module with the value
`levelNum`. `methodName` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
used.
To avoid accidental clobberings of existing attributes, this method will
raise an `AttributeError` if the level name is already an attribute of the
`logging` module or if the method name is already present
Example
-------
>>> addLoggingLevel('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5
"""
if not methodName:
methodName = levelName.lower()
if hasattr(logging, levelName):
raise AttributeError('{} already defined in logging module'.format(levelName))
if hasattr(logging, methodName):
raise AttributeError('{} already defined in logging module'.format(methodName))
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError('{} already defined in logger class'.format(methodName))
# This method was inspired by the answers to Stack Overflow post
# http://stackoverflow.com/q/2183233/2988730, especially
# http://stackoverflow.com/a/13638084/2988730
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot)
addLoggingLevel('CONSOLEOUTPUT', logging.ERROR + 5)
@SingletonDecorator
class SignalHandler(QtCore.QObject):
messageWritten = QtCore.Signal(str)
errorWritten = QtCore.Signal(str)
warningWritten = QtCore.Signal(str)
flushSig = QtCore.Signal()
progressSig = QtCore.Signal(int)
_stdout = None
_stderr = None
text = ""
def __init__(self, parent):
QtCore.QObject.__init__(self, parent)
sys.stdout = self
def write(self, msg):
if (not self.signalsBlocked()):
if msg != '\n':
self.text = msg
logger.info(str(msg))
    def flush(self):
        print('flushing from handler')  # sentinel string, matched in QtHandler.emit
class QtHandler(logging.Handler):
def __init__(self, parent):
logging.Handler.__init__(self)
self.messageHolder = SignalHandler(parent)
def emit(self, record):
if record:
msj = self.format(record)
            if 'flushing from handler' in msj:
self.messageHolder.flushSig.emit()
elif 'bytes Downloaded' in msj:
nb = int(float(msj.split('(')[-1][:-2]))
self.messageHolder.progressSig.emit(nb)
self.messageHolder.messageWritten.emit('%s\n' % msj)
else:
if record.levelname in ['ERROR', 'CRITICAL']:
self.messageHolder.errorWritten.emit('%s\n' % msj)
elif record.levelname == 'WARNING':
self.messageHolder.warningWritten.emit('%s\n' % msj)
else:
self.messageHolder.messageWritten.emit('%s\n' % msj)
class LoggerTool(DockTool):
"""docstring for NodeBox tool."""
formater = logging.Formatter("[%(levelname)s %(asctime)s]: %(message)s", "%H:%M:%S")
def __init__(self):
super(LoggerTool, self).__init__()
self.logView = QTextBrowser()
self.logView.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.logView.setOpenLinks(False)
self.logView.setReadOnly(True)
self.logView.setStyleSheet("background-color: %s; Font: 10pt 'Consolas'" %
"rgba%s" % str(editableStyleSheet().LoggerBgColor.getRgb()))
self.clearAction = QAction("Clear", None)
self.clearAction.triggered.connect(self.clearView)
self.logView.addAction(self.clearAction)
self.logView.anchorClicked.connect(self.anchorClickedMethod)
self.logView.setTextColor(QtGui.QColor('white'))
self.setWidget(self.logView)
#####################################################
# Sys Output Redirection
#####################################################
self.handler = None
if REDIRECT:
self.handler = QtHandler(self)
else:
self.handler = logging.StreamHandler(sys.stdout)
logger.setLevel(logging.DEBUG)
sys.excepthook = LoggerTool.exceptHook
if self.handler and REDIRECT:
self.handler.setFormatter(LoggerTool.formater)
logger.addHandler(self.handler)
self.handler.messageHolder.messageWritten.connect(
lambda value: self.logPython(value, 0))
self.handler.messageHolder.warningWritten.connect(
lambda value: self.logPython(value, 1))
self.handler.messageHolder.errorWritten.connect(
lambda value: self.logPython(value, 2))
self.handler.messageHolder.flushSig.connect(self.flushPython)
#####################################################
# Logger
#####################################################
@staticmethod
def exceptHook(excType, excValue, traceback, logger=logger):
logger.error(excValue, exc_info=(excType, excValue, traceback))
def clearView(self, *args):
self.logView.clear()
@staticmethod
def supportedSoftwares():
return ["standalone"]
def onDestroy(self):
if REDIRECT:
try:
sys.stdout = sys.__stdout__
self.handler.messageHolder._stdout = None
self.handler.messageHolder._stderr = None
self.handler.messageHolder.messageWritten.disconnect()
self.handler.messageHolder.warningWritten.disconnect()
self.handler.messageHolder.errorWritten.disconnect()
self.handler.messageHolder.flushSig.disconnect()
del self.handler
self.handler = None
except:
pass
def logPython(self, text, mode=0):
colorchart = {
0: 'white',
1: 'yellow',
2: 'red'
}
for l in text.split('\n'):
if len(l) > 0:
splitted = l.split(",")
if len(splitted) >= 3:
if "File" in splitted[0] and "line" in splitted[1] and "in" in splitted[2]:
file = splitted[0].split('"')[1]
line = splitted[1].split("line ")[1]
if os.path.exists(file):
file = file.replace("\\", "//")
errorLink = """<a href=%s><span style=" text-decoration: underline; color:red;">%s</span></a></p>""" % (
str(file + "::%s" % line), l)
self.logView.append(errorLink)
else:
self.logView.append(
'<span style=" color:%s;">%s<span>' % (colorchart[mode], l))
else:
self.logView.append(
'<span style=" color:%s;">%s<span>' % (colorchart[mode], l))
    def flushPython(self):
        # QTextCursor lives in QtGui (QtWidgets was never imported here)
        self.logView.moveCursor(QtGui.QTextCursor.End,
                                QtGui.QTextCursor.MoveAnchor)
        self.logView.moveCursor(QtGui.QTextCursor.Up,
                                QtGui.QTextCursor.MoveAnchor)
        self.logView.moveCursor(
            QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.MoveAnchor)
        self.logView.moveCursor(QtGui.QTextCursor.End,
                                QtGui.QTextCursor.KeepAnchor)
        self.logView.textCursor().removeSelectedText()
def loglevelChanged(self, int):
logger.setLevel(self.loggerLevels[int])
def anchorClickedMethod(self, url):
if os.path.exists(url.url().split("::")[0]):
editCmd = ConfigManager().getPrefsValue("PREFS", "General/EditorCmd")
editCmd = editCmd.replace("@FILE", url.url().replace("::", ":"))
subprocess.Popen(editCmd)
else:
man = self.pyFlowInstance.graphManager
node = man.get().findNode(str(url.url()))
if node:
self.pyFlowInstance.getCanvas().clearSelection()
node.getWrapper().setSelected(True)
self.pyFlowInstance.getCanvas().frameSelectedNodes()
def update(self):
self.logView.setStyleSheet("background-color: %s; Font: 10pt 'Consolas'" %
"rgba%s" % str(editableStyleSheet().LoggerBgColor.getRgb()))
super(LoggerTool, self).update()
def onShow(self):
super(LoggerTool, self).onShow()
def closeEvent(self, event):
self.hide()
@staticmethod
def isSingleton():
return True
@staticmethod
def defaultDockArea():
return QtCore.Qt.BottomDockWidgetArea
@staticmethod
def toolTip():
return "Logger"
@staticmethod
def name():
return str("Logger")
|
deepctr/layers/utils.py
|
dzzxjl/DeepCTR
| 6,192 |
77549
|
<gh_stars>1000+
# -*- coding:utf-8 -*-
"""
Author:
<NAME>,<EMAIL>
"""
import tensorflow as tf
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.ops.lookup_ops import TextFileInitializer
try:
from tensorflow.python.ops.lookup_ops import StaticHashTable
except ImportError:
from tensorflow.python.ops.lookup_ops import HashTable as StaticHashTable
class NoMask(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(NoMask, self).__init__(**kwargs)
def build(self, input_shape):
# Be sure to call this somewhere!
super(NoMask, self).build(input_shape)
def call(self, x, mask=None, **kwargs):
return x
def compute_mask(self, inputs, mask):
return None
class Hash(tf.keras.layers.Layer):
"""Looks up keys in a table when setup `vocabulary_path`, which outputs the corresponding values.
If `vocabulary_path` is not set, `Hash` will hash the input to [0,num_buckets). When `mask_zero` = True,
input value `0` or `0.0` will be set to `0`, and other value will be set in range [1,num_buckets).
    The following snippet initializes a `Hash` with a `vocabulary_path` file whose first column holds the
    values and whose second column holds the keys:
* `1,emerson`
* `2,lake`
* `3,palmer`
>>> hash = Hash(
... num_buckets=3+1,
... vocabulary_path=filename,
... default_value=0)
>>> hash(tf.constant('lake')).numpy()
2
>>> hash(tf.constant('lakeemerson')).numpy()
0
Args:
        num_buckets: An `int` that is >= 1. The number of buckets, or the vocabulary size + 1
            when `vocabulary_path` is set.
        mask_zero: default is False. When `mask_zero` is `True`, input `0` or `0.0` is hashed to
            value `0` and other values fall in [1, num_buckets). `mask_zero` is ignored when
            `vocabulary_path` is set.
vocabulary_path: default `None`. The `CSV` text file path of the vocabulary hash, which contains
            two columns separated by delimiter `comma`, the first column is the value and the second is
the key. The key data type is `string`, the value data type is `int`. The path must
be accessible from wherever `Hash` is initialized.
default_value: default '0'. The default value if a key is missing in the table.
**kwargs: Additional keyword arguments.
"""
def __init__(self, num_buckets, mask_zero=False, vocabulary_path=None, default_value=0, **kwargs):
self.num_buckets = num_buckets
self.mask_zero = mask_zero
self.vocabulary_path = vocabulary_path
self.default_value = default_value
if self.vocabulary_path:
initializer = TextFileInitializer(vocabulary_path, 'string', 1, 'int64', 0, delimiter=',')
self.hash_table = StaticHashTable(initializer, default_value=self.default_value)
super(Hash, self).__init__(**kwargs)
def build(self, input_shape):
# Be sure to call this somewhere!
super(Hash, self).build(input_shape)
def call(self, x, mask=None, **kwargs):
if x.dtype != tf.string:
zero = tf.as_string(tf.zeros([1], dtype=x.dtype))
x = tf.as_string(x, )
else:
zero = tf.as_string(tf.zeros([1], dtype='int32'))
if self.vocabulary_path:
hash_x = self.hash_table.lookup(x)
return hash_x
num_buckets = self.num_buckets if not self.mask_zero else self.num_buckets - 1
try:
hash_x = tf.string_to_hash_bucket_fast(x, num_buckets,
name=None) # weak hash
except AttributeError:
hash_x = tf.strings.to_hash_bucket_fast(x, num_buckets,
name=None) # weak hash
if self.mask_zero:
mask = tf.cast(tf.not_equal(x, zero), dtype='int64')
hash_x = (hash_x + 1) * mask
return hash_x
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self, ):
config = {'num_buckets': self.num_buckets, 'mask_zero': self.mask_zero, 'vocabulary_path': self.vocabulary_path,
'default_value': self.default_value}
base_config = super(Hash, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Linear(tf.keras.layers.Layer):
def __init__(self, l2_reg=0.0, mode=0, use_bias=False, seed=1024, **kwargs):
self.l2_reg = l2_reg
# self.l2_reg = tf.contrib.layers.l2_regularizer(float(l2_reg_linear))
if mode not in [0, 1, 2]:
raise ValueError("mode must be 0,1 or 2")
self.mode = mode
self.use_bias = use_bias
self.seed = seed
super(Linear, self).__init__(**kwargs)
def build(self, input_shape):
if self.use_bias:
self.bias = self.add_weight(name='linear_bias',
shape=(1,),
initializer=tf.keras.initializers.Zeros(),
trainable=True)
if self.mode == 1:
self.kernel = self.add_weight(
'linear_kernel',
shape=[int(input_shape[-1]), 1],
initializer=tf.keras.initializers.glorot_normal(self.seed),
regularizer=tf.keras.regularizers.l2(self.l2_reg),
trainable=True)
elif self.mode == 2:
self.kernel = self.add_weight(
'linear_kernel',
shape=[int(input_shape[1][-1]), 1],
initializer=tf.keras.initializers.glorot_normal(self.seed),
regularizer=tf.keras.regularizers.l2(self.l2_reg),
trainable=True)
super(Linear, self).build(input_shape) # Be sure to call this somewhere!
def call(self, inputs, **kwargs):
if self.mode == 0:
sparse_input = inputs
linear_logit = reduce_sum(sparse_input, axis=-1, keep_dims=True)
elif self.mode == 1:
dense_input = inputs
fc = tf.tensordot(dense_input, self.kernel, axes=(-1, 0))
linear_logit = fc
else:
sparse_input, dense_input = inputs
fc = tf.tensordot(dense_input, self.kernel, axes=(-1, 0))
linear_logit = reduce_sum(sparse_input, axis=-1, keep_dims=False) + fc
if self.use_bias:
linear_logit += self.bias
return linear_logit
def compute_output_shape(self, input_shape):
return (None, 1)
def compute_mask(self, inputs, mask):
return None
def get_config(self, ):
config = {'mode': self.mode, 'l2_reg': self.l2_reg, 'use_bias': self.use_bias, 'seed': self.seed}
base_config = super(Linear, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def concat_func(inputs, axis=-1, mask=False):
if not mask:
inputs = list(map(NoMask(), inputs))
if len(inputs) == 1:
return inputs[0]
else:
return tf.keras.layers.Concatenate(axis=axis)(inputs)
def reduce_mean(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
try:
return tf.reduce_mean(input_tensor,
axis=axis,
keep_dims=keep_dims,
name=name,
reduction_indices=reduction_indices)
except TypeError:
return tf.reduce_mean(input_tensor,
axis=axis,
keepdims=keep_dims,
name=name)
def reduce_sum(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
try:
return tf.reduce_sum(input_tensor,
axis=axis,
keep_dims=keep_dims,
name=name,
reduction_indices=reduction_indices)
except TypeError:
return tf.reduce_sum(input_tensor,
axis=axis,
keepdims=keep_dims,
name=name)
def reduce_max(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
try:
return tf.reduce_max(input_tensor,
axis=axis,
keep_dims=keep_dims,
name=name,
reduction_indices=reduction_indices)
except TypeError:
return tf.reduce_max(input_tensor,
axis=axis,
keepdims=keep_dims,
name=name)
def div(x, y, name=None):
try:
return tf.div(x, y, name=name)
except AttributeError:
return tf.divide(x, y, name=name)
def softmax(logits, dim=-1, name=None):
try:
return tf.nn.softmax(logits, dim=dim, name=name)
except TypeError:
return tf.nn.softmax(logits, axis=dim, name=name)
class Add(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(Add, self).__init__(**kwargs)
def build(self, input_shape):
# Be sure to call this somewhere!
super(Add, self).build(input_shape)
def call(self, inputs, **kwargs):
if not isinstance(inputs, list):
return inputs
if len(inputs) == 1:
return inputs[0]
if len(inputs) == 0:
return tf.constant([[0.0]])
return tf.keras.layers.add(inputs)
def add_func(inputs):
return Add()(inputs)
def combined_dnn_input(sparse_embedding_list, dense_value_list):
if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
sparse_dnn_input = Flatten()(concat_func(sparse_embedding_list))
dense_dnn_input = Flatten()(concat_func(dense_value_list))
return concat_func([sparse_dnn_input, dense_dnn_input])
elif len(sparse_embedding_list) > 0:
return Flatten()(concat_func(sparse_embedding_list))
elif len(dense_value_list) > 0:
return Flatten()(concat_func(dense_value_list))
else:
raise NotImplementedError("dnn_feature_columns can not be empty list")
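if __name__ == "__main__":
    # Editor's sketch (not part of DeepCTR): a minimal smoke test of the helpers
    # above, assuming TF 2.x with eager execution; the feature names and shapes
    # here are illustrative only.
    hash_layer = Hash(num_buckets=10, mask_zero=True)
    # '0' is masked to bucket 0; other strings land in [1, 10)
    print(hash_layer(tf.constant([['0'], ['user_a'], ['user_b']])))
    # combined_dnn_input flattens the sparse embeddings, then concatenates them
    # with the dense values along the last axis
    sparse_emb = [tf.keras.layers.Input(shape=(1, 4)) for _ in range(2)]
    dense_val = [tf.keras.layers.Input(shape=(3,))]
    print(combined_dnn_input(sparse_emb, dense_val).shape)  # (None, 2 * 4 + 3)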
|
data_collection/gazette/spiders/pi_teresina.py
|
kaiocp/querido-diario
| 454 |
77575
|
<filename>data_collection/gazette/spiders/pi_teresina.py
import datetime
from urllib.parse import urlencode
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class PiTeresina(BaseGazetteSpider):
TERRITORY_ID = "2211001"
name = "pi_teresina"
allowed_domains = ["dom.pmt.pi.gov.br"]
start_date = datetime.date(2005, 1, 7)
def start_requests(self):
initial_date = self.start_date.strftime("%d/%m/%Y")
end_date = self.end_date.strftime("%d/%m/%Y")
params = {
"pagina": 1,
"filtra_data": initial_date,
"filtra_dataf": end_date,
}
url_params = urlencode(params)
yield scrapy.Request(
f"http://dom.pmt.pi.gov.br/lista_diario.php?{url_params}",
)
def parse(self, response):
for entry in response.css("tbody tr"):
edition_number = entry.xpath(".//td[1]/text()").get()
gazette_date = entry.xpath(".//td[2]/text()").get()
gazette_date = datetime.datetime.strptime(gazette_date, "%d/%m/%Y").date()
gazettes_pdfs = entry.css("a::attr(href)").getall()
yield Gazette(
date=gazette_date,
edition_number=edition_number,
file_urls=gazettes_pdfs,
is_extra_edition=False,
power="executive",
)
for next_page_url in response.css("a.paginacao::attr(href)").getall():
yield scrapy.Request(response.urljoin(next_page_url))
|
tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py
|
ryanloney/openvino-1
| 1,127 |
77577
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from distutils.version import LooseVersion
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
class TestLogSoftmax(CommonTFLayerTest):
def create_log_softmax_net(self, shape, reduction_axis, ir_version, use_new_frontend):
"""
Tensorflow net IR net
Input->LogSoftmax => Input->Softmax->Log
"""
#
# Create Tensorflow model
#
import tensorflow as tf
tf.compat.v1.reset_default_graph()
# Create the graph and model
with tf.compat.v1.Session() as sess:
tf_x_shape = shape.copy()
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
tf.nn.log_softmax(input, name='Operation', axis=reduction_axis)
else:
tf.nn.log_softmax(input, axis=reduction_axis, name='Operation')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
ref_net = None
reduce_sum_shape = np.copy(shape)
rank = len(shape)
if rank in {4, 5}:
reduction_axis = reduction_axis if reduction_axis >= 0 else rank + reduction_axis
if rank == 4:
reduction_axis = {0: 0, 1: 2, 2: 3, 3: 1}[reduction_axis]
else:
reduction_axis = {0: 0, 1: 2, 2: 3, 3: 4, 4: 1}[reduction_axis]
reduce_sum_shape[reduction_axis] = 1
converted_shape = shape if rank != 1 else shape[0]
if check_ir_version(10, None, ir_version) and not use_new_frontend:
ref_nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter', 'shape': converted_shape},
'input_data': {'shape': shape, 'kind': 'data', 'value': None},
'reduce_max_axis_val': {'shape': int64_array([reduction_axis]).shape,
'kind': 'data',
'value': int64_array([reduction_axis])},
'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
'reduce_max': {'type': 'ReduceMax', 'kind': 'op', 'keep_dims': True},
'reduce_max_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'sub_first': {'type': 'Subtract', 'kind': 'op'},
'sub_first_data': {'shape': shape, 'kind': 'data', 'value': None},
'reduce_sum_axis_val': {'shape': int64_array([reduction_axis]).shape,
'kind': 'data',
'value': int64_array([reduction_axis])},
'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'keep_dims': True},
'reduce_sum_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'exp': {'type': 'Exp', 'kind': 'op'},
'exp_data': {'shape': shape, 'kind': 'data', 'value': None},
'log': {'type': 'Log', 'kind': 'op'},
'log_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'sub_second': {'type': 'Subtract', 'kind': 'op'},
'sub_second_data': {'shape': shape, 'kind': 'data', 'value': None},
'result': {'kind': 'op', 'type': 'Result'},
}
ref_edges = [
('input', 'input_data'),
('reduce_max_axis_val', 'reduce_max_axis'),
('reduce_max_axis', 'reduce_max_axis_data'),
('reduce_max_axis_data', 'reduce_max', {'in': 1}),
('reduce_max', 'reduce_max_data'),
('input_data', 'reduce_max', {'out': 0, 'in': 0}),
('input_data', 'sub_first', {'out': 0, 'in': 0}),
('reduce_max_data', 'sub_first', {'in': 1}),
('sub_first', 'sub_first_data'),
('reduce_sum_axis_val', 'reduce_sum_axis'),
('reduce_sum_axis', 'reduce_sum_axis_data'),
('reduce_sum_axis_data', 'reduce_sum', {'in': 1}),
('reduce_sum', 'reduce_sum_data'),
('sub_first_data', 'exp'),
('exp', 'exp_data'),
('exp_data', 'reduce_sum', {'in': 0}),
('reduce_sum_data', 'log'),
('log', 'log_data'),
('log_data', 'sub_second', {'in': 1}),
('sub_second', 'sub_second_data'),
('sub_first_data', 'sub_second', {'out': 0, 'in': 0}),
('sub_second_data', 'result'),
]
ref_net = build_graph(ref_nodes_attributes, ref_edges)
return tf_net, ref_net
test_data_precommit = [
pytest.param(dict(shape=[3, 2, 3, 7, 6], reduction_axis=-1),
marks=pytest.mark.skip(reason="Skipped until fixed"))
]
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_log_softmax_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[1], reduction_axis=-1),
dict(shape=[2, 5], reduction_axis=-1),
dict(shape=[5, 3, 7, 4], reduction_axis=-1),
dict(shape=[3, 2, 3, 7, 6], reduction_axis=-1)]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log_softmax(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
|
src/oci/optimizer/models/update_enrollment_status_details.py
|
Manny27nyc/oci-python-sdk
| 249 |
77580
|
<filename>src/oci/optimizer/models/update_enrollment_status_details.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateEnrollmentStatusDetails(object):
"""
The request object for updating the enrollment status details.
"""
#: A constant which can be used with the status property of a UpdateEnrollmentStatusDetails.
#: This constant has a value of "ACTIVE"
STATUS_ACTIVE = "ACTIVE"
#: A constant which can be used with the status property of a UpdateEnrollmentStatusDetails.
#: This constant has a value of "INACTIVE"
STATUS_INACTIVE = "INACTIVE"
def __init__(self, **kwargs):
"""
Initializes a new UpdateEnrollmentStatusDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param status:
The value to assign to the status property of this UpdateEnrollmentStatusDetails.
Allowed values for this property are: "ACTIVE", "INACTIVE"
:type status: str
"""
self.swagger_types = {
'status': 'str'
}
self.attribute_map = {
'status': 'status'
}
self._status = None
@property
def status(self):
"""
**[Required]** Gets the status of this UpdateEnrollmentStatusDetails.
The Cloud Advisor enrollment status.
Allowed values for this property are: "ACTIVE", "INACTIVE"
:return: The status of this UpdateEnrollmentStatusDetails.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this UpdateEnrollmentStatusDetails.
The Cloud Advisor enrollment status.
:param status: The status of this UpdateEnrollmentStatusDetails.
:type: str
"""
allowed_values = ["ACTIVE", "INACTIVE"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
raise ValueError(
"Invalid value for `status`, must be None or one of {0}"
.format(allowed_values)
)
self._status = status
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
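if __name__ == "__main__":
    # Editor's sketch (not part of the SDK): the status setter validates against
    # the allowed values, so an unknown status raises ValueError.
    details = UpdateEnrollmentStatusDetails()
    details.status = UpdateEnrollmentStatusDetails.STATUS_ACTIVE
    print(details.status)  # ACTIVE
    try:
        details.status = "PAUSED"
    except ValueError as exc:
        print(exc)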
|
src/oddt/build/lib/oddt/shape.py
|
annacarbery/VS_ECFP
| 264 |
77615
|
import logging
import numpy as np
from numpy.linalg import norm
from scipy.stats import moment
from scipy.special import cbrt
def common_usr(molecule, ctd=None, cst=None, fct=None, ftf=None, atoms_type=None):
"""Function used in USR and USRCAT function
Parameters
----------
molecule : oddt.toolkit.Molecule
Molecule to compute USR shape descriptor
ctd : numpy array or None (default = None)
Coordinates of the molecular centroid
If 'None', the point is calculated
cst : numpy array or None (default = None)
Coordinates of the closest atom to the molecular centroid
If 'None', the point is calculated
fct : numpy array or None (default = None)
Coordinates of the farthest atom to the molecular centroid
If 'None', the point is calculated
ftf : numpy array or None (default = None)
Coordinates of the farthest atom
to the farthest atom to the molecular centroid
If 'None', the point is calculated
atoms_type : str or None (default None)
Type of atoms to be selected from atom_dict
If 'None', all atoms are used to calculate shape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (12)
Array describing shape of molecule
"""
if atoms_type is None:
atoms = molecule.atom_dict['coords']
else:
if atoms_type == 'ishydrophobe':
mask = (molecule.atom_dict['ishalogen'] |
molecule.atom_dict['ishydrophobe'] |
(molecule.atom_dict['atomicnum'] == 16))
else:
mask = molecule.atom_dict[atoms_type]
atoms = molecule.atom_dict[mask]['coords']
if len(atoms) == 0:
return np.zeros(12), ((0., 0., 0.),) * 4
if ctd is None:
ctd = atoms.mean(0)
distances_ctd = norm(atoms - ctd, axis=1)
if cst is None:
cst = atoms[distances_ctd.argmin()]
distances_cst = norm(atoms - cst, axis=1)
if fct is None:
fct = atoms[distances_ctd.argmax()]
distances_fct = norm(atoms - fct, axis=1)
if ftf is None:
ftf = atoms[distances_fct.argmax()]
distances_ftf = norm(atoms - ftf, axis=1)
distances_list = [distances_ctd, distances_cst, distances_fct, distances_ftf]
shape_descriptor = np.zeros(12)
for i, distances in enumerate(distances_list):
shape_descriptor[i * 3 + 0] = np.mean(distances)
shape_descriptor[i * 3 + 1] = np.var(distances)
shape_descriptor[i * 3 + 2] = moment(distances, moment=3)
return shape_descriptor, (ctd, cst, fct, ftf)
def usr(molecule):
"""Computes USR shape descriptor based on
<NAME>, <NAME> (2007). Ultrafast shape recognition to search
compound databases for similar molecular shapes. Journal of
computational chemistry, 28(10):1711-23.
http://dx.doi.org/10.1002/jcc.20681
Parameters
----------
molecule : oddt.toolkit.Molecule
Molecule to compute USR shape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (12)
Array describing shape of molecule
"""
return common_usr(molecule)[0]
def usr_cat(molecule):
"""Computes USRCAT shape descriptor based on
<NAME>, <NAME> (2012). USRCAT: real-time ultrafast
shape recognition with pharmacophoric constraints. Journal of
Cheminformatics, 2012 4:27.
http://dx.doi.org/10.1186/1758-2946-4-27
Parameters
----------
molecule : oddt.toolkit.Molecule
Molecule to compute USRCAT shape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (60)
Array describing shape of molecule
"""
all_atoms_shape, points = common_usr(molecule)
ctd, cst, fct, ftf = points
hydrophobic_shape = common_usr(
molecule, ctd, cst, fct, ftf, 'ishydrophobe')[0]
aromatic_shape = common_usr(molecule, ctd, cst, fct, ftf, 'isaromatic')[0]
acceptor_shape = common_usr(molecule, ctd, cst, fct, ftf, 'isacceptor')[0]
donor_shape = common_usr(molecule, ctd, cst, fct, ftf, 'isdonor')[0]
cat_shape = np.hstack((all_atoms_shape, hydrophobic_shape,
aromatic_shape, acceptor_shape, donor_shape))
return np.nan_to_num(cat_shape)
def electroshape(mol):
"""Computes shape descriptor based on
<NAME> al. ElectroShape: fast molecular similarity
calculations incorporating shape, chirality and electrostatics.
J Comput Aided Mol Des 24, 789-801 (2010).
http://dx.doi.org/doi:10.1007/s10822-010-9374-0
Aside from spatial coordinates, atoms' charges are also used
as the fourth dimension to describe shape of the molecule.
Parameters
----------
mol : oddt.toolkit.Molecule
Molecule to compute Electroshape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (15)
Array describing shape of molecule
"""
if (mol.atom_dict['coords'] == 0).all():
raise Exception('Molecule needs 3D coordinates')
if (mol.atom_dict['charge'] == 0).all():
logging.warning('All partial charges are zero. ElectroShape strongly relies on them.')
if np.isnan(mol.atom_dict['charge']).any():
logging.warning('Nan values in charge values of molecule ' + mol.title)
charge = np.nan_to_num(mol.atom_dict['charge'])
mi = 25 # scaling factor converting electron charges to Angstroms
four_dimensions = np.column_stack((mol.atom_dict['coords'], charge * mi))
c1 = four_dimensions.mean(0) # geometric centre of the molecule
distances_c1 = norm(four_dimensions - c1, axis=1)
c2 = four_dimensions[distances_c1.argmax()] # atom position furthest from c1
distances_c2 = norm(four_dimensions - c2, axis=1)
c3 = four_dimensions[distances_c2.argmax()] # atom position furthest from c2
distances_c3 = norm(four_dimensions - c3, axis=1)
vector_a = c2 - c1
vector_b = c3 - c1
vector_as = vector_a[:3] # spatial parts of these vectors -
vector_bs = vector_b[:3] # the first three coordinates
vector_c = ((norm(vector_a) /
(2 * norm(np.cross(vector_as, vector_bs))))
* np.cross(vector_as, vector_bs))
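    # vector_c is perpendicular to the spatial parts of vectors a and b, scaled
    # so that its length equals norm(vector_a) / 2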
vector_c1s = c1[:3]
max_charge = np.array(np.amax(charge) * mi)
min_charge = np.array(np.amin(charge) * mi)
c4 = np.append(vector_c1s + vector_c, max_charge)
c5 = np.append(vector_c1s + vector_c, min_charge)
distances_c4 = norm(four_dimensions - c4, axis=1)
distances_c5 = norm(four_dimensions - c5, axis=1)
distances_list = [distances_c1, distances_c2, distances_c3,
distances_c4, distances_c5]
shape_descriptor = np.zeros(15)
i = 0
for distances in distances_list:
mean = np.mean(distances)
shape_descriptor[0 + i] = mean
shape_descriptor[1 + i] = np.std(distances)
shape_descriptor[2 + i] = cbrt(np.sum(((distances - mean) ** 3) / distances.size))
i += 3
return shape_descriptor
def usr_similarity(mol1_shape, mol2_shape, ow=1., hw=1., rw=1., aw=1., dw=1.):
"""Computes similarity between molecules
Parameters
----------
mol1_shape : numpy array
USR shape descriptor
mol2_shape : numpy array
USR shape descriptor
ow : float (default = 1.)
Scaling factor for all atoms
Only used for USRCAT, ignored for other types
hw : float (default = 1.)
Scaling factor for hydrophobic atoms
Only used for USRCAT, ignored for other types
rw : float (default = 1.)
Scaling factor for aromatic atoms
Only used for USRCAT, ignored for other types
aw : float (default = 1.)
Scaling factor for acceptors
Only used for USRCAT, ignored for other types
dw : float (default = 1.)
Scaling factor for donors
Only used for USRCAT, ignored for other types
Returns
-------
similarity : float from 0 to 1
Similarity between shapes of molecules,
1 indicates identical molecules
"""
if mol1_shape.shape[0] == 12 and mol2_shape.shape[0] == 12:
sim = 1. / (1. + (1. / 12) * np.sum(np.fabs(mol1_shape - mol2_shape)))
elif mol1_shape.shape[0] == 60 and mol2_shape.shape[0] == 60:
w = np.array([ow, hw, rw, aw, dw])
# Normalize weights
w = w / w.sum()
shape_diff = np.abs(mol1_shape - mol2_shape).reshape(-1, 12)
sim = 1. / (1 + (w * (1. / 12) * shape_diff.sum(axis=1)).sum())
elif mol1_shape.shape[0] == 15 and mol2_shape.shape[0] == 15:
sim = 1. / (1 + (1. / 15) * np.sum(np.fabs(mol1_shape - mol2_shape)))
else:
raise Exception('Given vectors are not valid USR shape descriptors '
                        'or come from different methods. Correct vector lengths '
'are: 12 for USR, 60 for USRCAT, 15 for Electroshape')
return sim
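if __name__ == "__main__":
    # Editor's sketch (not part of ODDT): usr_similarity only needs descriptor
    # vectors, so it can be exercised with synthetic 12-element arrays
    # (assumes NumPy >= 1.17 for default_rng).
    rng = np.random.default_rng(0)
    d1 = rng.random(12)
    d2 = d1 + 0.01 * rng.random(12)
    print(usr_similarity(d1, d1))  # identical descriptors give 1.0
    print(usr_similarity(d1, d2))  # near-identical descriptors approach 1.0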
|
examples/maml_toy.py
|
Brikwerk/learn2learn
| 1,774 |
77655
|
#!/usr/bin/env python3
"""
This script demonstrates how to use the MAML implementation of L2L.
Each task i consists of learning the parameters of a Normal distribution N(mu_i, sigma_i).
The parameters mu_i, sigma_i are themselves sampled from a distribution N(mu, sigma).
"""
import torch as th
from torch import nn, optim, distributions as dist
import learn2learn as l2l
DIM = 5
TIMESTEPS = 1000
TASKS_PER_STEP = 50
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.mu = nn.Parameter(th.randn(DIM))
self.sigma = nn.Parameter(th.randn(DIM))
def forward(self, x=None):
return dist.Normal(self.mu, self.sigma)
def main():
task_dist = dist.Normal(th.zeros(2 * DIM), th.ones(2 * DIM))
model = Model()
maml = l2l.algorithms.MAML(model, lr=1e-2)
opt = optim.Adam(maml.parameters())
for i in range(TIMESTEPS):
step_loss = 0.0
for t in range(TASKS_PER_STEP):
# Sample a task
task_params = task_dist.sample()
mu_i, sigma_i = task_params[:DIM], task_params[DIM:]
            # Adaptation: Instantiate a copy of the model
learner = maml.clone()
proposal = learner()
# Adaptation: Compute and adapt to task loss
loss = (mu_i - proposal.mean).pow(2).sum() + (sigma_i - proposal.variance).pow(2).sum()
learner.adapt(loss)
            # Adaptation: Evaluate the effectiveness of adaptation with a fresh
            # forward pass, so the meta-loss reflects the updated parameters
            proposal = learner()
            adapt_loss = (mu_i - proposal.mean).pow(2).sum() + (sigma_i - proposal.variance).pow(2).sum()
# Accumulate the error over all tasks
step_loss += adapt_loss
# Meta-learning step: compute gradient through the adaptation step, automatically.
step_loss = step_loss / TASKS_PER_STEP
print(i, step_loss.item())
opt.zero_grad()
step_loss.backward()
opt.step()
if __name__ == '__main__':
main()
|
apex/amp/lists/torch_overrides.py
|
oyj0594/apex
| 6,523 |
77675
|
import torch
from .. import utils
MODULE = torch
FP16_FUNCS = [
# Low level functions wrapped by torch.nn layers.
# The wrapper layers contain the weights which are then passed in as a parameter
# to these functions.
'conv1d',
'conv2d',
'conv3d',
'conv_transpose1d',
'conv_transpose2d',
'conv_transpose3d',
'conv_tbc',
'prelu',
# BLAS
'addmm',
'addmv',
'addr',
'matmul',
'mm',
'mv',
]
FP32_FUNCS = [
# Pointwise
'acos',
'asin',
'cosh',
'erfinv',
'exp',
'expm1',
'log',
'log10',
'log2',
'reciprocal',
'rsqrt',
'sinh',
'tan',
# Other math
'pow',
# Reduction
'cumprod',
'cumsum',
'dist',
# 'mean',
'norm',
'prod',
'std',
'sum',
'var',
# Misc
'renorm'
]
version_strings = torch.__version__.split('.')
version_major = version_strings[0]
version_minor = version_strings[1]
version_num = float(version_major + "." + version_minor)
# Before torch 1.1, mean must be blacklisted.
if version_num < 1.1:
FP32_FUNCS.append('mean')
# Before CUDA 9.1, batched matmul was missing fast FP16 kernels. We
# check the CUDA version -- if at least 9.1, then put the bmm
# functions on the fp16 list. Otherwise, put them on the fp32 list.
_bmms = ['addbmm',
'baddbmm',
'bmm']
if utils.is_cuda_enabled():
# workaround https://github.com/facebookresearch/maskrcnn-benchmark/issues/802
if utils.get_cuda_version() >= (9, 1, 0):
FP16_FUNCS.extend(_bmms)
else:
FP32_FUNCS.extend(_bmms)
# Multi-tensor fns that may need type promotion
CASTS = [
# Multi-tensor math
'addcdiv',
'addcmul',
'atan2',
'cross',
'bilinear',
'dot',
# Element-wise _or_ tensor-wise math
'add',
'div',
'mul',
# Comparison
'eq',
'equal',
'ge',
'gt',
'le',
'lt',
'ne'
]
# Functions that take sequence arguments. We need to inspect the whole
# sequence and cast to the widest type.
SEQUENCE_CASTS = [
'cat',
'stack'
]
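if __name__ == "__main__":
    # Editor's sketch (not part of apex): an illustration of the "widest type"
    # rule implied by SEQUENCE_CASTS: for sequence functions such as torch.cat,
    # every tensor in the sequence is cast to the widest dtype present.
    a = torch.ones(2, dtype=torch.float16)
    b = torch.ones(2, dtype=torch.float32)
    widest = torch.promote_types(a.dtype, b.dtype)  # torch.float32
    print(torch.cat([a.to(widest), b.to(widest)]).dtype)  # torch.float32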
|
celer/datasets/libsvm.py
|
Badr-MOUFAD/celer
| 137 |
77679
|
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
import libsvmdata
def fetch_libsvm(dataset, replace=False, normalize=True, min_nnz=3):
"""
    This function is deprecated; we now rely on the libsvmdata package.
Parameters
----------
dataset: string
Name of the dataset.
replace: bool
Whether to redownload the data.
normalize: bool
Whether to divide the columns by their norm.
    min_nnz: int
        Columns with strictly fewer than `min_nnz` non-zero entries are discarded.
"""
warnings.simplefilter("always", FutureWarning)
warnings.warn("celer.datasets.fetch_libsvm is deprecated and will be "
"removed in version 0.6. Use the lightweight "
"libsvmadata package instead.", FutureWarning)
return libsvmdata.fetch_libsvm(dataset, replace=replace,
normalize=normalize, min_nnz=min_nnz)
if __name__ == "__main__":
for dataset in libsvmdata.datasets.NAMES:
fetch_libsvm(dataset, replace=False)
|
src/python/test/test_alpha_complex.py
|
m0baxter/gudhi-devel
| 146 |
77694
|
<gh_stars>100-1000
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): <NAME>
Copyright (C) 2016 Inria
Modification(s):
- YYYY/MM Author: Description of the modification
"""
import gudhi as gd
import math
import numpy as np
import pytest
try:
# python3
from itertools import zip_longest
except ImportError:
# python2
from itertools import izip_longest as zip_longest
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
def _empty_alpha(precision):
alpha_complex = gd.AlphaComplex(points=[[0, 0]], precision = precision)
assert alpha_complex.__is_defined() == True
def test_empty_alpha():
for precision in ['fast', 'safe', 'exact']:
_empty_alpha(precision)
def _infinite_alpha(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
alpha_complex = gd.AlphaComplex(points=point_list, precision = precision)
assert alpha_complex.__is_defined() == True
simplex_tree = alpha_complex.create_simplex_tree()
assert simplex_tree.__is_persistence_defined() == False
assert simplex_tree.num_simplices() == 11
assert simplex_tree.num_vertices() == 4
assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
([3], 0.0),
([0, 1], 0.25),
([0, 2], 0.25),
([1, 3], 0.25),
([2, 3], 0.25),
([1, 2], 0.5),
([0, 1, 2], 0.5),
([1, 2, 3], 0.5),
]
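    # Filtration values in an alpha complex are squared circumradii: the unit
    # square's edges appear at (1 / 2) ** 2 = 0.25 and its two triangles at
    # (sqrt(2) / 2) ** 2 = 0.5, matching the assertion above.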
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 0.25),
([0, 1, 2], 0.5),
([0, 2], 0.25),
]
assert simplex_tree.get_cofaces([0], 1) == [([0, 1], 0.25), ([0, 2], 0.25)]
assert point_list[0] == alpha_complex.get_point(0)
assert point_list[1] == alpha_complex.get_point(1)
assert point_list[2] == alpha_complex.get_point(2)
assert point_list[3] == alpha_complex.get_point(3)
try:
alpha_complex.get_point(4) == []
except IndexError:
pass
else:
assert False
try:
alpha_complex.get_point(125) == []
except IndexError:
pass
else:
assert False
def test_infinite_alpha():
for precision in ['fast', 'safe', 'exact']:
_infinite_alpha(precision)
def _filtered_alpha(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
filtered_alpha = gd.AlphaComplex(points=point_list, precision = precision)
simplex_tree = filtered_alpha.create_simplex_tree(max_alpha_square=0.25)
assert simplex_tree.num_simplices() == 8
assert simplex_tree.num_vertices() == 4
assert point_list[0] == filtered_alpha.get_point(0)
assert point_list[1] == filtered_alpha.get_point(1)
assert point_list[2] == filtered_alpha.get_point(2)
assert point_list[3] == filtered_alpha.get_point(3)
try:
filtered_alpha.get_point(4) == []
except IndexError:
pass
else:
assert False
try:
filtered_alpha.get_point(125) == []
except IndexError:
pass
else:
assert False
assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
([3], 0.0),
([0, 1], 0.25),
([0, 2], 0.25),
([1, 3], 0.25),
([2, 3], 0.25),
]
assert simplex_tree.get_star([0]) == [([0], 0.0), ([0, 1], 0.25), ([0, 2], 0.25)]
assert simplex_tree.get_cofaces([0], 1) == [([0, 1], 0.25), ([0, 2], 0.25)]
def test_filtered_alpha():
for precision in ['fast', 'safe', 'exact']:
_filtered_alpha(precision)
def _safe_alpha_persistence_comparison(precision):
#generate periodic signal
time = np.arange(0, 10, 1)
signal = [math.sin(x) for x in time]
delta = math.pi
delayed = [math.sin(x + delta) for x in time]
#construct embedding
embedding1 = [[signal[i], -signal[i]] for i in range(len(time))]
embedding2 = [[signal[i], delayed[i]] for i in range(len(time))]
#build alpha complex and simplex tree
alpha_complex1 = gd.AlphaComplex(points=embedding1, precision = precision)
simplex_tree1 = alpha_complex1.create_simplex_tree()
alpha_complex2 = gd.AlphaComplex(points=embedding2, precision = precision)
simplex_tree2 = alpha_complex2.create_simplex_tree()
diag1 = simplex_tree1.persistence()
diag2 = simplex_tree2.persistence()
for (first_p, second_p) in zip_longest(diag1, diag2):
assert first_p[0] == pytest.approx(second_p[0])
assert first_p[1] == pytest.approx(second_p[1])
def test_safe_alpha_persistence_comparison():
# Won't work for 'fast' version
_safe_alpha_persistence_comparison('safe')
_safe_alpha_persistence_comparison('exact')
def _delaunay_complex(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
filtered_alpha = gd.AlphaComplex(points=point_list, precision = precision)
simplex_tree = filtered_alpha.create_simplex_tree(default_filtration_value = True)
assert simplex_tree.num_simplices() == 11
assert simplex_tree.num_vertices() == 4
assert point_list[0] == filtered_alpha.get_point(0)
assert point_list[1] == filtered_alpha.get_point(1)
assert point_list[2] == filtered_alpha.get_point(2)
assert point_list[3] == filtered_alpha.get_point(3)
try:
filtered_alpha.get_point(4) == []
except IndexError:
pass
else:
assert False
try:
filtered_alpha.get_point(125) == []
except IndexError:
pass
else:
assert False
for filtered_value in simplex_tree.get_filtration():
assert math.isnan(filtered_value[1])
for filtered_value in simplex_tree.get_star([0]):
assert math.isnan(filtered_value[1])
for filtered_value in simplex_tree.get_cofaces([0], 1):
assert math.isnan(filtered_value[1])
def test_delaunay_complex():
for precision in ['fast', 'safe', 'exact']:
_delaunay_complex(precision)
def _3d_points_on_a_plane(precision, default_filtration_value):
alpha = gd.AlphaComplex(off_file='alphacomplexdoc.off', precision = precision)
simplex_tree = alpha.create_simplex_tree(default_filtration_value = default_filtration_value)
assert simplex_tree.dimension() == 2
assert simplex_tree.num_vertices() == 7
assert simplex_tree.num_simplices() == 25
def test_3d_points_on_a_plane():
off_file = open("alphacomplexdoc.off", "w")
off_file.write("OFF \n" \
"7 0 0 \n" \
"1.0 1.0 0.0\n" \
"7.0 0.0 0.0\n" \
"4.0 6.0 0.0\n" \
"9.0 6.0 0.0\n" \
"0.0 14.0 0.0\n" \
"2.0 19.0 0.0\n" \
"9.0 17.0 0.0\n" )
off_file.close()
for default_filtration_value in [True, False]:
for precision in ['fast', 'safe', 'exact']:
_3d_points_on_a_plane(precision, default_filtration_value)
def _3d_tetrahedrons(precision):
points = 10*np.random.rand(10, 3)
alpha = gd.AlphaComplex(points=points, precision = precision)
st_alpha = alpha.create_simplex_tree(default_filtration_value = False)
# New AlphaComplex for get_point to work
delaunay = gd.AlphaComplex(points=points, precision = precision)
st_delaunay = delaunay.create_simplex_tree(default_filtration_value = True)
delaunay_tetra = []
for sk in st_delaunay.get_skeleton(4):
if len(sk[0]) == 4:
tetra = [delaunay.get_point(sk[0][0]),
delaunay.get_point(sk[0][1]),
delaunay.get_point(sk[0][2]),
delaunay.get_point(sk[0][3]) ]
delaunay_tetra.append(sorted(tetra, key=lambda tup: tup[0]))
alpha_tetra = []
for sk in st_alpha.get_skeleton(4):
if len(sk[0]) == 4:
tetra = [alpha.get_point(sk[0][0]),
alpha.get_point(sk[0][1]),
alpha.get_point(sk[0][2]),
alpha.get_point(sk[0][3]) ]
alpha_tetra.append(sorted(tetra, key=lambda tup: tup[0]))
# Check the tetrahedrons from one list are in the second one
assert len(alpha_tetra) == len(delaunay_tetra)
for tetra_from_del in delaunay_tetra:
assert tetra_from_del in alpha_tetra
def test_3d_tetrahedrons():
for precision in ['fast', 'safe', 'exact']:
_3d_tetrahedrons(precision)
|
openfermioncirq/experiments/hfvqe/molecular_example_odd_qubits.py
|
unpilbaek/OpenFermion-Cirq
| 278 |
77720
|
<reponame>unpilbaek/OpenFermion-Cirq<gh_stars>100-1000
import os
from typing import Tuple
import numpy as np
import openfermion as of
import scipy as sp
import openfermioncirq.experiments.hfvqe as hfvqe
from openfermioncirq.experiments.hfvqe.gradient_hf import rhf_minimization
from openfermioncirq.experiments.hfvqe.objective import (
RestrictedHartreeFockObjective, generate_hamiltonian)
def make_h3_2_5() -> Tuple[RestrictedHartreeFockObjective, of.MolecularData, np.
ndarray, np.ndarray, np.ndarray]:
    # load the molecule from molecular data
h3_2_5_path = os.path.join(
hfvqe.__path__[0],
'molecular_data/hydrogen_chains/h_3_p_sto-3g/bond_distance_2.5')
molfile = os.path.join(h3_2_5_path,
'H3_plus_sto-3g_singlet_linear_r-2.5.hdf5')
molecule = of.MolecularData(filename=molfile)
molecule.load()
S = np.load(os.path.join(h3_2_5_path, 'overlap.npy'))
Hcore = np.load(os.path.join(h3_2_5_path, 'h_core.npy'))
TEI = np.load(os.path.join(h3_2_5_path, 'tei.npy'))
_, X = sp.linalg.eigh(Hcore, S)
obi = of.general_basis_change(Hcore, X, (1, 0))
tbi = np.einsum('psqr', of.general_basis_change(TEI, X, (1, 0, 1, 0)))
molecular_hamiltonian = generate_hamiltonian(obi, tbi,
molecule.nuclear_repulsion)
rhf_objective = RestrictedHartreeFockObjective(molecular_hamiltonian,
molecule.n_electrons)
scipy_result = rhf_minimization(rhf_objective)
return rhf_objective, molecule, scipy_result.x, obi, tbi
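if __name__ == "__main__":
    # Editor's sketch (not part of the example): running the loader requires the
    # molecular_data files bundled with hfvqe for the H3+ chain at r = 2.5.
    rhf_objective, molecule, params, obi, tbi = make_h3_2_5()
    print(molecule.n_electrons, params.shape)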
|
binance_d/model/commissionrate.py
|
vinayakpathak/Binance_Futures_python
| 640 |
77743
|
<gh_stars>100-1000
"""
Return the taker/maker commission rate.
"""
class CommissionRate:
def __init__(self):
self.symbol = ""
self.makerCommissionRate = 0.0
self.takerCommissionRate = 0.0
@staticmethod
def json_parse(json_data):
result = CommissionRate()
result.symbol = json_data.get_string("symbol")
result.makerCommissionRate = json_data.get_float("makerCommissionRate")
result.takerCommissionRate = json_data.get_float("takerCommissionRate")
return result
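if __name__ == "__main__":
    # Editor's sketch (not part of the SDK): json_parse expects the SDK's JSON
    # wrapper (exposing get_string/get_float); a tiny stand-in shows the mapping.
    class _FakeJson:
        def __init__(self, data):
            self._data = data

        def get_string(self, key):
            return str(self._data[key])

        def get_float(self, key):
            return float(self._data[key])

    rate = CommissionRate.json_parse(_FakeJson({
        "symbol": "BTCUSD_PERP",
        "makerCommissionRate": 0.00015,
        "takerCommissionRate": 0.0004,
    }))
    print(rate.symbol, rate.makerCommissionRate, rate.takerCommissionRate)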
|
tests/test_describe_graph_line.py
|
winbo/GitSavvy
| 2,058 |
77751
|
from unittesting import DeferrableTestCase
from GitSavvy.tests.parameterized import parameterized as p
from GitSavvy.core.commands.log_graph import describe_graph_line
examples = [
(
"|",
{},
None
),
(
"● a3062b2 (HEAD -> optimize-graph-render, origin/optimize-graph-render) Abort .. | Thu 21:07, herr kaste",
{"origin"},
{
"commit": "a3062b2",
"HEAD": "optimize-graph-render",
"branches": ["optimize-graph-render", "origin/optimize-graph-render"],
"local_branches": ["optimize-graph-render"]
}
),
(
"● a3062b2 (HEAD, origin/optimize-graph-render) Abort re.. | Thu 21:07, herr kaste",
{"origin"},
{
"commit": "a3062b2",
"HEAD": "a3062b2",
"branches": ["origin/optimize-graph-render"]
}
),
(
"● a3062b2 (HEAD -> optimize-graph-render, feat/optimize-graph-render) Abort .. | Thu 21:07, herr kaste",
{"origin"},
{
"commit": "a3062b2",
"HEAD": "optimize-graph-render",
"branches": ["optimize-graph-render", "feat/optimize-graph-render"],
"local_branches": ["optimize-graph-render", "feat/optimize-graph-render"]
}
),
(
"● ad6d88c (HEAD) Use view from the argument instead of on self | Thu 20:56, herr kaste",
{"origin"},
{
"commit": "ad6d88c",
"HEAD": "ad6d88c",
}
),
(
"● ad6d88c Use view from the argument instead of on self | Thu 20:56, herr kaste",
{"origin"},
{
"commit": "ad6d88c",
}
),
(
"| ● 153dca0 (HEAD, tag: 2.20.0) Merge branch 'dev' (2 months ago) <<NAME>>",
{"origin"},
{
"commit": "153dca0",
"HEAD": "153dca0",
"tags": ["2.20.0"]
}
),
]
class TestDescribeGraphLine(DeferrableTestCase):
@p.expand(examples)
def test_a(self, input_line, remotes, output):
self.assertEqual(output, describe_graph_line(input_line, remotes))
|
tensorflow/python/keras/_impl/keras/layers/__init__.py
|
M155K4R4/Tensorflow
| 522 |
77797
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers module.
"""
# pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.engine import Input
from tensorflow.python.keras._impl.keras.engine import InputLayer
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.layers.advanced_activations import *
from tensorflow.python.keras._impl.keras.layers.convolutional import *
from tensorflow.python.keras._impl.keras.layers.convolutional_recurrent import *
from tensorflow.python.keras._impl.keras.layers.core import *
from tensorflow.python.keras._impl.keras.layers.embeddings import *
from tensorflow.python.keras._impl.keras.layers.local import *
from tensorflow.python.keras._impl.keras.layers.merge import *
from tensorflow.python.keras._impl.keras.layers.noise import *
from tensorflow.python.keras._impl.keras.layers.normalization import *
from tensorflow.python.keras._impl.keras.layers.pooling import *
from tensorflow.python.keras._impl.keras.layers.recurrent import *
from tensorflow.python.keras._impl.keras.layers.serialization import deserialize
from tensorflow.python.keras._impl.keras.layers.serialization import serialize
from tensorflow.python.keras._impl.keras.layers.wrappers import *
|
src/test/tests/unit/atts_assign.py
|
visit-dav/vis
| 226 |
77837
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: atts_assign.py
#
# Tests: Behavior of assignment for attribute objects. Ensures good cases
# succeed and bad cases fail with specific python exceptions. Tests variety
# of types present in members of VisIt attribute objects. Tests both
# assignment usage (e.g. atts.memberName=...) and setter function usage
# (e.g. atts.SetMemberName(...))
#
# <NAME>, Tue Jun 8 15:51:59 PDT 2021
#
# Modifications:
# <NAME>, Tue July 27, 2021
# Assigning Max32BitInt+1 to int on Windows causes TypeError, not
# ValueError, so change expected results in those cases.
#
# ----------------------------------------------------------------------------
import copy, io, sys
# Some useful global variables
X = [2,4,6]
Max32BitInt = 2147483647
Max32BitInt1 = Max32BitInt+1
MaxIntAs32BitFloat = 16777216
MaxIntAs32BitFloat1 = MaxIntAs32BitFloat+1
MaxIntAs64BitFloat = 9007199254740992
MaxIntAs64BitFloat1 = MaxIntAs64BitFloat+1
Max32BitFloat = 3.402823E+38
Max32BitFloatA = 3.402820E+37 # One order mag down from Max
Max32BitFloatB = 3.402823E+39 # One order mag up from Max
Min32BitFloat = 1.175494E-38
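# Editor's note: these bounds mark exact-representability limits probed below:
# 2**31 - 1 for 32-bit signed ints, 2**24 for integers stored exactly in
# 32-bit floats, and 2**53 for integers stored exactly in 64-bit doubles.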
# version of repr that strips parens at end
def repr2(s):
return repr(s).lstrip('(').rstrip(')')
def TestAssignmentToTuple():
TestSection('Assignment to tuple, "point1", member (of CylinderAttributes())')
ca = CylinderAttributes()
# Non-existent member name 'point'
try:
ca.point = 1,2,3
TestFOA('ca.point=1,2,3', LINE())
except NameError:
TestPOA('ca.point=1,2,3')
pass
except:
TestFOA('ca.point=1,2,3', LINE())
pass
# Non-existent member name 'point'
try:
ca.SetPoint(1,2,3)
TestFOA('ca.SetPoint(1,2,3)', LINE())
except ValueError:
TestPOA('ca.SetPoint(1,2,3)')
pass
except:
TestFOA('ca.SetPoint(1,2,3)', LINE())
pass
# CSV too short
try:
ca.point1 = 1,2
TestFOA('ca.point1=1,2', LINE())
except TypeError:
TestPOA('ca.point1=1,2')
pass
except:
TestFOA('ca.point1=1,2', LINE())
pass
# CSV too long
try:
ca.point1 = 1,2,3,4
TestFOA('ca.point1=1,2,3,4', LINE())
except TypeError:
TestPOA('ca.point1=1,2,3,4')
pass
except:
TestFOA('ca.point1=1,2,3,4', LINE())
pass
# The above cases can't be put in a loop. Put remaining cases in a loop
fails = [(1,2), (1,2,3,4), '123', (1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3)]
for i in range(len(fails)):
try:
ca.point1 = fails[i]
TestFOA('ca.point1=%s'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('ca.point1=%s'%repr2(fails[i]))
pass
except:
TestFOA('ca.point1=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetPoint1(fails[i])
TestFOA('ca.SetPoint1(%s)'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetPoint1(%s)'%repr2(fails[i]))
pass
except:
TestFOA('ca.SetPoint1(%s)'%repr2(fails[i]), LINE())
pass
try:
ca.point1 = 1,2,3
TestPOA('ca.point1=1,2,3')
except:
TestFOA('ca.point1=1,2,3', LINE())
pass
works = [(1,2,3), (1.1,2.2,3.3), tuple(X)]
for i in range(len(works)):
try:
ca.point1 = works[i]
TestPOA('ca.point1=%s'%repr2(works[i]))
except:
TestFOA('ca.point1=%s'%repr2(works[i]), LINE())
pass
for i in range(len(works)):
try:
ca.SetPoint1(*works[i])
TestPOA('ca.SetPoint1(%s)'%repr2(works[i]))
except:
TestFOA('ca.SetPoint1(%s)'%repr2(works[i]), LINE())
pass
def TestAssignmentToBool():
TestSection('Assignment to bool member, "inverse", (of CylinderAttributes())')
ca = CylinderAttributes()
try:
ca.inverse = 1,2
TestFOA('ca.inverse=1,2', LINE())
except TypeError:
TestPOA('ca.inverse=1,2')
pass
except:
TestFOA('ca.inverse=1,2', LINE())
pass
fails = [ '123', 1+2j, X, None, 5]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
ca.inverse = fails[i]
TestFOA('ca.inverse=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ca.inverse=%s'%repr(fails[i]))
pass
except:
TestFOA('ca.inverse=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetInverse(fails[i])
TestFOA('ca.SetInverse(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ca.SetInverse(%s)'%repr(fails[i]))
pass
except:
TestFOA('ca.SetInverse(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, True, False]
for i in range(len(works)):
try:
ca.inverse = works[i]
TestPOA('ca.inverse=%s'%repr(works[i]))
except:
TestFOA('ca.inverse=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetInverse(works[i])
TestPOA('ca.SetInverse(%s)'%repr(works[i]))
except:
TestFOA('ca.SetInverse(%s)'%repr(works[i]), LINE())
def TestAssignmentToInt():
TestSection('Assignment to int member, "samplesPerRay", (of VolumeAttributes())')
va = VolumeAttributes()
try:
va.samplesPerRay = 1,2
TestFOA('va.samplesPerRay=1,2', LINE())
except TypeError:
TestPOA('va.samplesPerRay=1,2')
pass
except:
TestFOA('va.samplesPerRay=1,2', LINE())
pass
fails = [ '123', 1+2j, None, X, Max32BitInt1]
if sys.platform.startswith("win"):
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError]
else:
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
va.samplesPerRay = fails[i]
TestFOA('va.samplesPerRay=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.samplesPerRay=%s'%repr(fails[i]))
pass
except:
TestFOA('va.samplesPerRay=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
va.SetSamplesPerRay(fails[i])
TestFOA('va.SetSamplesPerRay(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.SetSamplesPerRay(%s)'%repr(fails[i]))
pass
except:
TestFOA('va.SetSamplesPerRay(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, -1, 5, True, False, Max32BitInt]
for i in range(len(works)):
try:
va.samplesPerRay = works[i]
TestPOA('va.samplesPerRay=%s'%repr(works[i]))
except:
TestFOA('va.samplesPerRay=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
va.SetSamplesPerRay(works[i])
TestPOA('va.SetSamplesPerRay(%s)'%repr(works[i]))
except:
TestFOA('va.SetSamplesPerRay(%s)'%repr(works[i]), LINE())
def TestAssignmentToFloat():
TestSection('Assignment to float member, "opacityAttenuation", (of VolumeAttributes())')
va = VolumeAttributes()
try:
va.opacityAttenuation = 1,2
TestFOA('va.opacityAttenuation=1,2', LINE())
except TypeError:
TestPOA('va.opacityAttenuation=1,2')
pass
except:
TestFOA('va.opacityAttenuation=1,2', LINE())
pass
fails = [ '123', 1+2j, None, X, Max32BitFloatB]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
va.opacityAttenuation = fails[i]
TestFOA('va.opacityAttenuation=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.opacityAttenuation=%s'%repr(fails[i]))
pass
except:
TestFOA('va.opacityAttenuation=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
va.SetOpacityAttenuation(fails[i])
TestFOA('va.SetOpacityAttenuation(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.SetOpacityAttenuation(%s)'%repr(fails[i]))
pass
except:
TestFOA('va.SetOpacityAttenuation(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, -1, 0.3, Max32BitFloatA, True, False]
for i in range(len(works)):
try:
va.opacityAttenuation = works[i]
TestPOA('va.opacityAttenuation=%s'%repr(works[i]))
except:
TestFOA('va.opacityAttenuation=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
va.SetOpacityAttenuation(works[i])
TestPOA('va.SetOpacityAttenuation(%s)'%repr(works[i]))
except:
TestFOA('va.SetOpacityAttenuation(%s)'%repr(works[i]), LINE())
def TestAssignmentToDouble():
TestSection('Assignment to double member, "radius", (of CylinderAttributes())')
ca = CylinderAttributes()
try:
ca.radius = 1,2
TestFOA('ca.radius=1,2', LINE())
except TypeError:
TestPOA('ca.radius=1,2')
pass
except:
TestFOA('ca.radius=1,2', LINE())
pass
fails = ['123', 1+2j, None, X]
for i in range(len(fails)):
try:
ca.radius = fails[i]
TestFOA('ca.radius=%s'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.radius=%s'%repr(fails[i]))
pass
except:
TestFOA('ca.radius=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetRadius(fails[i])
TestFOA('ca.SetRadius(%s)'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetRadius(%s)'%repr(fails[i]))
pass
except:
TestFOA('ca.SetRadius(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, -1, 5.5, 1.1E-479, 1.1E+479, True, False]
for i in range(len(works)):
try:
ca.radius = works[i]
TestPOA('ca.radius=%s'%repr(works[i]))
except:
TestFOA('ca.radius=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetRadius(works[i])
TestPOA('ca.SetRadius(%s)'%repr(works[i]))
except:
TestFOA('ca.SetRadius(%s)'%repr(works[i]), LINE())
def TestAssignmentToString():
TestSection('Assignment to string member, "designator", (of CurveAttributes())')
ca = CurveAttributes()
try:
ca.designator = "123","abc"
TestFOA('ca.designator="123","abc"', LINE())
except TypeError:
TestPOA('ca.designator="123","abc"')
pass
except:
TestFOA('ca.designator="123","abc"', LINE())
pass
fails = [0, 1, 1.1, 1+2j, None, X]
for i in range(len(fails)):
try:
ca.designator = fails[i]
TestFOA('ca.designator=%s'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.designator=%s'%repr(fails[i]))
pass
except:
TestFOA('ca.designator=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetDesignator(fails[i])
TestFOA('ca.SetDesignator(%s)'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetDesignator(%s)'%repr(fails[i]))
pass
except:
TestFOA('ca.SetDesignator(%s)'%repr(fails[i]), LINE())
pass
works = ['123', 'abc', '']
for i in range(len(works)):
try:
ca.designator = works[i]
TestPOA('ca.designator=%s'%repr(works[i]))
except:
TestFOA('ca.designator=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetDesignator(works[i])
TestPOA('ca.SetDesignator(%s)'%repr(works[i]))
except:
TestFOA('ca.SetDesignator(%s)'%repr(works[i]), LINE())
def TestAssignmentToGlyphType():
TestSection('Assignment to GlyphType member, "pointType", (of MeshAttributes())')
ma = MeshAttributes()
# Test direct assignment with = operator
try:
ma.pointType = 1
TestPOA('ma.pointType=1')
except:
TestFOA('ma.pointType=1', LINE())
pass
fails = [ '123', 1+2j, None, X, -1, 123123123123123123123123123123]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError, TypeError]
for i in range(len(fails)):
try:
ma.pointType = fails[i]
TestFOA('ma.pointType=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.pointType=%s'%repr(fails[i]))
pass
except:
TestFOA('ma.pointType=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ma.SetPointType(fails[i])
TestFOA('ma.SetPointType(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.SetPointType(%s)'%repr(fails[i]))
pass
except:
TestFOA('ma.SetPointType(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, 5, True, False, ma.Point]
for i in range(len(works)):
try:
ma.pointType = works[i]
TestPOA('ma.pointType=%s'%repr(works[i]))
except:
TestFOA('ma.pointType=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ma.SetPointType(works[i])
TestPOA('ma.SetPointType(%s)'%repr(works[i]))
except:
TestFOA('ma.SetPointType(%s)'%repr(works[i]), LINE())
def TestAssignmentToEnum():
TestSection('Assignment to Enum member, "smoothingLevel", (of MeshAttributes())')
ma = MeshAttributes()
# Test direct assignment with = operator
try:
ma.smoothingLevel = 1
TestPOA('ma.smoothingLevel=1')
except:
TestFOA('ma.smoothingLevel=1', LINE())
pass
fails = [ '123', 1+2j, None, X, -1, 123123123, 123123123123123123123123123123]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError, ValueError, TypeError]
for i in range(len(fails)):
try:
ma.smoothingLevel = fails[i]
TestFOA('ma.smoothingLevel=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.smoothingLevel=%s'%repr(fails[i]))
pass
except:
TestFOA('ma.smoothingLevel=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ma.SetSmoothingLevel(fails[i])
TestFOA('ma.SetSmoothingLevel(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.SetSmoothingLevel(%s)'%repr(fails[i]))
pass
except:
TestFOA('ma.SetSmoothingLevel(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, 2, True, False, ma.Fast]
for i in range(len(works)):
try:
ma.smoothingLevel = works[i]
TestPOA('ma.smoothingLevel=%s'%repr(works[i]))
except:
TestFOA('ma.smoothingLevel=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ma.SetSmoothingLevel(works[i])
            TestPOA('ma.SetSmoothingLevel(%s)'%repr(works[i]))
except:
TestFOA('ma.SetSmoothingLevel(%s)'%repr(works[i]), LINE())
def TestAssignmentToUCharVector():
TestSection('Assignment to ucharVector member, "changedColors", (of MultiCurveAttributes())')
mca = MultiCurveAttributes()
# Test direct assignment with = operator
try:
mca.changedColors = 1,2,3
TestPOA('mca.changedColors=1,2,3')
except:
TestFOA('mca.changedColors=1,2,3', LINE())
pass
fails = [(1,123123123123123123123123123123,3), (1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3), ('123',)]
for i in range(len(fails)):
try:
mca.changedColors = fails[i]
TestFOA('mca.changedColors=%s'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('mca.changedColors=%s'%repr2(fails[i]))
pass
except:
TestFOA('mca.changedColors=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
mca.SetChangedColors(*fails[i])
TestFOA('mca.SetChangedColors(%s)'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('mca.SetChangedColors(%s)'%repr2(fails[i]))
pass
except:
TestFOA('mca.SetChangedColors(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3), tuple(X), (1,True,3), (1,False,3)]
for i in range(len(works)):
try:
mca.changedColors = works[i]
TestPOA('mca.changedColors=%s'%repr2(works[i]))
except:
TestFOA('mca.changedColors=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
mca.SetChangedColors(*works[i])
TestPOA('mca.SetChangedColors(%s)'%repr2(works[i]))
except:
TestFOA('mca.SetChangedColors(%s)'%repr2(works[i]), LINE())
def TestAssignmentToIntVector():
TestSection('Assignment to intVector member, "index", (of OnionPeelAttributes())')
opa = OnionPeelAttributes()
# Test direct assignment with = operator
try:
opa.index = 1,2,3
TestPOA('opa.index=1,2,3')
except:
TestFOA('opa.index=1,2,3', LINE())
pass
fails = [(Max32BitInt1,), (1+2j,), ('b',), (None,), (1,Max32BitInt1,3),
(1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3)]
if sys.platform.startswith("win"):
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError,
TypeError, TypeError, TypeError, TypeError]
else:
excpts = [ValueError, TypeError, TypeError, TypeError, ValueError,
TypeError, TypeError, TypeError, TypeError]
for i in range(len(fails)):
try:
opa.index = fails[i]
TestFOA('opa.index=%s'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('opa.index=%s'%repr2(fails[i]))
pass
except:
TestFOA('opa.index=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
opa.SetIndex(*fails[i])
TestFOA('opa.SetIndex(%s)'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('opa.SetIndex(%s)'%repr2(fails[i]))
pass
except:
TestFOA('opa.SetIndex(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3), X, tuple(X), (1,True,3), (1,False,3), (1,Max32BitInt,3)]
for i in range(len(works)):
try:
opa.index = works[i]
TestPOA('opa.index=%s'%repr2(works[i]))
except:
TestFOA('opa.index=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
opa.SetIndex(*works[i])
TestPOA('opa.SetIndex(%s)'%repr2(works[i]))
except:
TestFOA('opa.SetIndex(%s)'%repr2(works[i]), LINE())
def TestAssignmentToDoubleVector():
    TestSection('Assignment to doubleVector member, "contourValue", (of ContourAttributes())')
ca = ContourAttributes()
# Test direct assignment with = operator
try:
ca.contourValue = 1,2,3
TestPOA('ca.contourValue=1,2,3')
except:
TestFOA('ca.contourValue=1,2,3', LINE())
pass
fails = [(1+2j,), ('b',), (None,), (1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3)]
for i in range(len(fails)):
try:
ca.contourValue = fails[i]
TestFOA('ca.contourValue=%s'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('ca.contourValue=%s'%repr2(fails[i]))
pass
except:
TestFOA('ca.contourValue=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetContourValue(*fails[i])
TestFOA('ca.SetContourValue(%s)'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetContourValue(%s)'%repr2(fails[i]))
pass
except:
TestFOA('ca.SetContourValue(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3), X, tuple(X), (1,True,3), (1,False,3)]
for i in range(len(works)):
try:
ca.contourValue = works[i]
TestPOA('ca.contourValue=%s'%repr2(works[i]))
except:
TestFOA('ca.contourValue=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetContourValue(*works[i])
TestPOA('ca.SetContourValue(%s)'%repr2(works[i]))
except:
TestFOA('ca.SetContourValue(%s)'%repr2(works[i]), LINE())
def TestAssignmentToUCharArray():
TestSection('Assignment to ucharArray member, "freeformOpacity", (of VolumeAttributes())')
arr = [17,]*256
va = VolumeAttributes()
# Test assigning to individual entry via direct (operator =) assignment
try:
va.freeformOpacity = 3,17
TestPOA('va.freeformOpacity=3,17')
except:
TestFOA('va.freeformOpacity=3,17', LINE())
pass
# Test assigning to individual entry via Set method
try:
va.SetFreeformOpacity(3,17)
TestPOA('va.SetFreeformOpacity(3,17)')
except:
TestFOA('va.SetFreeformOpacity(3,17)', LINE())
pass
# Test assigning to whole array via (operator =) assignment
try:
va.freeformOpacity = tuple(arr)
TestPOA('va.freeformOpacity=tuple(arr)')
except:
TestFOA('va.freeformOpacity=tuple(arr)', LINE())
pass
# Test assigning to whole array via Set method
try:
va.SetFreeformOpacity(*tuple(arr))
TestPOA('va.SetFreeformOpacity(*tuple(arr))')
except:
TestFOA('va.SetFreeformOpacity(*tuple(arr))', LINE())
pass
# Test assigning to individual entry via direct (operator =) assignment
# failures for type of second argument (color value)
fails = [ (3,None), (3,1+2j), (3,X), (3,'123'), (None,17), (1+2j,17), (X,17),('123',17), (-3,17), (3,1700)]
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError, TypeError, TypeError, TypeError, IndexError, ValueError]
for i in range(len(fails)):
try:
va.freeformOpacity = fails[i][0],fails[i][1]
TestFOA('va.freeformOpacity=%s,%s'%(repr(fails[i][0]),repr(fails[i][1])), LINE())
except excpts[i]:
TestPOA('va.freeformOpacity=%s,%s'%(repr(fails[i][0]),repr(fails[i][1])))
pass
except:
TestFOA('va.freeformOpacity=%s,%s'%(repr(fails[i][0]),repr(fails[i][1])), LINE())
pass
for i in range(len(fails)):
try:
va.SetFreeformOpacity(fails[i][0],fails[i][1])
TestFOA('va.SetFreeformOpacity(%s,%s)'%(repr(fails[i][0]),repr(fails[i][1])), LINE())
except excpts[i]:
TestPOA('va.SetFreeformOpacity(%s,%s)'%(repr(fails[i][0]),repr(fails[i][1])))
pass
except:
TestFOA('va.SetFreeformOpacity(%s,%s)'%(repr(fails[i][0]),repr(fails[i][1])), LINE())
pass
# Test assigning to whole member via direct (operator =) assignment
try:
va.freeformOpacity = (17,)*256
TestPOA('va.freeformOpacity=(17,)*256')
except:
TestFOA('va.freeformOpacity=(17,)*256', LINE())
pass
# Test assigning to whole member via Set method
try:
va.SetFreeformOpacity(*(17,)*256)
        TestPOA('va.SetFreeformOpacity(*(17,)*256)')
except:
        TestFOA('va.SetFreeformOpacity(*(17,)*256)', LINE())
pass
    # Test assigning to whole member via direct (operator =) assignment:
    # failures for bad element values within the tuple
arr1 = copy.deepcopy(arr)
arr2 = copy.deepcopy(arr)
arr3 = copy.deepcopy(arr)
arr4 = copy.deepcopy(arr)
arr5 = copy.deepcopy(arr)
arr1[3] = None
arr2[3] = 1+2j
arr3[3] = X
arr4[3] = (1,2,3)
arr5[3] = '123'
fails = [tuple(arr1), tuple(arr2), tuple(arr3), tuple(arr4), tuple(arr5)]
for i in range(len(fails)):
try:
va.freeformOpacity = fails[i]
TestFOA('va.freeformOpacity=%s'%repr(fails[i][:7]).replace(')',', ...'), LINE())
except TypeError:
TestPOA('va.freeformOpacity=%s'%repr(fails[i][:7]).replace(')',', ...'))
pass
except:
TestFOA('va.freeformOpacity=%s'%repr(fails[i][:7]).replace(')',', ...'), LINE())
pass
# Test star-deref of tuple
for i in range(len(fails)):
try:
va.SetFreeformOpacity(*fails[i])
TestFOA('va.SetFreeformOpacity%s'%repr(fails[i][:7]).replace(')',', ...)'), LINE())
except TypeError:
TestPOA('va.SetFreeformOpacity%s'%repr(fails[i][:7]).replace(')',', ...)'))
pass
except:
TestFOA('va.SetFreeformOpacity%s'%repr(fails[i][:7]).replace(')',', ...)'), LINE())
pass
# Test just passing the tuple
for i in range(len(fails)):
try:
va.SetFreeformOpacity(fails[i])
TestFOA('va.SetFreeformOpacity(fails[%d])'%i, LINE())
except TypeError:
TestPOA('va.SetFreeformOpacity(fails[%d])'%i)
pass
except:
TestFOA('va.SetFreeformOpacity(fails[%d])'%i, LINE())
pass
def TestAssignmentToIntArray():
TestSection('Assignment to intArray member, "reflections", (of ReflectAttributes())')
ra = ReflectAttributes()
# Test assigning via (operator =) assignment
try:
ra.reflections = 0,1,0,1,0,1,0,1
TestPOA('ra.reflections=0,1,0,1,0,1,0,1')
except:
TestFOA('ra.reflections=0,1,0,1,0,1,0,1', LINE())
pass
fails = [(0,1,None,1,0,1,0,1), (0,1,1+2j,1,0,1,0,1), (0,1,X,1,0,1,0,1),
(0,1,Max32BitInt1,1,0,1,0,1), (0,1,'123',1,0,1,0,1),
(0,1,0,1,0,1,0,1,1), (0,1,0,1,0,1,0)]
if sys.platform.startswith("win"):
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError, TypeError, TypeError]
else:
excpts = [TypeError, TypeError, TypeError, ValueError, TypeError, TypeError, TypeError]
for i in range(len(fails)):
try:
ra.reflections = fails[i]
TestFOA('ra.reflections=%s'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('ra.reflections=%s'%repr2(fails[i]))
pass
except:
TestFOA('ra.reflections=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ra.SetReflections(*fails[i])
TestFOA('ra.SetReflections(%s)'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('ra.SetReflections(%s)'%repr2(fails[i]))
pass
except:
TestFOA('ra.SetReflections(%s)'%repr2(fails[i]), LINE())
pass
works = [(0,1,0,1,0,1,0,1), (-1,100,-1,100,-1,100,-1,100), (0,True,False,1,0,1,0,1), (0,1,Max32BitInt,1,0,1,0,1)]
for i in range(len(works)):
try:
ra.reflections = works[i]
TestPOA('ra.reflections=%s'%repr2(works[i]))
except:
TestFOA('ra.reflections=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
ra.SetReflections(*works[i])
TestPOA('ra.SetReflections(%s)'%repr2(works[i]))
except:
TestFOA('ra.SetReflections(%s)'%repr2(works[i]), LINE())
def TestAssignmentToFloatArray():
TestSection('Assignment to floatArray member, "center", (of RadialResampleAttributes())')
rra = RadialResampleAttributes()
# Test assigning via (operator =) assignment
try:
rra.center = 0,1,2
TestPOA('rra.center=0,1,2')
except:
TestFOA('rra.center=0,1,2', LINE())
pass
try:
rra.center = 0,1
TestFOA('rra.center=0,1', LINE())
except:
TestPOA('rra.center=0,1')
pass
try:
rra.center = 0,1,2,3
TestFOA('rra.center=0,1,2,3', LINE())
except:
TestPOA('rra.center=0,1,2,3')
pass
fails = [(0,1), (0,1,2,3), (0,None,2), (0,1+2j,2), (0,X,2), (0,'123',2), (0, Max32BitFloatB,2)]
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
rra.center = fails[i]
TestFOA('rra.center=%s'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('rra.center=%s'%repr2(fails[i]))
pass
except:
TestFOA('rra.center=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
rra.SetCenter(*fails[i])
TestFOA('rra.SetCenter(%s)'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('rra.SetCenter(%s)'%repr2(fails[i]))
pass
except:
TestFOA('rra.SetCenter(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3), (1.1,2.2,3.3), tuple(X), (1,True,3), (1,False,3), (1,Max32BitFloatA,3)]
for i in range(len(works)):
try:
rra.center = works[i]
TestPOA('rra.center=%s'%repr2(works[i]))
except:
TestFOA('rra.center=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
rra.SetCenter(*works[i])
TestPOA('rra.SetCenter(%s)'%repr2(works[i]))
except:
TestFOA('rra.SetCenter(%s)'%repr2(works[i]), LINE())
def TestAssignmentToDoubleArray():
TestSection('Assignment to doubleArray member, "materialProperties", (of VolumeAttributes())')
va = VolumeAttributes()
# Test assigning via (operator =) assignment
try:
va.materialProperties = 0,1,2,3
TestPOA('va.materialProperties=0,1,2,3')
except:
        TestFOA('va.materialProperties=0,1,2,3', LINE())
pass
try:
va.materialProperties = 0,1,2
TestFOA('va.materialProperties=0,1,2', LINE())
except:
TestPOA('va.materialProperties=0,1,2')
pass
try:
va.materialProperties = 0,1,2,3,4
TestFOA('va.materialProperties=0,1,2,3,4', LINE())
except:
TestPOA('va.materialProperties=0,1,2,3,4')
pass
fails = [(0,1), (0,1,2,3,4), (0,None,2,3), (0,1+2j,2,3), (0,X,2,3), (0,'123',2,3)]
for i in range(len(fails)):
try:
va.materialProperties = fails[i]
TestFOA('va.materialProperties=%s'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('va.materialProperties=%s'%repr2(fails[i]))
pass
except:
TestFOA('va.materialProperties=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
va.SetMaterialProperties(*fails[i])
TestFOA('va.SetMaterialProperties(%s)'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('va.SetMaterialProperties(%s)'%repr2(fails[i]))
pass
except:
TestFOA('va.SetMaterialProperties(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3,4), (1.1,2.2,3.3,4.4), (1,True,3,4), (1,False,3,4)]
for i in range(len(works)):
try:
va.materialProperties = works[i]
TestPOA('va.materialProperties=%s'%repr2(works[i]))
except:
TestFOA('va.materialProperties=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
va.SetMaterialProperties(*works[i])
TestPOA('va.SetMaterialProperties(%s)'%repr2(works[i]))
except:
TestFOA('va.SetMaterialProperties(%s)'%repr2(works[i]), LINE())
def TestColorAttributeStuff():
TestSection('ColorAttribute stuff')
cla = ColorAttributeList()
ca = ColorAttribute()
fails = [(0,1,2), (0,1,2,3,4), (0,None,2,3), (0,1+2j,2,3), (0,X,2,3),
(0,'123',2,3), (0,-1,2,3), (0,256,2,3)]
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError, TypeError, ValueError, ValueError]
for i in range(len(fails)):
try:
ca.color = fails[i]
TestFOA('ca.color=%s'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('ca.color=%s'%repr2(fails[i]))
pass
except:
TestFOA('ca.color=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetColor(*fails[i])
TestFOA('ca.SetColor(%s)'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('ca.SetColor(%s)'%repr2(fails[i]))
pass
except:
TestFOA('ca.SetColor(%s)'%repr2(fails[i]), LINE())
pass
try:
ca.color = (5,5,5,5)
cla.AddColors(ca)
ca.color = (255,0,0,255)
cla.AddColors(ca)
TestPOA('cla.AddColors')
except:
TestFOA('cla.AddColors', LINE())
pass
try:
cla.colors
TestFOA('cla.colors', LINE())
except NameError:
TestPOA('cla.colors')
except:
TestFOA('cla.colors', LINE())
pass
try:
if cla.GetColors(0).color != (5,5,5,5) or cla.GetColors(1).color != (255,0,0,255):
raise ValueError
TestPOA('cla.GetColors(0)')
except:
        TestFOA('cla.GetColors(0)', LINE())
        pass
    try:
        cla.GetColors(2)
        TestFOA('cla.GetColors(2)', LINE())
    except ValueError:
        TestPOA('cla.GetColors(2)')
    except:
        TestFOA('cla.GetColors(2)', LINE())
pass
def TestDirOutput(obj, minlen = 5, names = None):
    TestSection('behavior of dir()')
    try:
        x = [f for f in dir(obj) if not (f.startswith('__') and f.endswith('__'))]
        if minlen and len(x) < minlen:
            TestFOA('dir(%s): minlen: %d < %d'%(repr(obj),len(x),minlen), LINE())
        # Guard against names=None (some callers pass no expected names)
        if names:
            found = [n for n in names if n in x]
            if len(found) != len(names):
                TestFOA('dir(%s): names: %s'%(repr(obj), names), LINE())
        TestPOA('dir(%s)'%repr(obj))
    except:
        TestFOA('dir(%s)'%repr(obj), LINE())
# Class to facilitate stdout redirect for testing `help()`
class my_redirect_stdout(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = io.StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
# The stdlib equivalent below works only for Python >= 3.4,
# so we use the class def above instead.
# from contextlib import redirect_stdout
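# For reference, a minimal sketch of the same capture using the stdlib
# helper (assumes Python >= 3.4; illustrative only, not used here so the
# script stays compatible with older interpreters):
#
#     import io
#     from contextlib import redirect_stdout
#     buf = io.StringIO()
#     with redirect_stdout(buf):
#         help(thing)
#     captured = buf.getvalue().splitlines()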
def TestHelpOutput(thing, minlen = 200, words = None):
    TestSection('behavior of help()')
    try:
        with my_redirect_stdout() as output:
            help(thing)
        if minlen and len(str(output)) < minlen:
            TestFOA('help(%s): minlen: %d < %d'%(repr(thing),len(str(output)),minlen), LINE())
        # Guard against words=None (some callers pass no expected words)
        if words:
            found = [w for w in words if w in str(output)]
            if len(found) != len(words):
                TestFOA('help(%s): words: %s'%(repr(thing), words), LINE())
        TestPOA('help(%s)'%repr(thing))
    except:
        TestFOA('help(%s)'%repr(thing), LINE())
# Scalar assignments
# TestAssignmentToUChar() No instances in any .xml files
TestAssignmentToBool()
TestAssignmentToInt()
TestAssignmentToFloat()
TestAssignmentToDouble()
TestAssignmentToString()
TestAssignmentToGlyphType()
TestAssignmentToEnum()
TestAssignmentToTuple()
# Vector assignments
TestAssignmentToUCharVector()
#TestAssignmentToBoolVector() No instances in any .xml files
TestAssignmentToIntVector()
#TestAssignmentToFloatVector() No instances in any .xml files
TestAssignmentToDoubleVector()
# Array assignments
TestAssignmentToUCharArray()
#TestAssignmentToBoolArray() No instances in any .xml files
TestAssignmentToIntArray()
TestAssignmentToFloatArray()
TestAssignmentToDoubleArray()
# Attribute Assignments
TestColorAttributeStuff()
# Test that dir(x) appears to work
#TestDirOutput(SILRestriction(), None, ['NumSets', 'TurnOnAll', 'Wholes', 'TopSets'])
#TestDirOutput(PseudocolorAttributes(), 50)
#TestDirOutput(ColorAttributeList(), None, ['AddColors', 'ClearColors', 'GetColors'])
# Test Help
#TestHelpOutput(AddPlot, None, ['plotType', 'variableName', 'inheritSIL'])
#TestHelpOutput(CreateDatabaseCorrelation, None,
# ['IndexForIndexCorrelation', 'CycleCorrelation', 'StretchedIndexCorrelation'])
Exit()
|
fireant/tests/queries/test_build_sets.py
|
gl3nn/fireant
| 122 |
77858
|
from unittest import TestCase
from pypika import (
Table,
functions as fn,
)
import fireant as f
from fireant.tests.dataset.mocks import test_database
test_table = Table("test")
ds = f.DataSet(
table=test_table,
database=test_database,
fields=[
f.Field("date", definition=test_table.date, data_type=f.DataType.date),
f.Field("text", definition=test_table.text, data_type=f.DataType.text),
f.Field("number", definition=test_table.number, data_type=f.DataType.number),
f.Field("boolean", definition=test_table.boolean, data_type=f.DataType.boolean),
f.Field(
"aggr_number",
definition=fn.Sum(test_table.number),
data_type=f.DataType.number,
),
],
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class ResultSetTests(TestCase):
maxDiff = None
def test_no_metric_is_removed_when_result_set_metric_filter_is_present(self):
queries = ds.query.widget(f.Pandas(ds.fields.aggr_number)).filter(f.ResultSet(ds.fields.aggr_number > 10)).sql
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN SUM(\"number\")>10 THEN 'set(SUM(number)>10)' "
"ELSE 'complement(SUM(number)>10)' END \"$set(SUM(number)>10)\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'ORDER BY 1 '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_is_replaced_by_default_when_result_set_filter_is_present(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_replaced_by_default_in_the_target_dimension_place_when_result_set_filter_is_present(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(ds.fields.text)
.dimension(ds.fields.boolean)
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
'"date" "$date",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'"boolean" "$boolean",'
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$text","$boolean" '
'ORDER BY "$date","$text","$boolean" '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_with_dimension_modifier_is_replaced_by_default_when_result_set_filter_is_present(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(f.Rollup(ds.fields.boolean))
.filter(f.ResultSet(ds.fields.boolean == True))
.sql
)
self.assertEqual(len(queries), 2)
with self.subTest('base query is the same as without totals'):
self.assertEqual(
"SELECT "
'"date" "$date",'
"CASE WHEN \"boolean\"=true THEN 'set(boolean=true)' ELSE 'complement(boolean=true)' END \"$boolean\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$boolean" '
'ORDER BY "$date","$boolean" '
'LIMIT 200000',
str(queries[0]),
)
with self.subTest('totals dimension is replaced with _FIREANT_ROLLUP_VALUE_'):
self.assertEqual(
"SELECT "
'"date" "$date",'
'\'_FIREANT_ROLLUP_VALUE_\' "$boolean",'
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date" '
'ORDER BY "$date","$boolean" '
'LIMIT 200000',
str(queries[1]),
)
def test_dimension_is_inserted_before_conditional_dimension_when_result_set_filter_wont_ignore_dimensions(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", will_replace_referenced_dimension=False))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$set(text='abc')\","
'"text" "$text",'
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$set(text=\'abc\')","$text" '
'ORDER BY "$set(text=\'abc\')","$text" '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_breaks_complement_down_when_result_set_filter_wont_group_complement(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", will_group_complement=False))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE \"text\" END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_inserted_in_dimensions_even_when_not_selected(self):
queries = ds.query.widget(f.Pandas(ds.fields.aggr_number)).filter(f.ResultSet(ds.fields.text == "abc")).sql
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_inserted_as_last_dimension_when_not_selected(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(ds.fields.boolean)
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
'"date" "$date",'
'"boolean" "$boolean",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$boolean","$text" '
'ORDER BY "$date","$boolean","$text" '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_uses_set_label_kwarg_and_None_for_complement(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", set_label="Text is ABC"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'Text is ABC' ELSE NULL END "
"\"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_breaks_complement_down_even_when_set_label_is_set_when_result_set_filter_wont_group_complement(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(
f.ResultSet(
ds.fields.text == "abc",
set_label="IS ABC",
will_group_complement=False,
)
)
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'IS ABC' ELSE \"text\" END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_breaks_complement_down_even_when_both_labels_are_set_but_wont_group_complement(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(
f.ResultSet(
ds.fields.text == "abc",
set_label="IS ABC",
complement_label="OTHERS",
will_group_complement=False,
)
)
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'IS ABC' ELSE \"text\" END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_uses_complement_label_kwarg_and_None_for_set(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", complement_label="Text is NOT ABC"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN NULL ELSE 'Text is NOT ABC' END "
"\"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_uses_both_set_and_complement_label_kwargs_when_available(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(
f.ResultSet(
ds.fields.text == "abc",
set_label="Text is ABC",
complement_label="Text is NOT ABC",
)
)
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'Text is ABC' ELSE 'Text is NOT ABC' END "
"\"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_replaced_when_references_are_present(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(ds.fields.boolean)
.reference(f.WeekOverWeek(ds.fields.date))
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 2)
with self.subTest("base query"):
self.assertEqual(
"SELECT "
'"date" "$date",'
'"boolean" "$boolean",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$boolean","$text" '
'ORDER BY "$date","$boolean","$text" '
'LIMIT 200000',
str(queries[0]),
)
with self.subTest("ref query"):
self.assertEqual(
"SELECT "
'TIMESTAMPADD(week,1,"date") "$date",'
'"boolean" "$boolean",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number_wow" '
'FROM "test" '
'GROUP BY "$date","$boolean","$text" '
'ORDER BY "$date","$boolean","$text" '
'LIMIT 200000',
str(queries[1]),
)
def test_dimension_filter_variations_with_sets(self):
for field_alias, fltr in [
('text', ds.fields.text.like("%abc%")),
('text', ds.fields.text.not_like("%abc%")),
('text', ds.fields.text.like("%abc%", "%cde%")),
('text', ds.fields.text.not_like("%abc%", "%cde%")),
('text', ds.fields.text.isin(["abc"])),
('text', ds.fields.text.notin(["abc"])),
('date', ds.fields.date.between('date1', 'date2')),
('number', ds.fields.number.between(5, 15)),
('number', ds.fields.number.isin([1, 2, 3])),
('number', ds.fields.number.notin([1, 2, 3])),
]:
fltr_sql = fltr.definition.get_sql(quote_char="")
with self.subTest(fltr_sql):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields[field_alias])
.filter(f.ResultSet(fltr, set_label='set_A', complement_label='set_B'))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
f"CASE WHEN {fltr} THEN 'set_A' ELSE 'set_B' END \"${field_alias}\","
'SUM("number") "$aggr_number" '
'FROM "test" '
f"GROUP BY \"${field_alias}\" "
f"ORDER BY \"${field_alias}\" "
"LIMIT 200000",
str(queries[0]),
)
def test_deeply_nested_dimension_filter_with_sets(self):
field_alias = 'text'
fltr = ds.fields.text.like(
fn.Concat(
fn.Upper(fn.Trim(fn.Concat('%ab', ds.fields.number))),
ds.fields.aggr_number,
fn.Concat(ds.fields.date.between('date1', 'date2'), 'c%'),
)
)
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields[field_alias])
.filter(f.ResultSet(fltr, set_label='set_A', complement_label='set_B'))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
f"CASE WHEN {fltr} THEN 'set_A' ELSE 'set_B' END \"${field_alias}\","
'SUM("number") "$aggr_number" '
'FROM "test" '
f"GROUP BY \"${field_alias}\" "
f"ORDER BY \"${field_alias}\" "
"LIMIT 200000",
str(queries[0]),
)
|
reddit2telegram/channels/~inactive/r_hermitcraft/app.py
|
CaringCat/reddit2telegram
| 187 |
77877
|
#encoding:utf-8
subreddit = 'HermitCraft'
t_channel = '@r_HermitCraft'
def send_post(submission, r2t):
return r2t.send_simple(submission, min_upvotes_limit=100)
|
datasets/wiki_hop/wiki_hop.py
|
zidingz/datasets
| 10,608 |
77914
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiHop: Reading Comprehension with Multiple Hops"""
import json
import os
import datasets
_CITATION = """\
@misc{welbl2018constructing,
title={Constructing Datasets for Multi-hop Reading Comprehension Across Documents},
author={<NAME> and <NAME> and <NAME>},
year={2018},
eprint={1710.06481},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
WikiHop is open-domain and based on Wikipedia articles; the goal is to recover Wikidata information by hopping through documents. \
Text understanding queries are answered by combining multiple facts that are spread across different documents.
"""
_URL = "https://drive.google.com/uc?export=download&id=1ytVZ4AhubFDOEL7o7XrIRIyhU8g9wvKA"
class WikiHopConfig(datasets.BuilderConfig):
"""BuilderConfig for WikiHop."""
def __init__(self, masked=False, **kwargs):
"""BuilderConfig for WikiHop.
Args:
          masked: `bool`, whether to load the masked (rather than original) data.
**kwargs: keyword arguments forwarded to super.
"""
super(WikiHopConfig, self).__init__(**kwargs)
self.masked = masked
class WikiHop(datasets.GeneratorBasedBuilder):
"""WikiHop: Reading Comprehension with Multiple Hops"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
WikiHopConfig(
name="original",
version=datasets.Version("1.0.0"),
description="The un-maksed WikiHop dataset",
masked=False,
),
WikiHopConfig(
name="masked", version=datasets.Version("1.0.0"), description="Masked WikiHop dataset", masked=True
),
]
BUILDER_CONFIG_CLASS = WikiHopConfig
DEFAULT_CONFIG_NAME = "original"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"question": datasets.Value("string"),
"answer": datasets.Value("string"),
"candidates": datasets.Sequence(datasets.Value("string")),
"supports": datasets.Sequence(datasets.Value("string")),
"annotations": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
}
),
supervised_keys=None,
homepage="http://qangaroo.cs.ucl.ac.uk/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
extracted_path = dl_manager.download_and_extract(_URL)
wikihop_path = os.path.join(extracted_path, "qangaroo_v1.1", "wikihop")
train_file = "train.json" if self.config.name == "original" else "train.masked.json"
dev_file = "dev.json" if self.config.name == "original" else "dev.masked.json"
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": os.path.join(wikihop_path, train_file), "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": os.path.join(wikihop_path, dev_file), "split": "dev"},
),
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf-8") as f:
examples = json.load(f)
for i, example in enumerate(examples):
# there are no annotations for train split, setting it to empty list
if split == "train":
example["annotations"] = []
example["question"] = example.pop("query")
yield example["id"], example
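# Illustrative usage sketch (not part of this script): once this builder is
# registered with the `datasets` library, either config can be loaded by
# name. The config names "original" and "masked" come from BUILDER_CONFIGS.
#
#     from datasets import load_dataset
#     wiki_hop = load_dataset("wiki_hop", "original")
#     print(wiki_hop["train"][0]["question"])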
|
tatsu/tokenizing.py
|
bookofproofs/TatSu
| 259 |
77915
|
<reponame>bookofproofs/TatSu
from __future__ import annotations
from .util._common import _prints
from .exceptions import ParseError # noqa
class Tokenizer:
def error(self, *args, **kwargs):
raise ParseError(_prints(*args, **kwargs))
@property
def filename(self):
raise NotImplementedError
@property
def ignorecase(self):
raise NotImplementedError
@property
def pos(self):
raise NotImplementedError
def goto(self, pos):
raise NotImplementedError
def atend(self):
raise NotImplementedError
def ateol(self):
raise NotImplementedError
@property
def token(self):
raise NotImplementedError
@property
def current(self):
return self.token
def next(self):
raise NotImplementedError
def next_token(self):
raise NotImplementedError
def match(self, token, ignorecase=False):
raise NotImplementedError
def matchre(self, pattern, ignorecase=False):
raise NotImplementedError
def posline(self, pos):
raise NotImplementedError
def line_info(self, pos=None):
raise NotImplementedError
def get_lines(self, start=None, end=None):
raise NotImplementedError
def lookahead(self):
raise NotImplementedError
def lookahead_pos(self):
if self.atend():
return ''
info = self.line_info()
return '~%d:%d' % (info.line + 1, info.col + 1)
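# Minimal illustrative subclass (a sketch for demonstration, not part of
# TatSu): concrete tokenizers override the NotImplementedError members and
# inherit `error`, `current`, and `lookahead_pos` from Tokenizer for free.
#
#     class ListTokenizer(Tokenizer):
#         def __init__(self, tokens):
#             self._tokens = list(tokens)
#             self._pos = 0
#
#         @property
#         def pos(self):
#             return self._pos
#
#         def goto(self, pos):
#             self._pos = pos
#
#         def atend(self):
#             return self._pos >= len(self._tokens)
#
#         @property
#         def token(self):
#             return None if self.atend() else self._tokens[self._pos]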
|
allennlp/fairness/bias_utils.py
|
MSLars/allennlp
| 11,433 |
77918
|
import torch
import json
from os import PathLike
from typing import List, Tuple, Union, Optional
from allennlp.common.file_utils import cached_path
from allennlp.data import Vocabulary
from allennlp.data.tokenizers.tokenizer import Tokenizer
def _convert_word_to_ids_tensor(word, tokenizer, vocab, namespace, all_cases):
# function does NOT strip special tokens if tokenizer adds them
if all_cases:
words_list = [word.lower(), word.title(), word.upper()]
else:
words_list = [word]
ids = []
for w in words_list:
# if vocab is None, use tokenizer vocab (only works for Huggingface PreTrainedTokenizer)
if vocab:
tokens = tokenizer.tokenize(w)
ids.append(torch.tensor([vocab.get_token_index(t.text, namespace) for t in tokens]))
else:
ids.append(torch.tensor(tokenizer.tokenizer(w)["input_ids"]))
return ids
def load_words(
fname: Union[str, PathLike],
tokenizer: Tokenizer,
vocab: Optional[Vocabulary] = None,
namespace: str = "tokens",
all_cases: bool = True,
) -> List[torch.Tensor]:
"""
This function loads a list of words from a file,
tokenizes each word into subword tokens, and converts the
tokens into IDs.
# Parameters
fname : `Union[str, PathLike]`
Name of file containing list of words to load.
tokenizer : `Tokenizer`
Tokenizer to tokenize words in file.
vocab : `Vocabulary`, optional (default=`None`)
Vocabulary of tokenizer. If `None`, assumes tokenizer is of
type `PreTrainedTokenizer` and uses tokenizer's `vocab` attribute.
namespace : `str`
Namespace of vocab to use when tokenizing.
all_cases : `bool`, optional (default=`True`)
Whether to tokenize lower, title, and upper cases of each word.
# Returns
word_ids : `List[torch.Tensor]`
List of tensors containing the IDs of subword tokens for
each word in the file.
"""
word_ids = []
with open(cached_path(fname)) as f:
words = json.load(f)
for w in words:
word_ids.extend(_convert_word_to_ids_tensor(w, tokenizer, vocab, namespace, all_cases))
return word_ids
def load_word_pairs(
fname: Union[str, PathLike],
tokenizer: Tokenizer,
vocab: Optional[Vocabulary] = None,
namespace: str = "token",
all_cases: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
This function loads a list of pairs of words from a file,
tokenizes each word into subword tokens, and converts the
tokens into IDs.
# Parameters
fname : `Union[str, PathLike]`
Name of file containing list of pairs of words to load.
tokenizer : `Tokenizer`
Tokenizer to tokenize words in file.
vocab : `Vocabulary`, optional (default=`None`)
Vocabulary of tokenizer. If `None`, assumes tokenizer is of
type `PreTrainedTokenizer` and uses tokenizer's `vocab` attribute.
namespace : `str`
Namespace of vocab to use when tokenizing.
all_cases : `bool`, optional (default=`True`)
Whether to tokenize lower, title, and upper cases of each word.
# Returns
word_ids : `Tuple[List[torch.Tensor], List[torch.Tensor]]`
Pair of lists of tensors containing the IDs of subword tokens for
words in the file.
"""
word_ids1 = []
word_ids2 = []
with open(cached_path(fname)) as f:
words = json.load(f)
for w1, w2 in words:
word_ids1.extend(
_convert_word_to_ids_tensor(w1, tokenizer, vocab, namespace, all_cases)
)
word_ids2.extend(
_convert_word_to_ids_tensor(w2, tokenizer, vocab, namespace, all_cases)
)
return word_ids1, word_ids2
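# Illustrative usage sketch; the file name below is an assumption for
# demonstration, not a fixture shipped with allennlp:
#
#     from allennlp.data.tokenizers import PretrainedTransformerTokenizer
#     tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
#     # vocab=None, so the wrapped Huggingface tokenizer's vocab is used
#     he_ids, she_ids = load_word_pairs("gender_pairs.json", tokenizer)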
|
bin/train_neural_net.py
|
dbdean/DeepOSM
| 1,365 |
77925
|
#!/usr/bin/env python
"""Train a neural network using OpenStreetMap labels and NAIP images."""
import argparse
from src.single_layer_network import train_on_cached_data
def create_parser():
"""Create the argparse parser."""
parser = argparse.ArgumentParser()
parser.add_argument("--neural-net",
default='one_layer_relu',
choices=['one_layer_relu', 'one_layer_relu_conv', 'two_layer_relu_conv'],
help="the neural network architecture to use")
parser.add_argument("--number-of-epochs",
default=5,
type=int,
help="the number of epochs to batch the training data into")
parser.add_argument("--render-results",
action='store_true',
help="output data/predictions to JPEG, in addition to normal JSON")
return parser
def main():
"""Use local data to train the neural net, probably made by bin/create_training_data.py."""
parser = create_parser()
args = parser.parse_args()
train_on_cached_data(args.neural_net, args.number_of_epochs)
if __name__ == "__main__":
main()
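# Example invocation (illustrative; the flag values are among the choices
# defined in create_parser above):
#
#     python bin/train_neural_net.py --neural-net two_layer_relu_conv \
#         --number-of-epochs 10 --render-results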
|
thermo/eos_alpha_functions.py
|
RoryKurek/thermo
| 380 |
77929
|
<filename>thermo/eos_alpha_functions.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# pylint: disable=E1101
r'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018, 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains implementations of the calculation of pure-component
EOS :math:`a \alpha` parameters in a vectorized way. Functions for calculating their
temperature derivatives as may be necessary are included as well.
For certain alpha functions, a class is available to provide these functions to
any class that inherits from it.
A mixing rule must be used on the `a_alphas` to get the overall `a_alpha`
term.
.. contents:: :local:
Vectorized Alpha Functions
--------------------------
.. autofunction:: thermo.eos_alpha_functions.PR_a_alphas_vectorized
.. autofunction:: thermo.eos_alpha_functions.SRK_a_alphas_vectorized
.. autofunction:: thermo.eos_alpha_functions.PRSV_a_alphas_vectorized
.. autofunction:: thermo.eos_alpha_functions.PRSV2_a_alphas_vectorized
.. autofunction:: thermo.eos_alpha_functions.APISRK_a_alphas_vectorized
.. autofunction:: thermo.eos_alpha_functions.RK_a_alphas_vectorized
Vectorized Alpha Functions With Derivatives
-------------------------------------------
.. autofunction:: thermo.eos_alpha_functions.PR_a_alpha_and_derivatives_vectorized
.. autofunction:: thermo.eos_alpha_functions.SRK_a_alpha_and_derivatives_vectorized
.. autofunction:: thermo.eos_alpha_functions.PRSV_a_alpha_and_derivatives_vectorized
.. autofunction:: thermo.eos_alpha_functions.PRSV2_a_alpha_and_derivatives_vectorized
.. autofunction:: thermo.eos_alpha_functions.APISRK_a_alpha_and_derivatives_vectorized
.. autofunction:: thermo.eos_alpha_functions.RK_a_alpha_and_derivatives_vectorized
Class With Alpha Functions
--------------------------
The class-based ones can save a little code when implementing a new EOS.
If there is not a standalone function available for an alpha function, it has
not yet been accelerated in a nice vectorized way.
.. autoclass:: thermo.eos_alpha_functions.a_alpha_base
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Almeida_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Androulakis_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Chen_Yang_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Coquelet_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Gasem_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Gibbons_Laughton_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Haghtalab_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Harmens_Knapp_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Heyen_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Mathias_1983_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Mathias_Copeman_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Mathias_Copeman_poly_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Mathias_Copeman_untruncated_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Melhem_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Poly_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Saffari_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Schwartzentruber_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Soave_1972_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Soave_1984_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Soave_1979_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Soave_1993_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Trebble_Bishnoi_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Twu91_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.TwuPR95_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.TwuSRK95_a_alpha
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: thermo.eos_alpha_functions.Yu_Lu_a_alpha
:members:
:undoc-members:
:show-inheritance:
Pure Alpha Functions
--------------------
.. autofunction:: thermo.eos_alpha_functions.Twu91_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Soave_1972_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Soave_1979_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Heyen_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Harmens_Knapp_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Mathias_1983_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Mathias_Copeman_untruncated_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Gibbons_Laughton_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Soave_1984_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Yu_Lu_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Trebble_Bishnoi_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Melhem_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Androulakis_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Schwartzentruber_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Almeida_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Soave_1993_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Gasem_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Coquelet_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Haghtalab_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Saffari_alpha_pure
.. autofunction:: thermo.eos_alpha_functions.Chen_Yang_alpha_pure
'''
from __future__ import division, print_function
__all__ = [
'PR_a_alphas_vectorized', 'PR_a_alpha_and_derivatives_vectorized',
'RK_a_alphas_vectorized', 'RK_a_alpha_and_derivatives_vectorized',
'SRK_a_alphas_vectorized', 'SRK_a_alpha_and_derivatives_vectorized',
'PRSV_a_alphas_vectorized', 'PRSV_a_alpha_and_derivatives_vectorized',
'PRSV2_a_alphas_vectorized', 'PRSV2_a_alpha_and_derivatives_vectorized',
'APISRK_a_alphas_vectorized', 'APISRK_a_alpha_and_derivatives_vectorized',
'a_alpha_base', 'Poly_a_alpha', 'Soave_1972_a_alpha', 'Heyen_a_alpha',
'Harmens_Knapp_a_alpha', 'Mathias_1983_a_alpha', 'Mathias_Copeman_untruncated_a_alpha',
'Mathias_Copeman_poly_a_alpha', 'Gibbons_Laughton_a_alpha', 'Soave_1984_a_alpha',
'Yu_Lu_a_alpha', 'Trebble_Bishnoi_a_alpha', 'Melhem_a_alpha', 'Androulakis_a_alpha',
'Schwartzentruber_a_alpha', 'Almeida_a_alpha', 'Twu91_a_alpha', 'Soave_1993_a_alpha',
'Gasem_a_alpha', 'Coquelet_a_alpha', 'Haghtalab_a_alpha', 'Saffari_a_alpha',
'Chen_Yang_a_alpha', 'TwuSRK95_a_alpha', 'TwuPR95_a_alpha', 'Soave_1979_a_alpha',
'Twu91_alpha_pure', 'Soave_1972_alpha_pure', 'Soave_1979_alpha_pure',
'Heyen_alpha_pure', 'Harmens_Knapp_alpha_pure', 'Mathias_1983_alpha_pure',
'Mathias_Copeman_untruncated_alpha_pure', 'Gibbons_Laughton_alpha_pure',
'Soave_1984_alpha_pure', 'Yu_Lu_alpha_pure', 'Trebble_Bishnoi_alpha_pure',
'Melhem_alpha_pure', 'Androulakis_alpha_pure', 'Schwartzentruber_alpha_pure',
'Almeida_alpha_pure', 'Soave_1993_alpha_pure', 'Gasem_alpha_pure',
'Coquelet_alpha_pure', 'Haghtalab_alpha_pure', 'Saffari_alpha_pure',
'Chen_Yang_alpha_pure', 'Mathias_Copeman_a_alpha']
from fluids.numerics import (horner, horner_and_der2, numpy as np)
from chemicals.utils import log, exp, sqrt, copysign
try:
array = np.array
except:
pass
def PR_a_alphas_vectorized(T, Tcs, ais, kappas, a_alphas=None):
r'''Calculates the `a_alpha` terms for the Peng-Robinson equation of state
given the critical temperatures `Tcs`, constants `ais`, and
`kappas`.
.. math::
a_i\alpha(T)_i=a_i [1+\kappa_i(1-\sqrt{T_{r,i}})]^2
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2]
kappas : list[float]
`kappa` parameters of Peng-Robinson EOS; formulas vary, but
the original form uses
:math:`\kappa_i=0.37464+1.54226\omega_i-0.26992\omega^2_i`, [-]
a_alphas : list[float], optional
Vector for pure component `a_alpha` terms in the cubic EOS to be
calculated and stored in, [Pa*m^6/mol^2]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
Notes
-----
Examples
--------
>>> Tcs = [469.7, 507.4, 540.3]
>>> ais = [2.0698956357716662, 2.7018068455659545, 3.3725793885832323]
>>> kappas = [0.74192743008, 0.819919992, 0.8800122140799999]
>>> PR_a_alphas_vectorized(322.29, Tcs=Tcs, ais=ais, kappas=kappas)
[2.6306811679, 3.6761503348, 4.8593286234]
'''
N = len(Tcs)
x0_inv = 1.0/sqrt(T)
x0 = T*x0_inv
if a_alphas is None:
a_alphas = [0.0]*N
for i in range(N):
x1 = 1.0/sqrt(Tcs[i])
x2 = kappas[i]*(x0*x1 - 1.) - 1.
a_alphas[i] = ais[i]*x2*x2
return a_alphas
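# Illustrative sketch (for demonstration, not a function in this module):
# as the module docstring notes, a mixing rule must be applied to these
# pure-component terms to get the overall `a_alpha`. The classic van der
# Waals one-fluid quadratic rule, with mole fractions `zs` and optional
# binary interaction parameters `kijs`, looks like:
#
#     def quadratic_a_alpha_mixture(zs, a_alphas, kijs=None):
#         N = len(zs)
#         tot = 0.0
#         for i in range(N):
#             for j in range(N):
#                 kij = 0.0 if kijs is None else kijs[i][j]
#                 tot += zs[i]*zs[j]*sqrt(a_alphas[i]*a_alphas[j])*(1.0 - kij)
#         return tot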
def PR_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappas, a_alphas=None,
da_alpha_dTs=None, d2a_alpha_dT2s=None):
r'''Calculates the `a_alpha` terms and their first two temperature
derivatives for the Peng-Robinson equation of state
given the critical temperatures `Tcs`, constants `ais`, and
`kappas`.
.. math::
a_i\alpha(T)_i=a_i[1+\kappa_i(1-\sqrt{T_{r,i}})]^2
.. math::
\frac{d a_i\alpha_i}{dT} = - \frac{a_i \kappa_i}{T^{0.5} {T_c}_i^{0.5}}
\left(\kappa_i \left(- \frac{T^{0.5}}{{T_c}_i^{0.5}} + 1\right) + 1\right)
.. math::
\frac{d^2 a_i\alpha_i}{dT^2} = 0.5 a_i \kappa_i \left(- \frac{1}{T^{1.5}
{T_c}_i^{0.5}} \left(\kappa_i \left(\frac{T^{0.5}}{{T_c}_i^{0.5}} - 1\right)
- 1\right) + \frac{\kappa_i}{T {T_c}_i}\right)
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}` [Pa*m^6/mol^2]
kappas : list[float]
`kappa` parameters of Peng-Robinson EOS; formulas vary, but
the original form uses
:math:`\kappa_i=0.37464+1.54226\omega_i-0.26992\omega^2_i`, [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
da_alpha_dTs : list[float]
First temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K)]
d2a_alpha_dT2s : list[float]
Second temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K^2)]
Notes
-----
Examples
--------
>>> Tcs = [469.7, 507.4, 540.3]
>>> ais = [2.0698956357716662, 2.7018068455659545, 3.3725793885832323]
>>> kappas = [0.74192743008, 0.819919992, 0.8800122140799999]
>>> PR_a_alpha_and_derivatives_vectorized(322.29, Tcs=Tcs, ais=ais, kappas=kappas)
([2.63068116797, 3.67615033489, 4.859328623453], [-0.0044497546430, -0.00638993749167, -0.0085372308846], [1.066668360e-05, 1.546687574587e-05, 2.07440632117e-05])
'''
N = len(Tcs)
x0_inv = 1.0/sqrt(T)
x0 = T*x0_inv
T_inv = x0_inv*x0_inv
x0T_inv = x0_inv*T_inv
x5, x6 = 0.5*T_inv, 0.5*x0T_inv
if a_alphas is None:
a_alphas = [0.0]*N
if da_alpha_dTs is None:
da_alpha_dTs = [0.0]*N
if d2a_alpha_dT2s is None:
d2a_alpha_dT2s = [0.0]*N
for i in range(N):
x1 = 1.0/sqrt(Tcs[i])
x2 = kappas[i]*(x0*x1 - 1.) - 1.
x3 = ais[i]*kappas[i]
x4 = x1*x2
a_alphas[i] = ais[i]*x2*x2
da_alpha_dTs[i] = x4*x3*x0_inv
d2a_alpha_dT2s[i] = x3*(x5*x1*x1*kappas[i] - x4*x6)
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
def SRK_a_alphas_vectorized(T, Tcs, ais, ms, a_alphas=None):
r'''Calculates the `a_alpha` terms for the SRK equation of state
    given the critical temperatures `Tcs`, constants `ais`, and
    `ms`.
    .. math::
        a_i\alpha(T)_i = a_i\left[1 + m_i\left(1 - \sqrt{\frac{T}{T_{c,i}}}
        \right)\right]^2
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2]
ms : list[float]
`m` parameters of SRK EOS; formulas vary, but
the original form uses
:math:`m_i = 0.480 + 1.574\omega_i - 0.176\omega_i^2`, [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
Notes
-----
Examples
--------
>>> Tcs = [469.7, 507.4, 540.3]
>>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315]
>>> ms = [0.8610138239999999, 0.9436976, 1.007889024]
>>> SRK_a_alphas_vectorized(322.29, Tcs=Tcs, ais=ais, ms=ms)
[2.549485814512, 3.586598245260, 4.76614806648]
'''
sqrtT = sqrt(T)
N = len(Tcs)
if a_alphas is None:
a_alphas = [0.0]*N
for i in range(N):
x0 = ms[i]*(1. - sqrtT/sqrt(Tcs[i])) + 1.0
a_alphas[i] = ais[i]*x0*x0
return a_alphas
def SRK_a_alpha_and_derivatives_vectorized(T, Tcs, ais, ms, a_alphas=None,
da_alpha_dTs=None,
d2a_alpha_dT2s=None):
r'''Calculates the `a_alpha` terms and their first and second temperature
derivatives for the SRK equation of state
    given the critical temperatures `Tcs`, constants `ais`, and
    `ms`.
    .. math::
        a_i\alpha(T)_i = a_i\left[1 + m_i\left(1 - \sqrt{\frac{T}{T_{c,i}}}
        \right)\right]^2
.. math::
\frac{d a_i\alpha_i}{dT} = \frac{a_i m_i}{T} \sqrt{\frac{T}{T_{c,i}}}
\left(m_i \left(\sqrt{\frac{T}{T{c,i}}} - 1\right) - 1\right)
.. math::
\frac{d^2 a_i\alpha_i}{dT^2} = \frac{a_i m_i \sqrt{\frac{T}{T_{c,i}}}}
{2 T^{2}} \left(m_i + 1\right)
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2]
ms : list[float]
`m` parameters of SRK EOS; formulas vary, but
the original form uses
:math:`m_i = 0.480 + 1.574\omega_i - 0.176\omega_i^2`, [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
da_alpha_dTs : list[float]
First temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K)]
d2a_alpha_dT2s : list[float]
Second temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K^2)]
Notes
-----
Examples
--------
>>> Tcs = [469.7, 507.4, 540.3]
>>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315]
>>> ms = [0.8610138239999999, 0.9436976, 1.007889024]
>>> SRK_a_alpha_and_derivatives_vectorized(322.29, Tcs=Tcs, ais=ais, ms=ms)
([2.549485814512, 3.586598245260, 4.76614806648], [-0.004915469296196, -0.00702410108423, -0.00936320876945], [1.236441916324e-05, 1.77752796719e-05, 2.37231823137e-05])
'''
N = len(Tcs)
sqrtnT = 1.0/sqrt(T)
sqrtT = T*sqrtnT
T_inv = sqrtnT*sqrtnT
x10 = 0.5*T_inv*T_inv
nT_inv = -T_inv
if a_alphas is None:
a_alphas = [0.0]*N
if da_alpha_dTs is None:
da_alpha_dTs = [0.0]*N
if d2a_alpha_dT2s is None:
d2a_alpha_dT2s = [0.0]*N
for i in range(N):
x1 = sqrtT/sqrt(Tcs[i])
x2 = ais[i]*ms[i]*x1
x3 = ms[i]*(1.0 - x1) + 1.
a_alphas[i] = ais[i]*x3*x3
da_alpha_dTs[i] = x2*nT_inv*x3
d2a_alpha_dT2s[i] = x2*x10*(ms[i] + 1.)
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
def RK_a_alphas_vectorized(T, Tcs, ais, a_alphas=None):
r'''Calculates the `a_alpha` terms for the RK equation of state
given the critical temperatures `Tcs`, and `a` parameters `ais`.
.. math::
a_i\alpha_i = \frac{a_i}{\sqrt{\frac{T}{T_{c,i}}}}
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
Notes
-----
Examples
--------
>>> Tcs = [469.7, 507.4, 540.3]
>>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315]
>>> RK_a_alphas_vectorized(322.29, Tcs=Tcs, ais=ais)
[2.3362073307, 3.16943743055, 4.0825575798]
'''
N = len(ais)
if a_alphas is None:
a_alphas = [0.0]*N
T_root_inv = 1.0/sqrt(T)
for i in range(N):
a_alphas[i] = ais[i]*sqrt(Tcs[i])*T_root_inv
return a_alphas
def RK_a_alpha_and_derivatives_vectorized(T, Tcs, ais, a_alphas=None,
da_alpha_dTs=None, d2a_alpha_dT2s=None):
r'''Calculates the `a_alpha` terms and their first and second temperature
derivatives for the RK equation of state
given the critical temperatures `Tcs`, and `a` parameters `ais`.
.. math::
a_i\alpha_i = \frac{a_i}{\sqrt{\frac{T}{T_{c,i}}}}
.. math::
\frac{d a_i\alpha_i}{dT} = - \frac{a_i}{2 T\sqrt{\frac{T}{T_{c,i}}}}
.. math::
\frac{d^2 a_i\alpha_i}{dT^2} = \frac{3 a_i}{4 T^{2}\sqrt{\frac{T}{T_{c,i}}}}
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
da_alpha_dTs : list[float]
First temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K)]
d2a_alpha_dT2s : list[float]
Second temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K^2)]
Notes
-----
Examples
--------
>>> Tcs = [469.7, 507.4, 540.3]
>>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315]
>>> RK_a_alpha_and_derivatives_vectorized(322.29, Tcs=Tcs, ais=ais)
([2.3362073307, 3.16943743055, 4.08255757984], [-0.00362438693525, -0.0049170582868, -0.00633367088622], [1.6868597855e-05, 2.28849403652e-05, 2.94781294155e-05])
'''
N = len(ais)
if a_alphas is None:
a_alphas = [0.0]*N
if da_alpha_dTs is None:
da_alpha_dTs = [0.0]*N
if d2a_alpha_dT2s is None:
d2a_alpha_dT2s = [0.0]*N
T_root_inv = 1.0/sqrt(T)
T_inv = T_root_inv*T_root_inv
T_15_inv = T_inv*T_root_inv
T_25_inv = T_inv*T_15_inv
x0 = -0.5*T_15_inv
x1 = 0.75*T_25_inv
for i in range(N):
Tc_05 = sqrt(Tcs[i])
aiTc_05 = ais[i]*Tc_05
a_alphas[i] = aiTc_05*T_root_inv
da_alpha_dTs[i] = aiTc_05*x0
d2a_alpha_dT2s[i] = aiTc_05*x1
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
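# Illustrative sanity check (not part of the library): the analytical
# temperature derivatives returned above can be verified against a central
# finite difference on RK_a_alphas_vectorized, reusing the docstring inputs:
#
#     h = 1e-5
#     lo = RK_a_alphas_vectorized(322.29 - h, Tcs=Tcs, ais=ais)
#     hi = RK_a_alphas_vectorized(322.29 + h, Tcs=Tcs, ais=ais)
#     approx_da_dT = [(b - a)/(2.0*h) for a, b in zip(lo, hi)]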
def PRSV_a_alphas_vectorized(T, Tcs, ais, kappa0s, kappa1s, a_alphas=None):
r'''Calculates the `a_alpha` terms for the Peng-Robinson-Stryjek-Vera
equation of state given the critical temperatures `Tcs`, constants `ais`, PRSV
parameters `kappa0s` and `kappa1s`.
.. math::
a_i\alpha_i = a_i \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{
T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)
\right) \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right) + 1\right)^{2}
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2]
kappa0s : list[float]
`kappa0` parameters of PRSV EOS;
the original form uses
:math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-]
kappa1s : list[float]
Fit parameters, can be set to 0 if unknown [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
Notes
-----
Examples
--------
>>> Tcs = [507.6]
>>> ais = [2.6923169620277805]
>>> kappa0s = [0.8074380841890093]
>>> kappa1s = [0.05104]
>>> PRSV_a_alphas_vectorized(299.0, Tcs=Tcs, ais=ais, kappa0s=kappa0s, kappa1s=kappa1s)
[3.81298569831]
'''
sqrtT = sqrt(T)
N = len(Tcs)
if a_alphas is None:
a_alphas = [0.0]*N
for i in range(N):
Tc_inv_root = 1.0/sqrt(Tcs[i])
Tc_inv = Tc_inv_root*Tc_inv_root
x0 = Tc_inv_root*sqrtT
x2 = (1.0 + (kappa0s[i] + kappa1s[i]*(x0 + 1.0)*(0.7 - T*Tc_inv))*(1.0 - x0))
a_alphas[i] = ais[i]*x2*x2
return a_alphas
def PRSV_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappa0s, kappa1s,
a_alphas=None, da_alpha_dTs=None,
d2a_alpha_dT2s=None):
r'''Calculates the `a_alpha` terms and their first and second derivative
for the Peng-Robinson-Stryjek-Vera
equation of state given the critical temperatures `Tcs`, constants `ais`, PRSV
parameters `kappa0s` and `kappa1s`.
.. math::
a_i\alpha_i = a_i \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{
T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)
\right) \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right) + 1\right)^{2}
.. math::
\frac{d a_i\alpha_i}{dT} =a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}}}
\right) \left(\kappa_{0,i} + \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}}
+ 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right)
+ 1\right) \left(2 \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(
- \frac{\kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}}
+ \frac{\kappa_{1,i} \sqrt{\frac{T}{T_{c,i}}} \left(- \frac{T}{T_{c,i}}
+ \frac{7}{10}\right)}{2 T}\right) - \frac{\sqrt{\frac{T}{T_{c,i}}}
\left(\kappa_{0,i} + \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}}
+ 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right)}{T}
\right)
.. math::
\frac{d^2 a_i\alpha_i}{dT^2} = \frac{a_{i} \left(\left(\kappa_{1,i}
\left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{20 \left(\sqrt{
\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}}}
\left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right) - \frac{\sqrt{\frac{T}
{T_{c,i}}} \left(10 \kappa_{0,i} - \kappa_{1,i} \left(\sqrt{\frac{T}
{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)\right)}{T}
\right)^{2} - \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\left(10 \kappa_{0,i}
- \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(\frac{10 T}{T_{c,i}} - 7\right)\right) \left(\sqrt{\frac{T}
{T_{c,i}}} - 1\right) - 10\right) \left(\kappa_{1,i} \left(\frac{40}
{T_{c,i}} - \frac{\frac{10 T}{T_{c,i}} - 7}{T}\right) \left(\sqrt{
\frac{T}{T_{c,i}}} - 1\right) + 2 \kappa_{1,i} \left(\frac{20 \left(
\sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}
{T_{c,i}}} \left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right) + \frac{10
\kappa_{0,i} - \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right)}{T}\right)}{200}
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2]
kappa0s : list[float]
`kappa0` parameters of PRSV EOS; the original form uses
:math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-]
kappa1s : list[float]
Fit parameters, can be set to 0 if unknown [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
da_alpha_dTs : list[float]
First temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K)]
d2a_alpha_dT2s : list[float]
Second temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K^2)]
Notes
-----
Examples
--------
>>> Tcs = [507.6]
>>> ais = [2.6923169620277805]
>>> kappa0s = [0.8074380841890093]
>>> kappa1s = [0.05104]
>>> PRSV_a_alpha_and_derivatives_vectorized(299.0, Tcs=Tcs, ais=ais, kappa0s=kappa0s, kappa1s=kappa1s)
([3.8129856983], [-0.0069769034748], [2.00265608110e-05])
'''
r'''
Formula derived with:
from sympy import *
Tc = symbols('T_{c\,i}')
T, a, kappa0, kappa1 = symbols('T, a_i, \kappa_{0\,i}, \kappa_{1\,i}')
kappa = kappa0 + kappa1*(1 + sqrt(T/Tc))*(Rational(7, 10)-T/Tc)
a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2
diff(a_alpha, T, 2)
'''
sqrtT = sqrt(T)
T_inv = 1.0/T
N = len(Tcs)
if a_alphas is None:
a_alphas = [0.0]*N
if da_alpha_dTs is None:
da_alpha_dTs = [0.0]*N
if d2a_alpha_dT2s is None:
d2a_alpha_dT2s = [0.0]*N
for i in range(N):
Tc_inv_root = 1.0/sqrt(Tcs[i])
Tc_inv = Tc_inv_root*Tc_inv_root
x1 = T*Tc_inv
x2 = sqrtT*Tc_inv_root
x3 = x2 - 1.
x4 = 10.*x1 - 7.
x5 = x2 + 1.
x6 = 10.*kappa0s[i] - kappa1s[i]*x4*x5
x7 = x3*x6
x8 = x7*0.1 - 1.
x10 = x6*T_inv
x11 = kappa1s[i]*x3
x12 = x4*T_inv
x13 = 20.*Tc_inv*x5 + x12*x2
x14 = -x10*x2 + x11*x13
a_alpha = ais[i]*x8*x8
da_alpha_dT = -ais[i]*x14*x8*0.1
d2a_alpha_dT2 = ais[i]*0.005*(x14*x14 - x2*T_inv*(x7 - 10.)*(2.*kappa1s[i]*x13 + x10 + x11*(40.*Tc_inv - x12)))
a_alphas[i] = a_alpha
da_alpha_dTs[i] = da_alpha_dT
d2a_alpha_dT2s[i] = d2a_alpha_dT2
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
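# A quick central-difference sanity check for the analytical derivatives above,
# sketched with the docstring example values (not part of any test suite):
#
#     T, dT = 299.0, 1e-5
#     Tcs, ais = [507.6], [2.6923169620277805]
#     kappa0s, kappa1s = [0.8074380841890093], [0.05104]
#     hi = PRSV_a_alphas_vectorized(T + dT, Tcs, ais, kappa0s, kappa1s)[0]
#     lo = PRSV_a_alphas_vectorized(T - dT, Tcs, ais, kappa0s, kappa1s)[0]
#     da_numeric = (hi - lo)/(2.0*dT)
#     # da_numeric should agree closely with da_alpha_dTs[0] as returned by
#     # PRSV_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappa0s, kappa1s)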
def PRSV2_a_alphas_vectorized(T, Tcs, ais, kappa0s, kappa1s, kappa2s, kappa3s,
a_alphas=None):
r'''Calculates the `a_alpha` terms for the Peng-Robinson-Stryjek-Vera 2
equation of state given the critical temperatures `Tcs`, constants `ais`,
    PRSV2 parameters `kappa0s`, `kappa1s`, `kappa2s`, and `kappa3s`.
.. math::
a_i\alpha_i = a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}}}\right)
\left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left(1
- \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}}
+ \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right)^{2}
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2]
kappa0s : list[float]
`kappa0` parameters of PRSV EOS; the original form uses
:math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-]
kappa1s : list[float]
Fit parameters, can be set to 0 if unknown [-]
kappa2s : list[float]
Fit parameters, can be set to 0 if unknown [-]
kappa3s : list[float]
Fit parameters, can be set to 0 if unknown [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
Notes
-----
Examples
--------
>>> PRSV2_a_alphas_vectorized(400.0, Tcs=[507.6], ais=[2.6923169620277805], kappa0s=[0.8074380841890093], kappa1s=[0.05104], kappa2s=[0.8634], kappa3s=[0.460])
[3.2005700986984]
'''
sqrtT = sqrt(T)
N = len(Tcs)
if a_alphas is None:
a_alphas = [0.0]*N
for i in range(N):
Tc_inv_root = 1.0/sqrt(Tcs[i])
Tr_sqrt = sqrtT*Tc_inv_root
Tr = T*Tc_inv_root*Tc_inv_root
kappa = (kappa0s[i] + ((kappa1s[i] + kappa2s[i]*(kappa3s[i] - Tr)
*(1.0 - Tr_sqrt))*(1.0 + Tr_sqrt)*(0.7 - Tr)))
x0 = (1.0 + kappa*(1.0 - Tr_sqrt))
a_alphas[i] = ais[i]*x0*x0
return a_alphas
def PRSV2_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappa0s, kappa1s, kappa2s, kappa3s,
a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None):
r'''Calculates the `a_alpha` terms and their first and second derivatives
for the Peng-Robinson-Stryjek-Vera 2
equation of state given the critical temperatures `Tcs`, constants `ais`,
    PRSV2 parameters `kappa0s`, `kappa1s`, `kappa2s`, and `kappa3s`.
.. math::
a_i\alpha_i = a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}}}\right)
\left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left(1
- \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}}
+ \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right)^{2}
.. math::
\frac{d a_i\alpha_i}{dT} = a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}
}}\right) \left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left(
1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}}
+ \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right)
\left(2 \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(\left(\sqrt{
\frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}
\right) \left(- \frac{\kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}
\right)}{T_{c,i}} - \frac{\kappa_{2,i} \sqrt{\frac{T}{T_{c,i}}} \left(
- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)}{2 T}\right) - \frac{\left(
\kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right)
\left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{
\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}
}} \left(\kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}
\right) \left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(
- \frac{T}{T_{c,i}} + \frac{7}{10}\right)}{2 T}\right) - \frac{\sqrt{
\frac{T}{T_{c,i}}} \left(\kappa_{0,i} + \left(\kappa_{1,i}
+ \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(
- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}
{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)
\right)}{T}\right)
.. math::
\frac{d^2 a_i\alpha_i}{dT^2} = - \frac{a_{i} \left(\left(\left(10
\kappa_{0,i} - \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}
{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)
\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}
{T_{c,i}} - 7\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right)
- 10\right) \left(\left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(
\frac{40 \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(
\frac{2 \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{
\sqrt{\frac{T}{T_{c,i}}} \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}
{T}\right)}{T_{c,i}} + \frac{\kappa_{2,i} \sqrt{\frac{T}{T_{c,i}}}
\left(\frac{4}{T_{c,i}} - \frac{\frac{T}{T_{c,i}} - \kappa_{3,i}}{T}
\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}
{T_{c,i}} - 7\right)}{T} + \frac{2 \kappa_{2,i} \sqrt{\frac{T}{T_{c,i}}}
\left(\frac{10 T}{T_{c,i}} - 7\right) \left(\frac{2 \left(\sqrt{\frac
{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}}}
\left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}{T}\right)}{T} + \frac{40
\sqrt{\frac{T}{T_{c,i}}} \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{
\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}
\right)\right)}{T T_{c,i}} - \frac{\sqrt{\frac{T}{T_{c,i}}} \left(
\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right)
\left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right) \left(\frac{10 T}
{T_{c,i}} - 7\right)}{T^{2}}\right) + \frac{2 \sqrt{\frac{T}{T_{c,i}}}
\left(\kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(\frac{10 T}{T_{c,i}} - 7\right) \left(\frac{2 \left(\sqrt{
\frac{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}
{T_{c,i}}} \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}{T}\right)
+ \frac{20 \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}
{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)
\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}}
+ \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\kappa_{1,i} + \kappa_{2,i}
\left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}}
- \kappa_{3,i}\right)\right) \left(\frac{10 T}{T_{c,i}} - 7\right)}
{T}\right)}{T} + \frac{\sqrt{\frac{T}{T_{c,i}}} \left(10 \kappa_{0,i}
- \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}}
- 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right)
\left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}}
- 7\right)\right)}{T^{2}}\right) - \left(\left(\sqrt{\frac{T}{T_{c,i}}}
- 1\right) \left(\kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)
\left(\frac{10 T}{T_{c,i}} - 7\right) \left(\frac{2 \left(\sqrt{
\frac{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}}}
\left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}{T}\right) + \frac{20
\left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}}
- 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right) \left(
\sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}
{T_{c,i}}} \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}
{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)
\right) \left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right) - \frac{
\sqrt{\frac{T}{T_{c,i}}} \left(10 \kappa_{0,i} - \left(\kappa_{1,i}
+ \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}
{T_{c,i}} - \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}}
+ 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)\right)}{T}\right)^{2}
\right)}{200}
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2]
kappa0s : list[float]
`kappa0` parameters of PRSV EOS; the original form uses
:math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-]
kappa1s : list[float]
Fit parameters, can be set to 0 if unknown [-]
kappa2s : list[float]
Fit parameters, can be set to 0 if unknown [-]
kappa3s : list[float]
Fit parameters, can be set to 0 if unknown [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
da_alpha_dTs : list[float]
First temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K)]
d2a_alpha_dT2s : list[float]
Second temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K^2)]
Notes
-----
Examples
--------
>>> PRSV2_a_alpha_and_derivatives_vectorized(400.0, Tcs=[507.6], ais=[2.6923169620277805], kappa0s=[0.8074380841890093], kappa1s=[0.05104], kappa2s=[0.8634], kappa3s=[0.460])
([3.2005700986], [-0.005301195971], [1.11181477576e-05])
'''
sqrtT = sqrt(T)
T_inv = 1.0/T
N = len(Tcs)
if a_alphas is None:
a_alphas = [0.0]*N
if da_alpha_dTs is None:
da_alpha_dTs = [0.0]*N
if d2a_alpha_dT2s is None:
d2a_alpha_dT2s = [0.0]*N
for i in range(N):
Tc_inv_root = 1.0/sqrt(Tcs[i])
Tc_inv = Tc_inv_root*Tc_inv_root
x1 = T*Tc_inv
x2 = sqrtT*Tc_inv_root
x3 = x2 - 1.
x4 = x2 + 1.
x5 = 10.*x1 - 7.
x6 = -kappa3s[i] + x1
x7 = kappa1s[i] + kappa2s[i]*x3*x6
x8 = x5*x7
x9 = 10.*kappa0s[i] - x4*x8
x10 = x3*x9
x11 = x10*0.1 - 1.0
x13 = x2*T_inv
x14 = x7*Tc_inv
x15 = kappa2s[i]*x4*x5
x16 = 2.*(-x2 + 1.)*Tc_inv + x13*(kappa3s[i] - x1)
x17 = -x13*x8 - x14*(20.*x2 + 20.) + x15*x16
x18 = x13*x9 + x17*x3
x19 = x2*T_inv*T_inv
x20 = 2.*x2*T_inv
a_alpha = ais[i]*x11*x11
da_alpha_dT = ais[i]*x11*x18*0.1
d2a_alpha_dT2 = ais[i]*(x18*x18 + (x10 - 10.)*(x17*x20 - x19*x9
+ x3*(40.*kappa2s[i]*Tc_inv*x16*x4
+ kappa2s[i]*x16*x20*x5 - 40.*T_inv*x14*x2
- x15*T_inv*x2*(4.0*Tc_inv - x6*T_inv)
+ x19*x8)))*0.005
a_alphas[i] = a_alpha
da_alpha_dTs[i] = da_alpha_dT
d2a_alpha_dT2s[i] = d2a_alpha_dT2
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
def APISRK_a_alphas_vectorized(T, Tcs, ais, S1s, S2s, a_alphas=None):
r'''Calculates the `a_alpha` terms for the API SRK equation of state
given the critical temperatures `Tcs`, constants `ais`, and
API parameters `S1s` and `S2s`.
.. math::
a_i\alpha(T)_i = a_i \left[1 + S_{1,i}\left(1-\sqrt{T_{r,i}}\right)
+ S_{2,i} \frac{1- \sqrt{T_{r,i}}}{\sqrt{T_{r,i}}}\right]^2
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2]
S1s : list[float]
`S1` parameters of API SRK EOS; regressed or estimated with
:math:`S_{1,i} = 0.48508 + 1.55171\omega_i - 0.15613\omega_i^2`, [-]
S2s : list[float]
`S2` parameters of API SRK EOS; regressed or set to zero, [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
Notes
-----
Examples
--------
>>> APISRK_a_alphas_vectorized(T=430.0, Tcs=[514.0], ais=[1.2721974560809934], S1s=[1.678665], S2s=[-0.216396])
[1.60465652994097]
'''
N = len(Tcs)
sqrtT = sqrt(T)
if a_alphas is None:
a_alphas = [0.0]*N
for i in range(N):
rtTr = 1.0/sqrt(Tcs[i])
x0 = (-rtTr*sqrtT + 1.)
x1 = 1.0/(rtTr*sqrtT)
x2 = (S1s[i]*x0 + S2s[i]*(x0)*x1 + 1.0)
a_alphas[i] = ais[i]*x2*x2
return a_alphas
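# Note that with all S2s set to zero the expression reduces to the standard SRK
# alpha function, alpha = (1 + S1*(1 - sqrt(Tr)))**2. A sketch reusing the
# docstring values with the S2 term disabled:
#
#     APISRK_a_alphas_vectorized(T=430.0, Tcs=[514.0], ais=[1.2721974560809934],
#                                S1s=[1.678665], S2s=[0.0])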
def APISRK_a_alpha_and_derivatives_vectorized(T, Tcs, ais, S1s, S2s, a_alphas=None,
da_alpha_dTs=None, d2a_alpha_dT2s=None):
r'''Calculates the `a_alpha` terms and their first two temperature
derivatives for the API SRK equation of state
given the critical temperatures `Tcs`, constants `ais`, and
API parameters `S1s` and `S2s`.
.. math::
a_i\alpha(T)_i = a_i \left[1 + S_{1,i}\left(1-\sqrt{T_{r,i}}\right)
+ S_{2,i} \frac{1- \sqrt{T_{r,i}}}{\sqrt{T_{r,i}}}\right]^2
.. math::
\frac{d a_i\alpha_i}{dT} = a_i\frac{T_{c,i}}{T^{2}} \left(- S_{2,i} \left(\sqrt{
\frac{T}{T_{c,i}}} - 1\right) + \sqrt{\frac{T}{T_{c,i}}} \left(S_{1,i} \sqrt{
\frac{T}{T_{c,i}}} + S_{2,i}\right)\right) \left(S_{2,i} \left(\sqrt{\frac{
T}{T_{c,i}}} - 1\right) + \sqrt{\frac{T}{T_{c,i}}} \left(S_{1,i} \left(\sqrt{
\frac{T}{T_{c,i}}} - 1\right) - 1\right)\right)
.. math::
\frac{d^2 a_i\alpha_i}{dT^2} = a_i\frac{1}{2 T^{3}} \left(S_{1,i}^{2} T
\sqrt{\frac{T}{T_{c,i}}} - S_{1,i} S_{2,i} T \sqrt{\frac{T}{T_{c,i}}} + 3 S_{1,i}
S_{2,i} T_{c,i} \sqrt{\frac{T}{T_{c,i}}} + S_{1,i} T \sqrt{\frac{T}{T_{c,i}}}
- 3 S_{2,i}^{2} T_{c,i} \sqrt{\frac{T}{T_{c,i}}} + 4 S_{2,i}^{2} T_{c,i} + 3 S_{2,i}
T_{c,i} \sqrt{\frac{T}{T_{c,i}}}\right)
Parameters
----------
T : float
Temperature, [K]
Tcs : list[float]
Critical temperatures of components, [K]
ais : list[float]
`a` parameters of cubic EOS,
:math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2]
S1s : list[float]
`S1` parameters of API SRK EOS; regressed or estimated with
:math:`S_{1,i} = 0.48508 + 1.55171\omega_i - 0.15613\omega_i^2`, [-]
S2s : list[float]
`S2` parameters of API SRK EOS; regressed or set to zero, [-]
Returns
-------
a_alphas : list[float]
Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
da_alpha_dTs : list[float]
First temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K)]
d2a_alpha_dT2s : list[float]
Second temperature derivative of pure component `a_alpha`,
[Pa*m^6/(mol^2*K^2)]
Notes
-----
Examples
--------
>>> APISRK_a_alpha_and_derivatives_vectorized(T=430.0, Tcs=[514.0], ais=[1.2721974560809934], S1s=[1.678665], S2s=[-0.216396])
([1.60465652994], [-0.0043155855337], [8.9931026263e-06])
'''
N = len(Tcs)
T_inv = 1.0/T
c0 = T_inv*T_inv*0.5
if a_alphas is None:
a_alphas = [0.0]*N
if da_alpha_dTs is None:
da_alpha_dTs = [0.0]*N
if d2a_alpha_dT2s is None:
d2a_alpha_dT2s = [0.0]*N
for i in range(N):
x0 = sqrt(T/Tcs[i])
x1 = x0 - 1.
x2 = x1/x0
x3 = S2s[i]*x2
x4 = S1s[i]*x1 + x3 - 1.
x5 = S1s[i]*x0
x6 = S2s[i] - x3 + x5
x7 = 3.*S2s[i]
a_alphas[i] = ais[i]*x4*x4
da_alpha_dTs[i] = ais[i]*x4*x6*T_inv
d2a_alpha_dT2s[i] = ais[i]*(-x4*(-x2*x7 + x5 + x7) + x6*x6)*c0
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
def TWU_a_alpha_common(T, Tc, omega, a, full=True, method='PR'):
r'''Function to calculate `a_alpha` and optionally its first and second
derivatives for the TWUPR or TWUSRK EOS. Returns 'a_alpha', and
optionally 'da_alpha_dT' and 'd2a_alpha_dT2'.
Used by `TWUPR` and `TWUSRK`; has little purpose on its own.
See either class for the correct reference, and examples of using the EOS.
Parameters
----------
T : float
Temperature, [K]
Tc : float
Critical temperature, [K]
omega : float
Acentric factor, [-]
a : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
    full : bool
        Whether or not to also return the first and second temperature
        derivatives, [-]
method : str
Either 'PR' or 'SRK'
Notes
-----
The derivatives are somewhat long and are not described here for
brevity; they are obtainable from the following SymPy expression.
>>> from sympy import *
>>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0')
>>> Tr = T/Tc
>>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0)))
>>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1)))
>>> alpha = alpha0 + omega*(alpha1-alpha0)
>>> # diff(alpha, T)
>>> # diff(alpha, T, T)
'''
    # A floor as small as 1e-10 also works; many of the downstream formulas
    # misbehave when a_alpha is exactly zero, so clamp to a small positive value.
    min_a_alpha = 1e-3
    Tr = T/Tc
    if Tr < 5e-3:
        # Remap very low reduced temperatures [0, 5e-3) onto [4e-3, 5e-3) so
        # the power and exponential terms below stay numerically well behaved;
        # earlier, narrower remappings (kept for reference) were not enough:
        # Tr = 1e-3 + (Tr - 0.0)*(1e-3 - 5e-4)/1e-3
        # Tr = 5e-4 + (Tr - 0.0)*(5e-4)/1e-3
        Tr = 4e-3 + (Tr - 0.0)*(1e-3)/5e-3
T = Tc*Tr
if method == 'PR':
if Tr < 1.0:
L0, M0, N0 = 0.125283, 0.911807, 1.948150
L1, M1, N1 = 0.511614, 0.784054, 2.812520
else:
L0, M0, N0 = 0.401219, 4.963070, -0.2
L1, M1, N1 = 0.024955, 1.248089, -8.
elif method == 'SRK':
if Tr < 1.0:
L0, M0, N0 = 0.141599, 0.919422, 2.496441
L1, M1, N1 = 0.500315, 0.799457, 3.291790
else:
L0, M0, N0 = 0.441411, 6.500018, -0.20
L1, M1, N1 = 0.032580, 1.289098, -8.0
else:
raise ValueError('Only `PR` and `SRK` are accepted as method')
if not full:
alpha0 = Tr**(N0*(M0-1.))*exp(L0*(1.-Tr**(N0*M0)))
alpha1 = Tr**(N1*(M1-1.))*exp(L1*(1.-Tr**(N1*M1)))
alpha = alpha0 + omega*(alpha1 - alpha0)
a_alpha = a*alpha
if a_alpha < min_a_alpha:
a_alpha = min_a_alpha
return a_alpha
else:
x0 = Tr
x1 = M0 - 1
x2 = N0*x1
x3 = x0**x2
x4 = M0*N0
x5 = x0**x4
x6 = exp(-L0*(x5 - 1.))
x7 = x3*x6
x8 = M1 - 1.
x9 = N1*x8
x10 = x0**x9
x11 = M1*N1
x12 = x0**x11
x13 = x2*x7
x14 = L0*M0*N0*x3*x5*x6
x15 = x13 - x14
x16 = exp(-L1*(x12 - 1))
x17 = -L1*M1*N1*x10*x12*x16 + x10*x16*x9 - x13 + x14
x18 = N0*N0
x19 = x18*x3*x6
x20 = x1**2*x19
x21 = M0**2
x22 = L0*x18*x3*x5*x6
x23 = x21*x22
x24 = 2*M0*x1*x22
x25 = L0**2*x0**(2*x4)*x19*x21
x26 = N1**2
x27 = x10*x16*x26
x28 = M1**2
x29 = L1*x10*x12*x16*x26
a_alpha = a*(-omega*(-x10*exp(L1*(-x12 + 1)) + x3*exp(L0*(-x5 + 1))) + x7)
da_alpha_dT = a*(omega*x17 + x15)/T
d2a_alpha_dT2 = a*(-(omega*(-L1**2*x0**(2.*x11)*x27*x28 + 2.*M1*x29*x8 + x17 + x20 - x23 - x24 + x25 - x27*x8**2 + x28*x29) + x15 - x20 + x23 + x24 - x25)/T**2)
if a_alpha < min_a_alpha:
a_alpha = min_a_alpha
            da_alpha_dT = d2a_alpha_dT2 = 0.0
            # This clamp is reached e.g. for hydrogen at low temperatures; an
            # alternative (left disabled) is to zero a_alpha as well:
            # a_alpha = da_alpha_dT = d2a_alpha_dT2 = 0.0
return a_alpha, da_alpha_dT, d2a_alpha_dT2
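# A minimal usage sketch for TWU_a_alpha_common; Tc and `a` below are the
# hexane-like values used in the docstring examples of this module, while the
# acentric factor is illustrative only:
#
#     a_alpha = TWU_a_alpha_common(299.0, Tc=507.6, omega=0.301,
#                                  a=2.6923169620277805, full=False, method='PR')
#     a_alpha, da_dT, d2a_dT2 = TWU_a_alpha_common(299.0, Tc=507.6, omega=0.301,
#                                                  a=2.6923169620277805,
#                                                  full=True, method='SRK')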
def Twu91_alpha_pure(T, Tc, c0, c1, c2):
Tr = T/Tc
return (Tr**(c2*(c1 - 1.0))*exp(c0*(1.0 - (Tr)**(c1*c2))))
def Soave_1979_alpha_pure(T, Tc, M, N):
Tr = T/Tc
return (1.0 + (1.0 - Tr)*(M + N/Tr))
def Soave_1972_alpha_pure(T, Tc, c0):
    Tr = T/Tc
    return (c0*(1.0 - sqrt(Tr)) + 1.0)**2
def Heyen_alpha_pure(T, Tc, c1, c2):
return exp(c1*(1.0 -(T/Tc)**c2))
def Harmens_Knapp_alpha_pure(T, Tc, c1, c2):
return (c1*(-sqrt(T/Tc) + 1) - c2*(1 - Tc/T) + 1)**2
def Mathias_1983_alpha_pure(T, Tc, c1, c2):
Tr = T/Tc
return (1 + c1*(1-sqrt(Tr)) -c2*(1-Tr)*(0.7-Tr))**2
def Mathias_Copeman_untruncated_alpha_pure(T, Tc, c1, c2, c3):
return (c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
def Mathias_Copeman_original_alpha_pure(T, Tc, c1, c2, c3):
if T < Tc:
return (c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
rt = sqrt(T/Tc)
tau = 1.0 - rt
x = (1.0 + c1*tau)
return x*x
def Mathias_Copeman_alpha_pure(T, Tc, *alpha_coeffs):
rt = sqrt(T/Tc)
tau = 1.0 - rt
if T < Tc:
x0 = horner(alpha_coeffs, tau)
return x0*x0
else:
x = (1.0 + alpha_coeffs[-2]*tau)
return x*x
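# A sketch of calling the horner-based form directly; the trailing coefficients
# are ordered [c3, c2, c1, 1.0], highest power of tau first, and the numeric
# values here are illustrative only:
#
#     Mathias_Copeman_alpha_pure(300.0, 507.6, 0.0443, -0.177, 0.762, 1.0)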
def Gibbons_Laughton_alpha_pure(T, Tc, c1, c2):
return (c1*(T/Tc - 1) + c2*(sqrt(T/Tc) - 1) + 1)
def Soave_1984_alpha_pure(T, Tc, c1, c2):
return (c1*(-T/Tc + 1) + c2*(-1 + Tc/T) + 1)
def Yu_Lu_alpha_pure(T, Tc, c1, c2, c3, c4):
return 10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))
def Trebble_Bishnoi_alpha_pure(T, Tc, c1):
return exp(c1*(-T/Tc + 1))
def Melhem_alpha_pure(T, Tc, c1, c2):
return exp(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2)
def Androulakis_alpha_pure(T, Tc, c1, c2, c3):
return (c1*(-(T/Tc)**(2/3) + 1) + c2*(-(T/Tc)**(2/3) + 1)**2 + c3*(-(T/Tc)**(2/3) + 1)**3 + 1)
def Schwartzentruber_alpha_pure(T, Tc, c1, c2, c3, c4):
return ((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)**2)
def Almeida_alpha_pure(T, Tc, c1, c2, c3):
return exp(c1*(-T/Tc + 1)*abs(T/Tc - 1)**(c2 - 1) + c3*(-1 + Tc/T))
def Soave_1993_alpha_pure(T, Tc, c1, c2):
return (c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2 + 1)
def Gasem_alpha_pure(T, Tc, c1, c2, c3):
return (exp((-(T/Tc)**c3 + 1)*(T*c2/Tc + c1)))
def Coquelet_alpha_pure(T, Tc, c1, c2, c3):
return (exp(c1*(-T/Tc + 1)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2))
def Haghtalab_alpha_pure(T, Tc, c1, c2, c3):
return exp((-c3**log(T/Tc) + 1)*(-T*c2/Tc + c1))
def Saffari_alpha_pure(T, Tc, c1, c2, c3):
return (exp(T*c1/Tc + c2*log(T/Tc) + c3*(-sqrt(T/Tc) + 1)))
def Chen_Yang_alpha_pure(T, Tc, omega, c1, c2, c3, c4, c5, c6, c7):
return exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2))
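# The *_alpha_pure helpers above return the dimensionless alpha(T) only; the
# classes below multiply by the EOS `a` constant to produce a_alpha. A sketch
# with illustrative Twu91 coefficients:
#
#     alpha = Twu91_alpha_pure(300.0, 507.6, 0.1236, 0.8898, 2.0115)
#     a_alpha = 2.6923169620277805*alpha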
class a_alpha_base(object):
def _init_test(self, Tc, a, alpha_coeffs, **kwargs):
self.Tc = Tc
self.a = a
self.alpha_coeffs = alpha_coeffs
self.__dict__.update(kwargs)
class Poly_a_alpha(object):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives given that there is a polynomial equation for
:math:`\alpha`.
.. math::
a \alpha = a\cdot \text{poly}(T)
Parameters
----------
T : float
Temperature, [K]
Returns
-------
a_alphas : list[float]
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dTs : list[float]
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2s : list[float]
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K**2]
'''
res = horner_and_der2(self.alpha_coeffs, T)
a = self.a
return (a*res[0], a*res[1], a*res[2])
def a_alpha_pure(self, T):
r'''Method to calculate `a_alpha` given that there is a polynomial
equation for :math:`\alpha`.
.. math::
a \alpha = a\cdot \text{poly}(T)
Parameters
----------
T : float
Temperature, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
'''
return self.a*horner(self.alpha_coeffs, T)
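# A minimal sketch of wiring up one of these mixin classes by hand; the library
# normally assigns these attributes through its EOS constructors, and the
# polynomial coefficients below (highest power of T first) are illustrative only:
#
#     obj = Poly_a_alpha()
#     obj.a = 2.6923169620277805
#     obj.alpha_coeffs = [1e-8, -1e-4, 1.5]
#     obj.a_alpha_pure(300.0)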
class Soave_1972_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Soave (1972) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Same as `SRK.a_alpha_and_derivatives` but slower and
requiring `alpha_coeffs` to be set. One coefficient needed.
.. math::
\alpha = \left(c_{0} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
+ 1\right)^{2}
References
----------
.. [1] <NAME>. "Equilibrium Constants from a Modified Redlich-
Kwong Equation of State." Chemical Engineering Science 27, no. 6
(June 1972): 1197-1203. doi:10.1016/0009-2509(72)80096-4.
.. [2] Young, <NAME>., <NAME>, and <NAME>.
"Comparison of 20 Alpha Functions Applied in the Peng–Robinson
Equation of State for Vapor Pressure Estimation." Industrial &
Engineering Chemistry Research 55, no. 22 (June 8, 2016): 6506-16.
doi:10.1021/acs.iecr.6b00721.
'''
c0 = self.alpha_coeffs[0]
Tc, a = self.Tc, self.a
a_alpha = a*(c0*(-sqrt(T/Tc) + 1) + 1)**2
da_alpha_dT = -a*c0*sqrt(T/Tc)*(c0*(-sqrt(T/Tc) + 1) + 1)/T
d2a_alpha_dT2 = a*c0*(c0/Tc - sqrt(T/Tc)*(c0*(sqrt(T/Tc) - 1) - 1)/T)/(2*T)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
c0 = self.alpha_coeffs[0]
Tc, a = self.Tc, self.a
return a*Soave_1972_alpha_pure(T, Tc, c0)
class Heyen_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Heyen (1980) [1]_. Returns `a_alpha`, `da_alpha_dT`,
and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Two coefficients needed.
.. math::
\alpha = e^{c_{1} \left(- \left(\frac{T}{T_{c,i}}\right)^{c_{2}}
+ 1\right)}
References
----------
.. [1] <NAME>. Liquid and Vapor Properties from a Cubic Equation of
State. In "Proceedings of the 2nd International Conference on Phase
Equilibria and Fluid Properties in the Chemical Industry". DECHEMA:
Frankfurt, 1980; p 9-13.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*exp(c1*(1 -(T/Tc)**c2))
da_alpha_dT = -a*c1*c2*(T/Tc)**c2*exp(c1*(-(T/Tc)**c2 + 1))/T
d2a_alpha_dT2 = a*c1*c2*(T/Tc)**c2*(c1*c2*(T/Tc)**c2 - c2 + 1)*exp(-c1*((T/Tc)**c2 - 1))/T**2
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
return a*Heyen_alpha_pure(T, Tc, c1, c2)
class Harmens_Knapp_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Harmens and Knapp (1980) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Two coefficients needed.
.. math::
\alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
- c_{2} \left(1 - \frac{T_{c,i}}{T}\right) + 1\right)^{2}
References
----------
.. [1] <NAME>., and <NAME>. "Three-Parameter Cubic Equation of
State for Normal Substances." Industrial & Engineering Chemistry
Fundamentals 19, no. 3 (August 1, 1980): 291-94.
doi:10.1021/i160075a010.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(c1*(-sqrt(T/Tc) + 1) - c2*(1 - Tc/T) + 1)**2
da_alpha_dT = a*(-c1*sqrt(T/Tc)/T - 2*Tc*c2/T**2)*(c1*(-sqrt(T/Tc) + 1) - c2*(1 - Tc/T) + 1)
d2a_alpha_dT2 = a*((c1*sqrt(T/Tc) + 2*Tc*c2/T)**2 - (c1*sqrt(T/Tc) + 8*Tc*c2/T)*(c1*(sqrt(T/Tc) - 1) + c2*(1 - Tc/T) - 1))/(2*T**2)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
return a*Harmens_Knapp_alpha_pure(T, Tc, c1, c2)
class Mathias_1983_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Mathias (1983) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Two coefficients needed.
.. math::
\alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
- c_{2} \left(- \frac{T}{T_{c,i}} + 0.7\right) \left(- \frac{T}{T_{c,i}}
+ 1\right) + 1\right)^{2}
References
----------
.. [1] <NAME>. "A Versatile Phase Equilibrium Equation of
State." Industrial & Engineering Chemistry Process Design and
Development 22, no. 3 (July 1, 1983): 385-91.
doi:10.1021/i200022a008.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
Tr = T/Tc
a_alpha = a*(1 + c1*(1-sqrt(Tr)) -c2*(1-Tr)*(0.7-Tr))**2
da_alpha_dT = a*(c1*(-sqrt(T/Tc) + 1) - c2*(-T/Tc + 0.7)*(-T/Tc + 1) + 1)*(2*c2*(-T/Tc + 0.7)/Tc + 2*c2*(-T/Tc + 1)/Tc - c1*sqrt(T/Tc)/T)
d2a_alpha_dT2 = a*((8*c2/Tc**2 - c1*sqrt(T/Tc)/T**2)*(c1*(sqrt(T/Tc) - 1) + c2*(T/Tc - 1)*(T/Tc - 0.7) - 1) + (2*c2*(T/Tc - 1)/Tc + 2*c2*(T/Tc - 0.7)/Tc + c1*sqrt(T/Tc)/T)**2)/2
return a_alpha, da_alpha_dT, d2a_alpha_dT2
    def a_alpha_pure(self, T):
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        return a*Mathias_1983_alpha_pure(T, Tc, c1, c2)
class Mathias_Copeman_untruncated_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Mathias and Copeman (1983) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Three coefficients needed.
.. math::
\alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
+ c_{2} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + c_{3} \left(
- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{3} + 1\right)^{2}
References
----------
.. [1] Mathias, <NAME>., and <NAME>. "Extension of the
Peng-Robinson Equation of State to Complex Mixtures: Evaluation of
the Various Forms of the Local Composition Concept." Fluid Phase
Equilibria 13 (January 1, 1983): 91-108.
doi:10.1016/0378-3812(83)80084-3.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
da_alpha_dT = a*(-c1*sqrt(T/Tc)/T - 2*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T - 3*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)**2/T)*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)
d2a_alpha_dT2 = a*(T*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2)**2 - (2*T*(c2 - 3*c3*(sqrt(T/Tc) - 1)) + Tc*sqrt(T/Tc)*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2))*(c1*(sqrt(T/Tc) - 1) - c2*(sqrt(T/Tc) - 1)**2 + c3*(sqrt(T/Tc) - 1)**3 - 1))/(2*T**2*Tc)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
return a*Mathias_Copeman_untruncated_alpha_pure(T, Tc, c1, c2, c3)
class Mathias_Copeman_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Mathias and Copeman (1983) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Three coefficients needed.
.. math::
\alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
+ c_{2} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + c_{3} \left(
- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{3} + 1\right)^{2}
References
----------
.. [1] Mathias, <NAME>., and <NAME>. "Extension of the
Peng-Robinson Equation of State to Complex Mixtures: Evaluation of
the Various Forms of the Local Composition Concept." Fluid Phase
Equilibria 13 (January 1, 1983): 91-108.
doi:10.1016/0378-3812(83)80084-3.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
Tr = T/Tc
if Tr > 1:
x0 = 1.0/T
x1 = 1.0/Tc
x2 = sqrt(T*x1)
x3 = c1*(x2 - 1.0) - 1.0
x4 = x0*x2*x3
a_alpha = a*x3*x3
da_alpha_dT = a*c1*x4
d2a_alpha_dT2 = 0.5*a*c1*x0*(c1*x1 - x4)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
else:
a_alpha = a*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
da_alpha_dT = a*(-c1*sqrt(T/Tc)/T - 2*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T - 3*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)**2/T)*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)
d2a_alpha_dT2 = a*(T*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2)**2 - (2*T*(c2 - 3*c3*(sqrt(T/Tc) - 1)) + Tc*sqrt(T/Tc)*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2))*(c1*(sqrt(T/Tc) - 1) - c2*(sqrt(T/Tc) - 1)**2 + c3*(sqrt(T/Tc) - 1)**3 - 1))/(2*T**2*Tc)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Mathias_Copeman_original_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Mathias_Copeman_poly_a_alpha(a_alpha_base):
def a_alphas_vectorized(self, T):
ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
a_alphas = []
for i in range(self.N):
tau = 1.0 - (T/Tcs[i])**0.5
if T < Tcs[i]:
x0 = horner(alpha_coeffs[i], tau)
a_alpha = x0*x0*ais[i]
else:
x = (1.0 + alpha_coeffs[i][-2]*tau)
a_alpha = ais[i]*x*x
a_alphas.append(a_alpha)
return a_alphas
def a_alpha_and_derivatives_vectorized(self, T):
ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [], [], []
for i in range(self.N):
a = ais[i]
Tc = Tcs[i]
rt = (T/Tc)**0.5
tau = 1.0 - rt
if T < Tc:
x0, x1, x2 = horner_and_der2(alpha_coeffs[i], tau)
a_alpha = x0*x0*a
da_alpha_dT = -a*(rt*x0*x1/T)
d2a_alpha_dT2 = a*((x0*x2/Tc + x1*x1/Tc + rt*x0*x1/T)/(2.0*T))
else:
                # alpha_coeffs is ordered [c3, c2, c1, 1.0], so index [-2]
                # retrieves c1 for the supercritical branch.
                c1 = alpha_coeffs[i][-2]
x0 = 1.0/T
x1 = 1.0/Tc
                x2 = rt  # == sqrt(T*x1)
x3 = c1*(x2 - 1.0) - 1.0
x4 = x0*x2*x3
a_alpha = a*x3*x3
da_alpha_dT = a*c1*x4
d2a_alpha_dT2 = a*0.5*c1*x0*(c1*x1 - x4)
a_alphas.append(a_alpha)
da_alpha_dTs.append(da_alpha_dT)
d2a_alpha_dT2s.append(d2a_alpha_dT2)
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
def a_alpha_pure(self, T):
        # alpha_coeffs is always ordered [c3, c2, c1, 1.0]; index [-2] retrieves c1.
        # Equivalent call into the module-level helper:
        # return self.a*Mathias_Copeman_alpha_pure(T, self.Tc, *self.alpha_coeffs)
Tc = self.Tc
a = self.a
rt = sqrt(T/Tc)
tau = 1.0 - rt
alpha_coeffs = self.alpha_coeffs
if T < Tc:
x0 = horner(alpha_coeffs, tau)
a_alpha = x0*x0*a
return a_alpha
else:
x = (1.0 + alpha_coeffs[-2]*tau)
return a*x*x
def a_alpha_and_derivatives_pure(self, T):
Tc = self.Tc
a = self.a
rt = (T/Tc)**0.5
tau = 1.0 - rt
alpha_coeffs = self.alpha_coeffs
if T < Tc:
# Do not optimize until unit tests are in place
x0, x1, x2 = horner_and_der2(alpha_coeffs, tau)
a_alpha = x0*x0*a
da_alpha_dT = -a*(rt*x0*x1/T)
d2a_alpha_dT2 = a*((x0*x2/Tc + x1*x1/Tc + rt*x0*x1/T)/(2.0*T))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
else:
'''
from sympy import *
T, Tc, c1 = symbols('T, Tc, c1')
tau = 1 - sqrt(T/Tc)
alpha = (1 + c1*tau)**2
cse([alpha, diff(alpha, T), diff(alpha, T, T)], optimizations='basic')
'''
            # alpha_coeffs is ordered [c3, c2, c1, 1.0]; index [-2] retrieves c1.
            c1 = alpha_coeffs[-2]
x0 = 1.0/T
x1 = 1.0/Tc
            x2 = rt  # == sqrt(T*x1)
x3 = c1*(x2 - 1.0) - 1.0
x4 = x0*x2*x3
a_alpha = a*x3*x3
da_alpha_dT = a*c1*x4
d2a_alpha_dT2 = 0.5*a*c1*x0*(c1*x1 - x4)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
class Gibbons_Laughton_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Gibbons and Laughton (1984) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Two coefficients needed.
.. math::
\alpha = c_{1} \left(\frac{T}{T_{c,i}} - 1\right) + c_{2}
\left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) + 1
References
----------
.. [1] Gibbons, <NAME>., and <NAME>. "An Equation of
State for Polar and Non-Polar Substances and Mixtures" 80, no. 9
(January 1, 1984): 1019-38. doi:10.1039/F29848001019.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(c1*(T/Tc - 1) + c2*(sqrt(T/Tc) - 1) + 1)
da_alpha_dT = a*(c1/Tc + c2*sqrt(T/Tc)/(2*T))
d2a_alpha_dT2 = a*(-c2*sqrt(T/Tc)/(4*T**2))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
return a*Gibbons_Laughton_alpha_pure(T, Tc, c1, c2)
class Soave_1984_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Soave (1984) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Two coefficients needed.
.. math::
\alpha = c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) + c_{2} \left(-1
+ \frac{T_{c,i}}{T}\right) + 1
References
----------
.. [1] <NAME>. "Improvement of the Van Der Waals Equation of State."
Chemical Engineering Science 39, no. 2 (January 1, 1984): 357-69.
doi:10.1016/0009-2509(84)80034-2.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(c1*(-T/Tc + 1) + c2*(-1 + Tc/T) + 1)
da_alpha_dT = a*(-c1/Tc - Tc*c2/T**2)
d2a_alpha_dT2 = a*(2*Tc*c2/T**3)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
return a*Soave_1984_alpha_pure(T, Tc, c1, c2)
class Yu_Lu_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Yu and Lu (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Four coefficients needed.
.. math::
\alpha = 10^{c_{4} \left(- \frac{T}{T_{c,i}} + 1\right) \left(
\frac{T^{2} c_{3}}{Tc^{2}} + \frac{T c_{2}}{T_{c,i}} + c_{1}\right)}
References
----------
.. [1] Yu, Jin-Min, and <NAME>. -<NAME>. "A Three-Parameter Cubic
Equation of State for Asymmetric Mixture Density Calculations."
Fluid Phase Equilibria 34, no. 1 (January 1, 1987): 1-19.
doi:10.1016/0378-3812(87)85047-1.
'''
c1, c2, c3, c4 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))
da_alpha_dT = a*(10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))*(c4*(-T/Tc + 1)*(2*T*c3/Tc**2 + c2/Tc) - c4*(T**2*c3/Tc**2 + T*c2/Tc + c1)/Tc)*log(10))
d2a_alpha_dT2 = a*(10**(-c4*(T/Tc - 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))*c4*(-4*T*c3/Tc - 2*c2 - 2*c3*(T/Tc - 1) + c4*(T**2*c3/Tc**2 + T*c2/Tc + c1 + (T/Tc - 1)*(2*T*c3/Tc + c2))**2*log(10))*log(10)/Tc**2)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Yu_Lu_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Trebble_Bishnoi_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. One coefficient needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right)}
References
----------
.. [1] <NAME>., and <NAME>. "Development of a New Four-
Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1
(September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8.
'''
c1 = self.alpha_coeffs[0]
Tc, a = self.Tc, self.a
a_alpha = a*exp(c1*(-T/Tc + 1))
da_alpha_dT = a*-c1*exp(c1*(-T/Tc + 1))/Tc
d2a_alpha_dT2 = a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Trebble_Bishnoi_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Melhem_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Melhem et al. (1989) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Two coefficients needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) + c_{2}
\left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2}}
References
----------
.. [1] Melhem, <NAME>., <NAME>, and <NAME>. "A
Modified Peng-Robinson Equation of State." Fluid Phase Equilibria
47, no. 2 (August 1, 1989): 189-237.
doi:10.1016/0378-3812(89)80176-1.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*exp(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2)
da_alpha_dT = a*((-c1/Tc - c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T)*exp(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2))
d2a_alpha_dT2 = a*(((c1/Tc - c2*sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T)**2 + c2*(1/Tc - sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T)/(2*T))*exp(-c1*(T/Tc - 1) + c2*(sqrt(T/Tc) - 1)**2))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Melhem_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Androulakis_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Androulakis et al. (1989) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Three coefficients needed.
.. math::
\alpha = c_{1} \left(- \left(\frac{T}{T_{c,i}}\right)^{\frac{2}{3}}
+ 1\right) + c_{2} \left(- \left(\frac{T}{T_{c,i}}\right)^{\frac{2}{3}}
+ 1\right)^{2} + c_{3} \left(- \left(\frac{T}{T_{c,i}}\right)^{
\frac{2}{3}} + 1\right)^{3} + 1
References
----------
.. [1] Androulakis, <NAME>., <NAME>, and <NAME>.
"Thermophysical Properties of Pure Polar and Nonpolar Compounds with
a Modified VdW-711 Equation of State." Fluid Phase Equilibria 45,
no. 2 (April 1, 1989): 135-63. doi:10.1016/0378-3812(89)80254-7.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(c1*(-(T/Tc)**(2/3) + 1) + c2*(-(T/Tc)**(2/3) + 1)**2 + c3*(-(T/Tc)**(2/3) + 1)**3 + 1)
da_alpha_dT = a*(-2*c1*(T/Tc)**(2/3)/(3*T) - 4*c2*(T/Tc)**(2/3)*(-(T/Tc)**(2/3) + 1)/(3*T) - 2*c3*(T/Tc)**(2/3)*(-(T/Tc)**(2/3) + 1)**2/T)
d2a_alpha_dT2 = a*(2*(T/Tc)**(2/3)*(c1 + 4*c2*(T/Tc)**(2/3) - 2*c2*((T/Tc)**(2/3) - 1) - 12*c3*(T/Tc)**(2/3)*((T/Tc)**(2/3) - 1) + 3*c3*((T/Tc)**(2/3) - 1)**2)/(9*T**2))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Androulakis_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Schwartzentruber_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Schwartzentruber et al. (1990) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Three coefficients needed.
.. math::
\alpha = \left(c_{4} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
- \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{T^{2} c_{3}}
{Tc^{2}} + \frac{T c_{2}}{T_{c,i}} + c_{1}\right) + 1\right)^{2}
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "K-values for
           Non-Ideal Systems: An Easier Way," Chem. Eng., March 1990, 118-124.
'''
c1, c2, c3, c4 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)**2)
da_alpha_dT = a*((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)*(-2*(-sqrt(T/Tc) + 1)*(2*T*c3/Tc**2 + c2/Tc) - c4*sqrt(T/Tc)/T + sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T))
d2a_alpha_dT2 = a*(((-c4*(sqrt(T/Tc) - 1) + (sqrt(T/Tc) - 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)*(8*c3*(sqrt(T/Tc) - 1)/Tc**2 + 4*sqrt(T/Tc)*(2*T*c3/Tc + c2)/(T*Tc) + c4*sqrt(T/Tc)/T**2 - sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T**2) + (2*(sqrt(T/Tc) - 1)*(2*T*c3/Tc + c2)/Tc - c4*sqrt(T/Tc)/T + sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T)**2)/2)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Schwartzentruber_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Almeida_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Almeida et al. (1991) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Three coefficients needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) \left|{
\frac{T}{T_{c,i}} - 1}\right|^{c_{2} - 1} + c_{3} \left(-1
+ \frac{T_{c,i}}{T}\right)}
References
----------
.. [1] <NAME>., <NAME>, and <NAME>. "Uma Nova Forma de
Dependência Com a Temperatura Do Termo Atrativo de Equações de
Estado Cúbicas." RBE, Rev. Bras. Eng., Cad. Eng. Quim 8 (1991): 95.
'''
# Note: Sympy didn't handle the derivative of the absolute value for
# the second derivative, requires the use a CAS which can
# handle the assumption that Tr-1 != 0.
# A second pass on this function resulted in writting two functions:
# one which works on Tr < 1, one which works on Tr > 1.
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
Tr = T/Tc
if Tr > 1:
x0 = c3*(1 - Tc/T)
x1 = 1/Tc
x2 = T*x1 - 1
x3 = c2 - 1
x4 = c1*x2**x3
x5 = x2*x4
x6 = exp(-x0 - x5)
x7 = Tc*c3
x8 = x1*x4
x9 = x3*x8 + x8 + x7/T**2
x10 = x4/(Tc**2*x2)
alpha, d_alpha_dT, d2_alpha_dT2 = exp(-x0 - x5), -x6*x9, x6*(-x10*x3**2 - x10*x3 + x9**2 + 2*x7/T**3)
else:
x0 = c3*(1 - Tc/T)
x1 = 1/Tc
x2 = T*x1
x3 = x2 - 1
x4 = c2 - 1
x5 = c1*(1 - x2)**x4
x6 = x3*x5
x7 = exp(-x0 - x6)
x8 = Tc*c3
x9 = x1*x5
x10 = x4*x9 + x9 + x8/T**2
x11 = x5/(Tc**2*x3)
alpha, d_alpha_dT, d2_alpha_dT2 = exp(-x0 - x6), -x10*x7, x7*(x10**2 - x11*x4**2 - x11*x4 + 2*x8/T**3)
return a*alpha, a*d_alpha_dT, a*d2_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Almeida_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Twu91_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Twu et al. (1991) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Three coefficients needed.
.. math::
\alpha = \left(\frac{T}{T_{c,i}}\right)^{c_{3} \left(c_{2}
- 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c,i}}
\right)^{c_{2} c_{3}} + 1\right)}
References
----------
.. [1] Twu, <NAME>., <NAME>, <NAME>, and <NAME>.
Coon. "A Cubic Equation of State with a New Alpha Function and a
New Mixing Rule." Fluid Phase Equilibria 69 (December 10, 1991):
33-50. doi:10.1016/0378-3812(91)90024-2.
'''
c0, c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
Tr = T/Tc
T_inv = 1.0/T
x1 = c1 - 1.0
x2 = c2*x1
x3 = c1*c2
x4 = Tr**x3
x5 = a*Tr**x2*exp(-c0*(x4 - 1.0))
x6 = c0*x4
x7 = c1*x6
x8 = c2*x5
x9 = c1*c1*c2
d2a_alpha_dT2 = (x8*(c0*c0*x4*x4*x9 - c1 + c2*x1*x1
- 2.0*x2*x7 - x6*x9 + x7 + 1.0)*T_inv*T_inv)
return x5, x8*(x1 - x7)*T_inv, d2a_alpha_dT2
def a_alpha_pure(self, T):
c0, c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
return a*Twu91_alpha_pure(T, Tc, c0, c1, c2)
def a_alphas_vectorized(self, T):
ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
a_alphas = []
for i in range(self.N):
coeffs = alpha_coeffs[i]
Tr = T/Tcs[i]
a_alpha = ais[i]*(Tr**(coeffs[2]*(coeffs[1] - 1.0))*exp(coeffs[0]*(1.0 - (Tr)**(coeffs[1]*coeffs[2]))))
a_alphas.append(a_alpha)
if self.scalar:
return a_alphas
return array(a_alphas)
def a_alpha_and_derivatives_vectorized(self, T):
r'''Method to calculate the pure-component `a_alphas` and their first
and second derivatives for TWU91 alpha function EOS. This vectorized
implementation is added for extra speed.
.. math::
\alpha = \left(\frac{T}{T_{c,i}}\right)^{c_{3} \left(c_{2}
- 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c,i}}
\right)^{c_{2} c_{3}} + 1\right)}
Parameters
----------
T : float
Temperature, [K]
Returns
-------
a_alphas : list[float]
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dTs : list[float]
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2s : list[float]
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K**2]
'''
ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
N = len(ais)
a_alphas = [0.0]*N
da_alpha_dTs = [0.0]*N
d2a_alpha_dT2s = [0.0]*N
T_inv = 1.0/T
for i in range(N):
coeffs = alpha_coeffs[i]
c0, c1, c2 = coeffs[0], coeffs[1], coeffs[2]
Tr = T/Tcs[i]
x1 = c1 - 1.0
x2 = c2*x1
x3 = c1*c2
x4 = Tr**x3
x5 = ais[i]*Tr**x2*exp(-c0*(x4 - 1.0))
x6 = c0*x4
x7 = c1*x6
x8 = c2*x5
x9 = c1*c1*c2
d2a_alpha_dT2 = (x8*(c0*c0*x4*x4*x9 - c1 + c2*x1*x1
- 2.0*x2*x7 - x6*x9 + x7 + 1.0)*T_inv*T_inv)
a_alphas[i] = x5
da_alpha_dTs[i] = x8*(x1 - x7)*T_inv
d2a_alpha_dT2s[i] = d2a_alpha_dT2
if self.scalar:
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
return array(a_alphas), array(da_alpha_dTs), array(d2a_alpha_dT2s)
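# A sketch of the vectorized Twu91 path, given a configured mixture-EOS
# instance `eos` (the `scalar`, `ais`, `alpha_coeffs`, and `Tcs` attributes are
# assumed to be set by the library's constructors, and `array` is numpy's,
# imported at module level):
#
#     eos.alpha_coeffs = [(0.1236, 0.8898, 2.0115)]  # one (c0, c1, c2) per component
#     a_alphas, da_dTs, d2a_dT2s = eos.a_alpha_and_derivatives_vectorized(300.0)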
class Soave_1993_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Soave (1993) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
        `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
        documentation. Two coefficients needed.
.. math::
\alpha = c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) + c_{2}
\left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + 1
References
----------
.. [1] <NAME>. "Improving the Treatment of Heavy Hydrocarbons by the
SRK EOS." Fluid Phase Equilibria 84 (April 1, 1993): 339-42.
doi:10.1016/0378-3812(93)85131-5.
'''
c1, c2 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2 + 1)
da_alpha_dT = a*(-c1/Tc - c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T)
d2a_alpha_dT2 = a*(c2*(1/Tc - sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T)/(2*T))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Soave_1993_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Gasem_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Gasem (2001) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Three coefficients needed.
.. math::
\alpha = e^{\left(- \left(\frac{T}{T_{c,i}}\right)^{c_{3}} + 1\right)
\left(\frac{T c_{2}}{T_{c,i}} + c_{1}\right)}
References
----------
.. [1] Gasem, <NAME>, <NAME>, <NAME>, and <NAME>. "A Modified
Temperature Dependence for the Peng-Robinson Equation of State."
Fluid Phase Equilibria 181, no. 1–2 (May 25, 2001): 113-25.
doi:10.1016/S0378-3812(01)00488-5.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(exp((-(T/Tc)**c3 + 1)*(T*c2/Tc + c1)))
da_alpha_dT = a*((c2*(-(T/Tc)**c3 + 1)/Tc - c3*(T/Tc)**c3*(T*c2/Tc + c1)/T)*exp((-(T/Tc)**c3 + 1)*(T*c2/Tc + c1)))
d2a_alpha_dT2 = a*(((c2*((T/Tc)**c3 - 1)/Tc + c3*(T/Tc)**c3*(T*c2/Tc + c1)/T)**2 - c3*(T/Tc)**c3*(2*c2/Tc + c3*(T*c2/Tc + c1)/T - (T*c2/Tc + c1)/T)/T)*exp(-((T/Tc)**c3 - 1)*(T*c2/Tc + c1)))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Gasem_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Coquelet_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Coquelet et al. (2004) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Three coefficients needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) \left(c_{2}
\left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + c_{3}
\left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{3} + 1\right)^{2}}
References
----------
.. [1] <NAME>., <NAME>, and <NAME>. "Development of a New
Alpha Function for the Peng–Robinson Equation of State: Comparative
Study of Alpha Function Models for Pure Gases (Natural Gas
Components) and Water-Gas Systems." International Journal of
Thermophysics 25, no. 1 (January 1, 2004): 133-58.
doi:10.1023/B:IJOT.0000022331.46865.2f.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(exp(c1*(-T/Tc + 1)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2))
da_alpha_dT = a*((c1*(-T/Tc + 1)*(-2*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T - 3*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)**2/T)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1) - c1*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2/Tc)*exp(c1*(-T/Tc + 1)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2))
d2a_alpha_dT2 = a*(c1*(c1*(-(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)/Tc + sqrt(T/Tc)*(-2*c2 + 3*c3*(sqrt(T/Tc) - 1))*(sqrt(T/Tc) - 1)*(T/Tc - 1)/T)**2*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)**2 - ((T/Tc - 1)*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)*(2*c2/Tc - 6*c3*(sqrt(T/Tc) - 1)/Tc - 2*c2*sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T + 3*c3*sqrt(T/Tc)*(sqrt(T/Tc) - 1)**2/T) + 4*sqrt(T/Tc)*(2*c2 - 3*c3*(sqrt(T/Tc) - 1))*(sqrt(T/Tc) - 1)*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)/Tc + (2*c2 - 3*c3*(sqrt(T/Tc) - 1))**2*(sqrt(T/Tc) - 1)**2*(T/Tc - 1)/Tc)/(2*T))*exp(-c1*(T/Tc - 1)*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)**2))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Coquelet_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Haghtalab_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Haghtalab et al. (2010) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Three coefficients needed.
.. math::
\alpha = e^{\left(- c_{3}^{\ln{\left (\frac{T}{T_{c,i}} \right )}}
+ 1\right) \left(- \frac{T c_{2}}{T_{c,i}} + c_{1}\right)}
References
----------
.. [1] <NAME>., <NAME>, <NAME>, and <NAME>.
"A New Three-Parameter Cubic Equation of State for Calculation
Physical Properties and Vapor-liquid Equilibria." Fluid Phase
Equilibria 293, no. 2 (June 25, 2010): 209-18.
doi:10.1016/j.fluid.2010.03.029.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*exp((-c3**log(T/Tc) + 1)*(-T*c2/Tc + c1))
da_alpha_dT = a*((-c2*(-c3**log(T/Tc) + 1)/Tc - c3**log(T/Tc)*(-T*c2/Tc + c1)*log(c3)/T)*exp((-c3**log(T/Tc) + 1)*(-T*c2/Tc + c1)))
d2a_alpha_dT2 = a*(((c2*(c3**log(T/Tc) - 1)/Tc + c3**log(T/Tc)*(T*c2/Tc - c1)*log(c3)/T)**2 + c3**log(T/Tc)*(2*c2/Tc + (T*c2/Tc - c1)*log(c3)/T - (T*c2/Tc - c1)/T)*log(c3)/T)*exp((c3**log(T/Tc) - 1)*(T*c2/Tc - c1)))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Haghtalab_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Saffari_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Saffari and Zahedi (2013) [1]_. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Three coefficients needed.
.. math::
\alpha = e^{\frac{T c_{1}}{T_{c,i}} + c_{2} \ln{\left (\frac{T}{T_{c,i}}
\right )} + c_{3} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)}
References
----------
.. [1] Saffari, Hamid, and <NAME>. "A New Alpha-Function for
the Peng-Robinson Equation of State: Application to Natural Gas."
Chinese Journal of Chemical Engineering 21, no. 10 (October 1,
2013): 1155-61. doi:10.1016/S1004-9541(13)60581-9.
'''
c1, c2, c3 = self.alpha_coeffs
Tc, a = self.Tc, self.a
a_alpha = a*(exp(T*c1/Tc + c2*log(T/Tc) + c3*(-sqrt(T/Tc) + 1)))
da_alpha_dT = a*((c1/Tc + c2/T - c3*sqrt(T/Tc)/(2*T))*exp(T*c1/Tc + c2*log(T/Tc) + c3*(-sqrt(T/Tc) + 1)))
d2a_alpha_dT2 = a*(((2*c1/Tc + 2*c2/T - c3*sqrt(T/Tc)/T)**2 - (4*c2 - c3*sqrt(T/Tc))/T**2)*exp(T*c1/Tc + c2*log(T/Tc) - c3*(sqrt(T/Tc) - 1))/4)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Saffari_alpha_pure(T, self.Tc, *self.alpha_coeffs)
class Chen_Yang_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Chen and Yang (2017) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
        for more documentation. Seven coefficients needed.
        .. math::
            \alpha = \exp\left[c_{4} \ln^{2}\left(\left(1 - \sqrt{\frac{T}
            {T_{c,i}}}\right) \left(c_{5} + c_{6}\omega + c_{7}\omega^{2}\right)
            + 1\right) + \left(1 - \frac{T}{T_{c,i}}\right)\left(c_{1}
            + c_{2}\omega + c_{3}\omega^{2}\right)\right]
References
----------
.. [1] Chen, Zehua, and <NAME>. "Optimization of the Reduced
Temperature Associated with Peng–Robinson Equation of State and
Soave-Redlich-Kwong Equation of State To Improve Vapor Pressure
Prediction for Heavy Hydrocarbon Compounds." Journal of Chemical &
Engineering Data, August 31, 2017. doi:10.1021/acs.jced.7b00496.
'''
c1, c2, c3, c4, c5, c6, c7 = self.alpha_coeffs
Tc, a, omega = self.Tc, self.a, self.omega
a_alpha = a*exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2))
da_alpha_dT = a*(-(c1 + c2*omega + c3*omega**2)/Tc - c4*sqrt(T/Tc)*(c5 + c6*omega + c7*omega**2)*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)/(T*((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)))*exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2))
d2a_alpha_dT2 = a*(((c1 + c2*omega + c3*omega**2)/Tc - c4*sqrt(T/Tc)*(c5 + c6*omega + c7*omega**2)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/(T*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)))**2 - c4*(c5 + c6*omega + c7*omega**2)*((c5 + c6*omega + c7*omega**2)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/(Tc*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)) - (c5 + c6*omega + c7*omega**2)/(Tc*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)) + sqrt(T/Tc)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/T)/(2*T*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)))*exp(c4*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 - (T/Tc - 1)*(c1 + c2*omega + c3*omega**2))
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
return self.a*Chen_Yang_alpha_pure(T, self.Tc, self.omega, *self.alpha_coeffs)
class TwuSRK95_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for the Twu alpha function. Uses the set values of `Tc`,
`omega` and `a`.
.. math::
\alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)})
.. math::
\alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})]
For sub-critical conditions:
L0, M0, N0 = 0.141599, 0.919422, 2.496441
L1, M1, N1 = 0.500315, 0.799457, 3.291790
For supercritical conditions:
L0, M0, N0 = 0.441411, 6.500018, -0.20
L1, M1, N1 = 0.032580, 1.289098, -8.0
Parameters
----------
T : float
            Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
Notes
-----
This method does not alter the object's state and the temperature
        provided can be different from that of the object.
The derivatives are somewhat long and are not described here for
brevity; they are obtainable from the following SymPy expression.
>>> from sympy import * # doctest:+SKIP
>>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0') # doctest:+SKIP
>>> Tr = T/Tc # doctest:+SKIP
>>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0))) # doctest:+SKIP
>>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1))) # doctest:+SKIP
>>> alpha = alpha0 + omega*(alpha1-alpha0) # doctest:+SKIP
>>> diff(alpha, T) # doctest:+SKIP
>>> diff(alpha, T, T) # doctest:+SKIP
'''
return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=True, method='SRK')
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for the Twu alpha function.
Uses the set values of `Tc`, `omega` and `a`.
.. math::
\alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)})
.. math::
\alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})]
For sub-critical conditions:
L0, M0, N0 = 0.141599, 0.919422, 2.496441
L1, M1, N1 = 0.500315, 0.799457, 3.291790
For supercritical conditions:
L0, M0, N0 = 0.441411, 6.500018, -0.20
L1, M1, N1 = 0.032580, 1.289098, -8.0
Parameters
----------
T : float
            Temperature at which to calculate the value, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
Notes
-----
This method does not alter the object's state and the temperature
        provided can be different from that of the object.
'''
return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=False, method='SRK')
def a_alphas_vectorized(self, T):
Tcs, omegas, ais = self.Tcs, self.omegas, self.ais
a_alphas = [TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=False, method='SRK')
for i in range(self.N)]
if self.scalar:
return a_alphas
return array(a_alphas)
def a_alpha_and_derivatives_vectorized(self, T):
Tcs, omegas, ais = self.Tcs, self.omegas, self.ais
r0, r1, r2 = [], [], []
for i in range(self.N):
v0, v1, v2 = TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=True, method='SRK')
r0.append(v0)
r1.append(v1)
r2.append(v2)
if self.scalar:
return r0, r1, r2
return array(r0), array(r1), array(r2)
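# A self-contained numeric sketch (not part of the original module) of the subcritical
# Twu-SRK alpha documented above; the L/M/N constants are copied from the docstring,
# and the supercritical branch that TWU_a_alpha_common also handles is omitted here.
def _twu_srk_alpha_subcritical_demo(T, Tc, omega):
    from math import exp
    Tr = T/Tc
    L0, M0, N0 = 0.141599, 0.919422, 2.496441
    L1, M1, N1 = 0.500315, 0.799457, 3.291790
    alpha0 = Tr**(N0*(M0 - 1.0))*exp(L0*(1.0 - Tr**(N0*M0)))
    alpha1 = Tr**(N1*(M1 - 1.0))*exp(L1*(1.0 - Tr**(N1*M1)))
    return alpha0 + omega*(alpha1 - alpha0)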
class TwuPR95_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for the Twu alpha function. Uses the set values of `Tc`,
`omega` and `a`.
.. math::
\alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)})
.. math::
\alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})]
For sub-critical conditions:
L0, M0, N0 = 0.125283, 0.911807, 1.948150;
L1, M1, N1 = 0.511614, 0.784054, 2.812520
For supercritical conditions:
L0, M0, N0 = 0.401219, 4.963070, -0.2;
L1, M1, N1 = 0.024955, 1.248089, -8.
Parameters
----------
T : float
            Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
Notes
-----
This method does not alter the object's state and the temperature
        provided can be different from that of the object.
The derivatives are somewhat long and are not described here for
brevity; they are obtainable from the following SymPy expression.
>>> from sympy import * # doctest:+SKIP
>>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0') # doctest:+SKIP
>>> Tr = T/Tc # doctest:+SKIP
>>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0))) # doctest:+SKIP
>>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1))) # doctest:+SKIP
>>> alpha = alpha0 + omega*(alpha1-alpha0) # doctest:+SKIP
>>> diff(alpha, T) # doctest:+SKIP
>>> diff(alpha, T, T) # doctest:+SKIP
'''
return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=True, method='PR')
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for the Twu alpha function.
Uses the set values of `Tc`, `omega` and `a`.
.. math::
\alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)})
.. math::
\alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})]
For sub-critical conditions:
L0, M0, N0 = 0.125283, 0.911807, 1.948150;
L1, M1, N1 = 0.511614, 0.784054, 2.812520
For supercritical conditions:
L0, M0, N0 = 0.401219, 4.963070, -0.2;
L1, M1, N1 = 0.024955, 1.248089, -8.
Parameters
----------
T : float
            Temperature at which to calculate the value, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
Notes
-----
This method does not alter the object's state and the temperature
        provided can be different from that of the object.
'''
return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=False, method='PR')
def a_alphas_vectorized(self, T):
Tcs, omegas, ais = self.Tcs, self.omegas, self.ais
a_alphas = [TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=False, method='PR')
for i in range(self.N)]
if self.scalar:
return a_alphas
return array(a_alphas)
def a_alpha_and_derivatives_vectorized(self, T):
Tcs, omegas, ais = self.Tcs, self.omegas, self.ais
r0, r1, r2 = [], [], []
for i in range(self.N):
v0, v1, v2 = TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=True, method='PR')
r0.append(v0)
r1.append(v1)
r2.append(v2)
if self.scalar:
return r0, r1, r2
return array(r0), array(r1), array(r2)
class Soave_1979_a_alpha(a_alpha_base):
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Soave (1979) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. Two coefficients are needed.
.. math::
\alpha = 1 + (1 - T_r)(M + \frac{N}{T_r})
References
----------
.. [1] <NAME>. "Rigorous and Simplified Procedures for Determining
the Pure-Component Parameters in the Redlich—Kwong—Soave Equation of
State." Chemical Engineering Science 35, no. 8 (January 1, 1980):
1725-30. https://doi.org/10.1016/0009-2509(80)85007-X.
'''
        M, N = self.alpha_coeffs
Tc, a = self.Tc, self.a
T_inv = 1.0/T
x0 = 1.0/Tc
x1 = T*x0 - 1.0
x2 = Tc*T_inv
x3 = M + N*x2
x4 = N*T_inv*T_inv
return (a*(1.0 - x1*x3), a*(Tc*x1*x4 - x0*x3), a*(2.0*x4*(1.0 - x1*x2)))
def a_alpha_pure(self, T):
        M, N = self.alpha_coeffs
        return self.a*Soave_1979_alpha_pure(T, self.Tc, M, N)
def a_alphas_vectorized(self, T):
ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
a_alphas = []
for i in range(self.N):
Tr = T/Tcs[i]
M, N = alpha_coeffs[i]
a_alphas.append(ais[i]*(1.0 + (1.0 - Tr)*(M + N/Tr)))
return a_alphas
def a_alpha_and_derivatives_vectorized(self, T):
ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
T_inv = 1.0/T
a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [], [], []
for i in range(self.N):
a = ais[i]
M, N = alpha_coeffs[i]
x0 = 1.0/Tcs[i]
x1 = T*x0 - 1.0
x2 = Tcs[i]*T_inv
x3 = M + N*x2
x4 = N*T_inv*T_inv
a_alphas.append(a*(1.0 - x1*x3))
da_alpha_dTs.append(a*(Tcs[i]*x1*x4 - x0*x3))
d2a_alpha_dT2s.append(a*(2.0*x4*(1.0 - x1*x2)))
return a_alphas, da_alpha_dTs, d2a_alpha_dT2s
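# A self-contained numeric sketch (not part of the original module) of the Soave (1979)
# alpha documented above, alpha = 1 + (1 - Tr)*(M + N/Tr); M and N are illustrative.
def _soave_1979_alpha_demo(T, Tc, M, N):
    Tr = T/Tc
    return 1.0 + (1.0 - Tr)*(M + N/Tr)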
a_alpha_bases = [Soave_1972_a_alpha, Heyen_a_alpha, Harmens_Knapp_a_alpha, Mathias_1983_a_alpha,
Mathias_Copeman_untruncated_a_alpha, Gibbons_Laughton_a_alpha, Soave_1984_a_alpha, Yu_Lu_a_alpha,
Trebble_Bishnoi_a_alpha, Melhem_a_alpha, Androulakis_a_alpha, Schwartzentruber_a_alpha,
Almeida_a_alpha, Twu91_a_alpha, Soave_1993_a_alpha, Gasem_a_alpha,
Coquelet_a_alpha, Haghtalab_a_alpha, Saffari_a_alpha, Chen_Yang_a_alpha,
Mathias_Copeman_poly_a_alpha,
TwuSRK95_a_alpha, TwuPR95_a_alpha, Soave_1979_a_alpha]
|
fastmri_recon/models/subclassed_models/vnet.py
|
chaithyagr/fastmri-reproducible-benchmark
| 105 |
77944
|
<gh_stars>100-1000
import tensorflow as tf
from tensorflow.keras.layers import Layer, Conv3D, LeakyReLU, PReLU, UpSampling3D, MaxPooling3D, Activation
from tensorflow.keras.models import Model
from ..utils.complex import to_complex
from ..utils.fourier import AdjNFFT
class Conv(Layer):
def __init__(self, n_filters, kernel_size=3, non_linearity='relu', **kwargs):
super().__init__(**kwargs)
self.n_filters = n_filters
self.kernel_size = kernel_size
self.non_linearity = non_linearity
self.conv = Conv3D(
filters=self.n_filters,
kernel_size=self.kernel_size,
padding='same',
activation=None,
)
if self.non_linearity == 'lrelu':
self.act = LeakyReLU(0.1)
elif self.non_linearity == 'prelu':
self.act = PReLU(shared_axes=[1, 2, 3])
else:
self.act = Activation(self.non_linearity)
def call(self, inputs):
outputs = self.conv(inputs)
outputs = self.act(outputs)
return outputs
def get_config(self):
config = super().get_config()
config.update({
'n_filters': self.n_filters,
'kernel_size': self.kernel_size,
'non_linearity': self.non_linearity,
})
return config
class ConvBlock(Layer):
def __init__(self, n_filters, kernel_size=3, non_linearity='relu', n_non_lins=2, **kwargs):
super().__init__(**kwargs)
self.n_filters = n_filters
self.kernel_size = kernel_size
self.non_linearity = non_linearity
self.n_non_lins = n_non_lins
self.convs = [
Conv(
n_filters=self.n_filters,
kernel_size=self.kernel_size,
non_linearity=self.non_linearity,
) for _ in range(self.n_non_lins)
]
def call(self, inputs):
outputs = inputs
for conv in self.convs:
outputs = conv(outputs)
return outputs
def get_config(self):
config = super().get_config()
config.update({
'n_non_lins': self.n_non_lins,
'n_filters': self.n_filters,
'kernel_size': self.kernel_size,
'non_linearity': self.non_linearity,
})
return config
class UpConv(Layer):
def __init__(self, n_filters, kernel_size=3, post_processing=False, **kwargs):
super().__init__(**kwargs)
self.n_filters = n_filters
self.kernel_size = kernel_size
self.post_processing = post_processing
self.conv = Conv3D(
filters=self.n_filters,
kernel_size=self.kernel_size,
padding='same',
activation=None,
)
self.up = UpSampling3D(size=(1 if self.post_processing else 2, 2, 2))
def call(self, inputs):
outputs = self.up(inputs)
outputs = self.conv(outputs)
return outputs
def get_config(self):
config = super().get_config()
config.update({
'n_filters': self.n_filters,
'kernel_size': self.kernel_size,
'post_processing': self.post_processing,
})
return config
class Vnet(Model):
def __init__(
self,
n_output_channels=1,
kernel_size=3,
layers_n_channels=[4],
layers_n_non_lins=1,
non_linearity='relu',
post_processing=False,
res=False,
**kwargs,
):
super().__init__(**kwargs)
self.n_output_channels = n_output_channels
self.kernel_size = kernel_size
self.layers_n_channels = layers_n_channels
self.n_layers = len(self.layers_n_channels)
self.layers_n_non_lins = layers_n_non_lins
self.non_linearity = non_linearity
self.post_processing = post_processing
self.res = res
self.down_convs = [
ConvBlock(
n_filters=n_channels,
kernel_size=self.kernel_size,
non_linearity=self.non_linearity,
n_non_lins=self.layers_n_non_lins,
) for n_channels in self.layers_n_channels[:-1]
]
self.down = MaxPooling3D(
pool_size=(1 if self.post_processing else 2, 2, 2),
padding='same',
)
self.bottom_conv = ConvBlock(
n_filters=self.layers_n_channels[-1],
kernel_size=self.kernel_size,
non_linearity=self.non_linearity,
n_non_lins=self.layers_n_non_lins,
)
self.up_convs = [
ConvBlock(
n_filters=n_channels,
kernel_size=self.kernel_size,
non_linearity=self.non_linearity,
n_non_lins=self.layers_n_non_lins,
) for n_channels in self.layers_n_channels[:-1]
]
self.ups = [
UpConv(
n_filters=n_channels,
kernel_size=self.kernel_size,
post_processing=self.post_processing,
) for n_channels in self.layers_n_channels[:-1]
]
self.final_conv = Conv3D(
filters=self.n_output_channels,
kernel_size=1,
padding='same',
activation=None,
)
def call(self, inputs):
scales = []
outputs = inputs
for conv in self.down_convs:
outputs = conv(outputs)
scales.append(outputs)
outputs = self.down(outputs)
outputs = self.bottom_conv(outputs)
for scale, conv, up in zip(scales[::-1], self.up_convs[::-1], self.ups[::-1]):
outputs = up(outputs)
outputs = tf.concat([outputs, scale], axis=-1)
outputs = conv(outputs)
outputs = self.final_conv(outputs)
if self.res:
outputs = outputs + inputs
return outputs
class VnetComplex(Model):
def __init__(
self,
n_input_channels=1,
n_output_channels=1,
kernel_size=3,
layers_n_channels=[4],
layers_n_non_lins=1,
res=False,
non_linearity='relu',
dealiasing_nc=False,
im_size=None,
dcomp=None,
grad_traj=False,
**kwargs,
):
        super().__init__(**kwargs)
self.n_input_channels = n_input_channels
self.n_output_channels = n_output_channels
self.kernel_size = kernel_size
self.layers_n_channels = layers_n_channels
self.layers_n_non_lins = layers_n_non_lins
self.res = res
self.non_linearity = non_linearity
self.dealiasing_nc = dealiasing_nc
if self.dealiasing_nc:
self.adj_op = AdjNFFT(
im_size=im_size,
multicoil=False,
density_compensation=dcomp,
grad_traj=grad_traj,
)
self.vnet = Vnet(
n_output_channels=2 * self.n_output_channels,
kernel_size=self.kernel_size,
layers_n_channels=self.layers_n_channels,
layers_n_non_lins=self.layers_n_non_lins,
non_linearity=self.non_linearity,
)
def call(self, inputs):
if self.dealiasing_nc:
if len(inputs) == 2:
original_kspace, mask = inputs
op_args = ()
else:
original_kspace, mask, op_args = inputs
outputs = self.adj_op([original_kspace, mask, *op_args])
# we do this to match the residual part.
inputs = outputs
else:
outputs = inputs
# NOTE: for now no padding in 3d case
outputs = tf.concat([tf.math.real(outputs), tf.math.imag(outputs)], axis=-1)
outputs = self.vnet(outputs)
outputs = to_complex(outputs, self.n_output_channels)
if self.res:
outputs = inputs[..., :self.n_output_channels] + outputs
if self.dealiasing_nc:
outputs = tf.abs(outputs)
return outputs
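if __name__ == '__main__':
    # A minimal smoke test (a sketch; shapes are illustrative, not from the original
    # repository): a 2-scale Vnet over a channels-last batch of 3D volumes. With
    # post_processing=False each scale halves all three spatial dimensions, so the
    # decoder restores the input shape.
    model = Vnet(n_output_channels=1, layers_n_channels=[4, 8])
    y = model(tf.zeros([1, 8, 16, 16, 1]))
    print(y.shape)  # expected: (1, 8, 16, 16, 1)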
|
thop/vision/onnx_counter.py
|
jinmingyi1998/pytorch-OpCounter
| 3,444 |
77945
|
import torch
import numpy as np
from onnx import numpy_helper
from thop.vision.basic_hooks import zero_ops
from .counter import counter_matmul, counter_zero_ops,\
counter_conv, counter_mul, counter_norm, counter_pow,\
counter_sqrt, counter_div, counter_softmax, counter_avgpool
def onnx_counter_matmul(diction, node):
input1 = node.input[0]
input2 = node.input[1]
input1_dim = diction[input1]
input2_dim = diction[input2]
out_size = np.append(input1_dim[0:-1], input2_dim[-1])
output_name = node.output[0]
macs = counter_matmul(input1_dim, out_size[-2:])
return macs, out_size, output_name
def onnx_counter_add(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
out_size = diction[node.input[1]]
else:
out_size = diction[node.input[0]]
output_name = node.output[0]
macs = counter_zero_ops()
return macs, out_size, output_name
def onnx_counter_conv(diction, node):
    # bias, kernel size, output size
    dim_bias = 1 if len(node.input) == 3 else 0  # a third input is the conv bias
    dim_weight = diction[node.input[1]]
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
if(attr.name == 'strides'):
dim_stride = attr.ints
if(attr.name == 'pads'):
dim_pad = attr.ints
if(attr.name == 'dilations'):
dim_dil = attr.ints
if(attr.name == 'group'):
group = attr.i
# print(dim_dil)
dim_input = diction[node.input[0]]
output_size = np.append(
dim_input[0:-np.array(dim_kernel).size-1], dim_weight[0])
hw = np.array(dim_input[-np.array(dim_kernel).size:])
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_dil[i] *
(dim_kernel[i]-1)-1)/dim_stride[i]+1)
output_size = np.append(output_size, hw)
macs = counter_conv(dim_bias, np.prod(dim_kernel),
np.prod(output_size), dim_weight[1], group)
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_constant(diction, node):
# print("constant",node)
macs = counter_zero_ops()
output_name = node.output[0]
output_size = [1]
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_mul(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_mul(np.prod(input_size))
    output_size = input_size  # the broadcast result follows the larger operand
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_bn(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_relu(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_reducemean(diction, node):
keep_dim = 0
for attr in node.attribute:
if('axes' in attr.name):
dim_axis = np.array(attr.ints)
elif('keepdims' in attr.name):
keep_dim = attr.i
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
if (keep_dim == 1):
output_size = input_size
else:
output_size = np.delete(input_size, dim_axis)
#output_size = input_size
return macs, output_size, output_name
def onnx_counter_sub(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pow(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_pow(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_sqrt(diction, node):
input_size = diction[node.input[0]]
macs = counter_sqrt(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_div(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_div(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_instance(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_softmax(diction, node):
input_size = diction[node.input[0]]
dim = node.attribute[0].i
nfeatures = input_size[dim]
batch_size = np.prod(input_size) / nfeatures
macs = counter_softmax(nfeatures, batch_size)
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pad(diction, node):
    # TODO: use the constant `pads` input to compute the real output shape
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_averagepool(diction, node):
# TODO add support of ceil_mode and floor
macs = counter_avgpool(np.prod(diction[node.input[0]]))
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
    if dim_pad is not None:
        for i in range(hw.size):
            hw[i] = int((hw[i] + 2*dim_pad[i] - dim_kernel[i])/dim_stride[i] + 1)
    else:
        for i in range(hw.size):
            hw[i] = int((hw[i] - dim_kernel[i])/dim_stride[i] + 1)
    output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_flatten(diction, node):
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
axis = node.attribute[0].i
input_size = diction[node.input[0]]
output_size = np.append(input_size[axis-1], np.prod(input_size[axis:]))
# print("flatten",output_size)
return macs, output_size, output_name
def onnx_counter_gemm(diction, node):
# print(node)
# Compute Y = alpha * A' * B' + beta * C
input_size = diction[node.input[0]]
dim_weight = diction[node.input[1]]
# print(input_size,dim_weight)
macs = np.prod(input_size) * dim_weight[1] + dim_weight[0]
output_size = np.append(input_size[0:-1], dim_weight[0])
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_maxpool(diction, node):
# TODO add support of ceil_mode and floor
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
    if dim_pad is not None:
        for i in range(hw.size):
            hw[i] = int((hw[i] + 2*dim_pad[i] - dim_kernel[i])/dim_stride[i] + 1)
    else:
        for i in range(hw.size):
            hw[i] = int((hw[i] - dim_kernel[i])/dim_stride[i] + 1)
    output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_globalaveragepool(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_concat(diction, node):
# print(node)
# print(diction[node.input[0]])
axis = node.attribute[0].i
input_size = diction[node.input[0]]
    # the size along the concat axis is the sum of that axis over all inputs
    dim_concat = 0
    for i in node.input:
        dim_concat += diction[i][axis]
    output_size = np.array(input_size)
    output_size[axis] = dim_concat
output_name = node.output[0]
macs = counter_zero_ops()
return macs, output_size, output_name
def onnx_counter_clip(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
onnx_operators = {
'MatMul': onnx_counter_matmul,
'Add': onnx_counter_add,
'Conv': onnx_counter_conv,
'Mul': onnx_counter_mul,
'Constant': onnx_counter_constant,
'BatchNormalization': onnx_counter_bn,
'Relu': onnx_counter_relu,
'ReduceMean': onnx_counter_reducemean,
'Sub': onnx_counter_sub,
'Pow': onnx_counter_pow,
'Sqrt': onnx_counter_sqrt,
'Div': onnx_counter_div,
'InstanceNormalization': onnx_counter_instance,
'Softmax': onnx_counter_softmax,
'Pad': onnx_counter_pad,
'AveragePool': onnx_counter_averagepool,
'MaxPool': onnx_counter_maxpool,
'Flatten': onnx_counter_flatten,
'Gemm': onnx_counter_gemm,
'GlobalAveragePool': onnx_counter_globalaveragepool,
'Concat': onnx_counter_concat,
'Clip': onnx_counter_clip,
None: None,
}
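def count_onnx_macs(graph, diction):
    """A sketch (not part of the original module) of how the dispatch table above is
    typically consumed: `diction` is assumed to be pre-seeded with the shapes of the
    graph inputs and initializers, and each handler returns the node's MACs plus the
    output shape, which is stored back for downstream nodes."""
    total = 0
    for node in graph.node:
        handler = onnx_operators.get(node.op_type)
        if handler is None:
            continue  # unsupported op: counted as zero MACs here
        macs, out_size, out_name = handler(diction, node)
        diction[out_name] = out_size
        total += macs
    return total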
|
foresight/mysql/rand.py
|
abdulmohit/foresight
| 193 |
77946
|
""" Predicts outputs of the MySQL 'rand' function. For example,
mysql> CREATE TABLE t (i INT);
mysql> INSERT INTO t VALUES(1),(2),(3);
mysql> SELECT i, RAND() FROM t;
+------+------------------+
| i | RAND() |
+------+------------------+
| 1 | 0.61914388706828 |
| 2 | 0.93845168309142 |
| 3 | 0.83482678498591 |
+------+------------------+
Given "0.61914388706828" and "0.93845168309142", this module
`from_outputs` would yield "0.83482678498591".
"""
from ctypes import c_uint32
__all__ = [
"from_outputs",
"from_seed"
]
def predict_state(values):
values = [int(round(v * 0x3FFFFFFF, 0)) for v in values]
seed1 = values[0]
seed2 = (values[1] - seed1 * 3) % 0x3FFFFFFF
return [seed1, seed2]
def generate_values(state):
# from password.c
# max_value = 0x3FFFFFFFL
# max_value_dbl=(double) rand_st->max_value;
# from my_rnd.cc
# rand_st->seed1= (rand_st->seed1*3+rand_st->seed2) % rand_st->max_value;
# rand_st->seed2= (rand_st->seed1+rand_st->seed2+33) % rand_st->max_value;
# return (((double) rand_st->seed1) / rand_st->max_value_dbl);
seed1, seed2 = state
while True:
seed1 = (seed1 * 3 + seed2) % 0x3FFFFFFF
seed2 = (seed1 + seed2 + 33) % 0x3FFFFFFF
yield seed1 / 0x3FFFFFFF
def from_outputs(prev_values):
state = predict_state(prev_values)
gen = generate_values(state)
# only 2 values are needed, so advance past any others we were given
for _ in prev_values[1:]:
next(gen)
yield from gen
def from_seed(seed):
seed1 = c_uint32(seed*0x10001+55555555).value
seed2 = c_uint32(seed*0x10000001).value
yield from generate_values([seed1, seed2])
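if __name__ == "__main__":
    # Sanity check (a sketch): given the first two RAND() outputs quoted in the module
    # docstring, the generator should reproduce the third, ~0.83482678498591.
    gen = from_outputs([0.61914388706828, 0.93845168309142])
    print(next(gen))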
|
tests/test_04_online.py
|
vanheeringen-lab/genomepy
| 146 |
77955
|
<filename>tests/test_04_online.py
import os
import urllib.error
import urllib.request
from tempfile import NamedTemporaryFile
import pytest
import genomepy.online
from tests import linux, travis
def test_download_file():
# HTTP
tmp = NamedTemporaryFile().name
assert not os.path.exists(tmp)
url = "http://hgdownload.soe.ucsc.edu/goldenPath/ailMel1/bigZips/md5sum.txt"
genomepy.online.download_file(url, tmp)
assert os.path.exists(tmp)
# FTP (doesn't work on Travis-Linux)
if not (travis and linux):
tmp = NamedTemporaryFile().name
assert not os.path.exists(tmp)
url = "ftp://ftp.ncbi.nlm.nih.gov//genomes/all/GCF/000/027/325/GCF_000027325.1_ASM2732v1/README.txt"
genomepy.online.download_file(url, tmp)
assert os.path.exists(tmp)
def test_connect_ftp_link():
if not (travis and linux): # (doesn't work on Travis-Linux)
# good FTP host
ftp_link = "ftp://ftp.ncbi.nlm.nih.gov/genomes/README.txt"
ftp, target = genomepy.online.connect_ftp_link(ftp_link)
assert target == "/genomes/README.txt"
result = ftp.nlst(target)
ftp.quit() # logout
assert result == [target]
# bad FTP host
with pytest.raises(genomepy.exceptions.GenomeDownloadError):
genomepy.online.connect_ftp_link("ftp://not.an.ftp/at/all")
def test_read_url(
url="http://ftp.xenbase.org/pub/Genomics/JGI/README", expected="The data"
):
text = genomepy.online.read_url(url)
assert text.startswith(expected)
def test_retry(capsys):
# runs passed function
txt = "hello world"
genomepy.online.retry(print, 1, txt)
captured = capsys.readouterr().out.strip()
assert captured == txt
# handles URLErrors
def _offline_func():
raise urllib.error.URLError("this function is offline")
assert genomepy.online.retry(_offline_func, 1) is None
def test_check_url():
# good URL
assert genomepy.online.check_url("http://ftp.xenbase.org/pub/Genomics/JGI/README")
# bad URL:
assert not genomepy.online.check_url("https://www.thiswebsiteisoffline.nl/")
if not (travis and linux): # (doesn't work on Travis-Linux)
# good FTP
ftp_link = (
"ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/027/325/"
"GCF_000027325.1_ASM2732v1/GCF_000027325.1_ASM2732v1_genomic.gff.gz"
)
assert genomepy.online.check_url(ftp_link)
# bad FTP target
assert not genomepy.online.check_url("ftp://ftp.ncbi.nlm.nih.gov/bad_target")
|
common.py
|
johnding1996/UMD-CMSC726-Project
| 118 |
77960
|
from torch import nn
class FeedForwardNet(nn.Module):
def __init__(self, inp_dim, hidden_dim, outp_dim, n_layers, nonlinearity, dropout=0):
super().__init__()
layers = []
d_in = inp_dim
for i in range(n_layers):
module = nn.Linear(d_in, hidden_dim)
self.reset_parameters(module)
layers.append(module)
if dropout > 0:
layers.append(nn.Dropout(dropout))
if nonlinearity == 'relu':
nonlin = nn.ReLU(inplace=True)
elif nonlinearity == 'tanh':
nonlin = nn.Tanh()
elif nonlinearity == 'elu':
nonlin = nn.ELU(inplace=True)
elif nonlinearity != 'none':
raise NotImplementedError('only relu, tanh, and elu nonlinearities have been implemented')
if nonlinearity != 'none':
layers.append(nonlin)
d_in = hidden_dim
module = nn.Linear(d_in, outp_dim)
self.reset_parameters(module)
layers.append(module)
self.network = nn.Sequential(*layers)
def reset_parameters(self, module):
init_range = 0.07
module.weight.data.uniform_(-init_range, init_range)
module.bias.data.zero_()
def forward(self, x):
return self.network(x)
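if __name__ == '__main__':
    import torch
    # A minimal smoke test (dimensions are illustrative): two hidden layers of width 32
    # with ReLU and dropout, mapping a batch of four 10-d inputs to 2-d outputs.
    net = FeedForwardNet(inp_dim=10, hidden_dim=32, outp_dim=2,
                         n_layers=2, nonlinearity='relu', dropout=0.1)
    print(net(torch.randn(4, 10)).shape)  # torch.Size([4, 2])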
|