max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
doomrnn/model.py | yumaloop/predwm | 494 | 67920 |
<gh_stars>100-1000
import numpy as np
import random
#from scipy.fftpack import dct
import json
import sys
import config
from env import make_env
import time
final_mode = True
render_mode = True
RENDER_DELAY = False
def make_model(game):
# can be extended in the future.
model = Model(game)
return model
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(x, 0)
def passthru(x):
return x
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def sample(p):
return np.argmax(np.random.multinomial(1, p))
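# Illustrative note (not part of the original file): softmax(np.array([1.0, 2.0, 3.0]))
# returns probabilities summing to 1, roughly [0.09, 0.24, 0.67], and sample(p) then draws
# a single index from that categorical distribution via np.random.multinomial.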
class Model:
''' simple feedforward model '''
def __init__(self, game):
self.noise_level = 0.0
self.env_name = game.env_name
self.input_size = game.input_size
self.output_size = game.output_size
self.shapes = [ (game.input_size, game.output_size) ]
self.sample_output = False
if game.activation == 'relu':
self.activations = [relu]
elif game.activation == 'sigmoid':
self.activations = [sigmoid]
elif game.activation == 'passthru':
self.activations = [passthru]
else:
self.activations = [np.tanh]
self.weight = []
self.param_count = game.input_size * game.output_size
for shape in self.shapes:
self.weight.append(np.zeros(shape=shape))
self.render_mode = False
def make_env(self, seed=-1, render_mode=False, load_model=True):
self.render_mode = render_mode
self.env = make_env(self.env_name, seed=seed, render_mode=render_mode, load_model=load_model)
def get_action(self, x):
# if mean_mode = True, ignore sampling.
h = np.array(x).flatten()
num_layers = len(self.weight)
for i in range(num_layers):
w = self.weight[i]
h = np.matmul(h, w)
h = self.activations[i](h + np.random.randn()*self.noise_level)
return h
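# Illustrative note (not part of the original file): with the single (input_size, output_size)
# weight matrix created in __init__, get_action reduces to
# activation(obs @ W + np.random.randn() * noise_level) applied to the flattened observation.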
def set_model_params(self, model_params):
pointer = 0
for i in range(len(self.shapes)):
w_shape = self.shapes[i]
s_w = np.product(w_shape)
s = s_w
chunk = np.array(model_params[pointer:pointer+s])
self.weight[i] = chunk[:s_w].reshape(w_shape)
pointer += s
def load_model(self, filename):
with open(filename) as f:
data = json.load(f)
print('loading file %s' % (filename))
self.data = data
model_params = np.array(data[0]) # assuming other stuff is in data
self.set_model_params(model_params)
# also load the vae and rnn
self.env.vae.load_json('tf_models/vae.json')
self.env.rnn.load_json('tf_models/rnn.json')
def get_random_model_params(self, stdev=0.1):
#return np.random.randn(self.param_count)*stdev
return np.random.standard_cauchy(self.param_count)*stdev # spice things up!
def init_random_model_params(self, stdev=0.1):
params = self.get_random_model_params(stdev=stdev)
self.set_model_params(params)
vae_params = self.env.vae.get_random_model_params(stdev=stdev)
self.env.vae.set_model_params(vae_params)
rnn_params = self.env.rnn.get_random_model_params(stdev=stdev)
self.env.rnn.set_model_params(rnn_params)
def evaluate(model):
# run 100 times and average score, according to the rules.
model.env.seed(0)
total_reward = 0.0
N = 100
for i in range(N):
reward, t = simulate(model, train_mode=False, render_mode=False, num_episode=1)
total_reward += reward[0]
return (total_reward / float(N))
def simulate(model, train_mode=False, render_mode=True, num_episode=5, seed=-1, max_len=-1):
reward_list = []
t_list = []
max_episode_length = 2100
if train_mode and max_len > 0:
max_episode_length = max_len
if (seed >= 0):
random.seed(seed)
np.random.seed(seed)
model.env.seed(seed)
for episode in range(num_episode):
obs = model.env.reset()
if obs is None:
obs = np.zeros(model.input_size)
total_reward = 0.0
for t in range(max_episode_length):
if render_mode:
model.env.render("human")
if RENDER_DELAY:
time.sleep(0.01)
action = model.get_action(obs)
prev_obs = obs
obs, reward, done, info = model.env.step(action)
if (render_mode):
pass
#print("action", action, "step reward", reward)
#print("step reward", reward)
total_reward += reward
if done:
break
if render_mode:
print("reward", total_reward, "timesteps", t)
model.env.close()
reward_list.append(total_reward)
t_list.append(t)
return reward_list, t_list
def main():
global RENDER_DELAY
global final_mode
assert len(sys.argv) > 2, 'python model.py gamename render/norender path_to_model.json [seed]'
gamename = sys.argv[1]
game = config.games[gamename]
final_mode_string = str(sys.argv[2])
if (final_mode_string == "render"):
final_mode = False # don't run 100 times, just visualize results.
use_model = False
if (len(sys.argv) > 3):
use_model = True
filename = sys.argv[3]
print("filename", filename)
the_seed = np.random.randint(10000)
if len(sys.argv) > 4:
the_seed = int(sys.argv[4])
print("seed", the_seed)
model = make_model(game)
print('model size', model.param_count)
if (use_model):
model.make_env(render_mode=render_mode)
model.load_model(filename)
else:
model.make_env(render_mode=render_mode, load_model=False)
model.init_random_model_params(stdev=np.random.rand()*0.01)
if final_mode:
total_reward = 0.0
np.random.seed(the_seed)
model.env.seed(the_seed)
reward_list = []
for i in range(100):
reward, steps_taken = simulate(model, train_mode=False, render_mode=False, num_episode=1)
print("iteration", i, "reward", reward[0])
total_reward += reward[0]
reward_list.append(reward[0])
print("seed", the_seed, "average_reward", total_reward/100, "stdev", np.std(reward_list))
else:
reward, steps_taken = simulate(model,
train_mode=False, render_mode=render_mode, num_episode=1)
print ("terminal reward", reward, "average steps taken", np.mean(steps_taken)+1)
if __name__ == "__main__":
main()
|
tests/pytests/unit/state/test_sub_state_returns.py | tomdoherty/salt | 9,425 | 67922 |
<gh_stars>1000+
"""
:codeauthor: <NAME> <<EMAIL>>
"""
import logging
import pytest # pylint: disable=unused-import
from salt.utils.decorators import state as statedecorators
log = logging.getLogger(__name__)
def test_sub_state_output_check_changes_is_dict():
"""
Test that changes key contains a dictionary.
:return:
"""
data = {"changes": {}, "sub_state_run": [{"changes": []}]}
out = statedecorators.OutputUnifier("content_check")(lambda: data)()
assert "'Changes' should be a dictionary" in out["sub_state_run"][0]["comment"]
assert not out["sub_state_run"][0]["result"]
def test_sub_state_output_check_return_is_dict():
"""
Test for the entire return is a dictionary
:return:
"""
data = {"sub_state_run": [["whatever"]]}
out = statedecorators.OutputUnifier("content_check")(lambda: data)()
assert (
"Malformed state return. Data must be a dictionary type"
in out["sub_state_run"][0]["comment"]
)
assert not out["sub_state_run"][0]["result"]
def test_sub_state_output_check_return_has_nrc():
"""
Test for name/result/comment keys are inside the return.
:return:
"""
data = {"sub_state_run": [{"arbitrary": "data", "changes": {}}]}
out = statedecorators.OutputUnifier("content_check")(lambda: data)()
assert (
" The following keys were not present in the state return: name, result, comment"
in out["sub_state_run"][0]["comment"]
)
assert not out["sub_state_run"][0]["result"]
def test_sub_state_output_unifier_comment_is_not_list():
"""
Test for output is unified so the comment is converted to a multi-line string
:return:
"""
data = {
"sub_state_run": [
{
"comment": ["data", "in", "the", "list"],
"changes": {},
"name": None,
"result": "fantastic!",
}
]
}
expected = {
"sub_state_run": [
{
"comment": "data\nin\nthe\nlist",
"changes": {},
"name": None,
"result": True,
}
]
}
assert statedecorators.OutputUnifier("unify")(lambda: data)() == expected
data = {
"sub_state_run": [
{
"comment": ["data", "in", "the", "list"],
"changes": {},
"name": None,
"result": None,
}
]
}
expected = "data\nin\nthe\nlist"
assert (
statedecorators.OutputUnifier("unify")(lambda: data)()["sub_state_run"][0][
"comment"
]
== expected
)
def test_sub_state_output_unifier_result_converted_to_true():
"""
Test for output is unified so the result is converted to True
:return:
"""
data = {
"sub_state_run": [
{
"comment": ["data", "in", "the", "list"],
"changes": {},
"name": None,
"result": "Fantastic",
}
]
}
assert (
statedecorators.OutputUnifier("unify")(lambda: data)()["sub_state_run"][0][
"result"
]
is True
)
def test_sub_state_output_unifier_result_converted_to_false():
"""
Test for output is unified so the result is converted to False
:return:
"""
data = {
"sub_state_run": [
{
"comment": ["data", "in", "the", "list"],
"changes": {},
"name": None,
"result": "",
}
]
}
assert (
statedecorators.OutputUnifier("unify")(lambda: data)()["sub_state_run"][0][
"result"
]
is False
)
|
PyFlow/Packages/PyFlowBase/Nodes/rerouteExecs.py | luzpaz/PyFlow | 1,463 | 67969 |
<filename>PyFlow/Packages/PyFlowBase/Nodes/rerouteExecs.py<gh_stars>1000+
## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from PyFlow.Core import NodeBase
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Core.Common import *
class rerouteExecs(NodeBase):
def __init__(self, name):
super(rerouteExecs, self).__init__(name)
self.input = self.createInputPin("in", 'ExecPin')
self.output = self.createOutputPin("out", 'ExecPin')
pinAffects(self.input, self.output)
self.input.call = self.output.call
def postCreate(self, jsonTemplate=None):
super(rerouteExecs, self).postCreate(jsonTemplate=jsonTemplate)
self.setName("reroute")
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
helper.addInputDataType('ExecPin')
helper.addOutputDataType('ExecPin')
helper.addInputStruct(StructureType.Single)
helper.addOutputStruct(StructureType.Single)
return helper
@staticmethod
def category():
return 'Common'
def compute(self, *args, **kwargs):
pass
|
venv/Lib/site-packages/pythonwin/pywin/scintilla/find.py | ajayiagbebaku/NFL-Model | 150 | 67983 |
<filename>venv/Lib/site-packages/pythonwin/pywin/scintilla/find.py
# find.py - Find and Replace
import win32con, win32api
import win32ui
from pywin.mfc import dialog
import afxres
from pywin.framework import scriptutils
FOUND_NOTHING = 0
FOUND_NORMAL = 1
FOUND_LOOPED_BACK = 2
FOUND_NEXT_FILE = 3
class SearchParams:
def __init__(self, other=None):
if other is None:
self.__dict__["findText"] = ""
self.__dict__["replaceText"] = ""
self.__dict__["matchCase"] = 0
self.__dict__["matchWords"] = 0
self.__dict__["acrossFiles"] = 0
self.__dict__["remember"] = 1
self.__dict__["sel"] = (-1, -1)
self.__dict__["keepDialogOpen"] = 0
else:
self.__dict__.update(other.__dict__)
# Helper so we can't misspell attributes :-)
def __setattr__(self, attr, val):
if not hasattr(self, attr):
raise AttributeError(attr)
self.__dict__[attr] = val
curDialog = None
lastSearch = defaultSearch = SearchParams()
searchHistory = []
def ShowFindDialog():
_ShowDialog(FindDialog)
def ShowReplaceDialog():
_ShowDialog(ReplaceDialog)
def _ShowDialog(dlgClass):
global curDialog
if curDialog is not None:
if curDialog.__class__ != dlgClass:
curDialog.DestroyWindow()
curDialog = None
else:
curDialog.SetFocus()
if curDialog is None:
curDialog = dlgClass()
curDialog.CreateWindow()
def FindNext():
params = SearchParams(lastSearch)
params.sel = (-1, -1)
if not params.findText:
ShowFindDialog()
else:
return _FindIt(None, params)
def _GetControl(control=None):
if control is None:
control = scriptutils.GetActiveEditControl()
return control
def _FindIt(control, searchParams):
global lastSearch, defaultSearch
control = _GetControl(control)
if control is None:
return FOUND_NOTHING
# Move to the next char, so we find the next one.
flags = 0
if searchParams.matchWords:
flags = flags | win32con.FR_WHOLEWORD
if searchParams.matchCase:
flags = flags | win32con.FR_MATCHCASE
if searchParams.sel == (-1, -1):
sel = control.GetSel()
# If the position is the same as we found last time,
# then we assume it is a "FindNext"
if sel == lastSearch.sel:
sel = sel[0] + 1, sel[0] + 1
else:
sel = searchParams.sel
if sel[0] == sel[1]:
sel = sel[0], control.GetTextLength()
rc = FOUND_NOTHING
# (Old edit control will fail here!)
posFind, foundSel = control.FindText(flags, sel, searchParams.findText)
lastSearch = SearchParams(searchParams)
if posFind >= 0:
rc = FOUND_NORMAL
lineno = control.LineFromChar(posFind)
control.SCIEnsureVisible(lineno)
control.SetSel(foundSel)
control.SetFocus()
win32ui.SetStatusText(win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE))
if rc == FOUND_NOTHING and lastSearch.acrossFiles:
# Loop around all documents. First find this document.
try:
try:
doc = control.GetDocument()
except AttributeError:
try:
doc = control.GetParent().GetDocument()
except AttributeError:
print("Cant find a document for the control!")
doc = None
if doc is not None:
template = doc.GetDocTemplate()
alldocs = template.GetDocumentList()
mypos = lookpos = alldocs.index(doc)
while 1:
lookpos = (lookpos + 1) % len(alldocs)
if lookpos == mypos:
break
view = alldocs[lookpos].GetFirstView()
posFind, foundSel = view.FindText(
flags, (0, view.GetTextLength()), searchParams.findText
)
if posFind >= 0:
nChars = foundSel[1] - foundSel[0]
lineNo = view.LineFromChar(posFind) # zero based.
lineStart = view.LineIndex(lineNo)
colNo = posFind - lineStart # zero based.
scriptutils.JumpToDocument(
alldocs[lookpos].GetPathName(),
lineNo + 1,
colNo + 1,
nChars,
)
rc = FOUND_NEXT_FILE
break
except win32ui.error:
pass
if rc == FOUND_NOTHING:
# Loop around this control - attempt to find from the start of the control.
posFind, foundSel = control.FindText(
flags, (0, sel[0] - 1), searchParams.findText
)
if posFind >= 0:
control.SCIEnsureVisible(control.LineFromChar(foundSel[0]))
control.SetSel(foundSel)
control.SetFocus()
win32ui.SetStatusText("Not found! Searching from the top of the file.")
rc = FOUND_LOOPED_BACK
else:
lastSearch.sel = -1, -1
win32ui.SetStatusText("Can not find '%s'" % searchParams.findText)
if rc != FOUND_NOTHING:
lastSearch.sel = foundSel
if lastSearch.remember:
defaultSearch = lastSearch
# track search history
try:
ix = searchHistory.index(searchParams.findText)
except ValueError:
if len(searchHistory) > 50:
searchHistory[50:] = []
else:
del searchHistory[ix]
searchHistory.insert(0, searchParams.findText)
return rc
def _ReplaceIt(control):
control = _GetControl(control)
statusText = "Can not find '%s'." % lastSearch.findText
rc = FOUND_NOTHING
if control is not None and lastSearch.sel != (-1, -1):
control.ReplaceSel(lastSearch.replaceText)
rc = FindNext()
if rc != FOUND_NOTHING:
statusText = win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE)
win32ui.SetStatusText(statusText)
return rc
class FindReplaceDialog(dialog.Dialog):
def __init__(self):
dialog.Dialog.__init__(self, self._GetDialogTemplate())
self.HookCommand(self.OnFindNext, 109)
def OnInitDialog(self):
self.editFindText = self.GetDlgItem(102)
self.butMatchWords = self.GetDlgItem(105)
self.butMatchCase = self.GetDlgItem(107)
self.butKeepDialogOpen = self.GetDlgItem(115)
self.butAcrossFiles = self.GetDlgItem(116)
self.butRemember = self.GetDlgItem(117)
self.editFindText.SetWindowText(defaultSearch.findText)
control = _GetControl()
# _GetControl only gets normal MDI windows; if the interactive
# window is docked and no document open, we get None.
if control:
# If we have a selection, default to that.
sel = control.GetSelText()
if len(sel) != 0:
self.editFindText.SetWindowText(sel)
if defaultSearch.remember:
defaultSearch.findText = sel
for hist in searchHistory:
self.editFindText.AddString(hist)
if hasattr(self.editFindText, "SetEditSel"):
self.editFindText.SetEditSel(0, -2)
else:
self.editFindText.SetSel(0, -2)
self.editFindText.SetFocus()
self.butMatchWords.SetCheck(defaultSearch.matchWords)
self.butMatchCase.SetCheck(defaultSearch.matchCase)
self.butKeepDialogOpen.SetCheck(defaultSearch.keepDialogOpen)
self.butAcrossFiles.SetCheck(defaultSearch.acrossFiles)
self.butRemember.SetCheck(defaultSearch.remember)
return dialog.Dialog.OnInitDialog(self)
def OnDestroy(self, msg):
global curDialog
curDialog = None
return dialog.Dialog.OnDestroy(self, msg)
def DoFindNext(self):
params = SearchParams()
params.findText = self.editFindText.GetWindowText()
params.matchCase = self.butMatchCase.GetCheck()
params.matchWords = self.butMatchWords.GetCheck()
params.acrossFiles = self.butAcrossFiles.GetCheck()
params.remember = self.butRemember.GetCheck()
return _FindIt(None, params)
def OnFindNext(self, id, code):
if not self.editFindText.GetWindowText():
win32api.MessageBeep()
return
if self.DoFindNext() != FOUND_NOTHING:
if not self.butKeepDialogOpen.GetCheck():
self.DestroyWindow()
class FindDialog(FindReplaceDialog):
def _GetDialogTemplate(self):
style = (
win32con.DS_MODALFRAME
| win32con.WS_POPUP
| win32con.WS_VISIBLE
| win32con.WS_CAPTION
| win32con.WS_SYSMENU
| win32con.DS_SETFONT
)
visible = win32con.WS_CHILD | win32con.WS_VISIBLE
dt = [
["Find", (0, 2, 240, 75), style, None, (8, "MS Sans Serif")],
["Static", "Fi&nd What:", 101, (5, 8, 40, 10), visible],
[
"ComboBox",
"",
102,
(50, 7, 120, 120),
visible
| win32con.WS_BORDER
| win32con.WS_TABSTOP
| win32con.WS_VSCROLL
| win32con.CBS_DROPDOWN
| win32con.CBS_AUTOHSCROLL,
],
[
"Button",
"Match &whole word only",
105,
(5, 23, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"Match &case",
107,
(5, 33, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"Keep &dialog open",
115,
(5, 43, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"Across &open files",
116,
(5, 52, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"&Remember as default search",
117,
(5, 61, 150, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"&Find Next",
109,
(185, 5, 50, 14),
visible | win32con.BS_DEFPUSHBUTTON | win32con.WS_TABSTOP,
],
[
"Button",
"Cancel",
win32con.IDCANCEL,
(185, 23, 50, 14),
visible | win32con.WS_TABSTOP,
],
]
return dt
class ReplaceDialog(FindReplaceDialog):
def _GetDialogTemplate(self):
style = (
win32con.DS_MODALFRAME
| win32con.WS_POPUP
| win32con.WS_VISIBLE
| win32con.WS_CAPTION
| win32con.WS_SYSMENU
| win32con.DS_SETFONT
)
visible = win32con.WS_CHILD | win32con.WS_VISIBLE
dt = [
["Replace", (0, 2, 240, 95), style, 0, (8, "MS Sans Serif")],
["Static", "Fi&nd What:", 101, (5, 8, 40, 10), visible],
[
"ComboBox",
"",
102,
(60, 7, 110, 120),
visible
| win32con.WS_BORDER
| win32con.WS_TABSTOP
| win32con.WS_VSCROLL
| win32con.CBS_DROPDOWN
| win32con.CBS_AUTOHSCROLL,
],
["Static", "Re&place with:", 103, (5, 25, 50, 10), visible],
[
"ComboBox",
"",
104,
(60, 24, 110, 120),
visible
| win32con.WS_BORDER
| win32con.WS_TABSTOP
| win32con.WS_VSCROLL
| win32con.CBS_DROPDOWN
| win32con.CBS_AUTOHSCROLL,
],
[
"Button",
"Match &whole word only",
105,
(5, 42, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"Match &case",
107,
(5, 52, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"Keep &dialog open",
115,
(5, 62, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"Across &open files",
116,
(5, 72, 100, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"&Remember as default search",
117,
(5, 81, 150, 10),
visible | win32con.BS_AUTOCHECKBOX | win32con.WS_TABSTOP,
],
[
"Button",
"&Find Next",
109,
(185, 5, 50, 14),
visible | win32con.BS_DEFPUSHBUTTON | win32con.WS_TABSTOP,
],
[
"Button",
"&Replace",
110,
(185, 23, 50, 14),
visible | win32con.WS_TABSTOP,
],
[
"Button",
"Replace &All",
111,
(185, 41, 50, 14),
visible | win32con.WS_TABSTOP,
],
[
"Button",
"Cancel",
win32con.IDCANCEL,
(185, 59, 50, 14),
visible | win32con.WS_TABSTOP,
],
]
return dt
def OnInitDialog(self):
rc = FindReplaceDialog.OnInitDialog(self)
self.HookCommand(self.OnReplace, 110)
self.HookCommand(self.OnReplaceAll, 111)
self.HookMessage(self.OnActivate, win32con.WM_ACTIVATE)
self.editReplaceText = self.GetDlgItem(104)
self.editReplaceText.SetWindowText(lastSearch.replaceText)
if hasattr(self.editReplaceText, "SetEditSel"):
self.editReplaceText.SetEditSel(0, -2)
else:
self.editReplaceText.SetSel(0, -2)
self.butReplace = self.GetDlgItem(110)
self.butReplaceAll = self.GetDlgItem(111)
self.CheckButtonStates()
return rc
def CheckButtonStates(self):
# We can do a "Replace" or "Replace All" if the current selection
# is the same as the search text.
ft = self.editFindText.GetWindowText()
control = _GetControl()
# bCanReplace = len(ft)>0 and control.GetSelText() == ft
bCanReplace = control is not None and lastSearch.sel == control.GetSel()
self.butReplace.EnableWindow(bCanReplace)
# self.butReplaceAll.EnableWindow(bCanReplace)
def OnActivate(self, msg):
wparam = msg[2]
fActive = win32api.LOWORD(wparam)
if fActive != win32con.WA_INACTIVE:
self.CheckButtonStates()
def OnFindNext(self, id, code):
self.DoFindNext()
self.CheckButtonStates()
def OnReplace(self, id, code):
lastSearch.replaceText = self.editReplaceText.GetWindowText()
_ReplaceIt(None)
def OnReplaceAll(self, id, code):
control = _GetControl(None)
if control is not None:
control.SetSel(0)
num = 0
if self.DoFindNext() == FOUND_NORMAL:
num = 1
lastSearch.replaceText = self.editReplaceText.GetWindowText()
while _ReplaceIt(control) == FOUND_NORMAL:
num = num + 1
win32ui.SetStatusText("Replaced %d occurrences" % num)
if num > 0 and not self.butKeepDialogOpen.GetCheck():
self.DestroyWindow()
if __name__ == "__main__":
ShowFindDialog()
|
aiida/cmdline/utils/query/calculation.py | aiidateam/aiida_core | 153 | 67985 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""A utility module with a factory of standard QueryBuilder instances for Calculation nodes."""
from aiida.cmdline.utils.query.mapping import CalculationProjectionMapper
from aiida.common.lang import classproperty
class CalculationQueryBuilder:
"""Utility class to construct a QueryBuilder instance for Calculation nodes and project the query set."""
# This tuple marks compound projections that cannot be projected directly in the QueryBuilder and instead
# have to be composed manually from their individual projection constituents
_compound_projections = ('state',)
_default_projections = ('pk', 'ctime', 'process_label', 'state', 'process_status')
_valid_projections = (
'pk', 'uuid', 'ctime', 'mtime', 'state', 'process_state', 'process_status', 'exit_status', 'sealed',
'process_label', 'label', 'description', 'node_type', 'paused', 'process_type', 'job_state', 'scheduler_state',
'exception'
)
def __init__(self, mapper=None):
if mapper is None:
self._mapper = CalculationProjectionMapper(self._valid_projections)
else:
self._mapper = mapper
@property
def mapper(self):
return self._mapper
@classproperty
def default_projections(self):
return self._default_projections
@classproperty
def valid_projections(self):
return self._valid_projections
def get_filters(
self,
all_entries=False,
process_state=None,
process_label=None,
paused=False,
exit_status=None,
failed=False,
node_types=None
):
"""
Return a set of QueryBuilder filters based on typical command line options.
:param node_types: a tuple of node classes to filter for (must be sub classes of Calculation)
:param all_entries: boolean to negate filtering for process state
:param process_state: filter for this process state attribute
:param process_label: filter for this process label attribute
:param paused: boolean, if True, filter for processes that are paused
:param exit_status: filter for this exit status
:param failed: boolean to filter only failed processes
:return: dictionary of filters suitable for a QueryBuilder.append() call
"""
# pylint: disable=too-many-arguments
from aiida.engine import ProcessState
exit_status_attribute = self.mapper.get_attribute('exit_status')
process_label_attribute = self.mapper.get_attribute('process_label')
process_state_attribute = self.mapper.get_attribute('process_state')
paused_attribute = self.mapper.get_attribute('paused')
filters = {}
if node_types is not None:
filters['or'] = []
for node_class in node_types:
filters['or'].append({'type': node_class.class_node_type})
if process_state and not all_entries:
filters[process_state_attribute] = {'in': process_state}
if process_label is not None:
if '%' in process_label or '_' in process_label:
filters[process_label_attribute] = {'like': process_label}
else:
filters[process_label_attribute] = process_label
if paused:
filters[paused_attribute] = True
if failed:
filters[process_state_attribute] = {'==': ProcessState.FINISHED.value}
filters[exit_status_attribute] = {'>': 0}
if exit_status is not None:
filters[process_state_attribute] = {'==': ProcessState.FINISHED.value}
filters[exit_status_attribute] = {'==': exit_status}
return filters
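# Illustrative sketch (not part of the module): given the logic above, a call such as
# CalculationQueryBuilder().get_filters(failed=True) is expected to yield roughly
# {<process_state attribute>: {'==': 'finished'}, <exit_status attribute>: {'>': 0}},
# where the concrete attribute keys come from CalculationProjectionMapper.get_attribute.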
def get_query_set(self, relationships=None, filters=None, order_by=None, past_days=None, limit=None):
"""
Return the query set of calculations for the given filters and query parameters
:param relationships: a mapping of relationships to join on, e.g. {'with_node': Group} to join on a Group. Each
key should be the keyword used in the `append` method of the `QueryBuilder` to join on the entity that is
given as the corresponding value.
:param filters: rules to filter query results with
:param order_by: order the query set by this criterion
:param past_days: only include entries from the last past days
:param limit: limit the query set to this number of entries
:return: the query set, a list of dictionaries
"""
import datetime
from aiida import orm
from aiida.common import timezone
# Define the list of projections for the QueryBuilder, which are all valid minus the compound projections
projected_attributes = [
self.mapper.get_attribute(projection)
for projection in self._valid_projections
if projection not in self._compound_projections
]
if filters is None:
filters = {}
if past_days is not None:
filters['ctime'] = {'>': timezone.now() - datetime.timedelta(days=past_days)}
builder = orm.QueryBuilder()
builder.append(cls=orm.ProcessNode, filters=filters, project=projected_attributes, tag='process')
if relationships is not None:
for tag, entity in relationships.items():
builder.append(cls=type(entity), filters={'id': entity.pk}, **{tag: 'process'})
if order_by is not None:
builder.order_by({'process': order_by})
else:
builder.order_by({'process': {'ctime': 'asc'}})
if limit is not None:
builder.limit(limit)
return builder.iterdict()
def get_projected(self, query_set, projections):
"""
Project the query set for the given set of projections
"""
header = [self.mapper.get_label(projection) for projection in projections]
result = [header]
for query_result in query_set:
result_row = [self.mapper.format(projection, query_result['process']) for projection in projections]
result.append(result_row)
return result
|
twistedcaldav/config.py | backwardn/ccs-calendarserver | 462 | 67991 |
<filename>twistedcaldav/config.py<gh_stars>100-1000
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
__all__ = [
"Config",
"ConfigDict",
"ConfigProvider",
"ConfigurationError",
"config",
]
import copy
import hashlib
import os
class ConfigurationError(RuntimeError):
"""
Invalid server configuration.
"""
class ConfigDict(dict):
"""
Dictionary which can be accessed using attribute syntax, because
that reads and writes nicer in code. For example:
C{config.Thingo.Tiny.Tweak}
instead of:
C{config["Thingo"]["Tiny"]["Tweak"]}
"""
def __init__(self, mapping=None):
if mapping is not None:
for key, value in mapping.iteritems():
self[key] = value
def __repr__(self):
return "*" + dict.__repr__(self)
def __setitem__(self, key, value):
if key.startswith("_"):
# Names beginning with "_" are reserved for real attributes
raise KeyError("Keys may not begin with '_': %s" % (key,))
if isinstance(value, dict) and not isinstance(value, self.__class__):
dict.__setitem__(self, key, self.__class__(value))
else:
dict.__setitem__(self, key, value)
def __setattr__(self, attr, value):
if attr.startswith("_"):
dict.__setattr__(self, attr, value)
else:
self[attr] = value
def __getattr__(self, attr):
if not attr.startswith("_") and attr in self:
return self[attr]
else:
return dict.__getattribute__(self, attr)
def __delattr__(self, attr):
if not attr.startswith("_") and attr in self:
del self[attr]
else:
dict.__delattr__(self, attr)
class ConfigProvider(object):
"""
Configuration provider, abstraction for config storage/format/defaults.
"""
def __init__(self, defaults=None):
"""
Create configuration provider with given defaults.
"""
self._configFileName = None
if defaults is None:
self._defaults = ConfigDict()
else:
self._defaults = ConfigDict(copy.deepcopy(defaults))
self.importedFiles = []
self.includedFiles = []
self.missingFiles = []
def getDefaults(self):
"""
Return defaults.
"""
return self._defaults
def setDefaults(self, defaults):
"""
Change defaults.
"""
self._defaults = ConfigDict(copy.deepcopy(defaults))
def getConfigFileName(self):
"""
Return current configuration file path and name.
"""
return self._configFileName
def setConfigFileName(self, configFileName):
"""
Change configuration file path and name for next load operations.
"""
self._configFileName = configFileName
if self._configFileName:
self._configFileName = os.path.abspath(configFileName)
def hasErrors(self):
"""
Return true if last load operation encountered any errors.
"""
return False
def loadConfig(self):
"""
Load the configuration, return a dictionary of settings.
"""
return self._defaults
class Config(object):
def __init__(self, provider=None):
if not provider:
self._provider = ConfigProvider()
else:
self._provider = provider
self._updating = False
self._beforeResetHook = None
self._afterResetHook = None
self._preUpdateHooks = []
self._postUpdateHooks = []
self.reset()
def __setattr__(self, attr, value):
if "_data" in self.__dict__ and attr in self.__dict__["_data"]:
self._data[attr] = value
else:
self.__dict__[attr] = value
# So as not to cause a flurry of updates, don't mark ourselves
# dirty when the attribute begins with an underscore
if not attr.startswith("_"):
self.__dict__["_dirty"] = True
_dirty = False
_data = ()
def __getattr__(self, attr):
if self._dirty:
self.update()
if attr in self._data:
return self._data[attr]
raise AttributeError(attr)
def __hasattr__(self, attr):
return attr in self._data
def __str__(self):
return str(self._data)
def get(self, attr, defaultValue):
parts = attr.split(".")
lastDict = self._data
for part in parts[:-1]:
if part not in lastDict:
lastDict[part] = ConfigDict()
lastDict = lastDict.__getattr__(part)
configItem = parts[-1]
if configItem in lastDict:
return lastDict[configItem]
else:
lastDict[configItem] = defaultValue
return defaultValue
def addResetHooks(self, before, after):
"""
Hooks for preserving config across reload( ) + reset( )
Each hook will be passed the config data; whatever the before hook
returns will be passed as the second arg to the after hook.
"""
self._beforeResetHook = before
self._afterResetHook = after
def addPreUpdateHooks(self, hooks):
self._preUpdateHooks.extend(hooks)
def addPostUpdateHooks(self, hooks):
self._postUpdateHooks.extend(hooks)
def getProvider(self):
return self._provider
def setProvider(self, provider):
self._provider = provider
self.reset()
def setDefaults(self, defaults):
self._provider.setDefaults(defaults)
self.reset()
def updateDefaults(self, items):
mergeData(self._provider.getDefaults(), items)
self.update(items)
def update(self, items=None, reloading=False):
if self._updating:
return
self._updating = True
if not isinstance(items, ConfigDict):
items = ConfigDict(items)
# Call hooks
for hook in self._preUpdateHooks:
hook(self._data, items, reloading=reloading)
mergeData(self._data, items)
for hook in self._postUpdateHooks:
hook(self._data, reloading=reloading)
self._updating = False
self._dirty = False
self._cachedSyncToken = None
def load(self, configFile):
self._provider.setConfigFileName(configFile)
configDict = self._provider.loadConfig()
if not self._provider.hasErrors():
self.update(configDict)
else:
raise ConfigurationError("Invalid configuration in %s"
% (self._provider.getConfigFileName(),))
def reload(self):
configDict = self._provider.loadConfig()
if not self._provider.hasErrors():
if self._beforeResetHook:
# Give the beforeResetHook a chance to stash away values we want
# to preserve across the reload( )
preserved = self._beforeResetHook(self._data)
else:
preserved = None
self.reset()
if preserved and self._afterResetHook:
# Pass the preserved data back to the afterResetHook
self._afterResetHook(self._data, preserved)
self.update(configDict, reloading=True)
else:
raise ConfigurationError(
"Invalid configuration in %s"
% (self._provider.getConfigFileName(), ))
def reset(self):
self._data = ConfigDict(copy.deepcopy(self._provider.getDefaults()))
self._dirty = True
self._syncTokenKeys = []
self._cachedSyncToken = None
def getKeyPath(self, keyPath):
"""
Allows the getting of arbitrary nested dictionary keys via a single
dot-separated string. For example, getKeyPath(self, "foo.bar.baz")
would fetch parent["foo"]["bar"]["baz"]. If any of the keys don't
exist, None is returned instead.
@param keyPath: a dot-delimited string specifying the path of keys to
traverse
@type keyPath: C{str}
@return: the value at keyPath
"""
parent = self
parts = keyPath.split(".")
for part in parts[:-1]:
child = parent.get(part, None)
if child is None:
return None
parent = child
return parent.get(parts[-1], None)
def addSyncTokenKey(self, keyPath):
"""
Indicates the specified key should be taken into account when generating
the sync token. Also invalidates the (possibly) cached syncToken.
@param keyPath: a dot-delimited string specifying the path of keys to
traverse
@type keyPath: C{str}
"""
if keyPath not in self._syncTokenKeys:
self._syncTokenKeys.append(keyPath)
self._cachedSyncToken = None
def syncToken(self):
"""
Iterates the previously registered keys (sorted, so the order in which
the keys were registered doesn't affect the hash) and generates an MD5
hash of the combined values. The hash is cached, and is invalidated
during a reload or if invalidateSyncToken is called.
@return: the sync token
@rtype: C{str}
"""
if self._cachedSyncToken is None:
pieces = []
self._syncTokenKeys.sort()
for key in self._syncTokenKeys:
value = self.getKeyPath(key)
if value is None:
value = ""
pieces.append(key + ":" + str(value))
whole = "|".join(pieces)
self._cachedSyncToken = hashlib.md5(whole).hexdigest()
return self._cachedSyncToken
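# Worked example (a sketch with assumed keys, not taken from the codebase): with registered
# keys "ServerRoot" and "EnableSSL" holding "/opt/server" and True, the hashed string is
# "EnableSSL:True|ServerRoot:/opt/server" (keys sorted) and the token is its MD5 hex digest.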
def invalidateSyncToken(self):
"""
Invalidates the cached copy of the sync token.
"""
self._cachedSyncToken = None
def joinToken(self, dataToken):
"""
Joins the config sync token with the dataToken. If EnableConfigSyncToken
is False, the original dataToken is just returned
"""
if self.EnableConfigSyncToken:
configToken = self.syncToken()
return "{}/{}".format(dataToken, configToken)
else:
return dataToken
def mergeData(oldData, newData):
"""
Merge two ConfigDict objects; oldData will be updated with all the keys
and values from newData
@param oldData: the object to modify
@type oldData: ConfigDict
@param newData: the object to copy data from
@type newData: ConfigDict
"""
for key, value in newData.iteritems():
if isinstance(value, (dict,)):
if key in oldData:
assert isinstance(oldData[key], ConfigDict), \
"%r in %r is not a ConfigDict" % (oldData[key], oldData)
else:
oldData[key] = {}
mergeData(oldData[key], value)
else:
oldData[key] = value
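# Quick illustration of mergeData (assumed values, not from the codebase): merging
# ConfigDict({"A": {"x": 1}}) with ConfigDict({"A": {"y": 2}, "B": 3}) leaves oldData as
# {"A": {"x": 1, "y": 2}, "B": 3}; nested ConfigDicts are merged key by key, while
# non-dict values simply overwrite the old ones.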
def fullServerPath(base, path):
if type(path) is str:
return os.path.join(base, path) if path and path[0] not in ('/', '.',) else path
else:
return path
config = Config()
|
extraPackages/matplotlib-3.0.3/examples/userdemo/connect_simple01.py | dolboBobo/python3_ios | 130 | 68010 |
<reponame>dolboBobo/python3_ios
"""
================
Connect Simple01
================
"""
from matplotlib.patches import ConnectionPatch
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3))
xyA = (0.2, 0.2)
xyB = (0.8, 0.8)
coordsA = "data"
coordsB = "data"
con = ConnectionPatch(xyA, xyB, coordsA, coordsB,
arrowstyle="-|>", shrinkA=5, shrinkB=5,
mutation_scale=20, fc="w")
ax1.plot([xyA[0], xyB[0]], [xyA[1], xyB[1]], "o")
ax1.add_artist(con)
xy = (0.3, 0.2)
coordsA = "data"
coordsB = "data"
con = ConnectionPatch(xyA=xy, xyB=xy, coordsA=coordsA, coordsB=coordsB,
axesA=ax2, axesB=ax1,
arrowstyle="->", shrinkB=5)
ax2.add_artist(con)
ax1.set_xlim(0, 1)
ax1.set_ylim(0, 1)
ax2.set_xlim(0, .5)
ax2.set_ylim(0, .5)
plt.show()
|
alipay/aop/api/domain/RecomProduct.py | antopen/alipay-sdk-python-all | 213 | 68019 |
<filename>alipay/aop/api/domain/RecomProduct.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RecomPlan import RecomPlan
from alipay.aop.api.domain.ProdResource import ProdResource
class RecomProduct(object):
def __init__(self):
self._base_premium = None
self._biz_data = None
self._company_id = None
self._company_name = None
self._company_seller_id = None
self._company_seller_nick = None
self._company_service_phone = None
self._csu_no = None
self._max_quan = None
self._name = None
self._plans = None
self._premium = None
self._prod_no = None
self._recom_flow_no = None
self._resource_list = None
self._resources = None
self._restriction_type = None
self._sum_insured = None
self._type = None
@property
def base_premium(self):
return self._base_premium
@base_premium.setter
def base_premium(self, value):
self._base_premium = value
@property
def biz_data(self):
return self._biz_data
@biz_data.setter
def biz_data(self, value):
self._biz_data = value
@property
def company_id(self):
return self._company_id
@company_id.setter
def company_id(self, value):
self._company_id = value
@property
def company_name(self):
return self._company_name
@company_name.setter
def company_name(self, value):
self._company_name = value
@property
def company_seller_id(self):
return self._company_seller_id
@company_seller_id.setter
def company_seller_id(self, value):
self._company_seller_id = value
@property
def company_seller_nick(self):
return self._company_seller_nick
@company_seller_nick.setter
def company_seller_nick(self, value):
self._company_seller_nick = value
@property
def company_service_phone(self):
return self._company_service_phone
@company_service_phone.setter
def company_service_phone(self, value):
self._company_service_phone = value
@property
def csu_no(self):
return self._csu_no
@csu_no.setter
def csu_no(self, value):
self._csu_no = value
@property
def max_quan(self):
return self._max_quan
@max_quan.setter
def max_quan(self, value):
self._max_quan = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def plans(self):
return self._plans
@plans.setter
def plans(self, value):
if isinstance(value, RecomPlan):
self._plans = value
else:
self._plans = RecomPlan.from_alipay_dict(value)
@property
def premium(self):
return self._premium
@premium.setter
def premium(self, value):
self._premium = value
@property
def prod_no(self):
return self._prod_no
@prod_no.setter
def prod_no(self, value):
self._prod_no = value
@property
def recom_flow_no(self):
return self._recom_flow_no
@recom_flow_no.setter
def recom_flow_no(self, value):
self._recom_flow_no = value
@property
def resource_list(self):
return self._resource_list
@resource_list.setter
def resource_list(self, value):
if isinstance(value, list):
self._resource_list = list()
for i in value:
if isinstance(i, ProdResource):
self._resource_list.append(i)
else:
self._resource_list.append(ProdResource.from_alipay_dict(i))
@property
def resources(self):
return self._resources
@resources.setter
def resources(self, value):
if isinstance(value, ProdResource):
self._resources = value
else:
self._resources = ProdResource.from_alipay_dict(value)
@property
def restriction_type(self):
return self._restriction_type
@restriction_type.setter
def restriction_type(self, value):
self._restriction_type = value
@property
def sum_insured(self):
return self._sum_insured
@sum_insured.setter
def sum_insured(self, value):
self._sum_insured = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.base_premium:
if hasattr(self.base_premium, 'to_alipay_dict'):
params['base_premium'] = self.base_premium.to_alipay_dict()
else:
params['base_premium'] = self.base_premium
if self.biz_data:
if hasattr(self.biz_data, 'to_alipay_dict'):
params['biz_data'] = self.biz_data.to_alipay_dict()
else:
params['biz_data'] = self.biz_data
if self.company_id:
if hasattr(self.company_id, 'to_alipay_dict'):
params['company_id'] = self.company_id.to_alipay_dict()
else:
params['company_id'] = self.company_id
if self.company_name:
if hasattr(self.company_name, 'to_alipay_dict'):
params['company_name'] = self.company_name.to_alipay_dict()
else:
params['company_name'] = self.company_name
if self.company_seller_id:
if hasattr(self.company_seller_id, 'to_alipay_dict'):
params['company_seller_id'] = self.company_seller_id.to_alipay_dict()
else:
params['company_seller_id'] = self.company_seller_id
if self.company_seller_nick:
if hasattr(self.company_seller_nick, 'to_alipay_dict'):
params['company_seller_nick'] = self.company_seller_nick.to_alipay_dict()
else:
params['company_seller_nick'] = self.company_seller_nick
if self.company_service_phone:
if hasattr(self.company_service_phone, 'to_alipay_dict'):
params['company_service_phone'] = self.company_service_phone.to_alipay_dict()
else:
params['company_service_phone'] = self.company_service_phone
if self.csu_no:
if hasattr(self.csu_no, 'to_alipay_dict'):
params['csu_no'] = self.csu_no.to_alipay_dict()
else:
params['csu_no'] = self.csu_no
if self.max_quan:
if hasattr(self.max_quan, 'to_alipay_dict'):
params['max_quan'] = self.max_quan.to_alipay_dict()
else:
params['max_quan'] = self.max_quan
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.plans:
if hasattr(self.plans, 'to_alipay_dict'):
params['plans'] = self.plans.to_alipay_dict()
else:
params['plans'] = self.plans
if self.premium:
if hasattr(self.premium, 'to_alipay_dict'):
params['premium'] = self.premium.to_alipay_dict()
else:
params['premium'] = self.premium
if self.prod_no:
if hasattr(self.prod_no, 'to_alipay_dict'):
params['prod_no'] = self.prod_no.to_alipay_dict()
else:
params['prod_no'] = self.prod_no
if self.recom_flow_no:
if hasattr(self.recom_flow_no, 'to_alipay_dict'):
params['recom_flow_no'] = self.recom_flow_no.to_alipay_dict()
else:
params['recom_flow_no'] = self.recom_flow_no
if self.resource_list:
if isinstance(self.resource_list, list):
for i in range(0, len(self.resource_list)):
element = self.resource_list[i]
if hasattr(element, 'to_alipay_dict'):
self.resource_list[i] = element.to_alipay_dict()
if hasattr(self.resource_list, 'to_alipay_dict'):
params['resource_list'] = self.resource_list.to_alipay_dict()
else:
params['resource_list'] = self.resource_list
if self.resources:
if hasattr(self.resources, 'to_alipay_dict'):
params['resources'] = self.resources.to_alipay_dict()
else:
params['resources'] = self.resources
if self.restriction_type:
if hasattr(self.restriction_type, 'to_alipay_dict'):
params['restriction_type'] = self.restriction_type.to_alipay_dict()
else:
params['restriction_type'] = self.restriction_type
if self.sum_insured:
if hasattr(self.sum_insured, 'to_alipay_dict'):
params['sum_insured'] = self.sum_insured.to_alipay_dict()
else:
params['sum_insured'] = self.sum_insured
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RecomProduct()
if 'base_premium' in d:
o.base_premium = d['base_premium']
if 'biz_data' in d:
o.biz_data = d['biz_data']
if 'company_id' in d:
o.company_id = d['company_id']
if 'company_name' in d:
o.company_name = d['company_name']
if 'company_seller_id' in d:
o.company_seller_id = d['company_seller_id']
if 'company_seller_nick' in d:
o.company_seller_nick = d['company_seller_nick']
if 'company_service_phone' in d:
o.company_service_phone = d['company_service_phone']
if 'csu_no' in d:
o.csu_no = d['csu_no']
if 'max_quan' in d:
o.max_quan = d['max_quan']
if 'name' in d:
o.name = d['name']
if 'plans' in d:
o.plans = d['plans']
if 'premium' in d:
o.premium = d['premium']
if 'prod_no' in d:
o.prod_no = d['prod_no']
if 'recom_flow_no' in d:
o.recom_flow_no = d['recom_flow_no']
if 'resource_list' in d:
o.resource_list = d['resource_list']
if 'resources' in d:
o.resources = d['resources']
if 'restriction_type' in d:
o.restriction_type = d['restriction_type']
if 'sum_insured' in d:
o.sum_insured = d['sum_insured']
if 'type' in d:
o.type = d['type']
return o
|
torch2trt_dynamic/converters/mean.py | jinfagang/torch2trt_dynamic | 155 | 68024 |
<reponame>jinfagang/torch2trt_dynamic
import tensorrt as trt
import torch
from torch2trt_dynamic.module_test import add_module_test
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.mean')
@tensorrt_converter('torch.Tensor.mean')
def convert_mean(ctx):
input = ctx.method_args[0]
input_trt = trt_(ctx.network, input)
output = ctx.method_return
dim = get_arg(ctx, 'dim', pos=1, default=None)
keep_dims = get_arg(ctx, 'keepdim', pos=2, default=False)
# get dims from args or kwargs
if dim is None:
dim = tuple(range(len(input.shape)))
# convert list to tuple
if isinstance(dim, list):
dim = tuple(dim)
if not isinstance(dim, tuple):
dim = (dim, )
dim = tuple([d if d >= 0 else len(input.shape) + d for d in dim])
# create axes bitmask for reduce layer
axes = 0
for d in dim:
axes |= 1 << d
layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.AVG, axes,
keep_dims)
output._trt = layer.get_output(0)
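# Sketch of the axes bitmask built above (illustrative only): for a 4-dimensional input and
# dim=(1, 2), the loop sets bits 1 and 2, so axes == 0b0110 == 6, which tells the TensorRT
# reduce layer to average over those two dimensions while respecting keep_dims.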
class Mean(torch.nn.Module):
def __init__(self, dim, keepdim):
super(Mean, self).__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, x):
return x.mean(self.dim, self.keepdim)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_channel():
return Mean(1, False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_tuple():
return Mean((1, 2), False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_keepdim():
return Mean(1, True)
|
api/webhooks/tests/test_webhooks.py | SolidStateGroup/Bullet-Train-API | 126 | 68034 |
<filename>api/webhooks/tests/test_webhooks.py
import hashlib
import hmac
import json
from unittest import TestCase, mock
import pytest
from core.constants import FLAGSMITH_SIGNATURE_HEADER
from environments.models import Environment, Webhook
from organisations.models import Organisation, OrganisationWebhook
from projects.models import Project
from webhooks.sample_webhook_data import (
environment_webhook_data,
organisation_webhook_data,
)
from webhooks.webhooks import (
WebhookEventType,
WebhookType,
call_environment_webhooks,
trigger_sample_webhook,
)
@pytest.mark.django_db
class WebhooksTestCase(TestCase):
def setUp(self) -> None:
organisation = Organisation.objects.create(name="Test organisation")
project = Project.objects.create(name="Test project", organisation=organisation)
self.environment = Environment.objects.create(
name="Test environment", project=project
)
@mock.patch("webhooks.webhooks.requests")
def test_requests_made_to_all_urls_for_environment(self, mock_requests):
# Given
webhook_1 = Webhook.objects.create(
url="http://url.1.com", enabled=True, environment=self.environment
)
webhook_2 = Webhook.objects.create(
url="http://url.2.com", enabled=True, environment=self.environment
)
# When
call_environment_webhooks(
environment=self.environment,
data={},
event_type=WebhookEventType.FLAG_UPDATED,
)
# Then
assert len(mock_requests.post.call_args_list) == 2
# and
call_1_args, _ = mock_requests.post.call_args_list[0]
call_2_args, _ = mock_requests.post.call_args_list[1]
all_call_args = call_1_args + call_2_args
assert all(
str(webhook.url) in all_call_args for webhook in (webhook_1, webhook_2)
)
@mock.patch("webhooks.webhooks.requests")
def test_request_not_made_to_disabled_webhook(self, mock_requests):
# Given
Webhook.objects.create(
url="http://url.1.com", enabled=False, environment=self.environment
)
# When
call_environment_webhooks(
environment=self.environment,
data={},
event_type=WebhookEventType.FLAG_UPDATED,
)
# Then
mock_requests.post.assert_not_called()
@mock.patch("webhooks.webhooks.requests")
def test_trigger_sample_webhook_makes_correct_post_request_for_environment(
self, mock_request
):
url = "http://test.test"
webhook = Webhook(url=url)
trigger_sample_webhook(webhook, WebhookType.ENVIRONMENT)
args, kwargs = mock_request.post.call_args
assert json.loads(kwargs["data"]) == environment_webhook_data
assert args[0] == url
@mock.patch("webhooks.webhooks.requests")
def test_trigger_sample_webhook_makes_correct_post_request_for_organisation(
self, mock_request
):
url = "http://test.test"
webhook = OrganisationWebhook(url=url)
trigger_sample_webhook(webhook, WebhookType.ORGANISATION)
args, kwargs = mock_request.post.call_args
assert json.loads(kwargs["data"]) == organisation_webhook_data
assert args[0] == url
@mock.patch("webhooks.webhooks.WebhookSerializer")
@mock.patch("webhooks.webhooks.requests")
def test_request_made_with_correct_signature(
self, mock_requests, webhook_serializer
):
# Given
payload = {"key": "value"}
webhook_serializer.return_value.data = payload
secret = "random_key"
Webhook.objects.create(
url="http://url.1.com",
enabled=True,
environment=self.environment,
secret=secret,
)
expected_signature = hmac.new(
key=secret.encode(),
msg=json.dumps(payload).encode(),
digestmod=hashlib.sha256,
).hexdigest()
call_environment_webhooks(
environment=self.environment,
data={},
event_type=WebhookEventType.FLAG_UPDATED,
)
# When
_, kwargs = mock_requests.post.call_args_list[0]
# Then
received_signature = kwargs["headers"][FLAGSMITH_SIGNATURE_HEADER]
assert hmac.compare_digest(expected_signature, received_signature) is True
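# Receiver-side sketch (assumed, not part of this test suite): a webhook consumer would
# recompute hmac.new(secret.encode(), raw_request_body, hashlib.sha256).hexdigest() and
# compare it to the FLAGSMITH_SIGNATURE_HEADER value using hmac.compare_digest,
# mirroring the assertion above.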
@mock.patch("webhooks.webhooks.requests")
def test_request_does_not_have_signature_header_if_secret_is_not_set(
self, mock_requests
):
# Given
Webhook.objects.create(
url="http://url.1.com", enabled=True, environment=self.environment
)
# When
call_environment_webhooks(
environment=self.environment,
data={},
event_type=WebhookEventType.FLAG_UPDATED,
)
# Then
_, kwargs = mock_requests.post.call_args_list[0]
assert FLAGSMITH_SIGNATURE_HEADER not in kwargs["headers"]
|
setup.py | rajeshkppt/scispacy | 1,139 | 68037 |
from setuptools import setup, find_packages
"""
Instructions for creating a release of the scispacy library.
1. Make sure your working directory is clean.
2. Make sure that you have changed the versions in "scispacy/version.py".
3. Create the distribution by running "python setup.py sdist" in the root of the repository.
4. Check you can install the new distribution in a clean environment.
5. Upload the distribution to pypi by running "twine upload <path to the distribution> -u <username> -p <password>".
This step will ask you for a username and password - the username is "scispacy" and you can
get the password from LastPass.
"""
VERSION = {}
# version.py defines VERSION and VERSION_SHORT variables.
# We use exec here to read it so that we don't import scispacy
# whilst setting up the package.
with open("scispacy/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="scispacy",
version=VERSION["VERSION"],
url="https://allenai.github.io/SciSpaCy/",
author="Allen Institute for Artificial Intelligence",
author_email="<EMAIL>",
description="A full SpaCy pipeline and models for scientific/biomedical documents.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
keywords=["bioinformatics nlp spacy SpaCy biomedical"],
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
license="Apache",
install_requires=[
"spacy>=3.0.0,<3.1.0",
"requests>=2.0.0,<3.0.0",
"conllu",
"numpy",
"joblib",
"nmslib>=1.7.3.6",
"scikit-learn>=0.20.3",
"pysbd",
],
tests_require=["pytest", "pytest-cov", "flake8", "black", "mypy"],
python_requires=">=3.6.0",
)
|
outputformat/plot.py | JuPeg/outputformat | 146 | 68038 |
<reponame>JuPeg/outputformat<gh_stars>100-1000
from outputformat import emoji
def bar(
value,
maxvalue,
style="block",
length=32,
title=False,
title_pad=0,
show_values=True,
values_pad=0,
values_precision=2,
show_percentage=True,
return_str=False,
):
"""Generate a single bar using ASCII.
Parameters
----------
value : int, float
The value, or height, of this bar.
maxvalue : int, float
The maximum value this variable should have.
style : string
Style of bar. Options are: 'block', 'battery', 'bar', 'circle', 'star'
'block'.....: ▓▓▓▓▓▓▓▓▓▓▓▓░░░░░░
'battery'...: ┫████████████ ┣
'bar'.......: [■■■■■■■■■■■■ ]
'circle'....: ●●●●●●●●●●●●○○○○○○
'star': uses the Unicode star emoji '\U00002B50'
When using 'star', a low value for 'length' is recommended,
usually something like 3 or 5 (interesting for ratings).
A list of strings can also be passed, as:
>>> bar(35, 50, style=["(", "X", "-", ")"], title="Custom style")
Custom style: (XXXXXXXXXXXXXXXXXXXXXX----------) 35/50 ( 70.00%)
The characters in the list are used to build the bar,
and only the first 4 values in the list are used.
If a single string character is passed, it is used for a basic bar:
>>> bar(35, 50, style="$", title="Custom style")
Custom style: [$$$$$$$$$$$$$$$$$$$$$$ ] 35/50 ( 70.00%)
length : int
Total size of the bar (in characters) to be displayed.
title : string, optional
Text for a title displayed before the bar.
title_pad : int, optional
Padding to the right of the title. Useful to align several bars.
show_values : Bool
If True, shows the values as 'value/maxvalue' in front of the bar
show_percentage : Bool
If True, shows the percentage in front of the bar
return_str : Bool, default: False
If True, returns a string instead of printing.
Returns
-------
string
Only returned if 'return_str' is True; otherwise None.
"""
# Check that value does not exceed maxvalue
if value and value > maxvalue:
raise ValueError(f"'value' cannot be bigger than 'maxvalue' {emoji.crazy}")
if style in ["block"]:
start = ""
fill = "▓"
empty = "░"
end = ""
elif style in ["battery"]:
start = "┫"
fill = "█"
empty = " "
end = "┣"
elif style in ["bar"]:
start = "["
fill = "■"
empty = " "
end = "]"
elif style in ["circle"]:
start = ""
fill = "●"
empty = "○"
end = ""
elif style in ["star"]:
start = ""
fill = f"{emoji.star}"
empty = ""
end = ""
else:
# Use values given as a list
if isinstance(style, list):
start = str(style[0])
fill = str(style[1])
empty = str(style[2])
end = str(style[3])
# Just use the char given
else:
start = "["
fill = str(style)
empty = " " * len(str(style))
end = "]"
# Start outputstring
outputstring = ""
if title:
title = str(title)
outputstring += f"{title:.<{title_pad}}: "
# Check if it's not a NaN or None
if value == value and value:
ratio = value / maxvalue
nfill = int(ratio * length)
outputstring += start
outputstring += f"{fill*nfill}{empty*(length-nfill)}"
outputstring += end
if show_values:
values_pad += values_precision + 1
outputstring += f" {value:>{values_pad}.{values_precision}f}/{maxvalue:.{values_precision}f}"
if show_percentage:
outputstring += f" ({value/maxvalue:>7.2%})"
if return_str:
return outputstring
else:
print(outputstring)
def barlist(
values,
titles=False,
maxvalue=False,
style="bar",
length=32,
show_values=True,
values_precision=2,
show_percentage=True,
return_str=False,
):
    """Generate a horizontal bar for each value in a list, using ASCII.
Parameters
----------
values : list
list of values to be displayed (paired with 'titles').
titles : list
list of titles to be displayed (paired with 'values').
maxvalue : int, float, optional
The max value that any of the 'values' could have.
        If None (or False) is given, the maximum of all the values is used.
style : string
Style passed to outputformat.bar
Options are: 'block', 'battery', 'bar', 'circle', 'star'
'block'.....: ▓▓▓▓▓▓▓▓▓▓▓▓░░░░░░
'battery'...: ┫████████████ ┣
'bar'.......: [■■■■■■■■■■■■ ]
'circle'....: ●●●●●●●●●●●●○○○○○○
'star': uses the Unicode star emoji '\U00002B50'
        When using 'star', a low value for 'length' is recommended,
        usually something like 3 or 5 (useful for ratings).
        A list of strings can also be passed, as:
>>> bar(35, 50, style=["(", "X", "-", ")"], title="Custom style")
Custom style: (XXXXXXXXXXXXXXXXXXXXXX----------) 35/50 ( 70.00%)
The characters in the list are used to build the bar,
and only the first 4 values in the list are used.
If a single string character is passed, it is used for a basic bar:
>>> bar(35, 50, style="$", title="Custom style")
Custom style: [$$$$$$$$$$$$$$$$$$$$$$ ] 35/50 ( 70.00%)
length : int
Total size of the bars, in characters.
show_values : Bool
If True, shows the values as 'value/maxvalue' in front of the bar
show_percentage : Bool
If True, shows the percentage in front of the bar
return_str : Bool, default: False
If True, returns a string instead of printing.
Returns
-------
string
Only returns in case 'return_str = True', otherwise None
"""
# If titles are not provided, make an empty list
if not titles:
titles = [""] * len(values)
    # Negative values are not supported
for value in values:
try:
if value < 0:
raise ValueError("Negative values are not supported")
        except TypeError:
            # Entries that cannot be compared (e.g. None) are skipped here.
            pass
# Check if titles match values
if len(values) != len(titles):
errormsg = f"'values' and 'titles' must have the same length {emoji.sad}"
errormsg += f"\ntotal values: {len(values)}"
errormsg += f"\ntotal titles: {len(titles)}"
raise ValueError(errormsg)
# Initialize string
outputstring = ""
    # In case maxvalue is not given,
# uses the max from all the values
if not maxvalue:
# Get the max without breaking in case we find a "None"
maxvalue = max([v for v in values if v])
# Convert all titles to strings
titles = [str(t) for t in titles]
# Get the longest title to use the proper padding
longest_title = len(max(titles, key=len))
longest_value = len(str(int(maxvalue)))
# Create each row
for idx in range(len(values)):
outputstring += bar(
values[idx],
maxvalue,
style=style,
title=titles[idx],
title_pad=longest_title,
length=length,
show_values=show_values,
values_pad=longest_value,
values_precision=values_precision,
show_percentage=show_percentage,
return_str=True,
)
outputstring += "\n"
if return_str:
return outputstring
else:
print(outputstring)
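
# --- Hedged usage sketch (not part of the original module) ---
# A minimal demonstration of bar() and barlist(); the values and titles below are made up
# purely for illustration.
if __name__ == "__main__":
    bar(35, 50, style="block", title="Progress")
    barlist(
        [12, 7.5, 3],
        titles=["apples", "pears", "plums"],
        style="circle",
        length=20,
    )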
|
utils/env_vars.py
|
yu-iskw/elementary
| 282 |
68065
|
<filename>utils/env_vars.py
import os
def is_flight_mode_on() -> bool:
return is_env_var_on('FLIGHTMODE')
def is_debug_mode_on() -> bool:
return is_env_var_on('DEBUG')
def is_env_var_on(env_var) -> bool:
if os.getenv(env_var) == '1':
print(env_var, ' is on!')
return True
return False
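
# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates toggling behaviour through environment variables; the values set below are
# only for illustration.
if __name__ == '__main__':
    os.environ['DEBUG'] = '1'
    os.environ.pop('FLIGHTMODE', None)
    print(is_debug_mode_on())   # True (is_env_var_on also prints that DEBUG is on)
    print(is_flight_mode_on())  # False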
|
tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py
|
GuoSuiming/mindspore
| 3,200 |
68072
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
from mindspore.train.model import Model
context.set_context(device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.tanh_grad = G.TanhGrad()
def construct(self, y, dy):
return self.tanh_grad(y, dy)
input_shape = [1]
input_np = np.random.randn(*input_shape).astype(np.float32)
input_me = Tensor(input_np)
def test_net():
context.set_context(mode=context.GRAPH_MODE)
tanh_grad = Net()
tanh_grad.set_train()
m = Model(tanh_grad)
out = m.predict(input_me, input_me)
print("out_me.dtype={}".format(out.dtype))
print("out_me.asnumpy={}".format(out.asnumpy()))
return out.asnumpy()
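
# Hedged usage note (not part of the original test): this file is normally collected by pytest
# on a host with an Ascend device available, e.g. `pytest test_tanh_grad.py`; test_net() can
# also be called directly once the Ascend context set above is valid.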
|
cloudiscovery/provider/aws/limit/resource/all.py
|
MalarvizhiK/cloudiscovery
| 429 |
68073
|
from concurrent.futures.thread import ThreadPoolExecutor
from typing import List
from provider.aws.common_aws import get_paginator
from provider.aws.limit.command import LimitOptions
from provider.aws.limit.data.allowed_resources import (
ALLOWED_SERVICES_CODES,
FILTER_EC2_BIGFAMILY,
SPECIAL_RESOURCES,
)
from shared.common import (
ResourceProvider,
Resource,
ResourceDigest,
message_handler,
ResourceCache,
LimitsValues,
)
from shared.error_handler import exception
SERVICEQUOTA_TO_BOTO3 = {
"elasticloadbalancing": "elbv2",
"elasticfilesystem": "efs",
"vpc": "ec2",
"codeguru-profiler": "codeguruprofiler",
"AWSCloudMap": "servicediscovery",
"ebs": "ec2",
}
MAX_EXECUTION_PARALLEL = 2
class LimitResources(ResourceProvider):
def __init__(self, options: LimitOptions):
"""
All resources
:param options:
"""
super().__init__()
self.options = options
self.cache = ResourceCache()
@exception
# pylint: disable=too-many-locals
def get_resources(self) -> List[Resource]:
threshold_requested = (
0 if self.options.threshold is None else self.options.threshold
)
client_quota = self.options.client("service-quotas")
resources_found = []
services = self.options.services
with ThreadPoolExecutor(MAX_EXECUTION_PARALLEL) as executor:
results = executor.map(
lambda service_name: self.analyze_service(
service_name=service_name,
client_quota=client_quota,
threshold_requested=int(threshold_requested),
),
services,
)
for result in results:
if result is not None:
resources_found.extend(result)
return resources_found
@exception
def analyze_service(self, service_name, client_quota, threshold_requested):
if service_name in SPECIAL_RESOURCES:
return []
cache_key = "aws_limits_" + service_name + "_" + self.options.region_name
cache = self.cache.get_key(cache_key)
resources_found = []
if service_name not in cache:
return []
"""
        Services that must be enabled in your account. Those services will fail if you don't enable them:
Fraud Detector: https://pages.awscloud.com/amazon-fraud-detector-preview.html#
AWS Organizations: https://console.aws.amazon.com/organizations/
"""
if service_name in ("frauddetector", "organizations"):
message_handler(
"Attention: Service "
+ service_name
+ " must be enabled to use API calls.",
"WARNING",
)
for data_quota_code in cache[service_name]:
if data_quota_code is None:
continue
resource_found = self.analyze_quota(
client_quota=client_quota,
data_quota_code=data_quota_code,
service=service_name,
threshold_requested=threshold_requested,
)
if resource_found is not None:
resources_found.append(resource_found)
return resources_found
@exception
# pylint: disable=too-many-locals,too-many-statements
def analyze_quota(
self, client_quota, data_quota_code, service, threshold_requested
):
resource_found = None
quota_data = ALLOWED_SERVICES_CODES[service][data_quota_code["quota_code"]]
value_aws = value = data_quota_code["value"]
        # Quota is adjustable by ticket request, so we must override these values.
if bool(data_quota_code["adjustable"]) is True:
try:
response_quota = client_quota.get_service_quota(
ServiceCode=service, QuotaCode=data_quota_code["quota_code"]
)
if "Value" in response_quota["Quota"]:
value = response_quota["Quota"]["Value"]
else:
value = data_quota_code["value"]
except client_quota.exceptions.NoSuchResourceException:
value = data_quota_code["value"]
if self.options.verbose:
message_handler(
"Collecting data from Quota: "
+ service
+ " - "
+ data_quota_code["quota_name"]
+ "...",
"HEADER",
)
# Need to convert some quota-services endpoint
if service in SERVICEQUOTA_TO_BOTO3:
service = SERVICEQUOTA_TO_BOTO3.get(service)
"""
        AWS Network Manager is a global service and only allows region us-west-2 instead of us-east-1
Reference https://docs.aws.amazon.com/networkmanager/latest/APIReference/Welcome.html
TODO: If we detect more resources like that, convert it into a dict
"""
if service == "networkmanager":
region_boto3 = "us-west-2"
else:
region_boto3 = self.options.region_name
client = self.options.session.client(service, region_name=region_boto3)
usage = 0
# Check filters by resource
if "filter" in quota_data:
filters = quota_data["filter"]
else:
filters = None
pages = get_paginator(
client=client,
operation_name=quota_data["method"],
resource_type="aws_limit",
filters=filters,
)
if not pages:
if filters:
response = getattr(client, quota_data["method"])(**filters)
else:
response = getattr(client, quota_data["method"])()
            # If the fields element is not empty, sum the values instead of using the list length
if quota_data["fields"]:
for item in response[quota_data["method"]]:
usage = usage + item[quota_data["fields"]]
else:
usage = len(response[quota_data["key"]])
else:
for page in pages:
if quota_data["fields"]:
if len(page[quota_data["key"]]) > 0:
usage = usage + page[quota_data["key"]][0][quota_data["fields"]]
else:
usage = usage + len(page[quota_data["key"]])
# Value for division
if "divisor" in quota_data:
usage = usage / quota_data["divisor"]
"""
        Hack to work around the boto3 limit of 200 items per filter.
Quota L-1216C47A needs more than 200 items. Not happy with this code
TODO: Refactor this piece of terrible code.
"""
if data_quota_code["quota_code"] == "L-1216C47A":
filters = FILTER_EC2_BIGFAMILY["filter"]
pages = get_paginator(
client=client,
operation_name=quota_data["method"],
resource_type="aws_limit",
filters=filters,
)
if not pages:
response = getattr(client, quota_data["method"])(**filters)
usage = len(response[quota_data["key"]])
else:
for page in pages:
usage = usage + len(page[quota_data["key"]])
try:
percent = round((usage / value) * 100, 2)
except ZeroDivisionError:
percent = 0
if percent >= threshold_requested:
resource_found = Resource(
digest=ResourceDigest(
id=data_quota_code["quota_code"], type="aws_limit"
),
name="",
group="",
limits=LimitsValues(
quota_name=data_quota_code["quota_name"],
quota_code=data_quota_code["quota_code"],
aws_limit=int(value_aws),
local_limit=int(value),
usage=int(usage),
service=service,
percent=percent,
),
)
return resource_found
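
# --- Hedged usage sketch (comment only; not part of the original module) ---
# Running the limit check needs real AWS credentials and the project's LimitOptions object,
# so this is illustrative pseudocode rather than runnable code:
#
#   options = LimitOptions(...)                # hypothetical construction
#   provider = LimitResources(options)
#   for resource in provider.get_resources():
#       limits = resource.limits
#       print(limits.service, limits.quota_name, limits.usage, "/", limits.local_limit)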
|
nemo/collections/asr/parts/utils/nmesc_clustering.py
|
mlgill/NeMo
| 4,145 |
68074
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2007-2020 The scikit-learn developers.
# BSD 3-Clause License
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is part of https://github.com/scikit-learn/scikit-learn/blob/114616d9f6ce9eba7c1aacd3d4a254f868010e25/sklearn/manifold/_spectral_embedding.py and
# https://github.com/tango4j/Auto-Tuning-Spectral-Clustering.
from collections import Counter
import numpy as np
import torch
from sklearn.cluster._kmeans import k_means
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from nemo.utils import logging
from nemo.utils.decorators.experimental import experimental
scaler = MinMaxScaler(feature_range=(0, 1))
try:
from torch.linalg import eigh as eigh
TORCH_EIGN = True
except ImportError:
TORCH_EIGN = False
from scipy.linalg import eigh as eigh
logging.warning("Using eigen decomposition from scipy, upgrade torch to 1.9 or higher for faster clustering")
def isGraphFullyConnected(affinity_mat):
return getTheLargestComponent(affinity_mat, 0).sum() == affinity_mat.shape[0]
def getTheLargestComponent(affinity_mat, seg_index):
"""
Find the largest affinity_mat connected components for each given node.
This is for checking whether the affinity_mat is fully connected.
"""
num_of_segments = affinity_mat.shape[0]
connected_nodes = np.zeros(num_of_segments).astype(np.bool)
nodes_to_explore = np.zeros(num_of_segments).astype(np.bool)
nodes_to_explore[seg_index] = True
for k in range(num_of_segments):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
neighbors = affinity_mat[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def getKneighborsConnections(affinity_mat, p_value):
"""
Binarize top-p values for each row from the given affinity matrix.
"""
binarized_affinity_mat = np.zeros_like(affinity_mat)
for i, line in enumerate(affinity_mat):
sorted_idx = np.argsort(line)
sorted_idx = sorted_idx[::-1]
indices = sorted_idx[:p_value]
binarized_affinity_mat[indices, i] = 1
return binarized_affinity_mat
def getAffinityGraphMat(affinity_mat_raw, p_value):
"""
Calculate a binarized graph matrix and
symmetrize the binarized graph matrix.
"""
X = getKneighborsConnections(affinity_mat_raw, p_value)
symm_affinity_mat = 0.5 * (X + X.T)
return symm_affinity_mat
def getMinimumConnection(mat, max_N, n_list):
"""
Generate connections until fully connect all the nodes in the graph.
    If the graph is not fully connected, it might generate inaccurate results.
"""
p_value = 1
affinity_mat = getAffinityGraphMat(mat, p_value)
for i, p_value in enumerate(n_list):
fully_connected = isGraphFullyConnected(affinity_mat)
affinity_mat = getAffinityGraphMat(mat, p_value)
if fully_connected or p_value > max_N:
break
return affinity_mat, p_value
def getRepeatedList(mapping_argmat, score_mat_size):
"""
Count the numbers in the mapping dictionary and create lists that contain
repeated indices to be used for creating the repeated affinity matrix for
fusing the affinity values.
"""
count_dict = dict(Counter(mapping_argmat))
repeat_list = []
for k in range(score_mat_size):
if k in count_dict:
repeat_list.append(count_dict[k])
else:
repeat_list.append(0)
return repeat_list
@experimental
def get_argmin_mat(uniq_scale_dict):
"""
Calculate the mapping between the base scale and other scales. A segment from a longer scale is
repeatedly mapped to a segment from a shorter scale or the base scale.
Args:
uniq_scale_dict (dict) :
Dictionary of embeddings and timestamps for each scale.
Returns:
session_scale_mapping_dict (dict) :
Dictionary containing argmin arrays indexed by scale index.
"""
scale_list = sorted(list(uniq_scale_dict.keys()))
segment_anchor_dict = {}
for scale_idx in scale_list:
time_stamp_list = uniq_scale_dict[scale_idx]['time_stamps']
time_stamps_float = np.array([[float(x.split()[0]), float(x.split()[1])] for x in time_stamp_list])
segment_anchor_dict[scale_idx] = np.mean(time_stamps_float, axis=1)
base_scale_idx = max(scale_list)
base_scale_anchor = segment_anchor_dict[base_scale_idx]
session_scale_mapping_dict = {}
for scale_idx in scale_list:
curr_scale_anchor = segment_anchor_dict[scale_idx]
curr_mat = np.tile(curr_scale_anchor, (base_scale_anchor.shape[0], 1))
base_mat = np.tile(base_scale_anchor, (curr_scale_anchor.shape[0], 1)).T
argmin_mat = np.argmin(np.abs(curr_mat - base_mat), axis=1)
session_scale_mapping_dict[scale_idx] = argmin_mat
return session_scale_mapping_dict
@experimental
def getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps):
"""
Calculate cosine similarity values among speaker embeddings for each scale then
apply multiscale weights to calculate the fused similarity matrix.
Args:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
Returns:
fused_sim_d (np.array):
            This function generates an affinity matrix that is obtained by calculating
the weighted sum of the affinity matrices from the different scales.
base_scale_emb (np.array):
The base scale embedding (the embeddings from the finest scale)
"""
uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
base_scale_idx = max(uniq_scale_dict.keys())
base_scale_emb = np.array(uniq_scale_dict[base_scale_idx]['embeddings'])
multiscale_weights = uniq_embs_and_timestamps['multiscale_weights']
score_mat_list, repeated_mat_list = [], []
session_scale_mapping_dict = get_argmin_mat(uniq_scale_dict)
for scale_idx in sorted(uniq_scale_dict.keys()):
mapping_argmat = session_scale_mapping_dict[scale_idx]
score_mat = getCosAffinityMatrix(uniq_scale_dict[scale_idx]['embeddings'])
score_mat_list.append(score_mat)
repeat_list = getRepeatedList(mapping_argmat, score_mat.shape[0])
repeated_mat = np.repeat(np.repeat(score_mat, repeat_list, axis=0), repeat_list, axis=1)
repeated_mat_list.append(repeated_mat)
fused_sim_d = np.average(np.array(repeated_mat_list), weights=multiscale_weights, axis=0)
return fused_sim_d, base_scale_emb
def addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma):
"""
Add randomly generated synthetic embeddings to make eigen analysis more stable.
We refer to these embeddings as anchor embeddings.
emb (np.array):
        The input embedding from the embedding extractor.
anchor_sample_n (int):
The number of embedding samples per speaker.
anchor_sample_n = 10 is recommended.
anchor_spk_n (int):
The number of speakers for synthetic embedding.
anchor_spk_n = 3 is recommended.
sigma (int):
The amplitude of synthetic noise for each embedding vector.
If sigma value is too small, under-counting could happen.
If sigma value is too large, over-counting could happen.
sigma = 50 is recommended.
"""
emb_dim = emb.shape[1]
std_org = np.std(emb, axis=0)
new_emb_list = []
for _ in range(anchor_spk_n):
emb_m = np.tile(np.random.randn(1, emb_dim), (anchor_sample_n, 1))
emb_noise = np.random.randn(anchor_sample_n, emb_dim).T
emb_noise = np.dot(np.diag(std_org), emb_noise / np.max(np.abs(emb_noise))).T
emb_gen = emb_m + sigma * emb_noise
new_emb_list.append(emb_gen)
new_emb_list.append(emb)
new_emb_np = np.vstack(new_emb_list)
return new_emb_np
def getEnhancedSpeakerCount(emb, cuda, random_test_count=5, anchor_spk_n=3, anchor_sample_n=10, sigma=50):
"""
Calculate the number of speakers using NME analysis with anchor embeddings.
"""
est_num_of_spk_list = []
for seed in range(random_test_count):
np.random.seed(seed)
emb_aug = addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma)
mat = getCosAffinityMatrix(emb_aug)
nmesc = NMESC(
mat,
max_num_speaker=emb.shape[0],
max_rp_threshold=0.25,
sparse_search=True,
sparse_search_volume=30,
fixed_thres=None,
NME_mat_size=300,
cuda=cuda,
)
est_num_of_spk, _ = nmesc.NMEanalysis()
est_num_of_spk_list.append(est_num_of_spk)
ctt = Counter(est_num_of_spk_list)
oracle_num_speakers = max(ctt.most_common(1)[0][0] - anchor_spk_n, 1)
return oracle_num_speakers
def getCosAffinityMatrix(emb):
"""
Calculate cosine similarity values among speaker embeddings.
"""
sim_d = cosine_similarity(emb)
scaler.fit(sim_d)
sim_d = scaler.transform(sim_d)
return sim_d
def getLaplacian(X):
"""
Calculate a laplacian matrix from an affinity matrix X.
"""
X[np.diag_indices(X.shape[0])] = 0
A = X
D = np.sum(np.abs(A), axis=1)
D = np.diag(D)
L = D - A
return L
def eigDecompose(laplacian, cuda, device=None):
if TORCH_EIGN:
if cuda:
if device is None:
device = torch.cuda.current_device()
laplacian = torch.from_numpy(laplacian).float().to(device)
else:
laplacian = torch.from_numpy(laplacian).float()
lambdas, diffusion_map = eigh(laplacian)
lambdas = lambdas.cpu().numpy()
diffusion_map = diffusion_map.cpu().numpy()
else:
lambdas, diffusion_map = eigh(laplacian)
return lambdas, diffusion_map
def getLamdaGaplist(lambdas):
lambdas = np.real(lambdas)
return list(lambdas[1:] - lambdas[:-1])
def estimateNumofSpeakers(affinity_mat, max_num_speaker, is_cuda=False):
"""
    Estimate the number of speakers using eigen decomposition on the Laplacian matrix.
affinity_mat: (array)
        NxN affinity matrix
max_num_speaker: (int)
Maximum number of clusters to consider for each session
is_cuda: (bool)
        If CUDA is available, the eigen decomposition is computed on the GPU
"""
laplacian = getLaplacian(affinity_mat)
lambdas, _ = eigDecompose(laplacian, is_cuda)
lambdas = np.sort(lambdas)
lambda_gap_list = getLamdaGaplist(lambdas)
num_of_spk = np.argmax(lambda_gap_list[: min(max_num_speaker, len(lambda_gap_list))]) + 1
return num_of_spk, lambdas, lambda_gap_list
class _SpectralClustering:
def __init__(self, n_clusters=8, random_state=0, n_init=10, p_value=10, n_jobs=None, cuda=False):
self.n_clusters = n_clusters
self.random_state = random_state
self.n_init = n_init
self.p_value = p_value
self.affinity_matrix_ = None
self.cuda = cuda
def predict(self, X):
if X.shape[0] != X.shape[1]:
raise ValueError("The affinity matrix is not a square matrix.")
self.affinity_matrix_ = X
labels = self.clusterSpectralEmbeddings(self.affinity_matrix_, n_init=self.n_init, cuda=self.cuda)
return labels
def clusterSpectralEmbeddings(self, affinity, n_init=10, cuda=False):
spectral_emb = self.getSpectralEmbeddings(affinity, n_spks=self.n_clusters, drop_first=False, cuda=cuda)
_, labels, _ = k_means(spectral_emb, self.n_clusters, random_state=self.random_state, n_init=n_init)
return labels
def getSpectralEmbeddings(self, affinity_mat, n_spks=8, drop_first=True, cuda=False):
if not isGraphFullyConnected(affinity_mat):
logging.warning("Graph is not fully connected and the clustering result might not be accurate.")
laplacian = getLaplacian(affinity_mat)
lambdas_, diffusion_map_ = eigDecompose(laplacian, cuda)
diffusion_map = diffusion_map_[:, :n_spks]
embedding = diffusion_map.T[n_spks::-1]
return embedding[:n_spks].T
class NMESC:
"""
Normalized Maximum Eigengap based Spectral Clustering (NME-SC)
uses Eigengap analysis to get an estimated p-value for
affinity binarization and an estimated number of speakers.
    p_value (also referred to as p_neighbors) determines how many of the top
    affinity values per row are converted to 1, while the rest of the values
    are converted to 0.
p_value can be also tuned on a development set without performing
NME-analysis.
Reference: Auto-Tuning Spectral Clustering for Speaker Diarization
Using Normalized Maximum Eigengap (https://arxiv.org/abs/2003.02405)
Parameters:
Please refer to def __init__()
Methods:
NMEanalysis():
Performs NME-analysis to estimate p_value and the number of speakers.
subsampleAffinityMat(NME_mat_size):
Subsamples the number of speakers to reduce the computational load.
getPvalueList():
            Generates a list containing the p-values that need to be examined.
getEigRatio(p_neighbors):
            Calculates g_p, which is a ratio between p_neighbors and the maximum eigengap.
        getLamdaGaplist(lambdas):
            Calculates lambda gap values from an array containing lambda values.
estimateNumofSpeakers(affinity_mat):
Estimates the number of speakers using lambda gap list.
"""
def __init__(
self,
mat,
max_num_speaker=10,
max_rp_threshold=0.250,
sparse_search=True,
sparse_search_volume=30,
use_subsampling_for_NME=True,
fixed_thres=None,
cuda=False,
NME_mat_size=512,
):
"""
Parameters:
mat: (numpy.array)
Cosine similarity matrix calculated from speaker embeddings.
max_num_speaker: (int)
Maximum number of speakers for estimating number of speakers.
Shows stable performance under 20.
max_rp_threshold: (float)
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.25.
sparse_search: (bool)
To increase the speed of parameter estimation, sparse_search=True
limits the number of p_values we search.
sparse_search_volume: (int)
The number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
                Values lower than 20 might cause poor parameter estimation.
use_subsampling_for_NME: (bool)
                Use subsampling to reduce the computational complexity.
Default is True.
fixed_thres: (float or None)
                A fixed threshold can be used instead of estimating the
                threshold with NME analysis. If fixed_thres is a float,
                the NME analysis part is skipped.
cuda: (bool)
Use cuda for Eigen decomposition if cuda=True.
NME_mat_size: (int)
Targeted size of matrix for NME analysis.
"""
self.max_num_speaker = max_num_speaker
self.max_rp_threshold = max_rp_threshold
self.use_subsampling_for_NME = use_subsampling_for_NME
self.NME_mat_size = NME_mat_size
self.sparse_search = sparse_search
self.sparse_search_volume = sparse_search_volume
self.fixed_thres = fixed_thres
self.cuda = cuda
self.eps = 1e-10
self.max_N = None
self.mat = mat
self.p_value_list = []
def NMEanalysis(self):
"""
        Perform the NME analysis: optionally subsample the affinity matrix, then scan p-values
        to find the one with the smallest g_p value and the corresponding speaker-count estimate.
"""
if self.use_subsampling_for_NME:
subsample_ratio = self.subsampleAffinityMat(self.NME_mat_size)
# Scans p_values and find a p_value that generates
# the smallest g_p value.
eig_ratio_list, est_spk_n_dict = [], {}
self.p_value_list = self.getPvalueList()
for p_value in self.p_value_list:
est_num_of_spk, g_p = self.getEigRatio(p_value)
est_spk_n_dict[p_value] = est_num_of_spk
eig_ratio_list.append(g_p)
index_nn = np.argmin(eig_ratio_list)
rp_p_value = self.p_value_list[index_nn]
affinity_mat = getAffinityGraphMat(self.mat, rp_p_value)
# Checks whether affinity graph is fully connected.
# If not, it adds minimum number of connections to make it fully connected.
if not isGraphFullyConnected(affinity_mat):
affinity_mat, rp_p_value = getMinimumConnection(self.mat, self.max_N, self.p_value_list)
p_hat_value = int(subsample_ratio * rp_p_value)
est_num_of_spk = est_spk_n_dict[rp_p_value]
return est_num_of_spk, p_hat_value
def subsampleAffinityMat(self, NME_mat_size):
"""
Perform Subsampling of affinity matrix.
        This subsampling is done to reduce the computational complexity, not to improve performance.
The smaller NME_mat_size is,
- the bigger the chance of missing a speaker.
- the faster p-value estimation speed (based on eigen decomposition).
Recommended NME_mat_size is 250~750.
        However, if there are speakers who speak for a very short period of time in the recording,
this subsampling might make the system miss the underrepresented speaker.
Use this with caution.
Parameters:
NME_mat_size: (int)
Targeted matrix size
Returns:
subsample_ratio : (float)
The ratio between NME_mat_size and the original matrix size
"""
subsample_ratio = int(max(1, self.mat.shape[0] / NME_mat_size))
self.mat = self.mat[::subsample_ratio, ::subsample_ratio]
return subsample_ratio
def getEigRatio(self, p_neighbors):
"""
For a given p_neighbors value,
calculates g_p, which is a ratio
between p_neighbors and the maximum eigengap.
For more details: https://arxiv.org/abs/2003.02405
Parameters:
p_neighbors: (int)
Determines how many binary graph connections we want to keep for each row.
Returns:
est_num_of_spk: (int)
Estimated number of speakers
g_p: (float)
The ratio between p_neighbors value and the maximum eigen gap value.
"""
affinity_mat = getAffinityGraphMat(self.mat, p_neighbors)
est_num_of_spk, lambdas, lambda_gap_list = estimateNumofSpeakers(affinity_mat, self.max_num_speaker, self.cuda)
arg_sorted_idx = np.argsort(lambda_gap_list[: self.max_num_speaker])[::-1]
max_key = arg_sorted_idx[0]
max_eig_gap = lambda_gap_list[max_key] / (max(lambdas) + self.eps)
g_p = (p_neighbors / self.mat.shape[0]) / (max_eig_gap + self.eps)
return est_num_of_spk, g_p
def getPvalueList(self):
"""
Generates a p-value (p_neighbour) list for searching.
"""
if self.fixed_thres:
p_value_list = [int(self.mat.shape[0] * self.fixed_thres)]
self.max_N = p_value_list[0]
else:
self.max_N = int(self.mat.shape[0] * self.max_rp_threshold)
if self.sparse_search:
N = min(self.max_N, self.sparse_search_volume)
p_value_list = list(np.linspace(1, self.max_N, N, endpoint=True).astype(int))
else:
p_value_list = list(range(1, self.max_N))
return p_value_list
def COSclustering(
uniq_embs_and_timestamps=None,
oracle_num_speakers=None,
max_num_speaker=8,
min_samples_for_NMESC=6,
enhanced_count_thres=80,
max_rp_threshold=0.25,
sparse_search_volume=30,
fixed_thres=None,
cuda=False,
):
"""
Clustering method for speaker diarization based on cosine similarity.
Parameters:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
        oracle_num_speakers: (int or None)
Oracle number of speakers if known else None
max_num_speaker: (int)
Maximum number of clusters to consider for each session
min_samples_for_NMESC: (int)
            Minimum number of samples required for NME clustering; this avoids
zero p_neighbour_lists. If the input has fewer segments than min_samples,
it is directed to the enhanced speaker counting mode.
enhanced_count_thres: (int)
            For short audio recordings (under 60 seconds), the clustering algorithm cannot
            accumulate enough speaker profile information for each cluster.
Thus, getEnhancedSpeakerCount() employs anchor embeddings (dummy representations)
to mitigate the effect of cluster sparsity.
enhanced_count_thres = 80 is recommended.
max_rp_threshold: (float)
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.25.
sparse_search_volume: (int)
The number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
            Values lower than 20 might cause poor parameter estimation.
fixed_thres: (float)
If fixed_thres value is provided, NME-analysis process will be skipped.
This value should be optimized on a development set to obtain a quality result.
Default is None and performs NME-analysis to estimate the threshold.
Returns:
Y: (List[int])
Speaker label for each segment.
"""
# Get base-scale embedding from uniq_embs_and_timestamps.
uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
emb = np.array(uniq_scale_dict[max(uniq_scale_dict.keys())]['embeddings'])
if emb.shape[0] == 1:
return np.array([0])
elif emb.shape[0] <= max(enhanced_count_thres, min_samples_for_NMESC) and oracle_num_speakers is None:
est_num_of_spk_enhanced = getEnhancedSpeakerCount(emb, cuda)
else:
est_num_of_spk_enhanced = None
if oracle_num_speakers:
max_num_speaker = oracle_num_speakers
mat, emb = getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps)
nmesc = NMESC(
mat,
max_num_speaker=max_num_speaker,
max_rp_threshold=max_rp_threshold,
sparse_search=True,
sparse_search_volume=sparse_search_volume,
fixed_thres=fixed_thres,
NME_mat_size=300,
cuda=cuda,
)
if emb.shape[0] > min_samples_for_NMESC:
est_num_of_spk, p_hat_value = nmesc.NMEanalysis()
affinity_mat = getAffinityGraphMat(mat, p_hat_value)
else:
affinity_mat = mat
if oracle_num_speakers:
est_num_of_spk = oracle_num_speakers
elif est_num_of_spk_enhanced:
est_num_of_spk = est_num_of_spk_enhanced
spectral_model = _SpectralClustering(n_clusters=est_num_of_spk, cuda=cuda)
Y = spectral_model.predict(affinity_mat)
return Y
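
# --- Hedged usage sketch (not part of the original module) ---
# Builds a minimal single-scale `uniq_embs_and_timestamps` dictionary with synthetic
# embeddings and runs COSclustering with an oracle speaker count. This only illustrates the
# expected input structure; it assumes an environment where the imports above work
# (including a numpy version that still provides np.bool, which getTheLargestComponent uses).
if __name__ == "__main__":
    np.random.seed(0)
    n_segments, emb_dim = 40, 16
    synthetic_embs = np.random.randn(n_segments, emb_dim)
    synthetic_stamps = ["{:.2f} {:.2f}".format(i * 0.5, i * 0.5 + 1.5) for i in range(n_segments)]
    demo_input = {
        'multiscale_weights': [1.0],
        'scale_dict': {0: {'embeddings': synthetic_embs, 'time_stamps': synthetic_stamps}},
    }
    demo_labels = COSclustering(demo_input, oracle_num_speakers=2, cuda=False)
    print(demo_labels)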
|
circus/stats/__init__.py
|
BradleyKirton/circus
| 820 |
68078
|
<gh_stars>100-1000
"""
Stats architecture:
* streamer.StatsStreamer listens to circusd events and maintain a list of pids
* collector.StatsCollector runs a pool of threads that compute stats for each
pid in the list. Each stat is pushed in a queue
* publisher.StatsPublisher continuously pushes those stats in a zmq PUB socket
* client.StatsClient is a simple subscriber that can be used to intercept the
stream of stats.
"""
import sys
import signal
import argparse
from circus.stats.streamer import StatsStreamer
from circus.util import configure_logger
from circus.sighandler import SysHandler
from circus import logger
from circus import util
from circus import __version__
def main():
desc = 'Runs the stats aggregator for Circus'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--endpoint',
help='The circusd ZeroMQ socket to connect to',
default=util.DEFAULT_ENDPOINT_DEALER)
parser.add_argument('--pubsub',
help='The circusd ZeroMQ pub/sub socket to connect to',
default=util.DEFAULT_ENDPOINT_SUB)
parser.add_argument('--statspoint',
help='The ZeroMQ pub/sub socket to send data to',
default=util.DEFAULT_ENDPOINT_STATS)
parser.add_argument('--log-level', dest='loglevel', default='info',
help="log level")
parser.add_argument('--log-output', dest='logoutput', default='-',
help="log output")
parser.add_argument('--version', action='store_true',
default=False,
help='Displays Circus version and exits.')
parser.add_argument('--ssh', default=None, help='SSH Server')
args = parser.parse_args()
if args.version:
print(__version__)
sys.exit(0)
# configure the logger
configure_logger(logger, args.loglevel, args.logoutput)
stats = StatsStreamer(args.endpoint, args.pubsub, args.statspoint,
args.ssh)
# Register some sighandlers to stop the loop when killed
for sig in SysHandler.SIGNALS:
signal.signal(
sig, lambda *_: stats.loop.add_callback_from_signal(stats.stop)
)
try:
stats.start()
finally:
stats.stop()
sys.exit(0)
if __name__ == '__main__':
main()
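
# Hedged usage note (not part of the original module): running this module directly starts the
# stats streamer against the default circusd endpoints; options such as `--log-level debug` or
# `--endpoint <circusd dealer endpoint>` (see the argparse arguments in main() above) override them.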
|
pymetamap/SubprocessBackendLite.py
|
liquet-ai/pymetamap
| 151 |
68082
|
<filename>pymetamap/SubprocessBackendLite.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
from .MetaMapLite import MetaMapLite
from .ConceptLite import CorpusLite
class SubprocessBackendLite(MetaMapLite):
def __init__(self, metamap_home):
""" Interface to MetaMap using subprocess. This creates a
command line call to a specified metamap process.
"""
MetaMapLite.__init__(self, metamap_home=metamap_home)
def extract_concepts(self, sentences=None, ids=None, filename=None,
restrict_to_sts=None, restrict_to_sources=None):
""" extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMapLite.
Supported Options:
Restrict to Semantic Types --restrict_to_sts
Restrict to Sources --restrict_to_sources
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
"""
if (sentences is not None and filename is not None) or \
(sentences is None and filename is None):
raise ValueError("You must either pass a list of sentences "
"OR a filename.")
input_file = None
if sentences is not None:
input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix='.mmi')
else:
input_file = open(filename, 'r')
# Unlike MetaMap, MetaMapLite does not take an output filename as a parameter.
        # It creates a new output file at the same location as "input_file", with the default file extension ".mmi".
# output_file = tempfile.NamedTemporaryFile(mode="r", delete=False)
        output_file_name = None
        output = ''
        error = None
try:
if sentences is not None:
if ids is not None:
for identifier, sentence in zip(ids, sentences):
input_file.write('{0!r}|{1}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
input_file.close()
command = ["bash", os.path.join(self.metamap_home, "metamaplite.sh")]
if restrict_to_sts:
if isinstance(restrict_to_sts, str):
restrict_to_sts = [restrict_to_sts]
if len(restrict_to_sts) > 0:
command.append('--restrict_to_sts={}'.format(str(','.join(restrict_to_sts))))
#command.append(str(','.join(restrict_to_sts)))
if restrict_to_sources:
if isinstance(restrict_to_sources, str):
restrict_to_sources = [restrict_to_sources]
if len(restrict_to_sources) > 0:
command.append('--restrict_to_sources')
command.append(str(','.join(restrict_to_sources)))
if ids is not None:
command.append('--inputformat=sldiwi')
command.append(input_file.name)
command.append('--overwrite')
#command.append('--indexdir={}data/ivf/2020AA/USAbase'.format(self.metamap_home))
#command.append('--specialtermsfile={}data/specialterms.txt'.format(self.metamap_home))
# command.append(output_file.name)
output_file_name, file_extension = os.path.splitext(input_file.name)
output_file_name += "." + "mmi"
# output = str(output_file.read())
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while metamap_process.poll() is None:
stdout = str(metamap_process.stdout.readline())
if 'ERROR' in stdout:
metamap_process.terminate()
error = stdout.rstrip()
# print("input file name: {0}".format(input_file.name))
output_file_name, file_extension = os.path.splitext(input_file.name)
output_file_name += "." + "mmi"
# print("output_file_name: {0}".format(output_file_name))
with open(output_file_name) as fd:
output = fd.read()
# output = str(output_file.read())
# print("output: {0}".format(output))
        except Exception as exc:
            # Keep whatever was processed so far and surface the error to the caller.
            if error is None:
                error = str(exc)
concepts = CorpusLite.load(output.splitlines())
return concepts, error
#finally:
# if sentences is not None:
# os.remove(input_file.name)
# else:
# input_file.close()
# # os.remove(output_file.name)
# #os.remove(output_file_name)
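
# --- Hedged usage sketch (comment only; not part of the original module) ---
# Requires a local MetaMapLite installation; the path below is hypothetical.
#
#   from pymetamap.SubprocessBackendLite import SubprocessBackendLite
#   mm = SubprocessBackendLite('/opt/public_mm_lite/')
#   concepts, error = mm.extract_concepts(
#       sentences=['Heart attack and type 2 diabetes.'], ids=['doc1'])
#   print(error or concepts)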
|
stonesoup/functions/orbital.py
|
Red-Portal/Stone-Soup-1
| 157 |
68084
|
# -*- coding: utf-8 -*-
"""
Orbital functions
-----------------
Functions used within multiple orbital classes in Stone Soup
"""
import numpy as np
from . import dotproduct
from ..types.array import StateVector
def stumpff_s(z):
r"""The Stumpff S function
.. math::
        S(z) = \begin{cases}\frac{\sqrt{z} - \sin{\sqrt{z}}}{(\sqrt{z})^{3}}, & (z > 0)\\
        \frac{\sinh(\sqrt{-z}) - \sqrt{-z}}{(\sqrt{-z})^{3}}, & (z < 0) \\
        \frac{1}{6}, & (z = 0)\end{cases}
Parameters
----------
z : float
input parameter, :math:`z`
Returns
-------
: float
Output value, :math:`S(z)`
"""
if z > 0:
sqz = np.sqrt(z)
return (sqz - np.sin(sqz)) / sqz ** 3
elif z < 0:
sqz = np.sqrt(-z)
return (np.sinh(sqz) - sqz) / sqz ** 3
else: # which means z== 0:
return 1 / 6
def stumpff_c(z):
r"""The Stumpff C function
.. math::
        C(z) = \begin{cases}\frac{1 - \cos{\sqrt{z}}}{z}, & (z > 0)\\
        \frac{\cosh{\sqrt{-z}} - 1}{-z}, & (z < 0) \\
        \frac{1}{2}, & (z = 0)\end{cases}
Parameters
----------
z : float
input parameter, :math:`z`
Returns
-------
: float
Output value, :math:`C(z)`
"""
if z > 0:
sqz = np.sqrt(z)
return (1 - np.cos(sqz)) / sqz ** 2
elif z < 0:
sqz = np.sqrt(-z)
return (np.cosh(sqz) - 1) / sqz ** 2
else: # which means z == 0:
return 1 / 2
def universal_anomaly_newton(o_state_vector, delta_t,
grav_parameter=3.986004418e14, precision=1e-8, max_iterations=1e5):
r"""Calculate the universal anomaly via Newton's method. Algorithm 3.3 in [1]_.
Parameters
----------
o_state_vector : :class:`~StateVector`
The orbital state vector formed as
:math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]^T`
delta_t : timedelta
The time interval over which to estimate the universal anomaly
grav_parameter : float, optional
The universal gravitational parameter. Defaults to that of the
Earth, :math:`3.986004418 \times 10^{14} \ \mathrm{m}^{3} \
\mathrm{s}^{-2}`
precision : float, optional
For Newton's method, the difference between new and old estimates of the universal anomaly
below which the iteration stops and the answer is returned, (default = 1e-8)
max_iterations : float, optional
Maximum number of iterations allowed in while loop (default = 1e5)
Returns
-------
: float
The universal anomaly, :math:`\chi`
References
----------
.. [1] <NAME>. 2010, Orbital Mechanics for Engineering Students, 3rd Ed., Elsevier
"""
# For convenience
mag_r_0 = np.sqrt(dotproduct(o_state_vector[0:3], o_state_vector[0:3]))
mag_v_0 = np.sqrt(dotproduct(o_state_vector[3:6], o_state_vector[3:6]))
v_rad_0 = dotproduct(o_state_vector[3:6], o_state_vector[0:3])/mag_r_0
root_mu = np.sqrt(grav_parameter)
inv_sma = 2/mag_r_0 - (mag_v_0**2)/grav_parameter
# Initial estimate of Chi
chi_i = root_mu * np.abs(inv_sma) * delta_t.total_seconds()
ratio = 1
count = 0
# Do Newton's method
while np.abs(ratio) > precision and count <= max_iterations:
z_i = inv_sma * chi_i ** 2
f_chi_i = mag_r_0 * v_rad_0 / root_mu * chi_i ** 2 * \
stumpff_c(z_i) + (1 - inv_sma * mag_r_0) * chi_i ** 3 * \
stumpff_s(z_i) + mag_r_0 * chi_i - root_mu * \
delta_t.total_seconds()
fp_chi_i = mag_r_0 * v_rad_0 / root_mu * chi_i * \
(1 - inv_sma * chi_i ** 2 * stumpff_s(z_i)) + \
(1 - inv_sma * mag_r_0) * chi_i ** 2 * stumpff_c(z_i) + \
mag_r_0
ratio = f_chi_i / fp_chi_i
chi_i = chi_i - ratio
count += 1
return chi_i
def lagrange_coefficients_from_universal_anomaly(o_state_vector, delta_t,
grav_parameter=3.986004418e14,
precision=1e-8, max_iterations=1e5):
r""" Calculate the Lagrangian coefficients, f and g, and their time derivatives, by way of the
universal anomaly and the Stumpff functions [2]_.
Parameters
----------
o_state_vector : StateVector
The (Cartesian) orbital state vector,
:math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]^T`
delta_t : timedelta
The time interval over which to calculate
grav_parameter : float, optional
The universal gravitational parameter. Defaults to that of the
Earth, :math:`3.986004418 \times 10^{14} \ \mathrm{m}^{3} \
\mathrm{s}^{-2}`. Note that the units of time must be seconds.
precision : float, optional
Precision to which to calculate the :meth:`universal anomaly` (default = 1e-8). See the doc
section for that function
max_iterations : float, optional
Maximum number of iterations in determining universal anomaly (default = 1e5)
Returns
-------
: float, float, float, float
The Lagrange coefficients, :math:`f, g, \dot{f}, \dot{g}`, in that order.
References
----------
.. [2] <NAME>., <NAME>. 1996, Modern Astrodynamics: Fundamentals and Perturbation
Methods, Princeton University Press
"""
# First get the universal anomaly using Newton's method
chii = universal_anomaly_newton(o_state_vector, delta_t,
grav_parameter=grav_parameter,
precision=precision, max_iterations=max_iterations)
# Get the position and velocity vectors
bold_r_0 = o_state_vector[0:3]
bold_v_0 = o_state_vector[3:6]
# Calculate the magnitude of the position and velocity vectors
r_0 = np.sqrt(dotproduct(bold_r_0, bold_r_0))
v_0 = np.sqrt(dotproduct(bold_v_0, bold_v_0))
# For convenience
root_mu = np.sqrt(grav_parameter)
inv_sma = 2 / r_0 - (v_0 ** 2) / grav_parameter
z = inv_sma * chii ** 2
# Get the Lagrange coefficients using Stumpf
f = 1 - chii ** 2 / r_0 * stumpff_c(z)
g = delta_t.total_seconds() - 1 / root_mu * chii ** 3 * \
stumpff_s(z)
# Get the position vector and magnitude of that vector
bold_r = f * bold_r_0 + g * bold_v_0
r = np.sqrt(dotproduct(bold_r, bold_r))
# and the Lagrange (time) derivatives also using Stumpf
fdot = root_mu / (r * r_0) * (inv_sma * chii ** 3 * stumpff_s(z) - chii)
gdot = 1 - (chii ** 2 / r) * stumpff_c(z)
return f, g, fdot, gdot
def eccentric_anomaly_from_mean_anomaly(mean_anomaly, eccentricity,
precision=1e-8, max_iterations=1e5):
r"""Approximately solve the transcendental equation :math:`E - e sin E = M_e` for E. This is
an iterative process using Newton's method.
Parameters
----------
mean_anomaly : float
Current mean anomaly
eccentricity : float
Orbital eccentricity
precision : float, optional
Precision used for the stopping point in determining eccentric anomaly from mean anomaly,
(default = 1e-8)
max_iterations : float, optional
Maximum number of iterations for the while loop, (default = 1e5)
Returns
-------
: float
Eccentric anomaly of the orbit
"""
if mean_anomaly < np.pi:
ecc_anomaly = mean_anomaly + eccentricity / 2
else:
ecc_anomaly = mean_anomaly - eccentricity / 2
ratio = 1
count = 0
while np.abs(ratio) > precision and count <= max_iterations:
f = ecc_anomaly - eccentricity * np.sin(ecc_anomaly) - mean_anomaly
fp = 1 - eccentricity * np.cos(ecc_anomaly)
ratio = f / fp # Need to check conditioning
ecc_anomaly = ecc_anomaly - ratio
count += 1
return ecc_anomaly # Check whether this ever goes outside 0 < 2pi
def tru_anom_from_mean_anom(mean_anomaly, eccentricity, precision=1e-8, max_iterations=1e5):
r"""Get the true anomaly from the mean anomaly via the eccentric anomaly
Parameters
----------
mean_anomaly : float
The mean anomaly
eccentricity : float
Eccentricity
precision : float, optional
Precision used for the stopping point in determining eccentric anomaly from mean anomaly,
(default = 1e-8)
max_iterations : float, optional
Maximum number of iterations in determining eccentric anomaly, (default = 1e5)
Returns
-------
: float
True anomaly
"""
cos_ecc_anom = np.cos(eccentric_anomaly_from_mean_anomaly(
mean_anomaly, eccentricity, precision=precision, max_iterations=max_iterations))
sin_ecc_anom = np.sin(eccentric_anomaly_from_mean_anomaly(
mean_anomaly, eccentricity, precision=precision, max_iterations=max_iterations))
# This only works for M_e < \pi
# return np.arccos(np.clip((eccentricity - cos_ecc_anom) /
# (eccentricity*cos_ecc_anom - 1), -1, 1))
return np.remainder(np.arctan2(np.sqrt(1 - eccentricity**2) *
sin_ecc_anom,
cos_ecc_anom - eccentricity), 2*np.pi)
def perifocal_position(eccentricity, semimajor_axis, true_anomaly):
r"""The position vector in perifocal coordinates calculated from the Keplerian elements
Parameters
----------
eccentricity : float
Orbit eccentricity
semimajor_axis : float
Orbit semi-major axis
true_anomaly
Orbit true anomaly
Returns
-------
: numpy.array
:math:`[r_x, r_y, r_z]` position in perifocal coordinates
"""
# Cache some trigonometric functions
c_tran = np.cos(true_anomaly)
s_tran = np.sin(true_anomaly)
return semimajor_axis * (1 - eccentricity ** 2) / \
(1 + eccentricity * c_tran) * np.array([[c_tran], [s_tran],
[0]])
def perifocal_velocity(eccentricity, semimajor_axis, true_anomaly,
grav_parameter=3.986004418e14):
r"""The velocity vector in perifocal coordinates calculated from the Keplerian elements
Parameters
----------
eccentricity : float
Orbit eccentricity
semimajor_axis : float
Orbit semi-major axis
true_anomaly : float
Orbit true anomaly
grav_parameter : float, optional
Standard gravitational parameter :math:`\mu = G M`. Default is
:math:`3.986004418 \times 10^{14} \mathrm{m}^3 \mathrm{s}^{-2}`
Returns
-------
    : numpy.ndarray
:math:`[\dot{r}_x, \dot{r}_y, \dot{r}_z]` velocity in perifocal coordinates
"""
# Cache some trigonometric functions
c_tran = np.cos(true_anomaly)
s_tran = np.sin(true_anomaly)
return np.sqrt(grav_parameter / (semimajor_axis * (1 - eccentricity**2)))\
* np.array([[-s_tran], [eccentricity + c_tran], [0]])
def perifocal_to_geocentric_matrix(inclination, raan, argp):
r"""Return the matrix which transforms from perifocal to geocentric coordinates
Parameters
----------
inclination : float
Orbital inclination
raan : float
Orbit Right Ascension of the ascending node
argp : float
The orbit's argument of periapsis
Returns
-------
: numpy.array
The :math:`3 \times 3` array that transforms from perifocal coordinates to geocentric
coordinates
"""
# Cache some trig functions
s_incl = np.sin(inclination)
c_incl = np.cos(inclination)
s_raan = np.sin(raan)
c_raan = np.cos(raan)
s_aper = np.sin(argp)
c_aper = np.cos(argp)
# Build the matrix
return np.array([[-s_raan * c_incl * s_aper + c_raan * c_aper,
-s_raan * c_incl * c_aper - c_raan * s_aper,
s_raan * s_incl],
[c_raan * c_incl * s_aper + s_raan * c_aper,
c_raan * c_incl * c_aper - s_raan * s_aper,
-c_raan * s_incl],
[s_incl * s_aper, s_incl * c_aper, c_incl]])
def keplerian_to_rv(state_vector, grav_parameter=3.986004418e14):
r"""Convert the Keplerian orbital elements to position, velocity state vector
Parameters
----------
state_vector : :class:`~.StateVector`
The Keplerian orbital state vector is defined as
.. math::
X = [e, a, i, \Omega, \omega, \theta]^{T} \\
where:
:math:`e` is the orbital eccentricity (unitless),
:math:`a` the semi-major axis (m),
:math:`i` the inclination (rad),
:math:`\Omega` is the longitude of the ascending node (rad),
:math:`\omega` the argument of periapsis (rad), and
:math:`\theta` the true anomaly (rad)
grav_parameter : float, optional
Standard gravitational parameter :math:`\mu = G M`. The default is :math:`3.986004418
\times 10^{14} \mathrm{m}^3 \mathrm{s}^{-2}`
Returns
-------
: :class:`~.StateVector`
Orbital state vector as :math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]`
Warning
-------
No checking undertaken. Assumes Keplerian elements rendered correctly as above
"""
# Calculate the position vector in perifocal coordinates
rx = perifocal_position(state_vector[0], state_vector[1], state_vector[5])
# Calculate the velocity vector in perifocal coordinates
vx = perifocal_velocity(state_vector[0], state_vector[1], state_vector[5],
grav_parameter=grav_parameter)
# Transform position (perifocal) and velocity (perifocal)
# into geocentric
r = perifocal_to_geocentric_matrix(state_vector[2], state_vector[3], state_vector[4]) @ rx
v = perifocal_to_geocentric_matrix(state_vector[2], state_vector[3], state_vector[4]) @ vx
# And put them into the state vector
return StateVector(np.concatenate((r, v), axis=0))
def mod_inclination(x):
r"""Calculates the modulus of an inclination. Inclination angles are within the range :math:`0`
to :math:`\pi`.
Parameters
----------
x: float
inclination angle in radians
Returns
-------
float
Angle in radians in the range :math:`0` to :math:`+\pi`
"""
x = x % np.pi
return x
def mod_elongitude(x):
r"""Calculates the modulus of an ecliptic longitude in which angles are within the range
:math:`0` to :math:`2 \pi`.
Parameters
----------
x: float
longitudinal angle in radians
Returns
-------
float
Angle in radians in the range :math:`0` to :math:`+2 \pi`
"""
x = x % (2*np.pi)
return x
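
# --- Hedged usage sketch (comment only; this module uses package-relative imports, so the
# functions are shown as they would be used from the installed package) ---
#
#   from datetime import timedelta
#   from stonesoup.types.array import StateVector
#   from stonesoup.functions.orbital import keplerian_to_rv, universal_anomaly_newton
#
#   # [e, a (m), i (rad), raan (rad), argp (rad), true anomaly (rad)] -- illustrative values
#   elements = StateVector([0.01, 7.0e6, 0.1, 0.0, 0.0, 0.0])
#   cartesian_state = keplerian_to_rv(elements)
#   chi = universal_anomaly_newton(cartesian_state, timedelta(seconds=600))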
|
phobos/utils/editing.py
|
kartben/phobos
| 323 |
68121
|
<filename>phobos/utils/editing.py
#!/usr/bin/python3
# coding=utf-8
# -------------------------------------------------------------------------------
# This file is part of Phobos, a Blender Add-On to edit robot models.
# Copyright (C) 2020 University of Bremen & DFKI GmbH Robotics Innovation Center
#
# You should have received a copy of the 3-Clause BSD License in the LICENSE file.
# If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# -------------------------------------------------------------------------------
"""
Contains the utility functions for editing objects and Phobos models.
"""
import bpy
import mathutils
import math
from phobos.phoboslog import log
import phobos.utils.selection as sUtils
import phobos.utils.naming as nUtils
import phobos.utils.blender as bUtils
import phobos.utils.io as ioUtils
import phobos.defs as defs
def dissolveLink(obj, delete_other=False):
    """Remove the selected link and reparent all child links, inertials, visuals and collisions to its effective parent.
Args:
obj(bpy.types.Object): the link to dissolve
delete_other: (Default value = False)
Returns:
"""
# Store original layers and show all layers
originallayers = {}
for name, coll in bpy.context.window.view_layer.layer_collection.children.items():
originallayers[name] = coll.exclude
coll.exclude = False
if not obj.phobostype == 'link':
log('Selected object {} is not a link!'.format(obj.name), 'ERROR')
return
else:
delete = [obj]
# Get all children
children = sUtils.getRecursiveChildren(
obj, phobostypes=('link', 'inertial', 'visual', 'collision'), include_hidden=True
)
if delete_other:
other_children = sUtils.getRecursiveChildren(
obj,
recursion_depth=2,
phobostypes=('motor', 'controller', 'sensor', 'submodel'),
include_hidden=True,
)
delete += [child for child in other_children if child not in children]
# Get the parent
parent = obj.parent
# If parent is not None ( Root )
if obj.parent:
# Reparent
parentObjectsTo(children, parent, clear=True)
# Delete the objects
sUtils.selectObjects(delete, clear=True, active=-1)
bpy.ops.object.delete()
# Restore original layers
for key, value in originallayers.items():
bpy.context.window.view_layer.layer_collection.children[key].exclude = value
def getCombinedTransform(obj, effectiveparent):
"""Get the combined transform of the object relative to the effective parent.
This combines all transformations in the parenting hierarchy up to the specified effective
parent.
    Note that the scale transformation of the effective parent is used anyway, as it scales the
local matrix of the child object.
Args:
obj(bpy.types.Object): the child object
effectiveparent(bpy.types.Object): the effective parent of the child object
Returns:
: bpy.types.Matrix -- the combined transformations of the child object
"""
parent = obj.parent
matrix = obj.matrix_local
# use the parents absolute scale to scale the relative matrix
if parent:
scale_mat = mathutils.Matrix.Identity(4)
scale_mat[0][0], scale_mat[1][1], scale_mat[2][2] = parent.matrix_world.to_scale()
matrix = scale_mat @ matrix
# combine transformations up to effective parent
while parent is not None:
if parent == effectiveparent:
break
# use relative rotation
matrix = parent.matrix_local @ matrix
parent = parent.parent
return matrix
def restructureKinematicTree(link, root=None):
"""Restructures a tree such that the ``link`` provided becomes the root of the tree.
If no root object is provided, :func:`phobos.utils.selection.getRoot` will be used.
For instance, the following tree::
A
/ \\
B C
/ \ \\
D E F
    would, using the call restructureKinematicTree(C), become::
C
/ \\
A F
/
B
/ \\
D E
Currently, this function ignores all options such as unselected or hidden objects.
Args:
link(bpy.types.Object): the link which will become the new root object
root(bpy.types.Object, optional): the current root object (Default value = None)
Returns:
None: None
"""
if not root:
root = sUtils.getRoot(link)
links = [link]
obj = link
# stop right now when the link is already root
if not obj.parent:
log('No restructure necessary. Link is already root.', 'INFO')
return
# gather chain of links ascending the tree
while obj.parent.name != root.name:
obj = obj.parent
if obj.phobostype == 'link':
links.append(obj)
links.append(root)
log("Unparenting objects for restructure: " + str([link.name for link in links]) + ".", 'DEBUG')
# unparent all links
sUtils.selectObjects(links, True)
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
log("Restructuring objects for new hierarchy.", 'DEBUG')
for i in range(len(links) - 1):
parent = links[i]
child = links[i + 1]
parentObjectsTo(child, parent)
log("Copying model information from old root.", 'DEBUG')
# copy properties
if 'model/name' in root:
link['model/name'] = root['model/name']
del root['model/name']
if 'model/version' in root:
link['model/version'] = root['model/version']
del root['model/version']
log("Restructured kinematic tree to new root: {}.".format(link.name), 'INFO')
def parentObjectsTo(objects, parent, clear=False):
"""Parents the specified objects to the parent object.
Depending on their phobostype the objects are parented either *bone relative* or *object*.
If *clear* is set, the parenting of the objects will be cleared (keeping the transform), before
parenting.
Args:
        objects(list(bpy.types.Object)): objects to set parent of
parent(bpy.types.Object): parent object
clear(bool, optional): if True, the parenting of the objects will be cleared (Default value = False)
Returns:
"""
if not isinstance(objects, list):
objects = [objects]
# Store original layers
#originallayers = list(bpy.context.scene.layers)
# Select all layers
#bpy.context.scene.layers = [True for i in range(20)]
# Restore original layers
#bpy.context.scene.layers = originallayers
if clear:
sUtils.selectObjects(objects, active=0, clear=True)
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
sUtils.selectObjects([parent] + objects, active=0, clear=True)
if parent.phobostype == 'link':
bpy.ops.object.parent_set(type='BONE_RELATIVE')
else:
bpy.ops.object.parent_set(type='OBJECT')
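# A small usage sketch (hypothetical object names): parenting visual and collision
# geometry to a link triggers the *bone relative* branch above; clear=True first
# removes any previous parenting while keeping the world transform.
def _example_parenting():
    link = bpy.data.objects['link_upper_arm']
    geometries = [bpy.data.objects['visual_upper_arm'], bpy.data.objects['collision_upper_arm']]
    parentObjectsTo(geometries, link, clear=True)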
def getNearestCommonParent(objs):
"""Returns hierarchically lowest common parent of the provided objects
Args:
objs: list of objects (bpy_types.Object)
    Returns:
        : tuple(bpy.types.Object, list(bpy.types.Object))/None -- the common parent and the
          intermediate objects between it and the given objects, or None if there is none
"""
anchor = objs[0] # pick one link as the anchor link
rest = objs[1:] # get other links to iterate over
in_all = False # this will be true if all 'rest' branches have parent as a common parent
parent = anchor # the candidate for common parent
inter_objects = set()
while not in_all and parent.parent:
in_all = True
parent = parent.parent # go up the anchor branch
inter_objects.add(parent)
for obj in rest: # start at base of each rest branch
o = obj
while (
o.parent and o.parent != parent
): # as long as there is a parent that is not the candidate parent
o = o.parent
inter_objects.add(o)
if (
o.parent != parent
): # check which break condition happened, break if not arrived at parent
in_all = False
break
if not in_all: # this is only true if none of the branches set it to False and broke afterwards
return None
else:
inter_objects.remove(parent)
return parent, list(inter_objects)
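# A sketch of the return value (hypothetical object names): for two visuals attached to
# different links of the same model, the result is the shared ancestor link plus the
# objects traversed on the way up, or None if the objects share no parent at all.
def _example_common_parent():
    objs = [bpy.data.objects['visual_hand'], bpy.data.objects['visual_forearm']]
    result = getNearestCommonParent(objs)
    if result is None:
        return None
    common_parent, intermediate = result
    return common_parent.name, [o.name for o in intermediate]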
def instantiateSubmodel(submodelname, instancename, size=1.0):
"""Creates an instance of the submodel specified by the submodelname.
The instance receives the definitions of the group as it is generated.
Args:
submodelname: name of the submodel (Blender group) to create an
instance of
instancename: name the instance object will receive
size: (Default value = 1.0)
Returns:
"""
submodel = None
interfaces = None
# find the existing group for submodel and interface
for group in bpy.data.groups:
# search for namespaced groups with the exact name
if ':' in group.name and submodelname == group.name:
submodel = group
if group.name.startswith('interfaces:') and submodelname.split(':')[1] in group.name:
interfaces = group
    if not submodel:
        log('Selected submodel is not defined.', 'ERROR')
        return None
if not interfaces:
log('No interfaces defined for this submodel.', 'INFO')
# add the submodel and write in data
bpy.ops.object.group_instance_add(group=submodel.name)
submodelobj = bpy.context.active_object
submodelobj.phobostype = 'submodel'
submodelobj['submodeltype'] = submodel.name.split(':')[0]
# TODO currently this works only by name binding, we should add links to
# the group here
submodelobj['submodel/name'] = submodelname
submodelobj['submodelname'] = submodelname
# copy custom props from group to instance
for key in submodel.keys():
submodelobj[key] = submodel[key]
submodelobj.name = instancename
submodelobj.empty_draw_size = size
# add the interfaces if available
if interfaces:
# create group and make real
bpy.ops.object.group_instance_add(group=interfaces.name)
bpy.ops.object.duplicates_make_real()
# write interface parameters and change namespace
for obj in bpy.context.selected_objects:
nUtils.addNamespace(obj, instancename)
obj.name = obj.name.rsplit('.')[0]
obj['submodeltype'] = 'interface'
bUtils.toggleTransformLock(obj, True)
# parent interfaces to submodel empty
parentObjectsTo(bpy.context.selected_objects, submodelobj)
# delete empty parent object of interfaces
sUtils.selectObjects(
objects=[
a
for a in bpy.context.selected_objects
if a.type == 'EMPTY' and 'submodeltype' in a and a['submodeltype'] == 'interface'
],
clear=True,
active=0,
)
bpy.ops.object.delete(use_global=False)
return submodelobj
def defineSubmodel(submodelname, submodeltype, version='', objects=None):
"""Defines a new submodule group with the specified name and type.
The group will be named like so:
'submodeltype:submodelname/version'
Objects with the phobostype 'interface' (if present) are handled separately
and put into a respective submodel group (which features the 'interface'
submodeltype).
If the version is omitted, the respective part of the name is dropped, too.
If no object list is provided the objects are derived from selection.
The submodeltype is also added as dict entry to the group in Blender.
The selected objects are moved to the respective layer for submodels or
interfaces.
Args:
submodelname: descriptive name of the submodel
submodeltype: type of the submodel (e.g. 'fmu', 'mechanics')
version: a version string (e.g. '1.0', 'dangerous') (Default value = '')
objects: the objects which belong to the submodel (None will derive
objects from the selection) (Default value = None)
Returns:
: a tuple of the submodelgroup and interfacegroup/None
"""
if not objects:
objects = bpy.context.selected_objects
# split interface from physical objects
interfaces = [i for i in objects if i.phobostype == 'interface']
physical_objects = [p for p in objects if p.phobostype != 'interface']
# make the physical group
sUtils.selectObjects(physical_objects, True, 0)
submodelgroupname = submodeltype + ':' + submodelname
if version != '':
submodelgroupname += '/' + version
if submodelgroupname in bpy.data.groups.keys():
        log('Group ' + submodelgroupname + ' already exists.', 'WARNING')
bpy.ops.group.create(name=submodelgroupname)
submodelgroup = bpy.data.groups[submodelgroupname]
submodelgroup['submodeltype'] = submodeltype
submodelgroup['version'] = version
modeldefs = defs.definitions['submodeltypes'][submodeltype]
# copy the definition parameters to the group properties
for key in modeldefs['definitions']:
submodelgroup[key] = modeldefs['definitions'][key]
# move objects to submodel layer
for obj in physical_objects:
if not 'submodel' in bpy.context.scene.collection.children.keys():
newcollection = bpy.data.collections.new('submodel')
bpy.context.scene.collection.children.link(newcollection)
for name, collection in bpy.context.scene.collection.children.items():
if name == 'submodel':
collection.objects.link(obj)
elif obj.name in collection.objects:
collection.objects.unlink(obj)
log('Created submodel group ' + submodelname + ' of type "' + submodeltype + '".', 'DEBUG')
interfacegroup = None
# make the interface group
if interfaces:
sUtils.selectObjects(interfaces, True, 0)
interfacegroupname = 'interfaces:' + submodelname
if version != '':
interfacegroupname += '/' + version
# TODO what about overwriting groups with same names?
bpy.ops.group.create(name=interfacegroupname)
interfacegroup = bpy.data.groups[interfacegroupname]
interfacegroup['submodeltype'] = 'interfaces'
# copy interface definitions from submodel definitions
for key in modeldefs['interfaces']:
interfacegroup[key] = modeldefs['interfaces'][key]
# move objects to interface layer
for obj in interfaces:
bUtils.sortObjectToCollection(obj, cname='interface')
log('Created interface group for submodel ' + submodelname + '.', 'DEBUG')
else:
log('No interfaces for this submodel.', 'DEBUG')
for i in interfaces:
i.show_name = True
return (submodelgroup, interfacegroup)
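# A usage sketch for the naming convention described above (hypothetical names; assumes
# the submodel's objects are currently selected): defining a 'mechanics' submodel called
# 'gripper' in version '1.0' yields a group named 'mechanics:gripper/1.0' and, if
# interface objects were selected, an interface group named 'interfaces:gripper/1.0'.
def _example_define_submodel():
    submodelgroup, interfacegroup = defineSubmodel('gripper', 'mechanics', version='1.0')
    return submodelgroup.name, interfacegroup.name if interfacegroup else None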
def removeSubmodel(submodelname, submodeltype, version='', interfaces=True):
"""Removes a submodel definition from the Blender project.
Returns True or False depending on whether groups have been removed or not.
Args:
submodelname: the name of the submodel
submodeltype: the submodeltype of the submodel
version: optional version of the submodel (Default value = '')
interfaces: True if interface should also be deleted, else False. (Default value = True)
Returns:
: True if groups have been removed, else False.
"""
# build the group name to look for
submodelgroupname = submodeltype + ':' + submodelname
if version != '':
submodelgroupname += '/' + version
# remove the submodelgroup
if submodelgroupname in bpy.data.groups:
bpy.data.groups.remove(bpy.data.groups[submodelgroupname])
if not interfaces:
return True
if interfaces:
interfacegroupname = 'interfaces:' + submodelname
if version != '':
interfacegroupname += '/' + version
if interfacegroupname in bpy.data.groups:
bpy.data.groups.remove(bpy.data.groups[interfacegroupname])
return True
return False
def createInterface(ifdict, parent=None):
"""Create an interface object and optionally parent to existing object.
ifdict is expected as:
| **type**: str
| **direction**: str
| **model**: str
| **name**: str
| **parent**: bpy.types.Object (optional)
| **scale**: float (optional)
Args:
ifdict(dict): interface data
parent(bpy.types.Object, optional): designated parent object (Default value = None)
Returns:
        bpy.types.Object: newly created interface object
"""
if not parent:
try:
parent = ifdict['parent']
assert isinstance(parent, bpy.types.Object)
except (AttributeError, AssertionError, KeyError):
parent = None
location = parent.matrix_world.translation if parent else mathutils.Vector()
rotation = parent.matrix_world.to_euler() if parent else mathutils.Euler()
model = ifdict['model'] if 'model' in ifdict else 'default'
templateobj = ioUtils.getResource(('interface', model, ifdict['direction']))
scale = ifdict['scale'] if 'scale' in ifdict else 1.0
ifobj = bUtils.createPrimitive(
ifdict['name'],
'box',
(1.0, 1.0, 1.0),
defs.layerTypes['interface'],
plocation=location,
protation=rotation,
phobostype='interface',
)
nUtils.safelyName(ifobj, ifdict['name'], 'interface')
ifobj.data = templateobj.data
ifobj.scale = (scale,) * 3
ifobj['interface/type'] = ifdict['type']
ifobj['interface/direction'] = ifdict['direction']
if parent is not None:
ifobj['interface/parent'] = parent.name
parentObjectsTo(ifobj, parent)
    bpy.ops.object.make_single_user(object=True, obdata=True)
    return ifobj
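# A minimal sketch of the expected ifdict layout (all values are illustrative only and
# assume matching interface resources are available):
def _example_create_interface():
    ifdict = {
        'type': 'mechanical',
        'direction': 'outgoing',
        'model': 'default',
        'name': 'interface_tool_mount',
        'scale': 0.05,
    }
    parent = bpy.data.objects.get('link_wrist')  # hypothetical designated parent
    return createInterface(ifdict, parent=parent)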
def toggleInterfaces(interfaces=None, modename='toggle'):
"""
Args:
interfaces: (Default value = None)
modename: (Default value = 'toggle')
Returns:
"""
modedict = {'toggle': 0, 'activate': 1, 'deactivate': 2}
mode = modedict[modename]
if not interfaces:
interfaces = [i for i in bpy.context.selected_objects if i.phobostype == 'interface']
for i in interfaces:
if mode == 0:
i.show_name = not i.show_name
elif mode == 1:
i.show_name = True
elif mode == 2:
i.show_name = False
def connectInterfaces(parentinterface, childinterface, transform=None):
"""
Args:
parentinterface:
childinterface:
transform: (Default value = None)
Returns:
"""
# first check if the interface is child of the root object and if not, restructure the tree
root = sUtils.getRoot(childinterface)
parent = childinterface.parent
if root != parent:
restructureKinematicTree(parent)
childsubmodel = childinterface.parent
# connect the interfaces
sUtils.selectObjects(objects=[parentinterface], clear=True, active=0)
bpy.ops.object.make_single_user(object=True, obdata=True)
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
sUtils.selectObjects(objects=[childinterface], clear=True, active=0)
bpy.ops.object.make_single_user(object=True, obdata=True)
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
# parent interfaces
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
parentObjectsTo(childsubmodel, childinterface, clear=True)
parentObjectsTo(childinterface, parentinterface)
loc, rot, sca = parentinterface.matrix_world.decompose()
# apply additional transform (ignoring the scale of the parent interface)
if not transform:
transform = (
mathutils.Euler((math.radians(180.0), 0.0, math.radians(180.0)), 'XYZ')
.to_matrix()
.to_4x4()
)
childinterface.matrix_world = (
mathutils.Matrix.Translation(loc) @ rot.to_matrix().to_4x4() @ transform
)
# TODO clean this up
# try:
# del childsubmodel['modelname']
# except KeyError:
# pass
# TODO: re-implement this for MECHANICS models
# try:
# # parent visual and collision objects to new parent
# children = sUtils.getImmediateChildren(parent, ['visual', 'collision', 'interface'])
# print(children)
# sUtils.selectObjects(children, True, 0)
# bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
# print()
# parentObjectsTo(children, sUtils.getEffectiveParent(parent, ignore_selection=True))
# bpy.ops.object.parent_set(type='BONE_RELATIVE')
# except (IndexError, AttributeError):
# pass # no objects to re-parent
parentinterface.show_name = False
childinterface.show_name = False
def disconnectInterfaces(parentinterface, childinterface, transform=None):
"""
Args:
parentinterface:
childinterface:
transform: (Default value = None)
Returns:
"""
# unparent the child
sUtils.selectObjects(objects=[childinterface], clear=True, active=0)
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
# select the former parent of the interface as new root
if childinterface.children and len(childinterface.children) > 0:
# prefer submodel instances
for child in childinterface.children:
if child.phobostype == 'submodel':
root = child
break
# otherwise just use the first child
else:
root = childinterface.children[0]
# restructure the kinematic tree to make the interface child of the submodel again
sUtils.selectObjects(objects=[root], clear=True, active=0)
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
parentObjectsTo(childinterface, root)
# apply additional transform
if transform:
childinterface.matrix_world = root.matrix_world @ transform
# make the interfaces active again
parentinterface.show_name = True
childinterface.show_name = True
def setProperties(obj, diction, category=None):
"""Adds the specified dictionary as custom properties to the object.
If a category is provided, the keys of the dictionary are prepended with the category:
`category/key`
Args:
obj(bpy.types.Object): object to add the information to
diction(dict): information to add to the object
category(str, optional): category for the dictionary entries (Default value = None)
Returns:
"""
for key, value in diction.items():
obj[(category + '/' + key) if category else key] = value
def getProperties(obj, category=None):
"""Returns a dictionary of custom property information of the object.
If a category is provided, only the custom properties of the specified category are returned.
Otherwise, the phobostype of the object will be used as category.
The dictionary contains the custom property keys with the category removed (e.g. 'name' for
'link/name').
Args:
obj(bpy.types.Object): object to get properties of
category(str, optional): property category to look for (Default value = None)
Returns:
: dict -- custom property information of the phobostype/category for the object
"""
if not category:
category = obj.phobostype
try:
diction = {
key.replace(category + '/', ''): value
for key, value in obj.items()
if key.startswith(category + '/')
}
    except KeyError:
        log("Failed filtering properties for category " + category, "ERROR")
        diction = {}
return diction
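# A small round-trip sketch for setProperties/getProperties (hypothetical object name):
# the values are written as 'motor/maxSpeed' and 'motor/type' and read back with the
# 'motor/' prefix stripped again.
def _example_custom_properties():
    obj = bpy.data.objects['link_elbow']
    setProperties(obj, {'maxSpeed': 12.5, 'type': 'PID'}, category='motor')
    return getProperties(obj, category='motor')  # {'maxSpeed': 12.5, 'type': 'PID'}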
def removeProperties(obj, props, recursive=False):
"""Removes a list of custom properties from the specified object.
The specified property list can contain names with wildcards at the end (e.g. sensor*).
If recursive is set, the properties will be removed recursively from all children, too.
Args:
obj(bpy.types.Object): object to remove the properties from
        props(list(str)): list of property names, which will be removed from the object
recursive(bool, optional): if True, the properties will be removed recursively from the children, too (Default value = False)
Returns:
"""
for prop in props:
if len(prop) == 0:
continue
if prop in obj:
del obj[prop]
elif prop[-1] == '*':
for objprop in obj.keys():
if objprop.startswith(prop[:-1]):
del obj[objprop]
if recursive:
for child in obj.children:
removeProperties(child, props, recursive=recursive)
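# A short sketch of the wildcard behaviour (hypothetical keys): 'sensor*' removes every
# custom property whose name starts with 'sensor', while 'motor/type' removes only that
# exact key; recursive=True repeats this for all children.
def _example_remove_properties():
    obj = bpy.data.objects['link_elbow']
    removeProperties(obj, ['sensor*', 'motor/type'], recursive=True)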
def mergeLinks(links, targetlink, movetotarget=False):
"""
Args:
links:
targetlink:
movetotarget: (Default value = False)
Returns:
"""
for link in links:
if movetotarget:
link.matrix_world = targetlink.matrix_world
sUtils.selectObjects([link], clear=True, active=0)
bpy.ops.object.select_grouped(type='CHILDREN')
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
try:
parentObjectsTo(bpy.context.selected_objects, targetlink)
except RuntimeError as e:
log("Cannot resolve new parent hierarchy: " + str(e), 'ERROR')
del link
def addAnnotationObject(obj, annotation, name=None, size=0.1, namespace=None):
"""Add a new annotation object with the specified annotations to the object.
The annotation object will receive 'annotation_object' as its default name, unless a name is
provided. Naming is done using :func:`phobos.utils.naming.safelyName`.
The annotation object will be scaled according to the **size** parameter.
If ``namespace`` is provided, the annotations will be saved with this string prepended.
This is done using :func:`addAnnotation`.
Args:
obj(bpy.types.Object): object to add annotation object to
annotation(dict): annotations that will be added
name(str, optional): name for the new annotation object (Default value = None)
size(int/float, optional): size of the new annotation object (Default value = 0.1)
namespace(str, optional): namespace that will be prepended to the annotations (Default value = None)
Returns:
: bpy.types.Object - the new annotation object
"""
loc = obj.matrix_world.to_translation()
if not name:
name = obj.name + '_annotation_object'
annot_obj = bUtils.createPrimitive(
name,
'box',
[1, 1, 1],
defs.layerTypes['annotation'],
plocation=loc,
phobostype='annotation',
)
annot_obj.scale = (size,) * 3
    resource = ioUtils.getResource(['annotation', namespace.split('/')[-1]]) if namespace else None
if resource:
annot_obj.data = resource.data
else:
annot_obj.data = ioUtils.getResource(['annotation', 'default']).data
# make sure all layers are enabled for parenting
originallayers = {}
for name, coll in bpy.context.window.view_layer.layer_collection.children.items():
originallayers[name] = coll.exclude
coll.exclude = False
# parent annotation object
parentObjectsTo(annot_obj, obj)
# Restore original layers
for key, value in originallayers.items():
bpy.context.window.view_layer.layer_collection.children[key].exclude = value
addAnnotation(annot_obj, annotation, namespace=namespace)
return annot_obj
def addAnnotation(obj, annotation, namespace=None, ignore=[]):
"""Adds the specified annotations to the object.
If provided, the namespace will be prepended to the annotation keys and separated with a /.
Args:
obj(bpy.types.Object): object to add the annotations to
annotation(dict): annotations to add to the object
namespace(str, optional): namespace which will be prepended to the annotations (Default value = None)
        ignore(list(str), optional): skip these keys when adding annotations (Default value = [])
Returns:
"""
for key, value in annotation.items():
obj[str(namespace + '/' if namespace and key not in ignore else '') + key] = value
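# A usage sketch for the namespace handling above (hypothetical names and values): the
# annotations end up as 'docs/author' and 'docs/revision' on the new annotation object
# and can be read back with getProperties using the same category.
def _example_annotation():
    link = bpy.data.objects['link_base']
    annot_obj = addAnnotationObject(link, {'author': 'robot_team', 'revision': '3'},
                                    size=0.05, namespace='docs')
    return getProperties(annot_obj, category='docs')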
def sortObjectsToLayers(objs):
"""Sorts the specified objects to the layers which match their phobostype.
The layer for each phobostype is defined according to `phobos.defs.layerTypes`.
Args:
        objs(list(bpy.types.Object)): objects to move to their respective layer
Returns:
"""
for obj in objs:
if obj.phobostype != 'undefined':
            # first check if we have a collection for the type
bUtils.sortObjectToCollection(obj, cname=obj.phobostype)
else:
log("The phobostype of object {} is undefined.".format(obj.name), 'ERROR')
def smoothen_surface(obj):
"""Applies various steps to make the specified object look clean and smooth.
Args:
obj(bpy.types.Object): object to make look clean
Returns:
"""
bpy.context.view_layer.objects.active = obj
# recalculate surface normals
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent()
bpy.ops.mesh.mark_sharp(clear=True)
bpy.ops.object.mode_set(mode='OBJECT')
# add smooth shading
bpy.ops.object.shade_smooth()
# use edge split modifier to improve the look of CAD-models
for mod in obj.modifiers:
if mod.type == 'EDGE_SPLIT':
log("Edge split modifier already added to object {}.".format(obj.name), 'DEBUG')
break
else:
bpy.ops.object.modifier_add(type='EDGE_SPLIT')
|
os/example_dirs.py
|
Carglglz/micropython-lib
| 1,556 |
68148
|
import os
print(os.getcwd())
l = os.listdir()
print(l)
assert "test_dirs.py" in l
assert "os" in l
for t in os.walk("."):
print(t)
for t in os.walk(".", False):
print(t)
|
ip_rep/database_connector/views.py
|
logicbomb-1/ARTIF
| 205 |
68162
|
from django.shortcuts import render
from pymongo import MongoClient
# Create your views here.
|
WebMirror/management/rss_parser_funcs/feed_parse_extractNovelsJapan.py
|
fake-name/ReadableWebProxy
| 193 |
68173
|
def extractNovelsJapan(item):
"""
'Novels Japan'
"""
if item['title'].endswith(' (Sponsored)'):
item['title'] = item['title'][:-1 * len(' (Sponsored)')]
if item['title'].endswith(' and Announcement'):
item['title'] = item['title'][:-1 * len(' and Announcement')]
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].lower().endswith('loner dungeon'):
return buildReleaseMessageWithType(item, 'I who is a Loner, Using cheats adapts to the Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().endswith('vending machine'):
return buildReleaseMessageWithType(item, 'I was Reborn as a Vending Machine, Wandering in the Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().endswith('login bonus'):
return buildReleaseMessageWithType(item, 'Skill Up with Login Bonus', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().endswith('lv2 cheat') or item['title'].lower().endswith(
'ex-hero candidate’s, who turned out to be a cheat from lv2, laid-back life in another world') or 'Lv2 Cheat' in item['tags']:
return buildReleaseMessageWithType(item, "Ex-Hero Candidate's, Who Turned Out To Be A Cheat From Lv2, Laid-back Life In Another World", vol, chp, frag=frag, postfix=postfix)
if 'Second Earth' in item['tags']:
return buildReleaseMessageWithType(item, 'Second Earth', vol, chp, frag=frag, postfix=postfix)
if 'Strongest Revolution' in item['tags']:
return buildReleaseMessageWithType(item, 'The Fierce Revolution ~ The Strongest Organism Which Can Kill the Devil and the Hero', vol, chp, frag=frag, postfix=postfix)
if 'Loner Dungeon' in item['tags']:
return buildReleaseMessageWithType(item, 'I who is a Loner, Using cheats adapts to the Dungeon', vol, chp, frag=frag, postfix=postfix)
if 'Skill Up' in item['tags']:
return buildReleaseMessageWithType(item, 'Skill Up with Login Bonus', vol, chp, frag=frag, postfix=postfix)
if 'Isobe Isobee' in item['tags']:
return buildReleaseMessageWithType(item, 'Isobe Isobee', vol, chp, frag=frag, postfix=postfix)
if 'Ex-hero' in item['tags']:
return buildReleaseMessageWithType(item, "Ex-Hero Candidate's, Who Turned Out To Be A Cheat From Lv2, Laid-back Life In Another World", vol, chp, frag=frag, postfix=postfix)
return False
|
model_analyzer/config/run/run_search.py
|
MarkMoTrin/model_analyzer
| 115 |
68183
|
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model_analyzer.config.input.objects.config_model_profile_spec \
import ConfigModelProfileSpec
from model_analyzer.constants import LOGGER_NAME, THROUGHPUT_GAIN
import copy
import logging
logger = logging.getLogger(LOGGER_NAME)
class RunSearch:
"""
A class responsible for searching the config space.
"""
def __init__(self, config):
self._max_concurrency = config.run_config_search_max_concurrency
self._max_instance_count = config.run_config_search_max_instance_count
self._max_preferred_batch_size = config.run_config_search_max_preferred_batch_size
self._sweep_preferred_batch_size_disable = config.run_config_search_preferred_batch_size_disable
self._model_config_parameters = {'instance_count': 1}
self._measurements = []
self._last_batch_length = None
# Run search operating mode
self._sweep_mode_function = None
def _create_model_config(self, cpu_only=False):
"""
Generate the model config sweep to be used.
"""
model_config = self._model_config_parameters
new_config = {}
if 'dynamic_batching' in model_config:
if model_config['dynamic_batching'] is None:
new_config['dynamic_batching'] = {}
else:
new_config['dynamic_batching'] = {
'preferred_batch_size': [model_config['dynamic_batching']]
}
if 'instance_count' in model_config:
if not cpu_only:
new_config['instance_group'] = [{
'count': model_config['instance_count'],
'kind': 'KIND_GPU'
}]
else:
new_config['instance_group'] = [{
'count': model_config['instance_count'],
'kind': 'KIND_CPU'
}]
return new_config
def add_measurements(self, measurements):
"""
        Add the measurements that result from running
the sweeps.
Parameters
----------
measurements : list
list of measurements
"""
self._last_batch_length = len(measurements)
# The list will contain one parameter, because we are experimenting
# with one value at a time.
self._measurements += measurements
def _step_instance_count(self):
"""
Advances instance count by one step.
"""
self._model_config_parameters['instance_count'] += 1
def _step_dynamic_batching(self):
"""
Advances the dynamic batching by one step.
"""
if 'dynamic_batching' not in self._model_config_parameters:
# Enable dynamic batching
self._model_config_parameters['dynamic_batching'] = None
else:
if self._model_config_parameters['dynamic_batching'] is None:
self._model_config_parameters['dynamic_batching'] = 1
else:
self._model_config_parameters['dynamic_batching'] *= 2
def _get_throughput(self, measurement):
return measurement.get_metric_value('perf_throughput')
def _calculate_throughput_gain(self, index):
throughput_before = self._get_throughput(
self._measurements[-(index + 1)])
throughput_after = self._get_throughput(self._measurements[-index])
gain = (throughput_after - throughput_before) / throughput_before
return gain
def _valid_throughput_gain(self):
"""
Returns true if the amount of throughput gained
is reasonable for continuing the search process
"""
# If number of measurements is smaller than 4,
# the search can continue.
if len(self._measurements) < 4:
return True
return self._calculate_throughput_gain(1) > THROUGHPUT_GAIN or \
self._calculate_throughput_gain(2) > THROUGHPUT_GAIN or \
self._calculate_throughput_gain(3) > THROUGHPUT_GAIN
def init_model_sweep(self, concurrency, search_model_config_parameters):
"""
        Initializes the sweep mode and, in some cases, the model config parameters.
"""
# Reset the measurements after each init
self._measurements = []
if len(concurrency) != 0 and search_model_config_parameters:
self._model_config_parameters = {'instance_count': 0}
self._sweep_mode_function = self._sweep_model_config_only
elif len(concurrency) == 0 and search_model_config_parameters:
self._model_config_parameters = {'instance_count': 1}
logger.info(
'Will sweep both the concurrency and model config parameters...'
)
self._sweep_mode_function = self._sweep_concurrency_and_model_config
else:
logger.info('Will sweep only through the concurrency values...')
self._sweep_mode_function = self._sweep_concurrency_only
def get_model_sweep(self, config_model):
"""
Get the next iteration of the sweeps.
Parameters
----------
config_model : ConfigModelProfileSpec
The config model object of the model to sweep through
Returns
-------
config_model, list
The list may be empty, contain a model config dict or None
"""
new_model = ConfigModelProfileSpec(
copy.deepcopy(config_model.model_name()),
copy.deepcopy(config_model.cpu_only()),
copy.deepcopy(config_model.objectives()),
copy.deepcopy(config_model.constraints()),
copy.deepcopy(config_model.parameters()),
copy.deepcopy(config_model.model_config_parameters()),
copy.deepcopy(config_model.perf_analyzer_flags()))
if self._sweep_mode_function:
new_model, model_sweep = self._sweep_mode_function(new_model)
            # Only log a message if there are new runs.
if model_sweep:
self._log_message(new_model)
return new_model, model_sweep
return new_model, []
def _sweep_concurrency_and_model_config(self, model):
"""
Gets next iteration of both the concurrency and model config
parameters
Parameters
----------
model : ConfigModelProfileSpec
The model whose parameters are being swept over
"""
return self._sweep_parameters(model, sweep_model_configs=True)
def _sweep_concurrency_only(self, model):
"""
Gets next iteration of the concurrency sweep
"""
return self._sweep_parameters(model, sweep_model_configs=False)
def _sweep_parameters(self, model, sweep_model_configs):
"""
A helper function that sweeps over concurrency
and if required, over model configs as well
"""
concurrency = model.parameters()['concurrency']
if len(concurrency) == 0:
model.parameters()['concurrency'] = [1]
else:
# Exponentially increase concurrency
new_concurrency = concurrency[0] * 2
            # If the concurrency limit has been reached, the last batch led to
            # an error, or the throughput gain is no longer significant, stop
            # sweeping the concurrency value here. TODO: add exponential backoff so
            # that the algorithm can step back and find the exact turning point.
concurrency_limit_reached = new_concurrency > self._max_concurrency
last_batch_erroneous = self._last_batch_length == 0
throughput_peaked = not self._valid_throughput_gain()
if concurrency_limit_reached or last_batch_erroneous or throughput_peaked:
# Reset concurrency
if sweep_model_configs:
self._measurements = []
model.parameters()['concurrency'] = [1]
return self._sweep_model_config_only(model)
else:
return model, []
model.parameters()['concurrency'] = [new_concurrency]
return model, [
self._create_model_config(
cpu_only=model.cpu_only()) if sweep_model_configs else None
]
def _sweep_model_config_only(self, model):
"""
Gets next iteration model config
parameters sweep
"""
self._step_instance_count()
instance_limit_reached = self._model_config_parameters[
'instance_count'] > self._max_instance_count
if instance_limit_reached:
if self._sweep_preferred_batch_size_disable:
return model, []
# Reset instance_count
self._model_config_parameters['instance_count'] = 1
self._step_dynamic_batching()
dynamic_batching_enabled = self._model_config_parameters[
'dynamic_batching'] is not None
if dynamic_batching_enabled:
batch_size_limit_reached = self._model_config_parameters[
'dynamic_batching'] > self._max_preferred_batch_size
if batch_size_limit_reached:
return model, []
return model, [self._create_model_config(cpu_only=model.cpu_only())]
def _log_message(self, model):
"""
Writes the current state of the search to the console
"""
concurrency = model.parameters()['concurrency'][0]
message = 'dynamic batching is disabled.'
if 'dynamic_batching' in self._model_config_parameters:
if self._model_config_parameters['dynamic_batching'] is None:
message = 'dynamic batching is enabled.'
else:
message = (
"preferred batch size is set to "
f"{self._model_config_parameters['dynamic_batching']}.")
if self._sweep_mode_function == self._sweep_concurrency_only:
logger.info(f"[Search Step] Concurrency set to {concurrency}. ")
elif self._sweep_mode_function == self._sweep_concurrency_and_model_config:
logger.info(
f"[Search Step] Concurrency set to {concurrency}. "
f"Instance count set to "
f"{self._model_config_parameters['instance_count']}, and {message}"
)
elif self._sweep_mode_function == self._sweep_model_config_only:
logger.info(
f"[Search Step] Instance count set to "
f"{self._model_config_parameters['instance_count']}, and {message}"
)
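# A standalone sketch (deliberately not using the RunSearch class or the real
# THROUGHPUT_GAIN constant; min_gain=0.05 is an illustrative value) of the stopping rule
# implemented above: concurrency doubles each step and the sweep ends once the limit is
# reached or none of the last three relative throughput gains exceeds the threshold.
def _sketch_concurrency_sweep(throughputs, max_concurrency=1024, min_gain=0.05):
    concurrency = 1
    measured = []
    for throughput in throughputs:
        measured.append(throughput)
        if len(measured) >= 4:
            gains = [
                (measured[-i] - measured[-(i + 1)]) / measured[-(i + 1)]
                for i in (1, 2, 3)
            ]
            if all(gain <= min_gain for gain in gains):
                break  # throughput has peaked
        concurrency *= 2
        if concurrency > max_concurrency:
            break  # concurrency limit reached
    return concurrency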
|
tests/transactions/tests.py
|
huicheese/Django-test
| 118 |
68206
|
from __future__ import absolute_import
import sys
try:
import threading
except ImportError:
threading = None
import time
from django.db import (connection, transaction,
DatabaseError, Error, IntegrityError, OperationalError)
from django.test import TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import IgnorePendingDeprecationWarningsMixin
from django.utils import six
from django.utils.unittest import skipIf, skipUnless
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: <NAME>>', '<Reporter: Tintin>'])
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: <NAME>>', '<Reporter: Tintin>'])
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: <NAME>>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
connection.cursor().execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicInsideLegacyTransactionManagementTests(AtomicTests):
def setUp(self):
transaction.enter_transaction_management()
def tearDown(self):
# The tests access the database after exercising 'atomic', making the
# connection dirty; a rollback is required to make it clean.
transaction.rollback()
transaction.leave_transaction_management()
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Tournesol")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Tournesol")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_calling_transaction_management_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.enter_transaction_management()
with self.assertRaises(transaction.TransactionManagementError):
transaction.leave_transaction_management()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with six.assertRaisesRegex(self, OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
# Regression test for #20028
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
class TransactionTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
available_apps = ['transactions']
def create_a_reporter_then_fail(self, first, last):
a = Reporter(first_name=first, last_name=last)
a.save()
raise Exception("I meant to do that")
def remove_a_reporter(self, first_name):
        r = Reporter.objects.get(first_name=first_name)
r.delete()
def manually_managed(self):
r = Reporter(first_name="Dirk", last_name="Gently")
r.save()
transaction.commit()
def manually_managed_mistake(self):
r = Reporter(first_name="Edward", last_name="Woodward")
r.save()
# Oops, I forgot to commit/rollback!
@skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
"""
The default behavior is to autocommit after each save() action.
"""
self.assertRaises(Exception,
self.create_a_reporter_then_fail,
"Alice", "Smith"
)
# The object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_decorator(self):
"""
The autocommit decorator works exactly the same as the default behavior.
"""
autocomitted_create_then_fail = transaction.autocommit(
self.create_a_reporter_then_fail
)
self.assertRaises(Exception,
autocomitted_create_then_fail,
"Alice", "Smith"
)
# Again, the object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_decorator_with_using(self):
"""
The autocommit decorator also works with a using argument.
"""
autocomitted_create_then_fail = transaction.autocommit(using='default')(
self.create_a_reporter_then_fail
)
self.assertRaises(Exception,
autocomitted_create_then_fail,
"Alice", "Smith"
)
# Again, the object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
"""
With the commit_on_success decorator, the transaction is only committed
if the function doesn't throw an exception.
"""
committed_on_success = transaction.commit_on_success(
self.create_a_reporter_then_fail)
self.assertRaises(Exception, committed_on_success, "Dirk", "Gently")
# This time the object never got saved
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
"""
The commit_on_success decorator also works with a using argument.
"""
using_committed_on_success = transaction.commit_on_success(using='default')(
self.create_a_reporter_then_fail
)
self.assertRaises(Exception,
using_committed_on_success,
"Dirk", "Gently"
)
# This time the object never got saved
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
"""
If there aren't any exceptions, the data will get saved.
"""
Reporter.objects.create(first_name="Alice", last_name="Smith")
remove_comitted_on_success = transaction.commit_on_success(
self.remove_a_reporter
)
remove_comitted_on_success("Alice")
self.assertEqual(list(Reporter.objects.all()), [])
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_exit(self):
@transaction.autocommit()
def gen_reporter():
@transaction.commit_on_success
def create_reporter():
Reporter.objects.create(first_name="Bobby", last_name="Tables")
create_reporter()
# Much more formal
r = Reporter.objects.get()
r.first_name = "Robert"
r.save()
gen_reporter()
r = Reporter.objects.get()
self.assertEqual(r.first_name, "Robert")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
"""
You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.
"""
manually_managed = transaction.commit_manually(self.manually_managed)
manually_managed()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
"""
If you forget, you'll get bad errors.
"""
manually_managed_mistake = transaction.commit_manually(
self.manually_managed_mistake
)
self.assertRaises(transaction.TransactionManagementError,
manually_managed_mistake)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
"""
The commit_manually function also works with a using argument.
"""
using_manually_managed_mistake = transaction.commit_manually(using='default')(
self.manually_managed_mistake
)
self.assertRaises(transaction.TransactionManagementError,
using_manually_managed_mistake
)
class TransactionRollbackTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
available_apps = ['transactions']
def execute_bad_sql(self):
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
@skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
"""
Regression for #11900: If a function wrapped by commit_on_success
writes a transaction that can't be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.
"""
execute_bad_sql = transaction.commit_on_success(self.execute_bad_sql)
self.assertRaises(IntegrityError, execute_bad_sql)
transaction.rollback()
class TransactionContextManagerTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
available_apps = ['transactions']
def create_reporter_and_fail(self):
Reporter.objects.create(first_name="Bob", last_name="Holtzman")
raise Exception
@skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
"""
The default behavior is to autocommit after each save() action.
"""
with self.assertRaises(Exception):
self.create_reporter_and_fail()
# The object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager(self):
"""
The autocommit context manager works exactly the same as the default
behavior.
"""
with self.assertRaises(Exception):
with transaction.autocommit():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager_with_using(self):
"""
The autocommit context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.autocommit(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
"""
With the commit_on_success context manager, the transaction is only
committed if the block doesn't throw an exception.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
"""
The commit_on_success context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
"""
If there aren't any exceptions, the data will get saved.
"""
Reporter.objects.create(first_name="Alice", last_name="Smith")
with transaction.commit_on_success():
Reporter.objects.filter(first_name="Alice").delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_exit(self):
with transaction.autocommit():
with transaction.commit_on_success():
Reporter.objects.create(first_name="Bobby", last_name="Tables")
# Much more formal
r = Reporter.objects.get()
r.first_name = "Robert"
r.save()
r = Reporter.objects.get()
self.assertEqual(r.first_name, "Robert")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
"""
You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.
"""
with transaction.commit_manually():
Reporter.objects.create(first_name="Libby", last_name="Holtzman")
transaction.commit()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
"""
If you forget, you'll get bad errors.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually():
Reporter.objects.create(first_name="Scott", last_name="Browning")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
"""
The commit_manually function also works with a using argument.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually(using="default"):
Reporter.objects.create(first_name="Walter", last_name="Cronkite")
@skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
"""
Regression for #11900: If a block wrapped by commit_on_success
writes a transaction that can't be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.
"""
with self.assertRaises(IntegrityError):
with transaction.commit_on_success():
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
transaction.rollback()
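# Illustrative summary of the pattern exercised by the tests above (comment only, not
# part of the original module): commit_on_success commits the block's queries only if
# the block exits without raising, e.g.
#   with transaction.commit_on_success():
#       Reporter.objects.create(first_name="Jane", last_name="Doe")
# while commit_manually leaves calling commit()/rollback() entirely to the caller.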
|
deep_models/paper_10_mul_lstm/utils.py
|
bicepjai/Deep-Survey-on-Text-Classification
| 197 |
68220
|
# refer https://github.com/titu1994/Keras-Multiplicative-LSTM
from __future__ import absolute_import
import numpy as np
__all__ = ['MultiplicativeLSTM']
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine import Layer
from keras.engine import InputSpec
from keras.legacy import interfaces
from keras.layers import Recurrent
from keras.layers.recurrent import _time_distributed_dense
class MultiplicativeLSTM(Recurrent):
"""Multiplicative Long-Short Term Memory unit - https://arxiv.org/pdf/1609.07959.pdf
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
# References
- [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(MultiplicativeLSTM, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = [InputSpec(shape=(None, self.units)),
InputSpec(shape=(None, self.units))]
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None, None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(shape=(self.input_dim, self.units * 5),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 5),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(shape, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 3,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(shape=(self.units * 5,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units: self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3: self.units * 4]
self.kernel_m = self.kernel[:, self.units * 4:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3: self.units * 4]
self.recurrent_kernel_m = self.recurrent_kernel[:, self.units * 4:]
if self.use_bias:
self.bias_i = self.bias[:self.units]
self.bias_f = self.bias[self.units: self.units * 2]
self.bias_c = self.bias[self.units * 2: self.units * 3]
self.bias_o = self.bias[self.units * 3: self.units * 4]
self.bias_m = self.bias[self.units * 4:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.bias_m = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation == 0:
input_shape = K.int_shape(inputs)
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = _time_distributed_dense(inputs, self.kernel_i, self.bias_i,
self.dropout, input_dim, self.units,
timesteps, training=training)
x_f = _time_distributed_dense(inputs, self.kernel_f, self.bias_f,
self.dropout, input_dim, self.units,
timesteps, training=training)
x_c = _time_distributed_dense(inputs, self.kernel_c, self.bias_c,
self.dropout, input_dim, self.units,
timesteps, training=training)
x_o = _time_distributed_dense(inputs, self.kernel_o, self.bias_o,
self.dropout, input_dim, self.units,
timesteps, training=training)
x_m = _time_distributed_dense(inputs, self.kernel_m, self.bias_m,
self.dropout, input_dim, self.units,
timesteps, training=training)
return K.concatenate([x_i, x_f, x_c, x_o, x_m], axis=2)
else:
return inputs
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [K.in_train_phase(dropped_inputs,
ones,
training=training) for _ in range(5)]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(5)])
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs():
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [K.in_train_phase(dropped_inputs,
ones,
training=training) for _ in range(5)]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(5)])
return constants
def step(self, inputs, states):
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
if self.implementation == 2:
z = K.dot(inputs * dp_mask[0], self.kernel)
z += z * K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel) # applies m instead of h_tm1 to z
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units: 2 * self.units]
z2 = z[:, 2 * self.units: 3 * self.units]
z3 = z[:, 3 * self.units: 4 * self.units]
z4 = z[:, 4 * self.units:] # just elementwise multiplication, no activation functions
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
else:
if self.implementation == 0:
x_i = inputs[:, :self.units]
x_f = inputs[:, self.units: 2 * self.units]
x_c = inputs[:, 2 * self.units: 3 * self.units]
x_o = inputs[:, 3 * self.units: 4 * self.units]
x_m = inputs[:, 4 * self.units:]
elif self.implementation == 1:
x_i = K.dot(inputs * dp_mask[0], self.kernel_i) + self.bias_i
x_f = K.dot(inputs * dp_mask[1], self.kernel_f) + self.bias_f
x_c = K.dot(inputs * dp_mask[2], self.kernel_c) + self.bias_c
x_o = K.dot(inputs * dp_mask[3], self.kernel_o) + self.bias_o
x_m = K.dot(inputs * dp_mask[4], self.kernel_m) + self.bias_m
else:
raise ValueError('Unknown `implementation` mode.')
m = x_m * K.dot(h_tm1 * rec_dp_mask[4], self.recurrent_kernel_m) # elementwise multiplication m
i = self.recurrent_activation(x_i + K.dot(m * rec_dp_mask[0], self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(m * rec_dp_mask[1], self.recurrent_kernel_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(m * rec_dp_mask[2], self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(m * rec_dp_mask[3], self.recurrent_kernel_o))
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(MultiplicativeLSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
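# Minimal usage sketch (not part of the original module). It assumes Keras 2.x with the
# legacy `Recurrent` API available; the layer size, sequence length and feature count
# below are illustrative placeholders only.
if __name__ == "__main__":
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential()
    # 64 multiplicative-LSTM units over sequences of 100 timesteps with 32 features each
    model.add(MultiplicativeLSTM(64, input_shape=(100, 32), dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(loss="binary_crossentropy", optimizer="adam")
    model.summary()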
|
sparkmagic/sparkmagic/kernels/__init__.py
|
sciserver/sparkmagic
| 1,141 |
68223
|
from sparkmagic.kernels.kernelmagics import *
|
test/test_gpuarray.py
|
hyperfraise/pycuda
| 1,264 |
68227
|
<filename>test/test_gpuarray.py
#! /usr/bin/env python
import numpy as np
import numpy.linalg as la
import sys
from pycuda.tools import mark_cuda_test
from pycuda.characterize import has_double_support
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
class TestGPUArray:
@mark_cuda_test
def test_pow_array(self):
a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
result = pow(a_gpu, a_gpu).get()
assert (np.abs(a ** a - result) < 1e-3).all()
result = (a_gpu ** a_gpu).get()
assert (np.abs(pow(a, a) - result) < 1e-3).all()
a_gpu **= a_gpu
a_gpu = a_gpu.get()
assert (np.abs(pow(a, a) - a_gpu) < 1e-3).all()
@mark_cuda_test
def test_pow_number(self):
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
result = pow(a_gpu, 2).get()
assert (np.abs(a ** 2 - result) < 1e-3).all()
a_gpu **= 2
a_gpu = a_gpu.get()
assert (np.abs(a ** 2 - a_gpu) < 1e-3).all()
@mark_cuda_test
def test_numpy_integer_shape(self):
gpuarray.empty(np.int32(17), np.float32)
gpuarray.empty((np.int32(17), np.int32(17)), np.float32)
@mark_cuda_test
def test_ndarray_shape(self):
gpuarray.empty(np.array(3), np.float32)
gpuarray.empty(np.array([3]), np.float32)
gpuarray.empty(np.array([2, 3]), np.float32)
@mark_cuda_test
def test_abs(self):
a = -gpuarray.arange(111, dtype=np.float32)
res = a.get()
for i in range(111):
assert res[i] <= 0
a = abs(a)
res = a.get()
for i in range(111):
assert abs(res[i]) >= 0
assert res[i] == i
@mark_cuda_test
def test_len(self):
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_cpu = gpuarray.to_gpu(a)
assert len(a_cpu) == 10
@mark_cuda_test
def test_multiply(self):
"""Test the muliplication of an array with a scalar. """
for sz in [10, 50000]:
for dtype, scalars in [(np.float32, [2]), (np.complex64, [2, 2j])]:
for scalar in scalars:
a = np.arange(sz).astype(dtype)
a_gpu = gpuarray.to_gpu(a)
a_doubled = (scalar * a_gpu).get()
assert (a * scalar == a_doubled).all()
@mark_cuda_test
def test_rmul_yields_right_type(self):
a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
two_a = 2 * a_gpu
assert isinstance(two_a, gpuarray.GPUArray)
two_a = np.float32(2) * a_gpu
assert isinstance(two_a, gpuarray.GPUArray)
@mark_cuda_test
def test_multiply_array(self):
"""Test the multiplication of two arrays."""
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b_gpu = gpuarray.to_gpu(a)
a_squared = (b_gpu * a_gpu).get()
assert (a * a == a_squared).all()
@mark_cuda_test
def test_addition_array(self):
"""Test the addition of two arrays."""
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
a_added = (a_gpu + a_gpu).get()
assert (a + a == a_added).all()
@mark_cuda_test
def test_iaddition_array(self):
"""Test the inplace addition of two arrays."""
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
a_gpu += a_gpu
a_added = a_gpu.get()
assert (a + a == a_added).all()
@mark_cuda_test
def test_addition_scalar(self):
"""Test the addition of an array and a scalar."""
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
a_added = (7 + a_gpu).get()
assert (7 + a == a_added).all()
@mark_cuda_test
def test_iaddition_scalar(self):
"""Test the inplace addition of an array and a scalar."""
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
a_gpu += 7
a_added = a_gpu.get()
assert (7 + a == a_added).all()
@mark_cuda_test
def test_substract_array(self):
"""Test the subtraction of two arrays."""
# test data
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
b = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b_gpu = gpuarray.to_gpu(b)
result = (a_gpu - b_gpu).get()
assert (a - b == result).all()
result = (b_gpu - a_gpu).get()
assert (b - a == result).all()
@mark_cuda_test
def test_substract_scalar(self):
"""Test the subtraction of an array and a scalar."""
# test data
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
# convert a to a gpu object
a_gpu = gpuarray.to_gpu(a)
result = (a_gpu - 7).get()
assert (a - 7 == result).all()
result = (7 - a_gpu).get()
assert (7 - a == result).all()
@mark_cuda_test
def test_divide_scalar(self):
"""Test the division of an array and a scalar."""
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
result = (a_gpu / 2).get()
assert (a / 2 == result).all()
result = (2 / a_gpu).get()
assert (2 / a == result).all()
@mark_cuda_test
def test_divide_array(self):
"""Test the division of an array and a scalar. """
# test data
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(np.float32)
b = np.array([10, 10, 10, 10, 10, 10, 10, 10, 10, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b_gpu = gpuarray.to_gpu(b)
a_divide = (a_gpu / b_gpu).get()
assert (np.abs(a / b - a_divide) < 1e-3).all()
a_divide = (b_gpu / a_gpu).get()
assert (np.abs(b / a - a_divide) < 1e-3).all()
@mark_cuda_test
def test_random(self):
from pycuda.curandom import rand as curand
if has_double_support():
dtypes = [np.float32, np.float64]
else:
dtypes = [np.float32]
for dtype in dtypes:
a = curand((10, 100), dtype=dtype).get()
assert (0 <= a).all()
assert (a < 1).all()
@mark_cuda_test
def test_curand_wrappers(self):
from pycuda.curandom import get_curand_version
if get_curand_version() is None:
from pytest import skip
skip("curand not installed")
generator_types = []
if get_curand_version() >= (3, 2, 0):
from pycuda.curandom import (
XORWOWRandomNumberGenerator,
Sobol32RandomNumberGenerator,
)
generator_types.extend(
[XORWOWRandomNumberGenerator, Sobol32RandomNumberGenerator]
)
if get_curand_version() >= (4, 0, 0):
from pycuda.curandom import (
ScrambledSobol32RandomNumberGenerator,
Sobol64RandomNumberGenerator,
ScrambledSobol64RandomNumberGenerator,
)
generator_types.extend(
[
ScrambledSobol32RandomNumberGenerator,
Sobol64RandomNumberGenerator,
ScrambledSobol64RandomNumberGenerator,
]
)
if get_curand_version() >= (4, 1, 0):
from pycuda.curandom import MRG32k3aRandomNumberGenerator
generator_types.extend([MRG32k3aRandomNumberGenerator])
if has_double_support():
dtypes = [np.float32, np.float64]
else:
dtypes = [np.float32]
for gen_type in generator_types:
gen = gen_type()
for dtype in dtypes:
gen.gen_normal(10000, dtype)
# test non-Box-Muller version, if available
gen.gen_normal(10001, dtype)
if get_curand_version() >= (4, 0, 0):
gen.gen_log_normal(10000, dtype, 10.0, 3.0)
# test non-Box-Muller version, if available
gen.gen_log_normal(10001, dtype, 10.0, 3.0)
x = gen.gen_uniform(10000, dtype)
x_host = x.get()
assert (-1 <= x_host).all()
assert (x_host <= 1).all()
gen.gen_uniform(10000, np.uint32)
if get_curand_version() >= (5, 0, 0):
gen.gen_poisson(10000, np.uint32, 13.0)
for dtype in dtypes + [np.uint32]:
a = gpuarray.empty(1000000, dtype=dtype)
v = 10
a.fill(v)
gen.fill_poisson(a)
tmp = (a.get() == (v - 1)).sum() / a.size # noqa: F841
# Commented out for CI on the off chance it'd fail
# # Check Poisson statistics (need 1e6 values)
# # Compare with scipy.stats.poisson.pmf(v - 1, v)
# assert np.isclose(0.12511, tmp, atol=0.002)
@mark_cuda_test
def test_array_gt(self):
"""Test whether array contents are > the other array's
contents"""
a = np.array([5, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b = np.array([2, 10]).astype(np.float32)
b_gpu = gpuarray.to_gpu(b)
result = (a_gpu > b_gpu).get()
assert result[0]
assert not result[1]
@mark_cuda_test
def test_array_lt(self):
"""Test whether array contents are < the other array's
contents"""
a = np.array([5, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b = np.array([2, 10]).astype(np.float32)
b_gpu = gpuarray.to_gpu(b)
result = (b_gpu < a_gpu).get()
assert result[0]
assert not result[1]
@mark_cuda_test
def test_array_le(self):
"""Test whether array contents are <= the other array's
contents"""
a = np.array([5, 10, 1]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b = np.array([2, 10, 2]).astype(np.float32)
b_gpu = gpuarray.to_gpu(b)
result = (b_gpu <= a_gpu).get()
assert result[0]
assert result[1]
assert not result[2]
@mark_cuda_test
def test_array_ge(self):
"""Test whether array contents are >= the other array's
contents"""
a = np.array([5, 10, 1]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b = np.array([2, 10, 2]).astype(np.float32)
b_gpu = gpuarray.to_gpu(b)
result = (a_gpu >= b_gpu).get()
assert result[0]
assert result[1]
assert not result[2]
@mark_cuda_test
def test_array_eq(self):
"""Test whether array contents are == the other array's
contents"""
a = np.array([5, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b = np.array([2, 10]).astype(np.float32)
b_gpu = gpuarray.to_gpu(b)
result = (a_gpu == b_gpu).get()
assert not result[0]
assert result[1]
@mark_cuda_test
def test_array_ne(self):
"""Test whether array contents are != the other array's
contents"""
a = np.array([5, 10]).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
b = np.array([2, 10]).astype(np.float32)
b_gpu = gpuarray.to_gpu(b)
result = (a_gpu != b_gpu).get()
assert result[0]
assert not result[1]
@mark_cuda_test
def test_nan_arithmetic(self):
def make_nan_contaminated_vector(size):
shape = (size,)
a = np.random.randn(*shape).astype(np.float32)
# for i in range(0, shape[0], 3):
# a[i] = float('nan')
from random import randrange
for i in range(size // 10):
a[randrange(0, size)] = float("nan")
return a
size = 1 << 20
a = make_nan_contaminated_vector(size)
a_gpu = gpuarray.to_gpu(a)
b = make_nan_contaminated_vector(size)
b_gpu = gpuarray.to_gpu(b)
ab = a * b
ab_gpu = (a_gpu * b_gpu).get()
assert (np.isnan(ab) == np.isnan(ab_gpu)).all()
@mark_cuda_test
def test_elwise_kernel(self):
from pycuda.curandom import rand as curand
a_gpu = curand((50,))
b_gpu = curand((50,))
from pycuda.elementwise import ElementwiseKernel
lin_comb = ElementwiseKernel(
"float a, float *x, float b, float *y, float *z",
"z[i] = a*x[i] + b*y[i]",
"linear_combination",
)
c_gpu = gpuarray.empty_like(a_gpu)
lin_comb(5, a_gpu, 6, b_gpu, c_gpu)
assert la.norm((c_gpu - (5 * a_gpu + 6 * b_gpu)).get()) < 1e-5
@mark_cuda_test
def test_ranged_elwise_kernel(self):
from pycuda.elementwise import ElementwiseKernel
set_to_seven = ElementwiseKernel("float *z", "z[i] = 7", "set_to_seven")
for i, slc in enumerate(
[
slice(5, 20000),
slice(5, 20000, 17),
slice(3000, 5, -1),
slice(1000, -1),
]
):
a_gpu = gpuarray.zeros((50000,), dtype=np.float32)
a_cpu = np.zeros(a_gpu.shape, a_gpu.dtype)
a_cpu[slc] = 7
set_to_seven(a_gpu, slice=slc)
drv.Context.synchronize()
assert la.norm(a_cpu - a_gpu.get()) == 0, i
@mark_cuda_test
def test_take(self):
idx = gpuarray.arange(0, 10000, 2, dtype=np.uint32)
for dtype in [np.float32, np.complex64]:
a = gpuarray.arange(0, 600000, dtype=np.uint32).astype(dtype)
a_host = a.get()
result = gpuarray.take(a, idx)
assert (a_host[idx.get()] == result.get()).all()
@mark_cuda_test
def test_arange(self):
a = gpuarray.arange(12, dtype=np.float32)
assert (np.arange(12, dtype=np.float32) == a.get()).all()
@mark_cuda_test
def test_stack(self):
orders = ["F", "C"]
input_dims_lst = [0, 1, 2]
for order in orders:
for input_dims in input_dims_lst:
shape = (2, 2, 2)[:input_dims]
axis = -1 if order == "F" else 0
from numpy.random import default_rng
rng = default_rng()
x_in = rng.random(size=shape)
y_in = rng.random(size=shape)
x_in = x_in if order == "C" else np.asfortranarray(x_in)
y_in = y_in if order == "C" else np.asfortranarray(y_in)
x_gpu = gpuarray.to_gpu(x_in)
y_gpu = gpuarray.to_gpu(y_in)
numpy_stack = np.stack((x_in, y_in), axis=axis)
gpuarray_stack = gpuarray.stack((x_gpu, y_gpu), axis=axis)
np.testing.assert_allclose(gpuarray_stack.get(), numpy_stack)
assert gpuarray_stack.shape == numpy_stack.shape
@mark_cuda_test
def test_concatenate(self):
from pycuda.curandom import rand as curand
a_dev = curand((5, 15, 20), dtype=np.float32)
b_dev = curand((4, 15, 20), dtype=np.float32)
c_dev = curand((3, 15, 20), dtype=np.float32)
a = a_dev.get()
b = b_dev.get()
c = c_dev.get()
cat_dev = gpuarray.concatenate((a_dev, b_dev, c_dev))
cat = np.concatenate((a, b, c))
np.testing.assert_allclose(cat, cat_dev.get())
assert cat.shape == cat_dev.shape
@mark_cuda_test
def test_reverse(self):
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
a_cpu = gpuarray.to_gpu(a)
a_cpu = a_cpu.reverse()
b = a_cpu.get()
for i in range(0, 10):
assert a[len(a) - 1 - i] == b[i]
@mark_cuda_test
def test_sum(self):
from pycuda.curandom import rand as curand
a_gpu = curand((200000,))
a = a_gpu.get()
sum_a = np.sum(a)
sum_a_gpu = gpuarray.sum(a_gpu).get()
assert abs(sum_a_gpu - sum_a) / abs(sum_a) < 1e-4
@mark_cuda_test
def test_minmax(self):
from pycuda.curandom import rand as curand
if has_double_support():
dtypes = [np.float64, np.float32, np.int32]
else:
dtypes = [np.float32, np.int32]
for what in ["min", "max"]:
for dtype in dtypes:
a_gpu = curand((200000,), dtype)
a = a_gpu.get()
op_a = getattr(np, what)(a)
op_a_gpu = getattr(gpuarray, what)(a_gpu).get()
assert op_a_gpu == op_a, (op_a_gpu, op_a, dtype, what)
@mark_cuda_test
def test_subset_minmax(self):
from pycuda.curandom import rand as curand
l_a = 200000
gran = 5
l_m = l_a - l_a // gran + 1
if has_double_support():
dtypes = [np.float64, np.float32, np.int32]
else:
dtypes = [np.float32, np.int32]
for dtype in dtypes:
a_gpu = curand((l_a,), dtype)
a = a_gpu.get()
meaningful_indices_gpu = gpuarray.zeros(l_m, dtype=np.int32)
meaningful_indices = meaningful_indices_gpu.get()
j = 0
for i in range(len(meaningful_indices)):
meaningful_indices[i] = j
j = j + 1
if j % gran == 0:
j = j + 1
meaningful_indices_gpu = gpuarray.to_gpu(meaningful_indices)
b = a[meaningful_indices]
min_a = np.min(b)
min_a_gpu = gpuarray.subset_min(meaningful_indices_gpu, a_gpu).get()
assert min_a_gpu == min_a
@mark_cuda_test
def test_dot(self):
from pycuda.curandom import rand as curand
for sz in [
2,
3,
4,
5,
6,
7,
31,
32,
33,
127,
128,
129,
255,
256,
257,
16384 - 993,
20000,
]:
a_gpu = curand((sz,))
a = a_gpu.get()
b_gpu = curand((sz,))
b = b_gpu.get()
dot_ab = np.dot(a, b)
dot_ab_gpu = gpuarray.dot(a_gpu, b_gpu).get()
assert abs(dot_ab_gpu - dot_ab) / abs(dot_ab) < 1e-4
@mark_cuda_test
def test_slice(self):
from pycuda.curandom import rand as curand
sz = 20000
a_gpu = curand((sz,))
a = a_gpu.get()
from random import randrange
for i in range(200):
start = randrange(sz)
end = randrange(start, sz)
a_gpu_slice = a_gpu[start:end]
a_slice = a[start:end]
assert la.norm(a_gpu_slice.get() - a_slice) == 0
@mark_cuda_test
def test_2d_slice_c(self):
from pycuda.curandom import rand as curand
n = 1000
m = 300
a_gpu = curand((n, m))
a = a_gpu.get()
from random import randrange
for i in range(200):
start = randrange(n)
end = randrange(start, n)
a_gpu_slice = a_gpu[start:end]
a_slice = a[start:end]
assert la.norm(a_gpu_slice.get() - a_slice) == 0
@mark_cuda_test
def test_2d_slice_f(self):
from pycuda.curandom import rand as curand
import pycuda.gpuarray as gpuarray
n = 1000
m = 300
a_gpu = curand((n, m))
a_gpu_f = gpuarray.GPUArray(
(m, n), np.float32, gpudata=a_gpu.gpudata, order="F"
)
a = a_gpu_f.get()
from random import randrange
for i in range(200):
start = randrange(n)
end = randrange(start, n)
a_gpu_slice = a_gpu_f[:, start:end]
a_slice = a[:, start:end]
assert la.norm(a_gpu_slice.get() - a_slice) == 0
@mark_cuda_test
def test_if_positive(self):
from pycuda.curandom import rand as curand
sz = 20
a_gpu = curand((sz,))
b_gpu = curand((sz,))
a = a_gpu.get()
b = b_gpu.get()
import pycuda.gpuarray as gpuarray
max_a_b_gpu = gpuarray.maximum(a_gpu, b_gpu)
min_a_b_gpu = gpuarray.minimum(a_gpu, b_gpu)
print(max_a_b_gpu)
print(np.maximum(a, b))
assert la.norm(max_a_b_gpu.get() - np.maximum(a, b)) == 0
assert la.norm(min_a_b_gpu.get() - np.minimum(a, b)) == 0
@mark_cuda_test
def test_take_put(self):
for n in [5, 17, 333]:
one_field_size = 8
buf_gpu = gpuarray.zeros(n * one_field_size, dtype=np.float32)
dest_indices = gpuarray.to_gpu(
np.array([0, 1, 2, 3, 32, 33, 34, 35], dtype=np.uint32)
)
read_map = gpuarray.to_gpu(
np.array([7, 6, 5, 4, 3, 2, 1, 0], dtype=np.uint32)
)
gpuarray.multi_take_put(
arrays=[buf_gpu for i in range(n)],
dest_indices=dest_indices,
src_indices=read_map,
src_offsets=[i * one_field_size for i in range(n)],
dest_shape=(96,),
)
drv.Context.synchronize()
@mark_cuda_test
def test_astype(self):
from pycuda.curandom import rand as curand
if not has_double_support():
return
a_gpu = curand((2000,), dtype=np.float32)
a = a_gpu.get().astype(np.float64)
a2 = a_gpu.astype(np.float64).get()
assert a2.dtype == np.float64
assert la.norm(a - a2) == 0, (a, a2)
a_gpu = curand((2000,), dtype=np.float64)
a = a_gpu.get().astype(np.float32)
a2 = a_gpu.astype(np.float32).get()
assert a2.dtype == np.float32
assert la.norm(a - a2) / la.norm(a) < 1e-7
@mark_cuda_test
def test_complex_bits(self):
from pycuda.curandom import rand as curand
if has_double_support():
dtypes = [np.complex64, np.complex128]
else:
dtypes = [np.complex64]
n = 20
for tp in dtypes:
dtype = np.dtype(tp)
from pytools import match_precision
real_dtype = match_precision(np.dtype(np.float64), dtype)
z = curand((n,), real_dtype).astype(dtype) + 1j * curand(
(n,), real_dtype
).astype(dtype)
assert la.norm(z.get().real - z.real.get()) == 0
assert la.norm(z.get().imag - z.imag.get()) == 0
assert la.norm(z.get().conj() - z.conj().get()) == 0
# verify conj with out parameter
z_out = z.astype(np.complex64)
assert z_out is z.conj(out=z_out)
assert la.norm(z.get().conj() - z_out.get()) < 1e-7
# verify contiguity is preserved
for order in ["C", "F"]:
# test both zero and non-zero value code paths
z_real = gpuarray.zeros(z.shape, dtype=real_dtype, order=order)
z2 = z.reshape(z.shape, order=order)
for zdata in [z_real, z2]:
if order == "C":
assert zdata.flags.c_contiguous
assert zdata.real.flags.c_contiguous
assert zdata.imag.flags.c_contiguous
assert zdata.conj().flags.c_contiguous
elif order == "F":
assert zdata.flags.f_contiguous
assert zdata.real.flags.f_contiguous
assert zdata.imag.flags.f_contiguous
assert zdata.conj().flags.f_contiguous
@mark_cuda_test
def test_pass_slice_to_kernel(self):
mod = SourceModule(
"""
__global__ void twice(float *a)
{
const int i = threadIdx.x + blockIdx.x * blockDim.x;
a[i] *= 2;
}
"""
)
multiply_them = mod.get_function("twice")
a = np.ones(256 ** 2, np.float32)
a_gpu = gpuarray.to_gpu(a)
multiply_them(a_gpu[256:-256], block=(256, 1, 1), grid=(254, 1))
a = a_gpu.get()
assert (a[255:257] == np.array([1, 2], np.float32)).all()
assert (a[255 * 256 - 1: 255 * 256 + 1] == np.array([2, 1], np.float32)).all()
@mark_cuda_test
def test_scan(self):
from pycuda.scan import ExclusiveScanKernel, InclusiveScanKernel
for cls in [ExclusiveScanKernel, InclusiveScanKernel]:
scan_kern = cls(np.int32, "a+b", "0")
for n in [
10,
2 ** 10 - 5,
2 ** 10,
2 ** 20 - 2 ** 18,
2 ** 20 - 2 ** 18 + 5,
2 ** 10 + 5,
2 ** 20 + 5,
2 ** 20,
2 ** 24,
]:
host_data = np.random.randint(0, 10, n).astype(np.int32)
gpu_data = gpuarray.to_gpu(host_data)
scan_kern(gpu_data)
desired_result = np.cumsum(host_data, axis=0)
if cls is ExclusiveScanKernel:
desired_result -= host_data
assert (gpu_data.get() == desired_result).all()
@mark_cuda_test
def test_stride_preservation(self):
A = np.random.rand(3, 3)
AT = A.T
print((AT.flags.f_contiguous, AT.flags.c_contiguous))
AT_GPU = gpuarray.to_gpu(AT)
print((AT_GPU.flags.f_contiguous, AT_GPU.flags.c_contiguous))
assert np.allclose(AT_GPU.get(), AT)
@mark_cuda_test
def test_vector_fill(self):
a_gpu = gpuarray.GPUArray(100, dtype=gpuarray.vec.float3)
a_gpu.fill(gpuarray.vec.make_float3(0.0, 0.0, 0.0))
a = a_gpu.get()
assert a.dtype == gpuarray.vec.float3
@mark_cuda_test
def test_create_complex_zeros(self):
gpuarray.zeros(3, np.complex64)
@mark_cuda_test
def test_reshape(self):
a = np.arange(128).reshape(8, 16).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
# different ways to specify the shape
a_gpu.reshape(4, 32)
a_gpu.reshape((4, 32))
a_gpu.reshape([4, 32])
# using -1 as unknown dimension
assert a_gpu.reshape(-1, 32).shape == (4, 32)
assert a_gpu.reshape((32, -1)).shape == (32, 4)
assert a_gpu.reshape((8, -1, 4)).shape == (8, 4, 4)
throws_exception = False
try:
a_gpu.reshape(-1, -1, 4)
except ValueError:
throws_exception = True
assert throws_exception
# with order specified
a_gpu = a_gpu.reshape((4, 32), order="C")
assert a_gpu.flags.c_contiguous
a_gpu = a_gpu.reshape(4, 32, order="F")
assert a_gpu.flags.f_contiguous
a_gpu = a_gpu.reshape((4, 32), order="F")
assert a_gpu.flags.f_contiguous
# default is C-contiguous
a_gpu = a_gpu.reshape((4, 32))
assert a_gpu.flags.c_contiguous
@mark_cuda_test
def test_view(self):
a = np.arange(128).reshape(8, 16).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
# same dtype
view = a_gpu.view()
assert view.shape == a_gpu.shape and view.dtype == a_gpu.dtype
# larger dtype
view = a_gpu.view(np.complex64)
assert view.shape == (8, 8) and view.dtype == np.complex64
# smaller dtype
view = a_gpu.view(np.int16)
assert view.shape == (8, 32) and view.dtype == np.int16
@mark_cuda_test
def test_squeeze(self):
shape = (40, 2, 5, 100)
a_cpu = np.random.random(size=shape)
a_gpu = gpuarray.to_gpu(a_cpu)
# Slice with length 1 on dimensions 0 and 1
a_gpu_slice = a_gpu[0:1, 1:2, :, :]
assert a_gpu_slice.shape == (1, 1, shape[2], shape[3])
assert a_gpu_slice.flags.c_contiguous
# Squeeze it and obtain contiguity
a_gpu_squeezed_slice = a_gpu[0:1, 1:2, :, :].squeeze()
assert a_gpu_squeezed_slice.shape == (shape[2], shape[3])
assert a_gpu_squeezed_slice.flags.c_contiguous
# Check that we get the original values out
assert np.all(a_gpu_slice.get().ravel() == a_gpu_squeezed_slice.get().ravel())
# Slice with length 1 on dimension 2
a_gpu_slice = a_gpu[:, :, 2:3, :]
assert a_gpu_slice.shape == (shape[0], shape[1], 1, shape[3])
assert not a_gpu_slice.flags.c_contiguous
# Squeeze it, but no contiguity here
a_gpu_squeezed_slice = a_gpu[:, :, 2:3, :].squeeze()
assert a_gpu_squeezed_slice.shape == (shape[0], shape[1], shape[3])
assert not a_gpu_squeezed_slice.flags.c_contiguous
# Check that we get the original values out
assert np.all(a_gpu_slice.get().ravel() == a_gpu_squeezed_slice.get().ravel())
@mark_cuda_test
def test_struct_reduce(self):
preamble = """
struct minmax_collector
{
float cur_min;
float cur_max;
__device__
minmax_collector()
{ }
__device__
minmax_collector(float cmin, float cmax)
: cur_min(cmin), cur_max(cmax)
{ }
__device__ minmax_collector(minmax_collector const &src)
: cur_min(src.cur_min), cur_max(src.cur_max)
{ }
__device__ minmax_collector(minmax_collector const volatile &src)
: cur_min(src.cur_min), cur_max(src.cur_max)
{ }
__device__ minmax_collector volatile &operator=(
minmax_collector const &src) volatile
{
cur_min = src.cur_min;
cur_max = src.cur_max;
return *this;
}
};
__device__
minmax_collector agg_mmc(minmax_collector a, minmax_collector b)
{
return minmax_collector(
fminf(a.cur_min, b.cur_min),
fmaxf(a.cur_max, b.cur_max));
}
"""
mmc_dtype = np.dtype([("cur_min", np.float32), ("cur_max", np.float32)])
from pycuda.curandom import rand as curand
a_gpu = curand((20000,), dtype=np.float32)
a = a_gpu.get()
from pycuda.tools import register_dtype
register_dtype(mmc_dtype, "minmax_collector")
from pycuda.reduction import ReductionKernel
red = ReductionKernel(
mmc_dtype,
neutral="minmax_collector(10000, -10000)",
# FIXME: needs infinity literal in real use, ok here
reduce_expr="agg_mmc(a, b)",
map_expr="minmax_collector(x[i], x[i])",
arguments="float *x",
preamble=preamble,
)
minmax = red(a_gpu).get()
# print minmax["cur_min"], minmax["cur_max"]
# print np.min(a), np.max(a)
assert minmax["cur_min"] == np.min(a)
assert minmax["cur_max"] == np.max(a)
@mark_cuda_test
def test_reduce_out(self):
from pycuda.curandom import rand as curand
a_gpu = curand((10, 200), dtype=np.float32)
a = a_gpu.get()
from pycuda.reduction import ReductionKernel
red = ReductionKernel(
np.float32, neutral=0, reduce_expr="max(a,b)", arguments="float *in"
)
max_gpu = gpuarray.empty(10, dtype=np.float32)
for i in range(10):
red(a_gpu[i], out=max_gpu[i])
assert np.alltrue(a.max(axis=1) == max_gpu.get())
@mark_cuda_test
def test_sum_allocator(self):
# FIXME
from pytest import skip
skip("https://github.com/inducer/pycuda/issues/163")
# crashes with terminate called after throwing an instance of 'pycuda::error'
# what(): explicit_context_dependent failed: invalid device context - no currently active context?
import pycuda.tools
pool = pycuda.tools.DeviceMemoryPool()
rng = np.random.randint(low=512, high=1024)
a = gpuarray.arange(rng, dtype=np.int32)
b = gpuarray.sum(a)
c = gpuarray.sum(a, allocator=pool.allocate)
# Test that we get the correct results
assert b.get() == rng * (rng - 1) // 2
assert c.get() == rng * (rng - 1) // 2
# Test that result arrays were allocated with the appropriate allocator
assert b.allocator == a.allocator
assert c.allocator == pool.allocate
@mark_cuda_test
def test_dot_allocator(self):
# FIXME
from pytest import skip
skip("https://github.com/inducer/pycuda/issues/163")
import pycuda.tools
pool = pycuda.tools.DeviceMemoryPool()
a_cpu = np.random.randint(low=512, high=1024, size=1024)
b_cpu = np.random.randint(low=512, high=1024, size=1024)
# Compute the result on the CPU
dot_cpu_1 = np.dot(a_cpu, b_cpu)
a_gpu = gpuarray.to_gpu(a_cpu)
b_gpu = gpuarray.to_gpu(b_cpu)
# Compute the result on the GPU using different allocators
dot_gpu_1 = gpuarray.dot(a_gpu, b_gpu)
dot_gpu_2 = gpuarray.dot(a_gpu, b_gpu, allocator=pool.allocate)
# Test that we get the correct results
assert dot_cpu_1 == dot_gpu_1.get()
assert dot_cpu_1 == dot_gpu_2.get()
# Test that result arrays were allocated with the appropriate allocator
assert dot_gpu_1.allocator == a_gpu.allocator
assert dot_gpu_2.allocator == pool.allocate
@mark_cuda_test
def test_view_and_strides(self):
from pycuda.curandom import rand as curand
X = curand((5, 10), dtype=np.float32)
Y = X[:3, :5]
y = Y.view()
assert y.shape == Y.shape
assert y.strides == Y.strides
assert np.array_equal(y.get(), X.get()[:3, :5])
@mark_cuda_test
def test_scalar_comparisons(self):
a = np.array([1.0, 0.25, 0.1, -0.1, 0.0])
a_gpu = gpuarray.to_gpu(a)
x_gpu = a_gpu > 0.25
x = (a > 0.25).astype(a.dtype)
assert (x == x_gpu.get()).all()
x_gpu = a_gpu <= 0.25
x = (a <= 0.25).astype(a.dtype)
assert (x == x_gpu.get()).all()
x_gpu = a_gpu == 0.25
x = (a == 0.25).astype(a.dtype)
assert (x == x_gpu.get()).all()
x_gpu = a_gpu == 1 # using an integer scalar
x = (a == 1).astype(a.dtype)
assert (x == x_gpu.get()).all()
@mark_cuda_test
def test_minimum_maximum_scalar(self):
from pycuda.curandom import rand as curand
sz = 20
a_gpu = curand((sz,))
a = a_gpu.get()
import pycuda.gpuarray as gpuarray
max_a0_gpu = gpuarray.maximum(a_gpu, 0)
min_a0_gpu = gpuarray.minimum(0, a_gpu)
assert la.norm(max_a0_gpu.get() - np.maximum(a, 0)) == 0
assert la.norm(min_a0_gpu.get() - np.minimum(0, a)) == 0
@mark_cuda_test
def test_transpose(self):
from pycuda.curandom import rand as curand
a_gpu = curand((10, 20, 30))
a = a_gpu.get()
# assert np.allclose(a_gpu.transpose((1,2,0)).get(), a.transpose((1,2,0))) # not contiguous
assert np.allclose(a_gpu.T.get(), a.T)
@mark_cuda_test
def test_newaxis(self):
from pycuda.curandom import rand as curand
a_gpu = curand((10, 20, 30))
a = a_gpu.get()
b_gpu = a_gpu[:, np.newaxis]
b = a[:, np.newaxis]
assert b_gpu.shape == b.shape
assert b_gpu.strides == b.strides
@mark_cuda_test
def test_copy(self):
from pycuda.curandom import rand as curand
a_gpu = curand((3, 3))
for start, stop, step in [(0, 3, 1), (1, 2, 1), (0, 3, 2), (0, 3, 3)]:
assert np.allclose(
a_gpu[start:stop:step].get(), a_gpu.get()[start:stop:step]
)
a_gpu = curand((3, 1))
for start, stop, step in [(0, 3, 1), (1, 2, 1), (0, 3, 2), (0, 3, 3)]:
assert np.allclose(
a_gpu[start:stop:step].get(), a_gpu.get()[start:stop:step]
)
a_gpu = curand((3, 3, 3))
for start, stop, step in [(0, 3, 1), (1, 2, 1), (0, 3, 2), (0, 3, 3)]:
assert np.allclose(
a_gpu[start:stop:step, start:stop:step].get(),
a_gpu.get()[start:stop:step, start:stop:step],
)
a_gpu = curand((3, 3, 3)).transpose((1, 2, 0))
for start, stop, step in [(0, 3, 1), (1, 2, 1), (0, 3, 2), (0, 3, 3)]:
assert np.allclose(
a_gpu[start:stop:step, :, start:stop:step].get(),
a_gpu.get()[start:stop:step, :, start:stop:step],
)
# 4-d should work as long as only 2 axes are discontiguous
a_gpu = curand((3, 3, 3, 3))
for start, stop, step in [(0, 3, 1), (1, 2, 1), (0, 3, 3)]:
assert np.allclose(
a_gpu[start:stop:step, :, start:stop:step].get(),
a_gpu.get()[start:stop:step, :, start:stop:step],
)
@mark_cuda_test
def test_get_set(self):
import pycuda.gpuarray as gpuarray
a = np.random.normal(0.0, 1.0, (4, 4))
a_gpu = gpuarray.to_gpu(a)
assert np.allclose(a_gpu.get(), a)
assert np.allclose(a_gpu[1:3, 1:3].get(), a[1:3, 1:3])
a = np.random.normal(0.0, 1.0, (4, 4, 4)).transpose((1, 2, 0))
a_gpu = gpuarray.to_gpu(a)
assert np.allclose(a_gpu.get(), a)
assert np.allclose(a_gpu[1:3, 1:3, 1:3].get(), a[1:3, 1:3, 1:3])
@mark_cuda_test
def test_zeros_like_etc(self):
shape = (16, 16)
a = np.random.randn(*shape).astype(np.float32)
z = gpuarray.to_gpu(a)
zf = gpuarray.to_gpu(np.asfortranarray(a))
a_noncontig = np.arange(3 * 4 * 5).reshape(3, 4, 5).swapaxes(1, 2)
z_noncontig = gpuarray.to_gpu(a_noncontig)
for func in [gpuarray.empty_like, gpuarray.zeros_like, gpuarray.ones_like]:
for arr in [z, zf, z_noncontig]:
contig = arr.flags.c_contiguous or arr.flags.f_contiguous
if not contig:
continue
# Output matches order of input.
# Non-contiguous becomes C-contiguous
new_z = func(arr, order="A")
if contig:
assert new_z.flags.c_contiguous == arr.flags.c_contiguous
assert new_z.flags.f_contiguous == arr.flags.f_contiguous
else:
assert new_z.flags.c_contiguous is True
assert new_z.flags.f_contiguous is False
assert new_z.dtype == arr.dtype
assert new_z.shape == arr.shape
# Force C-ordered output
new_z = func(arr, order="C")
assert new_z.flags.c_contiguous is True
assert new_z.flags.f_contiguous is False
assert new_z.dtype == arr.dtype
assert new_z.shape == arr.shape
# Force Fortran-ordered output
new_z = func(arr, order="F")
assert new_z.flags.c_contiguous is False
assert new_z.flags.f_contiguous is True
assert new_z.dtype == arr.dtype
assert new_z.shape == arr.shape
# Change the dtype, but otherwise match order & strides
# order = "K" so non-contiguous array remains non-contiguous
new_z = func(arr, dtype=np.complex64, order="K")
assert new_z.flags.c_contiguous == arr.flags.c_contiguous
assert new_z.flags.f_contiguous == arr.flags.f_contiguous
assert new_z.dtype == np.complex64
assert new_z.shape == arr.shape
if __name__ == "__main__":
# make sure that import failures get reported, instead of skipping the tests.
import pycuda.autoinit # noqa
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
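# Minimal sketch of the workflow these tests exercise (comment only, not part of the
# original suite; assumes a CUDA-capable device and an initialized context):
#   import pycuda.autoinit  # noqa
#   import pycuda.gpuarray as gpuarray
#   import numpy as np
#   host = np.arange(10, dtype=np.float32)
#   dev = gpuarray.to_gpu(host)          # host -> device copy
#   result = (2 * dev + 1).get()         # elementwise arithmetic on the GPU, then copy back
#   assert np.allclose(result, 2 * host + 1)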
|
src/organisations/migrations/0022_organisation_persist_trait_data.py
|
augustuswm/flagsmith-api
| 1,259 |
68244
|
# Generated by Django 2.2.15 on 2020-09-09 15:51
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organisations', '0021_auto_20200619_1555'),
]
operations = [
migrations.AddField(
model_name='organisation',
name='persist_trait_data',
field=models.BooleanField(default=settings.DEFAULT_ORG_STORE_TRAITS_VALUE, help_text="Disable this if you don't want Flagsmith to store trait data for this org's identities."),
),
]
|
permissionslocker/permissionslocker.py
|
Onii-Chan-Discord/phen-cogs
| 105 |
68255
|
<reponame>Onii-Chan-Discord/phen-cogs
"""
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# original invoke hook logic from https://github.com/mikeshardmind/SinbadCogs/tree/v3/noadmin
from typing import Literal
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.config import Config
from redbot.core.utils.chat_formatting import box
RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"]
class PermissionsLocker(commands.Cog):
"""
Force permissions for the bot.
"""
__version__ = "1.3.0"
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
n = "\n" if "\n\n" not in pre_processed else ""
return f"{pre_processed}{n}\nCog Version: {self.__version__}"
def __init__(self, bot: Red) -> None:
self.bot = bot
self.config = Config.get_conf(
self,
identifier=4235969345783789456,
force_registration=True,
)
default_global = {"permissions": 387136, "whitelisted": []}
self.config.register_global(**default_global)
self.perms = None
self._whitelist = set()
async def red_delete_data_for_user(self, *, requester: RequestType, user_id: int) -> None:
return
async def initialize(self):
data = await self.config.all()
self.perms = discord.Permissions(data["permissions"])
self._whitelist.update(data["whitelisted"])
self.bot.before_invoke(self.before_invoke_hook)
def cog_unload(self):
self.bot.remove_before_invoke_hook(self.before_invoke_hook)
async def before_invoke_hook(self, ctx: commands.Context):
if not ctx.guild or isinstance(ctx.command, commands.commands._AlwaysAvailableCommand):
return
guild = ctx.guild
if guild.me == guild.owner:
return
if await ctx.bot.is_owner(ctx.author):
return
if guild.id in self._whitelist:
return
me = guild.me
required_perms = self.perms
myPerms = ctx.channel.permissions_for(me)
if not myPerms.is_superset(required_perms):
missingPerms = await self.humanize_perms(
discord.Permissions((myPerms.value ^ required_perms.value) & required_perms.value),
True,
)
await ctx.send(
"Hello there!\nI'm missing the following permissions. Without these permissions, I cannot function properly. "
"Please check your guild and channel permissions to ensure I have these permissions:"
f"\n{box(missingPerms, 'diff')}",
delete_after=60,
)
raise commands.CheckFailure()
@commands.is_owner()
@commands.group()
async def permlock(self, ctx):
"""Permissions locker group command."""
@permlock.command()
async def perms(self, ctx, permissions: int):
"""Set the permissions value that is required for the bot to work."""
permissions = discord.Permissions(permissions)
await self.config.permissions.set(permissions.value)
await ctx.send(
f"I will now require these permissions on commands:\n{box(await self.humanize_perms(permissions, True), 'diff')}"
)
self.perms = permissions
@permlock.command(aliases=["wl"])
async def whitelist(self, ctx, guild: int):
"""Whitelist a guild from permission checks."""
async with self.config.whitelisted() as w:
w.append(guild)
self._whitelist.add(guild)
await ctx.tick()
@permlock.command(aliases=["unwl"])
async def unwhitelist(self, ctx, guild: int):
"""Remove a guild from the whitelist."""
async with self.config.whitelisted() as w:
try:
index = w.index(guild)
except ValueError:
return await ctx.send("This is not a guild in the whitelist")
w.pop(index)
self._whitelist.remove(guild)
await ctx.tick()
@commands.bot_has_permissions(embed_links=True)
@permlock.command()
async def settings(self, ctx: commands.Context):
"""View PermissionsLocker settings."""
data = await self.config.all()
e = discord.Embed(color=await ctx.embed_color(), title="PermissionsLocker")
e.add_field(
name="Required Permissions",
value=str(data["permissions"])
+ box(
await self.humanize_perms(discord.Permissions(data["permissions"]), True),
"diff",
),
inline=False,
)
if data["whitelisted"]:
whitelisted = [str(item) for item in data["whitelisted"]]
e.add_field(name="Whitelisted", value=", ".join(whitelisted), inline=False)
await ctx.send(embed=e)
async def humanize_perms(self, permissions: discord.Permissions, check: bool):
perms = dict(permissions)
perms_list = [f"+ {key}" for key, value in perms.items() if value == check]
return "\n".join(perms_list)
|
orttraining/orttraining/eager/opgen/opgen/ast.py
|
lchang20/onnxruntime
| 6,036 |
68267
|
<reponame>lchang20/onnxruntime
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import io
from typing import TextIO, List, Union
from opgen.lexer import Token
class Node(object):
def __init__(self):
self.tokens = []
def write(self, writer: TextIO):
raise NotImplementedError(self.write)
def __str__(self):
writer = io.StringIO()
self.write(writer)
return writer.getvalue()
#region Syntax List
class SyntaxListMember(Node):
def __init__(self, member: Node, trailing_separator: Token = None):
super().__init__()
self.member = member
self.trailing_separator = trailing_separator
def write(self, writer: TextIO):
self.member.write(writer)
if self.trailing_separator:
writer.write(self.trailing_separator.value)
writer.write(" ")
class SyntaxList(Node):
open_token: Token
members: List[SyntaxListMember]
close_token: Token
def __init__(self):
super().__init__()
self.open_token = None
self.members = []
self.close_token = None
def __iter__(self):
return self.members.__iter__()
def __getitem__(self, key):
return self.members.__getitem__(key)
def __len__(self):
return len(self.members)
def append(self, member: Node, trailing_separator: Token):
self.members.append(SyntaxListMember(member, trailing_separator))
def write(self, writer: TextIO):
if self.open_token:
writer.write(self.open_token.value)
for member in self.members:
member.write(writer)
if self.close_token:
writer.write(self.close_token.value)
#endregion
#region Expressions
class Expression(Node): pass
class LiteralExpression(Expression):
def __init__(self, token: Token):
super().__init__()
self.token = token
def write(self, writer: TextIO):
writer.write(self.token.value)
class ArrayExpression(Expression):
def __init__(self, elements: SyntaxList):
self.elements = elements
#endregion
#region Types
class Type(Node):
def _desugar_self(self) -> "Type":
return self
def desugar(self) -> "Type":
desugared = self
while True:
_desugared = desugared._desugar_self()
if _desugared == desugared:
return desugared
desugared = _desugared
class ExpressionType(Type):
def __init__(self, expression: Expression):
super().__init__()
self.expression = expression
def write(self, writer: TextIO):
self.expression.write(writer)
class ConcreteType(Type):
def __init__(self, identifier_tokens: Union[Token, List[Token]]):
super().__init__()
if isinstance(identifier_tokens, Token):
self.identifier_tokens = [identifier_tokens]
else:
self.identifier_tokens = identifier_tokens
def write(self, writer: TextIO):
for identifier_token in self.identifier_tokens:
writer.write(identifier_token.value)
class ConstType(Type):
def __init__(self, const_token: Token, inner_type: Type):
super().__init__()
self.const_token = const_token
self.inner_type = inner_type
def write(self, writer: TextIO):
writer.write(self.const_token.value)
writer.write(" ")
self.inner_type.write(writer)
def _desugar_self(self) -> Type:
return self.inner_type
class ReferenceType(Type):
def __init__(self, inner_type: Type, reference_token: Token):
super().__init__()
self.inner_type = inner_type
self.reference_token = reference_token
def write(self, writer: TextIO):
self.inner_type.write(writer)
writer.write(self.reference_token.value)
def _desugar_self(self) -> Type:
return self.inner_type
class ModifiedType(Type):
def __init__(self, base_type: Type):
super().__init__()
self.base_type = base_type
def _desugar_self(self) -> Type:
return self.base_type
class OptionalType(ModifiedType):
def __init__(self, base_type: Type, token: Token):
super().__init__(base_type)
self.token = token
def write(self, writer: TextIO):
self.base_type.write(writer)
writer.write(self.token.value)
class ArrayType(ModifiedType):
def __init__(
self,
base_type: Type,
open_token: Token,
length_token: Token,
close_token: Token):
super().__init__(base_type)
self.open_token = open_token
self.length_token = length_token
self.close_token = close_token
def write(self, writer: TextIO):
self.base_type.write(writer)
writer.write(self.open_token.value)
if self.length_token:
writer.write(self.length_token.value)
writer.write(self.close_token.value)
class TemplateType(Type):
def __init__(
self,
identifier_tokens: Union[Token, List[Token]],
type_arguments: SyntaxList):
super().__init__()
if isinstance(identifier_tokens, Token):
self.identifier_tokens = [identifier_tokens]
else:
self.identifier_tokens = identifier_tokens
self.type_arguments = type_arguments
def write(self, writer: TextIO):
for identifier_token in self.identifier_tokens:
writer.write(identifier_token.value)
self.type_arguments.write(writer)
class TupleMemberType(Type):
def __init__(self, element_type: Type, element_name: Token):
super().__init__()
self.element_type = element_type
self.element_name = element_name
def write(self, writer: TextIO):
self.element_type.write(writer)
def _desugar_self(self) -> Type:
return self.element_type
class TupleType(Type):
def __init__(self, elements: SyntaxList):
super().__init__()
self.elements = elements
def write(self, writer: TextIO):
self.elements.write(writer)
class AliasInfo(Node):
before_set: List[str]
after_set: List[str]
contained_types: List[Type]
tokens: List[Token]
def __init__(self):
super().__init__()
self.before_set = []
self.after_set = []
self.contained_types = []
self.tokens = []
self.is_writable = False
def __str__(self):
buffer = io.StringIO()
self.write(buffer)
return buffer.getvalue()
def __eq__(self, obj):
return isinstance(obj, AliasInfo) and str(self) == str(obj)
def __ne__(self, obj):
return not self.__eq__(obj)
def write(self, writer: TextIO):
writer.write("(")
writer.write("|".join(self.before_set))
if self.is_writable:
writer.write("!")
writer.write(" -> ")
writer.write("|".join(self.after_set))
writer.write(")")
class AliasInfoType(Type):
def __init__(self, inner_type: Type, alias_info: AliasInfo):
super().__init__()
self.inner_type = inner_type
self.alias_info = alias_info
self.inner_type.alias_info = alias_info
def write(self, writer: TextIO):
self.inner_type.write(writer)
self.alias_info.write(writer)
def _desugar_self(self) -> Type:
return self.inner_type
class KWArgsSentinelType(Type):
def __init__(self, token: Token):
super().__init__()
self.token = token
def write(self, writer: TextIO):
writer.write(self.token.value)
class TensorType(ConcreteType): pass
class IntType(ConcreteType): pass
class FloatType(ConcreteType): pass
class BoolType(ConcreteType): pass
class StrType(ConcreteType): pass
class ScalarType(ConcreteType): pass
class ScalarTypeType(ConcreteType): pass
class DimnameType(ConcreteType): pass
class GeneratorType(ConcreteType): pass
class TensorOptionsType(ConcreteType): pass
class LayoutType(ConcreteType): pass
class DeviceType(ConcreteType): pass
class MemoryFormatType(ConcreteType): pass
class QSchemeType(ConcreteType): pass
class StorageType(ConcreteType): pass
class ConstQuantizerPtrType(ConcreteType): pass
class StreamType(ConcreteType): pass
#region Decls
class Decl(Node): pass
class ParameterDecl(Decl):
def __init__(
self,
parameter_type: Type,
identifier: Token = None,
equals: Token = None,
default_value: Expression = None):
super().__init__()
self.parameter_type = parameter_type
self.identifier = identifier
self.equals = equals
self.default_value = default_value
def write(self, writer: TextIO):
self.parameter_type.write(writer)
if self.identifier:
writer.write(" ")
writer.write(self.identifier.value)
class FunctionDecl(Decl):
def __init__(
self,
identifier: Token,
parameters: SyntaxList,
return_type: Type = None,
semicolon: Token = None,
arrow: Token = None):
super().__init__()
self.is_leaf = False
self.identifier = identifier
self.return_type = return_type
self.parameters = parameters
self.semicolon = semicolon
self.arrow = arrow
def get_parameter(self, identifier: str) -> ParameterDecl:
for param in self.parameters:
id = param.member.identifier
if id and id.value == identifier:
return param.member
return None
class TranslationUnitDecl(Decl):
def __init__(self, decls: List[FunctionDecl]):
super().__init__()
self.decls = decls
def __iter__(self):
return self.decls.__iter__()
#endregion
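# Usage sketch (editorial addition, not part of the original module). It only
# exercises AliasInfo, which needs no tokens; the expected output follows
# directly from AliasInfo.write() as defined above.
if __name__ == "__main__":
    demo = AliasInfo()
    demo.before_set.append("a")
    demo.after_set.append("a")
    demo.is_writable = True
    # __str__ renders through write(), producing "(a! -> a)"
    assert str(demo) == "(a! -> a)"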
|
Lib/lib2to3/fixes/fix_input.py
|
leepro/unladen-swallow
| 2,293 |
68294
|
"""Fixer that changes input(...) into eval(input(...))."""
# Author: <NAME>
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Name
from .. import patcomp
context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
class FixInput(fixer_base.BaseFix):
PATTERN = """
power< 'input' args=trailer< '(' [any] ')' > >
"""
def transform(self, node, results):
        # If we're already wrapped in an eval() call, we're done.
if context.match(node.parent.parent):
return
new = node.clone()
new.set_prefix("")
return Call(Name("eval"), [new], prefix=node.get_prefix())
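# Illustrative note (editorial addition): given source such as
#     x = input("value? ")
# this fixer rewrites it to
#     x = eval(input("value? "))
# while code that is already wrapped, e.g. eval(input(...)), is left untouched
# because of the `context` pattern check in transform().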
|
scripts/update_taxonomies.py
|
gaybro8777/osf.io
| 628 |
68321
|
import os
import json
import logging
import sys
from django.db import transaction
from django.apps import apps
from scripts import utils as script_utils
from scripts.populate_preprint_providers import update_or_create
from website.app import init_app
from website import settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# OSF preprint provider used for initial subject creation
OSF_PROVIDER_DATA = {
'_id': 'osf',
'name': 'Open Science Framework',
'domain': settings.DOMAIN,
'domain_redirect_enabled': False,
'default_license': 'CC0 1.0 Universal',
'licenses_acceptable': ['CC0 1.0 Universal', 'CC-By Attribution 4.0 International', 'No license'],
}
def update_taxonomies(filename):
Subject = apps.get_model('osf.Subject')
PreprintProvider = apps.get_model('osf.PreprintProvider')
try:
bepress_provider = PreprintProvider.objects.get(_id='osf')
except PreprintProvider.DoesNotExist:
bepress_provider, _ = update_or_create(OSF_PROVIDER_DATA)
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', filename
)
) as fp:
taxonomy = json.load(fp)
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
parent = None
if len(subjects) > 1:
parent, created_p = Subject.objects.get_or_create(text=subjects[-2], provider=bepress_provider)
if created_p:
logger.info('Created parent "{}":{} for subject {}'.format(parent.text, parent._id, text))
logger.info(u'Getting or creating Subject "{}"{}'.format(
text,
u' with parent {}:{}'.format(parent.text, parent._id) if parent else ''
))
subject, _ = Subject.objects.get_or_create(text=text, provider=bepress_provider)
if parent and not subject.parent:
logger.info(u'Adding parent "{}":{} to Subject "{}":{}'.format(
parent.text, parent._id,
subject.text, subject._id
))
subject.parent = parent
subject.save()
def main():
init_app(set_backends=True, routes=False)
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
update_taxonomies('bepress_taxonomy.json')
if dry_run:
raise RuntimeError('Dry run, transaction rolled back')
if __name__ == '__main__':
main()
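# Example invocation (editorial note; the exact entry point may differ in your checkout):
#   python -m scripts.update_taxonomies          # apply changes
#   python -m scripts.update_taxonomies --dry    # run, then roll the transaction back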
|
pywick/datasets/PredictFolderDataset.py
|
achaiah/pywick
| 408 |
68324
|
<filename>pywick/datasets/PredictFolderDataset.py
from .FolderDataset import FolderDataset, identity_x
class PredictFolderDataset(FolderDataset):
"""
Convenience class for loading out-of-memory data that is more geared toward prediction data loading (where ground truth is not available). \n
If not transformed in any way (either via one of the loaders or transforms) the inputs and targets will be identical (paths to the discovered files)\n
Instead, the intended use is that the input path is loaded into some kind of binary representation (usually an image), while the target is either
left as a path or is post-processed to accommodate some special need.
Arguments
---------
:param root: (string):
path to main directory
:param input_regex: (string `(default is any valid image file)`):
regular expression to find inputs.
e.g. if all your inputs have the word 'input',
you'd enter something like input_regex='*input*'
:param input_transform: (torch transform):
transform to apply to each input before returning
:param input_loader: (callable `(default: identity)`):
defines how to load input samples from file.
If a function is provided, it should take in a file path as input and return the loaded sample. Identity simply returns the input.
:param target_loader: (callable `(default: None)`):
defines how to load target samples from file (which, in our case, are the same as inputs)
If a function is provided, it should take in a file path as input and return the loaded sample.
:param exclusion_file: (string):
list of files to exclude when enumerating all files.
The list must be a full path relative to the root parameter
"""
def __init__(self, root, input_regex='*', input_transform=None, input_loader=identity_x, target_loader=None, exclusion_file=None):
super().__init__(root=root, class_mode='path', input_regex=input_regex, target_extension=None, transform=input_transform,
default_loader=input_loader, target_loader=target_loader, exclusion_file=exclusion_file, target_index_map=None)
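# Usage sketch (editorial addition; the directory and pattern below are hypothetical):
#
#     dataset = PredictFolderDataset(root='/data/to_predict', input_regex='*.jpg')
#     sample = dataset[0]   # with the default identity loader, the input is simply the file path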
|
reaver/__init__.py
|
HatsuneMiku4/reaver
| 239 |
68350
|
<reponame>HatsuneMiku4/reaver
import reaver.envs
import reaver.models
import reaver.agents
import reaver.utils
|
conans/test/integration/command/install/install_cascade_test.py
|
Wonders11/conan
| 6,205 |
68359
|
import unittest
from collections import OrderedDict
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestServer, TurboTestClient, GenConanfile
class InstallCascadeTest(unittest.TestCase):
def setUp(self):
"""
A
/ \
B C
| \
D |
/ \ |
| \ /
E F
"""
server = TestServer()
servers = OrderedDict([("default", server)])
self.client = TurboTestClient(servers=servers)
self.ref_a = ConanFileReference.loads("libA/1.0@conan/stable")
self.client.create(self.ref_a, conanfile=GenConanfile())
self.ref_b = ConanFileReference.loads("libB/1.0@conan/stable")
self.client.create(self.ref_b, conanfile=GenConanfile().with_requirement(self.ref_a))
self.ref_c = ConanFileReference.loads("libC/1.0@conan/stable")
self.client.create(self.ref_c, conanfile=GenConanfile().with_requirement(self.ref_a))
self.ref_d = ConanFileReference.loads("libD/1.0@conan/stable")
self.client.create(self.ref_d, conanfile=GenConanfile().with_requirement(self.ref_b))
self.ref_e = ConanFileReference.loads("libE/1.0@conan/stable")
self.client.create(self.ref_e, conanfile=GenConanfile().with_requirement(self.ref_d))
self.ref_f = ConanFileReference.loads("libF/1.0@conan/stable")
conanfile = GenConanfile().with_requirement(self.ref_c).with_requirement(self.ref_d)
self.client.create(self.ref_f, conanfile=conanfile)
def _assert_built(self, refs):
for ref in refs:
self.assertIn("{}: Copying sources to build folder".format(ref), self.client.out)
for ref in [self.ref_a, self.ref_b, self.ref_c, self.ref_d, self.ref_e, self.ref_f]:
if ref not in refs:
self.assertNotIn("{}: Copying sources to build folder".format(ref),
self.client.out)
def test_install_cascade_only_affected(self):
project = ConanFileReference.loads("project/1.0@conan/stable")
project_cf = GenConanfile().with_requirement(self.ref_e).with_requirement(self.ref_f)
# Building A everything is built
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_a))
self._assert_built([self.ref_a, self.ref_b, self.ref_c, self.ref_d,
self.ref_e, self.ref_f, project])
# Building D builds E, F and project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_d))
self._assert_built([self.ref_d, self.ref_e, self.ref_f, project])
# Building E only builds E and project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_e))
self._assert_built([self.ref_e, project])
# Building project only builds project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(project))
self._assert_built([project])
# Building C => builds F and project
self.client.create(project, conanfile=project_cf,
args="--build {} --build cascade".format(self.ref_c))
self._assert_built([project, self.ref_f, self.ref_c])
|
librclone/python/rclone.py
|
elbaby/rclone
| 18,121 |
68378
|
"""
Python interface to librclone.so using ctypes
Create an rclone object
rclone = Rclone(shared_object="/path/to/librclone.so")
Then call rpc calls on it
rclone.rpc("rc/noop", a=42, b="string", c=[1234])
When finished, close it
rclone.close()
"""
__all__ = ('Rclone', 'RcloneException')
import os
import json
import subprocess
from ctypes import *
class RcloneRPCResult(Structure):
"""
This is returned from the C API when calling RcloneRPC
"""
_fields_ = [("Output", c_char_p),
("Status", c_int)]
class RcloneException(Exception):
"""
Exception raised from rclone
This will have the attributes:
output - a dictionary from the call
status - a status number
"""
def __init__(self, output, status):
self.output = output
self.status = status
message = self.output.get('error', 'Unknown rclone error')
super().__init__(message)
class Rclone():
"""
Interface to Rclone via librclone.so
Initialise with shared_object as the file path of librclone.so
"""
def __init__(self, shared_object="./librclone.so"):
self.rclone = CDLL(shared_object)
self.rclone.RcloneRPC.restype = RcloneRPCResult
self.rclone.RcloneRPC.argtypes = (c_char_p, c_char_p)
self.rclone.RcloneInitialize.restype = None
self.rclone.RcloneInitialize.argtypes = ()
self.rclone.RcloneFinalize.restype = None
self.rclone.RcloneFinalize.argtypes = ()
self.rclone.RcloneInitialize()
def rpc(self, method, **kwargs):
"""
Call an rclone RC API call with the kwargs given.
The result will be a dictionary.
        If an exception is raised from rclone it will be of type
RcloneException.
"""
method = method.encode("utf-8")
parameters = json.dumps(kwargs).encode("utf-8")
resp = self.rclone.RcloneRPC(method, parameters)
output = json.loads(resp.Output.decode("utf-8"))
status = resp.Status
if status != 200:
raise RcloneException(output, status)
return output
def close(self):
"""
Call to finish with the rclone connection
"""
self.rclone.RcloneFinalize()
self.rclone = None
@classmethod
def build(cls, shared_object):
"""
Builds rclone to shared_object if it doesn't already exist
Requires go to be installed
"""
if os.path.exists(shared_object):
return
print("Building "+shared_object)
subprocess.check_call(["go", "build", "--buildmode=c-shared", "-o", shared_object, "github.com/rclone/rclone/librclone"])
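if __name__ == "__main__":
    # Minimal demonstration (editorial addition) mirroring the module docstring;
    # it assumes Go is available so librclone.so can be built locally.
    Rclone.build("./librclone.so")
    rclone = Rclone(shared_object="./librclone.so")
    print(rclone.rpc("rc/noop", a=42, b="string", c=[1234]))
    rclone.close()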
|
aiida/orm/nodes/data/remote/stash/__init__.py
|
azadoks/aiida-core
| 180 |
68379
|
# -*- coding: utf-8 -*-
"""Module with data plugins that represent files of completed calculations jobs that have been stashed."""
# AUTO-GENERATED
# yapf: disable
# pylint: disable=wildcard-import
from .base import *
from .folder import *
__all__ = (
'RemoteStashData',
'RemoteStashFolderData',
)
# yapf: enable
|
controle_estoque/mainfinanceiro.py
|
jucimar1/controleEstoque
| 134 |
68422
|
# -*- coding: utf-8 -*-
from Views.mainFinanceiro import Ui_ct_MainFinanceiro
from movconta import MainMovimentoConta
from areceber import MainAReceber
from apagar import MainAPagar
from Funcoes.data import DataAtual
class MainFinanceiro(Ui_ct_MainFinanceiro, DataAtual, MainMovimentoConta,
MainAReceber, MainAPagar):
def mainfinanceiro(self, frame):
super(MainFinanceiro, self).setMainFinanceiro(frame)
        # Hiding buttons that do not have a function yet
self.bt_ajustesFinanceiro.setHidden(True)
self.bt_relatCompras.setHidden(True)
self.bt_relatVendas.setHidden(True)
self.frameMainFinanceiro.show()
""" Chamando funcoes ao clicar nos botoes """
# Movimento Financeiro
self.bt_MovCaixa.clicked.connect(self.JanelaMovimento)
# Conta a Receber
self.bt_AReceber.clicked.connect(self.JanelaAReceber)
# Conta a Receber
self.bt_APagar.clicked.connect(self.JanelaAPagar)
# Abrindo janena Movimento Financeiro
self.JanelaMovimento()
def JanelaMovimento(self):
self.LimpaFrame(self.ct_financeiro)
self.DesativaBotao(self.fr_menuFinanceiro, self.bt_MovCaixa)
self.mainmovconta(self.ct_financeiro)
def JanelaAReceber(self):
self.LimpaFrame(self.ct_financeiro)
self.DesativaBotao(self.fr_menuFinanceiro, self.bt_AReceber)
self.mainAReceber(self.ct_financeiro)
def JanelaAPagar(self):
self.LimpaFrame(self.ct_financeiro)
self.DesativaBotao(self.fr_menuFinanceiro, self.bt_APagar)
self.mainAPagar(self.ct_financeiro)
|
apps/staffs/migrations/0002_auto_20201124_0614.py
|
shravakushwaha/school_system
| 235 |
68465
|
# Generated by Django 3.0.8 on 2020-11-24 12:14
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("staffs", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="staff",
name="mobile_number",
field=models.CharField(
blank=True,
max_length=13,
validators=[
django.core.validators.RegexValidator(
message="Entered mobile number isn't in a right format!",
regex="^[0-9]{10,15}$",
)
],
),
),
]
|
regtests/lang/try_except.py
|
ahakingdom/Rusthon
| 622 |
68475
|
<reponame>ahakingdom/Rusthon
from runtime import *
'''
try except
'''
def main():
a = [1,2,3]
b = False
try:
a.no_such_method()
b = 'this should not happen'
except:
b = True
assert( b == True )
main()
|
lib/galaxy/model/migrate/versions/0031_community_and_workflow_tags.py
|
rikeshi/galaxy
| 1,085 |
68479
|
<filename>lib/galaxy/model/migrate/versions/0031_community_and_workflow_tags.py
"""
Migration script to (a) add and populate necessary columns for doing community tagging of histories, datasets, and pages and \
(b) add table for doing individual and community tagging of workflows.
"""
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
Table,
Unicode
)
from galaxy.model.migrate.versions.util import (
add_column,
create_table,
drop_column,
drop_table
)
log = logging.getLogger(__name__)
metadata = MetaData()
StoredWorkflowTagAssociation_table = Table("stored_workflow_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", Unicode(255), index=True),
Column("value", Unicode(255), index=True),
Column("user_value", Unicode(255), index=True))
WorkflowTagAssociation_table = Table("workflow_tag_association", metadata,
Column("id", Integer, primary_key=True),
Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", Unicode(255), index=True),
Column("value", Unicode(255), index=True),
Column("user_value", Unicode(255), index=True))
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
# Create user_id column in history_tag_association table.
c = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True)
add_column(c, 'history_tag_association', metadata, index_name='ix_history_tag_association_user_id')
# Populate column so that user_id is the id of the user who owns the history (and, up to now, was the only person able to tag the history).
migrate_engine.execute(
"UPDATE history_tag_association SET user_id=( SELECT user_id FROM history WHERE history_tag_association.history_id = history.id )")
# Create user_id column in history_dataset_association_tag_association table.
c = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True)
add_column(c, 'history_dataset_association_tag_association', metadata, index_name='ix_history_dataset_association_tag_association_user_id')
# Populate column so that user_id is the id of the user who owns the history_dataset_association (and, up to now, was the only person able to tag the page).
migrate_engine.execute(
"UPDATE history_dataset_association_tag_association SET user_id=( SELECT history.user_id FROM history, history_dataset_association WHERE history_dataset_association.history_id = history.id AND history_dataset_association.id = history_dataset_association_tag_association.history_dataset_association_id)")
# Create user_id column in page_tag_association table.
c = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True)
add_column(c, 'page_tag_association', metadata, index_name='ix_page_tag_association_user_id')
# Populate column so that user_id is the id of the user who owns the page (and, up to now, was the only person able to tag the page).
migrate_engine.execute(
"UPDATE page_tag_association SET user_id=( SELECT user_id FROM page WHERE page_tag_association.page_id = page.id )")
# Create stored_workflow_tag_association table.
create_table(StoredWorkflowTagAssociation_table)
# Create workflow_tag_association table.
create_table(WorkflowTagAssociation_table)
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
# Drop workflow_tag_association table.
drop_table(WorkflowTagAssociation_table)
# Drop stored_workflow_tag_association table.
drop_table(StoredWorkflowTagAssociation_table)
# Drop user_id column from page_tag_association table.
drop_column('user_id', 'page_tag_association', metadata)
# Drop user_id column from history_dataset_association_tag_association table.
drop_column('user_id', 'history_dataset_association_tag_association', metadata)
# Drop user_id column from history_tag_association table.
drop_column('user_id', 'history_tag_association', metadata)
|
unittests/tools/test_acunetix_parser.py
|
mtcolman/django-DefectDojo
| 249 |
68495
|
import datetime
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.acunetix.parser import AcunetixParser
class TestAcunetixParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
testfile = open("unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(1, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(352, finding.cwe)
self.assertEqual(datetime.date(2018, 9, 24), finding.date)
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertFalse(finding.false_p)
self.assertEqual("Vijay Test Imapact", finding.impact)
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
self.assertEqual(1, len(finding.unsaved_endpoints))
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual('https', endpoint.protocol)
self.assertEqual(443, endpoint.port)
self.assertEqual('vijaytest.com', endpoint.host)
self.assertEqual('some/path', endpoint.path)
def test_parse_file_with_multiple_finding(self):
testfile = open("unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(4, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("A single machine can take down another machine's web server with minimal bandwidth and side effects on unrelated services and ports.", finding.impact)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Possible virtual host found", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(200, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible sensitive information disclosure.", finding.impact)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Unencrypted connection (verified)", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(310, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible information disclosure.", finding.impact)
            # check that this finding has no references
self.assertIsNone(finding.references)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
            self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
def test_parse_file_with_example_com(self):
testfile = open("unittests/scans/acunetix/XML_http_example_co_id_.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(7, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("HTML form without CSRF protection", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:L/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertIn("An attacker could use CSRF to trick a victim into accessing a website hosted by the attacker,", finding.impact)
# aggregated
self.assertEqual(3, finding.nb_occurences)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(3, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('h/search', endpoint.path)
endpoint = finding.unsaved_endpoints[1]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('m/zmain', endpoint.path)
# check req/resp
self.assertEqual(3, len(finding.unsaved_req_resp))
for req_resp in finding.unsaved_req_resp:
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=6):
finding = findings[6]
self.assertEqual("Content Security Policy (CSP) not implemented", finding.title)
self.assertEqual("Info", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertFalse(finding.false_p)
self.assertIn("CSP can be used to prevent and/or mitigate attacks that involve content/code injection,", finding.impact)
            # check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
|
tests/python/relay/test_relay_te_compiler.py
|
shengxinhu/tvm
| 4,640 |
68540
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm import relay
from tvm import autotvm
from tvm import topi
from tvm.relay.backend import te_compiler
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
return topi.generic.schedule_conv2d_nchw(outs)
@autotvm.register_topi_compute("test/conv2d_2")
def _compute_conv2d_2(cfg, input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_2")
def _schedule_conv2d_2(cfg, outs):
return topi.generic.schedule_conv2d_nchw(outs)
def _compute_conv2d_3(input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
def _schedule_conv2d_3(outs):
return topi.generic.schedule_conv2d_nchw(outs)
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
strategy = relay.op.OpStrategy()
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
name="conv2d_1",
plevel=10,
)
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_2),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_2),
name="conv2d_2",
plevel=15,
)
ic = inputs[0].shape[1]
with tvm.te.SpecializedCondition(ic >= 16):
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_3),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_3),
name="conv2d_3",
plevel=20,
)
return strategy
def _create_record(task_name, dshape, wshape, target, cost):
args = [te.placeholder(dshape), te.placeholder(wshape), (1, 1), (1, 1, 1, 1), (1, 1), "float32"]
task = autotvm.task.create(task_name, args, target)
cfg = autotvm.ConfigEntity(0, None, {}, [])
cfg.cost = cost
inp = autotvm.MeasureInput(target=target, task=task, config=cfg)
result = autotvm.MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
return (inp, result)
def test_get_valid_implementations():
target = tvm.target.Target("llvm")
def _get_impls(dshape, wshape):
data = relay.var("data", shape=dshape)
weight = relay.var("wshape", shape=wshape)
out = relay.nn.conv2d(data, weight, padding=(1, 1))
out = run_infer_type(out)
return relay.backend.te_compiler.get_valid_implementations(
relay.op.get("nn.conv2d"),
out.attrs,
[te.placeholder(dshape), te.placeholder(wshape)],
out.checked_type,
target,
)
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
impls = _get_impls((1, 8, 7, 7), (32, 8, 3, 3))
assert len(impls) == 2
impls = _get_impls((1, 16, 7, 7), (32, 16, 3, 3))
assert len(impls) == 3
def test_select_implementation():
target = tvm.target.Target("llvm")
def _select_impl(dshape, wshape, use_autotvm=False):
data = relay.var("data", shape=dshape)
weight = relay.var("wshape", shape=wshape)
out = relay.nn.conv2d(data, weight, padding=(1, 1))
out = run_infer_type(out)
return relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"),
out.attrs,
[te.placeholder(dshape), te.placeholder(wshape)],
out.checked_type,
target,
use_autotvm,
)
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3))
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3))
assert impl.name == "conv2d_3"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_3"
# add autotvm record
records = []
records.append(_create_record("test/conv2d_1", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.5))
records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.0))
with target:
with autotvm.apply_history_best(records):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_1"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_1"
records.append(_create_record("test/conv2d_2", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.2))
records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.2))
with target:
with autotvm.apply_history_best(records):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_1"
def test_te_compiler():
tec = relay.backend.te_compiler.get()
def get_func(shape):
x = relay.var("x", shape=shape)
y = relay.add(x, x)
z = relay.add(y, x)
f = relay.Function([x], z)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
return mod["main"]
z1 = tec.lower(get_func((10,)), "llvm")
z2 = tec.lower(get_func((10,)), "llvm")
z3 = tec.lower(get_func(()), "llvm")
assert z1.same_as(z2)
assert not z3.same_as(z1)
if tvm.testing.device_enabled("cuda"):
z4 = tec.lower(get_func(()), "cuda")
assert not z3.same_as(z4)
# Test JIT target
for target in ["llvm"]:
dev = tvm.device(target)
if tvm.testing.device_enabled(target):
f = tec.jit(get_func((10,)), target)
x = tvm.nd.array(np.ones(10).astype("float32"), device=dev)
y = tvm.nd.empty((10,), device=dev)
f(x, y)
tvm.testing.assert_allclose(y.numpy(), x.numpy() * 3)
# Note: Once the te compiler is removed, we should keep this test so that
# we make sure that opt_level=0 passes are being called correctly.
def test_compile_placeholder_bypass():
te_compiler = relay.backend.te_compiler.get()
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
z = relay.var("z", shape=(2, 3))
result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
func = relay.Function(relay.analysis.free_vars(result), result)
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_injective_with_tuple():
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
x_transpose = relay.transpose(x)
output = relay.Tuple([x_transpose, y])
func = relay.Function([x, y], output)
relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_tuple_dup():
x = relay.var("data", shape=(16, 16))
log = relay.log(x)
output = relay.Tuple([log, log])
f = relay.Function([x], output)
relay.build(tvm.IRModule.from_expr(f), "llvm")
def test_compile_full():
    # Shape calculations can happen in int64. The test checks that the full operator
    # can handle shapes that are not int32.
shape = (
tvm.tir.IntImm("int32", 1),
tvm.tir.IntImm("int64", 16),
tvm.tir.IntImm("int64", 16),
tvm.tir.IntImm("int32", 64),
)
output = relay.full(relay.const(0, "int32"), shape=shape, dtype="int32")
f = relay.Function([], output)
mod = tvm.IRModule.from_expr(f)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
relay.build(mod, "llvm")
def test_compile_nhwc_pack():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
mod = relay.Function(relay.analysis.free_vars(func), func)
relay.build(mod, target="llvm")
def test_compile_propogate_hash():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
mod = tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(func), func))
vm = relay.vm.VMCompiler()
opt_mod, _ = vm.optimize(mod, target="llvm")
for f in opt_mod.functions.values():
assert "hash" in f.attrs.keys()
if __name__ == "__main__":
test_get_valid_implementations()
test_select_implementation()
test_te_compiler()
test_compile_placeholder_bypass()
test_compile_injective_with_tuple()
test_compile_tuple_dup()
test_compile_full()
test_compile_nhwc_pack()
|
onmt/IO.py
|
Flamexmt/LMA
| 321 |
68550
|
# -*- coding: utf-8 -*-
import codecs
from collections import Counter, defaultdict
from itertools import chain, count
import torch
import torchtext.data
import torchtext.vocab
PAD_WORD = '<blank>'
UNK = 0
BOS_WORD = '<s>'
EOS_WORD = '</s>'
def __getstate__(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def __setstate__(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
torchtext.vocab.Vocab.__getstate__ = __getstate__
torchtext.vocab.Vocab.__setstate__ = __setstate__
def extract_features(tokens):
"Given a list of token separate out words and features (if any)."
if not tokens:
return [], [], -1
split_tokens = [token.split(u"│") for token in tokens]
split_tokens = [token for token in split_tokens if token[0]]
token_size = len(split_tokens[0])
assert all(len(token) == token_size for token in split_tokens), \
"all words must have the same number of features"
words_and_features = list(zip(*split_tokens))
words = words_and_features[0]
features = words_and_features[1:]
return words, features, token_size - 1
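# Worked example (editorial addition), using the same separator character as above:
#   extract_features([u"the│DET", u"cat│NOUN"])
#   returns ((u"the", u"cat"), [(u"DET", u"NOUN")], 1)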
def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = Counter(chain(*[vocab.freqs for vocab in vocabs]))
return torchtext.vocab.Vocab(merged,
specials=[PAD_WORD, BOS_WORD, EOS_WORD],
max_size=vocab_size)
def make_features(batch, side):
"""
Args:
batch (Variable): a batch of source or target data.
side (str): for source or for target.
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch).
"""
assert side in ['src', 'tgt']
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = side + "_feat_"
    feat_keys = sorted(k for k in batch.__dict__ if feat_start in k)
    features = [batch.__dict__[k] for k in feat_keys]
levels = [data] + features
return torch.cat([level.unsqueeze(2) for level in levels], 2)
def join_dicts(*args):
"""
args: dictionaries with disjoint keys
returns: a single dictionary that has the union of these keys
"""
return dict(chain(*[d.items() for d in args]))
class OrderedIterator(torchtext.data.Iterator):
def create_batches(self):
if self.train:
self.batches = torchtext.data.pool(
self.data(), self.batch_size,
self.sort_key, self.batch_size_fn,
random_shuffler=self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
class ONMTDataset(torchtext.data.Dataset):
"""Defines a dataset for machine translation."""
@staticmethod
def sort_key(ex):
"Sort in reverse size order"
return -len(ex.src)
def __init__(self, src_path, tgt_path, fields, opt, src_img_dir=None, **kwargs):
"""
Create a TranslationDataset given paths and fields.
src_path: location of source-side data
        tgt_path: location of target-side data or None. If it exists,
        source and target data must be the same length.
fields:
src_img_dir: if not None, uses images instead of text for the
source. TODO: finish
"""
if src_img_dir:
self.type_ = "img"
else:
self.type_ = "text"
if self.type_ == "text":
self.src_vocabs = []
src_truncate = 0 if opt is None else opt.src_seq_length_trunc
src_point = next(self._read_corpus_file(src_path, src_truncate))
self.nfeatures = src_point[2]
src_data = self._read_corpus_file(src_path, src_truncate)
src_examples = self._construct_examples(src_data, "src")
else:
# TODO finish this.
if not transforms:
load_image_libs()
if tgt_path is not None:
tgt_truncate = 0 if opt is None else opt.tgt_seq_length_trunc
tgt_data = self._read_corpus_file(tgt_path, tgt_truncate)
# assert len(src_data) == len(tgt_data), \
# "Len src and tgt do not match"
tgt_examples = self._construct_examples(tgt_data, "tgt")
else:
tgt_examples = None
# examples: one for each src line or (src, tgt) line pair.
# Each element is a dictionary whose keys represent at minimum
# the src tokens and their indices and potentially also the
# src and tgt features and alignment information.
if tgt_examples is not None:
examples = (join_dicts(src, tgt) for src, tgt in zip(src_examples, tgt_examples))
else:
examples = src_examples
def dynamic_dict(examples):
for example in examples:
src = example["src"]
src_vocab = torchtext.vocab.Vocab(Counter(src))
self.src_vocabs.append(src_vocab)
# mapping source tokens to indices in the dynamic dict
src_map = torch.LongTensor([src_vocab.stoi[w] for w in src])
example["src_map"] = src_map
if "tgt" in example:
tgt = example["tgt"]
mask = torch.LongTensor([0] + [src_vocab.stoi[w] for w in tgt] + [0])
example["alignment"] = mask
yield example
if opt is None or opt.dynamic_dict:
examples = dynamic_dict(examples)
# Peek at the first to see which fields are used.
ex = next(examples)
keys = ex.keys()
fields = [(k, fields[k]) for k in (list(keys) + ["indices"])]
def construct_final(examples):
for i, ex in enumerate(examples):
yield torchtext.data.Example.fromlist([ex[k] for k in keys] + [i], fields)
def filter_pred(example):
if tgt_examples is None:
return 0 < len(example.src) <= opt.src_seq_length
else:
return 0 < len(example.src) <= opt.src_seq_length and 0 < len(example.tgt) <= opt.tgt_seq_length
super(ONMTDataset, self).__init__(
list(construct_final(chain([ex], examples))),
fields,
filter_pred if opt is not None else None)
def _read_corpus_file(self, path, truncate):
"""
path: location of a src or tgt file
truncate: maximum sequence length (0 for unlimited)
returns: (word, features, nfeat) triples for each line
"""
with codecs.open(path, "r", "utf-8") as corpus_file:
lines = (line.split() for line in corpus_file)
if truncate:
lines = (line[:truncate] for line in lines)
for line in lines:
yield extract_features(line)
def _construct_examples(self, lines, side):
assert side in ["src", "tgt"]
for line in lines:
words, feats, _ = line
example_dict = {side: words}
if feats:
prefix = side + "_feat_"
example_dict.update((prefix + str(j), f) for j, f in enumerate(feats))
yield example_dict
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __reduce_ex__(self, proto):
"This is a hack. Something is broken with torch pickle."
        return super(ONMTDataset, self).__reduce_ex__(proto)
def collapse_copy_scores(self, scores, batch, tgt_vocab):
"""Given scores from an expanded dictionary
        corresponding to a batch, sums together copies,
        with a dictionary word when it is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(batch.batch_size):
index = batch.indices.data[b]
src_vocab = self.src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
scores[:, b, ti] += scores[:, b, offset + i]
scores[:, b, offset + i].fill_(1e-20)
return scores
@staticmethod
def load_fields(vocab):
vocab = dict(vocab)
fields = ONMTDataset.get_fields(len(ONMTDataset.collect_features(vocab)))
for k, v in vocab.items():
# Hack. Can't pickle defaultdict :(
v.stoi = defaultdict(lambda: 0, v.stoi)
fields[k].vocab = v
return fields
@staticmethod
def save_vocab(fields):
vocab = []
for k, f in fields.items():
if 'vocab' in f.__dict__:
f.vocab.stoi = dict(f.vocab.stoi)
vocab.append((k, f.vocab))
return vocab
@staticmethod
def collect_features(fields, side="src"):
assert side in ["src", "tgt"]
feats = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feats.append(key)
return feats
@staticmethod
def collect_feature_dicts(fields):
feature_dicts = []
for j in count():
key = "src_feat_" + str(j)
if key not in fields:
break
feature_dicts.append(fields[key].vocab)
return feature_dicts
@staticmethod
def get_fields(nFeatures=0):
fields = {}
fields["src"] = torchtext.data.Field(
pad_token=PAD_WORD,
include_lengths=True)
# fields = [("src_img", torchtext.data.Field(
# include_lengths=True))]
for j in range(nFeatures):
fields["src_feat_"+str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, _):
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.FloatTensor,
postprocessing=make_src, sequential=False)
def make_tgt(data, _):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.LongTensor,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.LongTensor,
sequential=False)
return fields
@staticmethod
def build_vocab(train, opt):
fields = train.fields
fields["src"].build_vocab(train, max_size=opt.src_vocab_size,
min_freq=opt.src_words_min_frequency)
for j in range(train.nfeatures):
fields["src_feat_" + str(j)].build_vocab(train)
fields["tgt"].build_vocab(train, max_size=opt.tgt_vocab_size,
min_freq=opt.tgt_words_min_frequency)
# Merge the input and output vocabularies.
if opt.share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
merged_vocab = merge_vocabs(
[fields["src"].vocab, fields["tgt"].vocab],
vocab_size=opt.src_vocab_size)
fields["src"].vocab = merged_vocab
fields["tgt"].vocab = merged_vocab
def load_image_libs():
"Conditional import of torch image libs."
global Image, transforms
from PIL import Image
from torchvision import transforms
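# Usage sketch (editorial addition; file names are hypothetical and `opt` is the
# option namespace produced by the OpenNMT preprocessing argument parser):
#
#     fields = ONMTDataset.get_fields()
#     train = ONMTDataset('src-train.txt', 'tgt-train.txt', fields, opt)
#     ONMTDataset.build_vocab(train, opt)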
|
library/connecter/database/mongo.py
|
GNHJM/lykops
| 141 |
68554
|
<gh_stars>100-1000
'''
Reference: https://pypi.python.org/pypi/pymongo/3.4.0
Officially recommended by MongoDB
'''
import time, logging, pymongo
from bson.objectid import ObjectId
from library.config.database import mongo_config
from library.utils.dict import dot2_, _2dot
class Op_Mongo():
    '''
    Connect to and operate on MongoDB
    '''
def __init__(self, dest=None, idletime=1000 * 60 * 60):
self.logger = logging.getLogger("database")
config = mongo_config
if dest is None :
dest_conf = config['default']
else :
dest_conf = config[dest]
host = dest_conf['host']
port = dest_conf['port']
db_name = dest_conf['db']
user = dest_conf['user']
        pwd = dest_conf['pwd']  # assumed key name; the original value was redacted
mechanism = config['mechanism']
self.conn = pymongo.MongoClient(host=host , port=port, socketKeepAlive=True, maxIdleTimeMS=idletime, minPoolSize=0, maxPoolSize=64)
        # A persistent connection is only reused when host, port, identifier, username and password all match; otherwise a new one is created
        # http://api.mongodb.com/python/current/api/pymongo/mongo_client.html
        # Connect to the MongoDB server
        self.dbs = self.conn[db_name]
        # Select the database; equivalently db = self.conn.db_name
self.log_prefix = 'MongoDB服务器' + host + ':' + str(port)
try :
self.connecter = self.dbs.authenticate(user, pwd, mechanism=mechanism)
if not self.connecter :
self.error_reason = self.log_prefix + '连接失败,原因:' + str(self.connecter)
else :
self.logger.info(self.log_prefix + '连接成功')
except Exception as e:
self.connecter = False
conn_result = str(e)
if conn_result == 'Authentication failed.' :
self.error_reason = self.log_prefix + '连接失败,原因:账号或者密码错误'
else :
self.error_reason = self.log_prefix + '连接失败,原因:' + conn_result
def batch_insert(self, insert_list, addtime=True):
        '''
        Batch-insert data into the same database
        '''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
if not (isinstance(insert_list, (list, tuple))):
self.logger.error(self.log_prefix + ' 批量插入数据失败,原因:参数insert_list不是列表或者元组')
return (False, '批量插入数据失败,参数insert_list不是列表或者元组')
insert_data = {}
for insert_dict in insert_list :
try :
collect_name = insert_dict['collect']
                # Get the collection name
except :
continue
try :
data = insert_dict['data']
                # Get the data to insert
if not isinstance(data, dict):
continue
                    # Skip entries whose data is not a dict
if addtime :
data['add_time'] = time.time()
except :
continue
if not collect_name in insert_data :
insert_data[collect_name] = [data]
else :
insert_data[collect_name].append(data)
insert_data = dot2_(insert_data)
result_dict = {}
for collect_name , data in insert_data.items() :
collection = self.dbs[collect_name]
try :
result = collection.insert_many(data)
except Exception as e :
self.logger.error(self.log_prefix + ' 批量插入数据到集合' + collect_name + '失败,原因:' + str(e))
return (False, '批量插入数据失败,' + str(e))
result_dict[collect_name] = result
self.logger.error(self.log_prefix + ' 批量插入数据到集合' + collect_name + '成功')
return (True, result_dict)
def insert(self, insert_dict, addtime=True):
        '''
        Insert data into the specified collection
        :parameters
        insert_dict: the data to insert
        addtime: whether to add an insertion timestamp
        :returns
        A tuple, (False, reason) or (True, result)
        '''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
try :
collect_name = insert_dict['collect']
            # Get the collection name
except Exception as e :
self.logger.error(self.log_prefix + ' 插入数据失败,原因:参数insert_dict格式出错,缺少collect,即缺少集合名')
return (False, '插入数据失败,参数insert_dict格式出错,缺少collect,即缺少集合名')
try :
data = insert_dict['data']
            # Get the data to insert
if not isinstance(data, dict):
self.logger.error(self.log_prefix + ' 插入数据到集合' + collect_name + '失败,原因:参数insert_dict格式出错,data值不为字典')
return (False, '插入数据到集合' + collect_name + '失败,参数insert_dict格式出错,data值不为字典')
except Exception as e:
self.logger.error(self.log_prefix + ' 插入数据到集合' + collect_name + '失败,原因:参数insert_dict格式出错,缺少键data')
return (False, '插入数据失败,参数insert_dict格式出错,缺少键data')
if addtime :
data['add_time'] = time.time()
collection = self.dbs[collect_name]
data = dot2_(data)
try :
result = collection.insert(data)
self.logger.info(self.log_prefix + ' 插入数据到集合' + collect_name + '成功')
return (True, result)
except Exception as e :
self.logger.error(self.log_prefix + ' 插入数据到集合' + collect_name + '失败,原因:' + str(e))
return (False, '插入数据失败,' + str(e))
def _handler_condition(self, condition_dict):
        '''
        Normalize the query condition used when querying or updating data
        '''
condition_dict = dot2_(condition_dict)
for k, v in condition_dict.items() :
if k == '_id' :
                # Convert the _id value to the proper type
if isinstance(v, str) :
try :
v = int(v)
except :
v = ObjectId(v)
condition_dict[k] = v
return condition_dict
def _handler_result(self, query_list, get_field=[]):
        '''
        Post-process the results when querying data
        '''
result_list = []
for result_dict in query_list :
result_dict = _2dot(result_dict)
try :
del result_dict['traceback']
except :
pass
del result_dict['_id']
if not isinstance(get_field, (list, tuple)) or not get_field :
temp_dict = result_dict
else:
temp_dict = {}
for field in get_field :
try :
temp_dict[field] = result_dict[field]
except :
temp_dict[field] = 'no data'
result_list.append(temp_dict)
return result_list
def _getresult_fielddict(self, result_dict, field_dict):
temp_dict = {}
for field, subfield in field_dict.items() :
if not subfield or subfield == '' :
sub_resultdict = result_dict[field]
else :
if isinstance(subfield, dict):
try :
sub_resultdict = result_dict[field]
sub_resultdict = self._getresult_fielddict(sub_resultdict, subfield)
except :
pass
temp_dict[field] = sub_resultdict
return temp_dict
def find(self, collect_name, get_field=[], limits=0, condition_dict={}, iscount=False, sort_dict={}):
        '''
        Use MongoDB's find command to fetch the specified fields or all data
        :parameters:
        collect_name: collection name
        get_field: list of fields to retrieve
        condition_dict: query condition
        iscount: if True, return a count of matching documents
        sort_dict: sort specification
        :returns
        A tuple, (False, reason) or (True, result)
        '''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
try :
limits = int(limits)
except :
limits = 0
if not sort_dict :
sort_dict = {}
collection = self.dbs[collect_name]
if not iscount :
try :
if isinstance(condition_dict, dict) and condition_dict != {} :
condition_dict = self._handler_condition(condition_dict)
if limits > 0 :
query_list = collection.find(condition_dict).limit(limits)
else :
query_list = collection.find(condition_dict)
if sort_dict :
sort_list = []
for key , scend in sort_dict.items() :
if scend not in [1, -1] :
scend = 1
if scend == 1 :
sort_list.append((key, pymongo.ASCENDING))
else :
sort_list.append((key, pymongo.DESCENDING))
if limits > 0 :
try :
query_list = collection.find(condition_dict).sort(sort_list).limit(limits)
except Exception as e:
self.logger.error(self.log_prefix + ' 从集合' + collect_name + '查询数据失败,原因:排序查询错误,' + str(e))
return (False, '查询数据失败,排序查询错误,' + str(e))
else :
try :
query_list = collection.find(condition_dict).sort(sort_list)
except Exception as e:
self.logger.error(self.log_prefix + ' 从集合' + collect_name + '查询数据失败,原因:排序查询错误,' + str(e))
return (False, '查询数据失败,排序查询错误,' + str(e))
else :
if limits > 0 :
query_list = collection.find(condition_dict).limit(limits)
else :
query_list = collection.find(condition_dict)
else :
query_list = collection.find()
result_list = self._handler_result(query_list, get_field=get_field)
self.logger.info(self.log_prefix + ' 从集合' + collect_name + '查询数据成功')
return (True, result_list)
except Exception as e :
self.logger.error(self.log_prefix + ' 从集合' + collect_name + '查询数据失败,原因:' + str(e))
return (False, ['查询数据失败,' + str(e)])
else :
try :
if isinstance(condition_dict, dict) and condition_dict != {} :
get_dict = self._handler_condition(condition_dict)
result = collection.find(get_dict).count()
else :
result = collection.find().count()
self.logger.info(self.log_prefix + ' 从集合' + collect_name + '查询数据后统计条数成功')
return (True, result)
except Exception as e :
self.logger.error(self.log_prefix + ' 从集合' + collect_name + '查询数据后统计条数失败,原因:' + str(e))
return (False, '查询数据失败,' + str(e))
def find_one(self, collect_name, get_field=[], condition_dict={}):
        '''
        Fetch a single document from the specified collection
        :parameters
        collect_name: collection name
        get_field: list of fields to retrieve
        condition_dict: query condition
        :returns
        A tuple, (False, reason) or (True, result)
        '''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
condition_dict = dot2_(condition_dict)
collection = self.dbs[collect_name]
try :
if isinstance(condition_dict, dict) :
if condition_dict != {} :
condition_dict = self._handler_condition(condition_dict)
query_list = collection.find_one(condition_dict)
else :
query_list = collection.find_one()
else :
query_list = collection.find_one()
result = self._handler_result([query_list], get_field=get_field)
self.logger.info(self.log_prefix + ' 从集合' + collect_name + '查询并返回其中一条数据成功')
return (True, result)
except Exception as e :
self.logger.error(self.log_prefix + ' 从集合' + collect_name + '查询并返回其中一条数据失败,原因:' + str(e))
return (False, '查询并返回其中一条数据失败,原因:' + str(e))
def update(self, condition_dict, update_dict, addtime=True):
        '''
        Use MongoDB's update command to modify data; if no matching document exists, insert it instead
        :parameters
        condition_dict: query condition
        update_dict: {'collect': collection name, 'data': update dict}
        :returns
        A tuple, (False, reason) or (True, result)
        '''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
try :
collect_name = update_dict['collect']
            # Get the collection name
except Exception as e :
self.logger.error(self.log_prefix + ' 更新数据失败,原因:参数update_dict格式出错,缺少collect,即缺少集合名')
return (False, '更新数据失败,参数update_dict格式出错,缺少collect,即缺少集合名')
updatedict = update_dict['data']
condition_dict = dot2_(condition_dict)
updatedict = dot2_(updatedict)
if not collect_name or not (isinstance(updatedict, dict) and updatedict != {}):
self.logger.error(self.log_prefix + ' 从集合' + collect_name + '更新数据失败,原因:参数update_dict格式出错')
return (False, False)
collection = self.dbs[collect_name]
condition_dict = self._handler_condition(condition_dict)
result = collection.update(condition_dict, updatedict)
        # result looks like {'n': 1, 'nModified': 1, 'ok': 1.0, 'updatedExisting': True}
        # or {'n': 0, 'nModified': 0, 'ok': 1.0, 'updatedExisting': False}
try :
res_code = result['n']
if res_code :
self.logger.info(self.log_prefix + ' 从集合' + collect_name + '更新数据成功')
return (True, True)
else :
self.logger.warn(self.log_prefix + ' 从集合' + collect_name + '更新数据失败,原因:根据查询条件无法查询到指定数据,使用插入函数进行处理')
return self.insert(update_dict, addtime=addtime)
except Exception as e:
self.logger.warn(self.log_prefix + ' 从集合' + collect_name + '更新数据失败,原因:根据查询条件无法查询到指定数据,使用插入函数进行处理,' + str(e))
return self.insert(update_dict, addtime=addtime)
def group_by(self, collect, field):
        '''
        Perform a group-by computation via MongoDB's aggregate command
        :parameters
        collect: collection name
        field: group-by field (a string, list or dict)
        :returns
        A tuple, (False, reason) or (True, result list)
        '''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
if not isinstance(collect, str):
self.logger.error(self.log_prefix + ' 使用group方式查询数据失败,原因:参数collect(即集合名)不为字符串')
return (False, False)
collection = self.dbs[collect]
if isinstance(field, str) :
condition = [{"$group":{"_id" :"$" + field}}]
elif isinstance(field, (list, tuple)) :
if len(field) == 1 :
field = field[0]
condition = [{"$group":{"_id" :"$" + field}}]
elif len(field) == 0 :
self.logger.error(self.log_prefix + ' 从集合' + collect + '使用group方式查询数据失败,原因:参数field(即查询字段)为空')
return (False, False)
else :
temp_dict = {}
for f in field :
temp_dict[f] = '$'+f
condition = [{"$group":{"_id" :temp_dict}}]
elif isinstance(field, dict):
condition = [{"$group":{"_id" :field}}]
else :
self.logger.error(self.log_prefix + ' 从集合' + collect + '使用group方式查询数据失败,原因:参数field(即查询字段)必须是字符串、字典、列表等数据类型')
return (False, False)
try :
query_list = collection.aggregate(condition)
except Exception as e :
            self.logger.error(self.log_prefix + ' group-by query on collection ' + collect + ' failed, reason: ' + str(e))
return (False, False)
result_list = []
for query in query_list :
result = query['_id']
result_list.append(result)
        self.logger.info(self.log_prefix + ' group-by query on collection ' + collect + ' succeeded')
return (True, result_list)
def remove(self, collect_name, condition_dict):
'''
        Delete the documents matching the given condition
        :parameters
            collect_name: collection name
            condition_dict: query conditions
        :returns
            a tuple (success flag, result message)
'''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
if not isinstance(condition_dict, dict):
            self.logger.error(self.log_prefix + ' failed to delete data from collection ' + collect_name + ', reason: the condition is not a dict')
            return (False, 'failed to delete data, the condition is not a dict')
if not condition_dict :
            self.logger.error(self.log_prefix + ' failed to delete data from collection ' + collect_name + ', reason: the condition must not be empty')
            return (False, 'failed to delete data, the condition must not be empty')
collection = self.dbs[collect_name]
try :
result = collection.remove(condition_dict)
if result["ok"] == 1.0 :
                self.logger.info(self.log_prefix + ' successfully deleted data from collection ' + collect_name + ', documents deleted: ' + str(result["n"]))
                return (True, 'successfully deleted data, documents deleted: ' + str(result["n"]))
else :
                self.logger.error(self.log_prefix + ' failed to delete data from collection ' + collect_name + ', reason: ' + str(result))
return (False, result)
except Exception as e:
            self.logger.error(self.log_prefix + ' failed to delete data from collection ' + collect_name + ', reason: unknown error, ' + str(e))
            return (False, 'unknown error, reason: ' + str(e))
def remove_all(self, collect_name):
'''
        Delete all data in a collection; avoid using this if possible (renaming the collection is safer)
        :parameters
            collect_name: collection name
        :returns
            a tuple (success flag, result message)
'''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
collection = self.dbs[collect_name]
try :
result = collection.remove({})
if result["ok"] == 1.0 :
                self.logger.info(self.log_prefix + ' successfully deleted all data from collection ' + collect_name + ', documents deleted: ' + str(result["n"]))
                return (True, 'successfully deleted all data, documents deleted: ' + str(result["n"]))
else :
                self.logger.error(self.log_prefix + ' failed to delete all data from collection ' + collect_name + ', reason: ' + str(result))
                return (False, 'failed to delete all data, reason: ' + str(result))
except Exception as e:
            self.logger.error(self.log_prefix + ' failed to delete all data from collection ' + collect_name + ', reason: unknown error, ' + str(e))
            return (False, 'failed to delete all data, unknown error, reason: ' + str(e))
def rename_collect(self, old_collect , new_collect):
'''
        Rename a collection
        :parameters
            old_collect: old collection name
            new_collect: new collection name
        :returns
            a tuple (success flag, result message)
'''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
collection = self.dbs[old_collect]
try :
collection.rename(new_collect)
            self.logger.info(self.log_prefix + ' successfully renamed collection ' + old_collect + ' to ' + new_collect)
            return (True, 'rename succeeded')
except Exception as e:
            self.logger.error(self.log_prefix + ' failed to rename collection ' + old_collect + ' to ' + new_collect + ', reason: ' + str(e))
            return (False, 'rename failed, reason: ' + str(e))
def drop_collect(self, collect):
'''
        Drop a collection
        :parameters
            collect: collection name
        :returns
            a tuple (success flag, result message)
'''
if not self.connecter:
self.logger.error(self.error_reason)
return (False, self.error_reason)
collection = self.dbs[collect]
try :
collection.drop()
            self.logger.info(self.log_prefix + ' successfully dropped collection ' + collect)
            return (True, 'collection dropped successfully')
except Exception as e:
            self.logger.error(self.log_prefix + ' failed to drop collection ' + collect + ', reason: ' + str(e))
            return (False, 'failed to drop collection, reason: ' + str(e))
|
examples/esgf_integration_example.py
|
prashantarya12/climate
| 132 |
68588
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
esgf_integration_example.py
Use OCW to download an ESGF dataset into the common format of an OCW dataset object.
In this example:
1. Download an ESGF (https://esgf.llnl.gov/) dataset and load it into a OCW dataset object.
OCW modules demonstrated:
1. datasource/esgf
"""
from __future__ import print_function
import ssl
import sys
from getpass import getpass
import ocw.data_source.esgf as esgf
def main():
"""
An example of using the OCW ESGF library. Connects to an ESGF
server and downloads a dataset.
"""
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
dataset_id = 'obs4mips.CNES.AVISO.zos.mon.v20110829|esgf-data.jpl.nasa.gov'
variable = 'zosStderr'
if sys.version_info[0] >= 3:
username = input('Enter your ESGF OpenID:\n')
else:
username = raw_input('Enter your ESGF OpenID:\n')
password = getpass(prompt='Enter your ESGF Password:\n')
# Multiple datasets are returned in a list if the ESGF dataset is
# divided into multiple files.
datasets = esgf.load_dataset(dataset_id, variable, username, password)
# For this example, our dataset is only stored in a single file so
# we only need to look at the 0-th value in the returned list.
dataset = datasets[0]
print('\n--------\n')
print('Variable: ', dataset.variable)
print('Shape: ', dataset.values.shape)
print('A Value: ', dataset.values[100][100][100])
if __name__ == '__main__':
main()
|
xmodaler/engine/build.py
|
cclauss/xmodaler
| 830 |
68589
|
# Copyright 2021 JD.com, Inc., JD AI
"""
@author: <NAME>
@contact: <EMAIL>
"""
from xmodaler.utils.registry import Registry
ENGINE_REGISTRY = Registry("ENGINE")
ENGINE_REGISTRY.__doc__ = """
Registry for engine
"""
def build_engine(cfg):
engine = ENGINE_REGISTRY.get(cfg.ENGINE.NAME)(cfg)
return engine
|
speechbrain/nnet/loss/si_snr_loss.py
|
mj-kh/speechbrain
| 3,913 |
68606
|
"""
# Authors:
* <NAME> 2021
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
"""
import torch
import numpy as np
smallVal = np.finfo("float").eps # To avoid divide by zero
def si_snr_loss(y_pred_batch, y_true_batch, lens, reduction="mean"):
"""Compute the si_snr score and return -1 * that score.
This function can be used as a loss function for training
with SGD-based updates.
Arguments
---------
y_pred_batch : torch.Tensor
The degraded (enhanced) waveforms.
y_true_batch : torch.Tensor
The clean (reference) waveforms.
lens : torch.Tensor
The relative lengths of the waveforms within the batch.
reduction : str
The type of reduction ("mean" or "batch") to use.
Example
-------
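    >>> # Minimal sketch; shapes and values are illustrative, not from the original docs.
    >>> pred = torch.randn(2, 16000)
    >>> clean = torch.randn(2, 16000)
    >>> lens = torch.ones(2)
    >>> si_snr_loss(pred, clean, lens).shape
    torch.Size([])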
"""
y_pred_batch = torch.squeeze(y_pred_batch, dim=-1)
y_true_batch = torch.squeeze(y_true_batch, dim=-1)
batch_size = y_pred_batch.shape[0]
SI_SNR = torch.zeros(batch_size)
for i in range(0, batch_size): # Run over mini-batches
s_target = y_true_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
s_estimate = y_pred_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
# s_target = <s', s>s / ||s||^2
dot = torch.sum(s_estimate * s_target, dim=0, keepdim=True)
s_target_energy = (
torch.sum(s_target ** 2, dim=0, keepdim=True) + smallVal
)
proj = dot * s_target / s_target_energy
# e_noise = s' - s_target
e_noise = s_estimate - proj
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
si_snr_beforelog = torch.sum(proj ** 2, dim=0) / (
torch.sum(e_noise ** 2, dim=0) + smallVal
)
SI_SNR[i] = 10 * torch.log10(si_snr_beforelog + smallVal)
if reduction == "mean":
return -SI_SNR.mean()
return -SI_SNR
|
nodes/2.x/python/Group.IsMirrored.py
|
andydandy74/ClockworkForDynamo
| 147 |
68608
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
refgroup = UnwrapElement(IN[0])
groups = UnwrapElement(IN[1])
# Get Mirrored state of first family instance in reference group instance
refGroupMembers = refgroup.GetMemberIds()
numMembers = len(refGroupMembers)
counter = 0
membernum = None
refGroupType = refgroup.GroupType.Id.IntegerValue
for member in refGroupMembers:
elem = refgroup.Document.GetElement(member)
if elem.GetType().ToString() == "Autodesk.Revit.DB.FamilyInstance":
state = elem.Mirrored
membernum = counter
famtype = elem.GetTypeId().IntegerValue
break
counter += 1
# Default values for flags
refGroupIntact = True
noFamInsts = False
# Set a flag if the reference group contains no family instances
if membernum is None: noFamInsts = True
else:
bools = []
# Compare Mirrored state with corresponding members of other group instances
for group in groups:
# Get number of group members
theseMembers = group.GetMemberIds()
theseMembersNum = len(theseMembers)
# Set a flag if any group instance has more members than the reference group instance
# (only if both are of the same group type)
if theseMembersNum > numMembers and refGroupType == group.GroupType.Id.IntegerValue:
refGroupIntact = False
break
# Return null if group is of another group type
elif refGroupType != group.GroupType.Id.IntegerValue: bools.append(None)
# Return null for group instances with excluded members
elif theseMembersNum < numMembers: bools.append(None)
        # Return null if the family instance to compare is of a different type
elif group.Document.GetElement(theseMembers[membernum]).GetTypeId().IntegerValue != famtype: bools.append(None)
# Otherwise compare Mirrored state
else: bools.append(group.Document.GetElement(theseMembers[membernum]).Mirrored != state)
# Return null for all groups if the first group has excluded members
# or if it does not contain any family instances
if not refGroupIntact or noFamInsts: bools = [None] * len(groups)
OUT = bools
|
envpool/python/api.py
|
TachikakaMin/envpool
| 330 |
68618
|
<reponame>TachikakaMin/envpool
# Copyright 2021 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Api wrapper layer for EnvPool."""
from typing import Tuple, Type
from .dm_envpool import DMEnvPoolMeta
from .env_spec import EnvSpecMeta
from .gym_envpool import GymEnvPoolMeta
from .protocol import EnvPool, EnvSpec
def py_env(
envspec: Type[EnvSpec], envpool: Type[EnvPool]
) -> Tuple[Type[EnvSpec], Type[EnvPool], Type[EnvPool]]:
"""Initialize EnvPool for users."""
# remove the _ prefix added when registering cpp class via pybind
spec_name = envspec.__name__[1:]
pool_name = envpool.__name__[1:]
return (
EnvSpecMeta(spec_name, (envspec,), {}), # type: ignore[return-value]
DMEnvPoolMeta(pool_name.replace("EnvPool", "DMEnvPool"), (envpool,), {}),
GymEnvPoolMeta(pool_name.replace("EnvPool", "GymEnvPool"), (envpool,), {}),
)
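# Illustrative usage (the _Dummy* names below are hypothetical pybind-registered classes):
#   DummyEnvSpec, DummyDMEnvPool, DummyGymEnvPool = py_env(_DummyEnvSpec, _DummyEnvPool)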
|
fastai_do_not_use/callbacks/rnn.py
|
anhquan0412/fastai_v1
| 115 |
68623
|
"Regroups lr adjustment to seq_len, AR and TAR"
from ..torch_core import *
from ..callback import *
from ..basic_train import Learner
__all__ = ['RNNTrainer']
@dataclass
class RNNTrainer(Callback):
"`Callback` that regroups lr adjustment to seq_len, AR and TAR"
learn:Learner
bptt:int
alpha:float=0.
beta:float=0.
adjust:bool=True
def on_loss_begin(self, last_output:Tuple[Tensor,Tensor,Tensor], **kwargs):
#Save the extra outputs for later and only returns the true output.
self.raw_out,self.out = last_output[1],last_output[2]
return last_output[0]
def on_backward_begin(self, last_loss:Rank0Tensor, last_input:Tensor, last_output:Tensor, **kwargs):
#Adjusts the lr to the bptt selected
if self.adjust: self.learn.opt.lr *= last_input.size(0) / self.bptt
#AR and TAR
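        # (AR penalizes large activations of the final output; TAR penalizes large changes
        # between consecutive time steps of the raw RNN outputs.)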
if self.alpha != 0.: last_loss += (self.alpha * self.out[-1].pow(2).mean()).sum()
if self.beta != 0.:
h = self.raw_out[-1]
if len(h)>1: last_loss += (self.beta * (h[1:] - h[:-1]).pow(2).mean()).sum()
return last_loss
|
bookwyrm/migrations/0093_alter_sitesettings_instance_short_description.py
|
mouse-reeve/fedireads
| 270 |
68635
|
# Generated by Django 3.2.4 on 2021-09-10 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0092_sitesettings_instance_short_description"),
]
operations = [
migrations.AlterField(
model_name="sitesettings",
name="instance_short_description",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
visualdl/utils/update_util.py
|
rainyfly/VisualDL
| 4,861 |
68637
|
# Copyright (c) 2020 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import threading
import hashlib
import requests
from visualdl import __version__
from visualdl.proto.record_pb2 import DESCRIPTOR
def md5(text):
if isinstance(text, str):
text = text.encode("utf8")
md5 = hashlib.md5()
md5.update(text)
return md5.hexdigest()
class PbUpdater(threading.Thread):
def __init__(self, product='normal'):
self.product = product
threading.Thread.__init__(self)
def update_pb(self,
version=__version__,
md5_code=md5(str(DESCRIPTOR))
):
payload = {
"data": {
"version": version,
"md5": md5_code,
"product": self.product
}
}
url = 'https://paddlepaddle.org.cn/paddlehub/stat?from=vdl'
try:
r = requests.post(url=url, json=payload)
if r.json().get("update_flag", 0) == 1:
pb_bin = r.json().get("pb_bin")
with open('/visualdl/proto/record_pb2.py', mode='wb') as fp:
fp.write(pb_bin)
print('Update pb file successfully.')
except Exception:
pass
def run(self):
self.update_pb(version=__version__,
md5_code=md5(str(DESCRIPTOR))
)
|
tests/issues/gh195.py
|
aureooms-contrib/ics-py
| 312 |
68643
|
import pytest
from ics import Calendar, ContentLine
def test_gh195_override_prodid():
lines = [
"BEGIN:VCALENDAR",
"VERSION:2.0",
"X-WR-CALNAME:<NAME>",
"X-APPLE-CALENDAR-COLOR:#996633",
"END:VCALENDAR"
]
with pytest.raises(ValueError, match="attribute PRODID is required but got no value"):
Calendar(lines)
calendar = Calendar()
assert calendar.prodid == Calendar.DEFAULT_PRODID
assert ContentLine("PRODID", value=Calendar.DEFAULT_PRODID) in calendar.to_container()
test_prodid = "TEST_PRODID 123456 GitHub Issue 195"
lines.insert(1, "PRODID:" + test_prodid)
calendar = Calendar(lines)
assert calendar.prodid == test_prodid
assert ContentLine("PRODID", value=test_prodid) in calendar.to_container()
|
tests/core/actions/test_loops.py
|
fintzd/rasa
| 9,701 |
68649
|
<reponame>fintzd/rasa
from typing import List, Any, Text
import pytest
from rasa.core.actions.loops import LoopAction
from rasa.core.channels import CollectingOutputChannel
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import (
Event,
ActionExecutionRejected,
ActionExecuted,
ActiveLoop,
SlotSet,
)
from rasa.core.nlg import TemplatedNaturalLanguageGenerator
from rasa.shared.core.trackers import DialogueStateTracker
async def test_whole_loop():
expected_activation_events = [
ActionExecutionRejected("tada"),
ActionExecuted("test"),
]
expected_do_events = [ActionExecuted("do")]
expected_deactivation_events = [SlotSet("deactivated")]
form_name = "my form"
class MyLoop(LoopAction):
def name(self) -> Text:
return form_name
async def activate(self, *args: Any) -> List[Event]:
return expected_activation_events
async def do(self, *args: Any) -> List[Event]:
events_so_far = args[-1]
assert events_so_far == [ActiveLoop(form_name), *expected_activation_events]
return expected_do_events
async def deactivate(self, *args) -> List[Event]:
events_so_far = args[-1]
assert events_so_far == [
ActiveLoop(form_name),
*expected_activation_events,
*expected_do_events,
ActiveLoop(None),
]
return expected_deactivation_events
async def is_done(self, *args) -> bool:
events_so_far = args[-1]
return events_so_far == [
ActiveLoop(form_name),
*expected_activation_events,
*expected_do_events,
]
tracker = DialogueStateTracker.from_events("some sender", [])
domain = Domain.empty()
action = MyLoop()
actual = await action.run(
CollectingOutputChannel(),
TemplatedNaturalLanguageGenerator(domain.responses),
tracker,
domain,
)
assert actual == [
ActiveLoop(form_name),
*expected_activation_events,
*expected_do_events,
ActiveLoop(None),
*expected_deactivation_events,
]
async def test_loop_without_deactivate():
expected_activation_events = [
ActionExecutionRejected("tada"),
ActionExecuted("test"),
]
expected_do_events = [ActionExecuted("do")]
form_name = "my form"
class MyLoop(LoopAction):
def name(self) -> Text:
return form_name
async def activate(self, *args: Any) -> List[Event]:
return expected_activation_events
async def do(self, *args: Any) -> List[Event]:
return expected_do_events
async def deactivate(self, *args) -> List[Event]:
raise ValueError("this shouldn't be called")
async def is_done(self, *args) -> bool:
return False
tracker = DialogueStateTracker.from_events("some sender", [])
domain = Domain.empty()
action = MyLoop()
actual = await action.run(
CollectingOutputChannel(),
TemplatedNaturalLanguageGenerator(domain.responses),
tracker,
domain,
)
assert actual == [
ActiveLoop(form_name),
*expected_activation_events,
*expected_do_events,
]
async def test_loop_without_activate_and_without_deactivate():
expected_do_events = [ActionExecuted("do")]
form_name = "my form"
class MyLoop(LoopAction):
def name(self) -> Text:
return form_name
async def activate(self, *args: Any) -> List[Event]:
raise ValueError("this shouldn't be called")
async def do(self, *args: Any) -> List[Event]:
return expected_do_events
async def deactivate(self, *args) -> List[Event]:
return [SlotSet("deactivated")]
async def is_activated(self, *args: Any) -> bool:
return True
async def is_done(self, *args) -> bool:
return False
tracker = DialogueStateTracker.from_events("some sender", [])
domain = Domain.empty()
action = MyLoop()
actual = await action.run(
CollectingOutputChannel(),
TemplatedNaturalLanguageGenerator(domain.responses),
tracker,
domain,
)
assert actual == [*expected_do_events]
async def test_raise_not_implemented_error():
loop = LoopAction()
with pytest.raises(NotImplementedError):
await loop.do(None, None, None, None, [])
with pytest.raises(NotImplementedError):
await loop.is_done(None, None, None, None, [])
|
plugins/modules/oci_database_autonomous_container_database_dataguard_association_actions.py
|
slmjy/oci-ansible-collection
| 108 |
68659
|
<reponame>slmjy/oci-ansible-collection
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_autonomous_container_database_dataguard_association_actions
short_description: Perform actions on an AutonomousContainerDatabaseDataguardAssociation resource in Oracle Cloud Infrastructure
description:
- Perform actions on an AutonomousContainerDatabaseDataguardAssociation resource in Oracle Cloud Infrastructure
- For I(action=failover), fails over the standby Autonomous Container Database identified by the autonomousContainerDatabaseId parameter to the primary
Autonomous Container Database after the existing primary Autonomous Container Database fails or becomes unreachable.
A failover can result in data loss, depending on the protection mode in effect at the time the primary Autonomous Container Database fails.
- For I(action=reinstate), reinstates a disabled standby Autonomous Container Database, identified by the autonomousContainerDatabaseId parameter, to an
active standby Autonomous Container Database.
- For I(action=switchover), switches over the primary Autonomous Container Database of an Autonomous Data Guard peer association to standby role. The
standby Autonomous Container Database associated with autonomousContainerDatabaseDataguardAssociationId assumes the primary Autonomous Container Database
role.
A switchover incurs no data loss.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
autonomous_container_database_id:
description:
- The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
required: true
autonomous_container_database_dataguard_association_id:
description:
- The Autonomous Container Database-Autonomous Data Guard association
L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
aliases: ["id"]
required: true
action:
description:
- The action to perform on the AutonomousContainerDatabaseDataguardAssociation.
type: str
required: true
choices:
- "failover"
- "reinstate"
- "switchover"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Perform action failover on autonomous_container_database_dataguard_association
oci_database_autonomous_container_database_dataguard_association_actions:
# required
autonomous_container_database_id: "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx"
autonomous_container_database_dataguard_association_id: "ocid1.autonomouscontainerdatabasedataguardassociation.oc1..xxxxxxEXAMPLExxxxxx"
action: failover
- name: Perform action reinstate on autonomous_container_database_dataguard_association
oci_database_autonomous_container_database_dataguard_association_actions:
# required
autonomous_container_database_id: "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx"
autonomous_container_database_dataguard_association_id: "ocid1.autonomouscontainerdatabasedataguardassociation.oc1..xxxxxxEXAMPLExxxxxx"
action: reinstate
- name: Perform action switchover on autonomous_container_database_dataguard_association
oci_database_autonomous_container_database_dataguard_association_actions:
# required
autonomous_container_database_id: "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx"
autonomous_container_database_dataguard_association_id: "ocid1.autonomouscontainerdatabasedataguardassociation.oc1..xxxxxxEXAMPLExxxxxx"
action: switchover
"""
RETURN = """
autonomous_container_database_dataguard_association:
description:
- Details of the AutonomousContainerDatabaseDataguardAssociation resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The OCID of the Autonomous Data Guard created for a given Autonomous Container Database.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
autonomous_container_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Container Database that has a
relationship with the peer Autonomous Container Database.
returned: on success
type: str
sample: "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx"
role:
description:
- The Data Guard role of the Autonomous Container Database or Autonomous Database, if Autonomous Data Guard is enabled.
returned: on success
type: str
sample: PRIMARY
lifecycle_state:
description:
- The current state of Autonomous Data Guard.
returned: on success
type: str
sample: PROVISIONING
lifecycle_details:
description:
- Additional information about the current lifecycleState, if available.
returned: on success
type: str
sample: lifecycle_details_example
peer_autonomous_container_database_dataguard_association_id:
description:
- The OCID of the peer Autonomous Container Database-Autonomous Data Guard association.
returned: on success
type: str
sample: "ocid1.peerautonomouscontainerdatabasedataguardassociation.oc1..xxxxxxEXAMPLExxxxxx"
peer_autonomous_container_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the peer Autonomous Container Database.
returned: on success
type: str
sample: "ocid1.peerautonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx"
peer_role:
description:
- The Data Guard role of the Autonomous Container Database or Autonomous Database, if Autonomous Data Guard is enabled.
returned: on success
type: str
sample: PRIMARY
peer_lifecycle_state:
description:
- The current state of Autonomous Data Guard.
returned: on success
type: str
sample: PROVISIONING
protection_mode:
description:
- The protection mode of this Autonomous Data Guard association. For more information, see
L(Oracle Data Guard Protection Modes,http://docs.oracle.com/database/122/SBYDB/oracle-data-guard-protection-modes.htm#SBYDB02000)
in the Oracle Data Guard documentation.
returned: on success
type: str
sample: MAXIMUM_AVAILABILITY
apply_lag:
description:
- The lag time between updates to the primary Autonomous Container Database and application of the redo data on the standby Autonomous Container
Database,
as computed by the reporting database.
- "Example: `9 seconds`"
returned: on success
type: str
sample: apply_lag_example
apply_rate:
description:
- The rate at which redo logs are synchronized between the associated Autonomous Container Databases.
- "Example: `180 Mb per second`"
returned: on success
type: str
sample: apply_rate_example
transport_lag:
description:
- The approximate number of seconds of redo data not yet available on the standby Autonomous Container Database,
as computed by the reporting database.
- "Example: `7 seconds`"
returned: on success
type: str
sample: transport_lag_example
time_last_synced:
description:
- The date and time of the last update to the apply lag, apply rate, and transport lag values.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_created:
description:
- The date and time the Autonomous DataGuard association was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_last_role_changed:
description:
- The date and time when the last role change action happened.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"autonomous_container_database_id": "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx",
"role": "PRIMARY",
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example",
"peer_autonomous_container_database_dataguard_association_id": "ocid1.peerautonomouscontainerdatabasedataguardassociation.oc1..xxxxxxEXAMPLExxxxxx",
"peer_autonomous_container_database_id": "ocid1.peerautonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx",
"peer_role": "PRIMARY",
"peer_lifecycle_state": "PROVISIONING",
"protection_mode": "MAXIMUM_AVAILABILITY",
"apply_lag": "apply_lag_example",
"apply_rate": "apply_rate_example",
"transport_lag": "transport_lag_example",
"time_last_synced": "2013-10-20T19:20:30+01:00",
"time_created": "2013-10-20T19:20:30+01:00",
"time_last_role_changed": "2013-10-20T19:20:30+01:00"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.work_requests import WorkRequestClient
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutonomousContainerDatabaseDataguardAssociationActionsHelperGen(
OCIActionsHelperBase
):
"""
Supported actions:
failover
reinstate
switchover
"""
def __init__(self, *args, **kwargs):
super(
AutonomousContainerDatabaseDataguardAssociationActionsHelperGen, self
).__init__(*args, **kwargs)
self.work_request_client = WorkRequestClient(
self.client._config, **self.client._kwargs
)
@staticmethod
def get_module_resource_id_param():
return "autonomous_container_database_dataguard_association_id"
def get_module_resource_id(self):
return self.module.params.get(
"autonomous_container_database_dataguard_association_id"
)
def get_get_fn(self):
return self.client.get_autonomous_container_database_dataguard_association
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_autonomous_container_database_dataguard_association,
autonomous_container_database_id=self.module.params.get(
"autonomous_container_database_id"
),
autonomous_container_database_dataguard_association_id=self.module.params.get(
"autonomous_container_database_dataguard_association_id"
),
)
def failover(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.failover_autonomous_container_database_dataguard_association,
call_fn_args=(),
call_fn_kwargs=dict(
autonomous_container_database_id=self.module.params.get(
"autonomous_container_database_id"
),
autonomous_container_database_dataguard_association_id=self.module.params.get(
"autonomous_container_database_dataguard_association_id"
),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.work_request_client,
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def reinstate(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.reinstate_autonomous_container_database_dataguard_association,
call_fn_args=(),
call_fn_kwargs=dict(
autonomous_container_database_id=self.module.params.get(
"autonomous_container_database_id"
),
autonomous_container_database_dataguard_association_id=self.module.params.get(
"autonomous_container_database_dataguard_association_id"
),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.work_request_client,
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def switchover(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.switchover_autonomous_container_database_dataguard_association,
call_fn_args=(),
call_fn_kwargs=dict(
autonomous_container_database_id=self.module.params.get(
"autonomous_container_database_id"
),
autonomous_container_database_dataguard_association_id=self.module.params.get(
"autonomous_container_database_dataguard_association_id"
),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.work_request_client,
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
AutonomousContainerDatabaseDataguardAssociationActionsHelperCustom = get_custom_class(
"AutonomousContainerDatabaseDataguardAssociationActionsHelperCustom"
)
class ResourceHelper(
AutonomousContainerDatabaseDataguardAssociationActionsHelperCustom,
AutonomousContainerDatabaseDataguardAssociationActionsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
autonomous_container_database_id=dict(type="str", required=True),
autonomous_container_database_dataguard_association_id=dict(
aliases=["id"], type="str", required=True
),
action=dict(
type="str",
required=True,
choices=["failover", "reinstate", "switchover"],
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="autonomous_container_database_dataguard_association",
service_client_class=DatabaseClient,
namespace="database",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
|
proj/mnv2_first/gateware/output.py
|
keadwen/CFU-Playground
| 240 |
68726
|
#!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nmigen import Signal
from .registerfile import Xetter
class OutputQueueGetter(Xetter):
"""A getter to read from a 32-bit wide FIFO.
Blocks until data is available.
Public Interface
----------------
r_data: Signal(32) in
The FIFO's r_data signal
r_rdy: Signal() in
The FIFO's r_rdy signal
r_en: Signal() out
The FIFO's r_en signal
"""
def __init__(self):
super().__init__()
self.r_data = Signal(32)
self.r_rdy = Signal()
self.r_en = Signal()
def connect(self, fifo):
"""Returns statements to comb this instance to a fifo"""
return [
self.r_data.eq(fifo.r_data),
self.r_rdy.eq(fifo.r_rdy),
fifo.r_en.eq(self.r_en),
]
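    # Typical wiring (sketch; `fifo` is assumed to expose r_data/r_rdy/r_en signals):
    #   m.d.comb += output_queue_getter.connect(fifo)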
def elab(self, m):
waiting = Signal()
        with m.If(self.start | waiting):
m.d.comb += self.r_en.eq(1)
with m.If(self.r_rdy):
m.d.comb += [
self.output.eq(self.r_data),
self.done.eq(1),
]
m.d.sync += waiting.eq(0)
with m.Else():
m.d.sync += waiting.eq(1)
|
executor/cook/progress.py
|
CGe0516/Cook
| 345 |
68731
|
import logging
import os
import re
import time
from threading import Event, Lock, Thread
import cook.util as cu
class ProgressSequenceCounter:
"""Utility class that supports atomically incrementing the sequence value."""
def __init__(self, initial=0):
self.lock = Lock()
self.value = initial
def increment_and_get(self):
"""Atomically increments by one the current value and returns the new value."""
with self.lock:
self.value += 1
return self.value
class ProgressUpdater(object):
"""This class is responsible for sending progress updates to the scheduler.
It throttles the rate at which progress updates are sent.
"""
def __init__(self, task_id, max_message_length, poll_interval_ms, send_progress_message_fn):
"""
task_id: string
The task id.
max_message_length: int
The allowed max message length after encoding.
poll_interval_ms: int
The interval after which to send a subsequent progress update.
send_progress_message_fn: function(message)
The helper function used to send the progress message.
"""
self.task_id = task_id
self.max_message_length = max_message_length
self.poll_interval_ms = poll_interval_ms
self.last_reported_time = None
self.last_progress_data_sent = None
self.send_progress_message = send_progress_message_fn
self.lock = Lock()
def has_enough_time_elapsed_since_last_update(self):
"""Returns true if enough time (based on poll_interval_ms) has elapsed since
the last progress update (available in last_reported_time).
"""
if self.last_reported_time is None:
return True
else:
current_time = time.time()
time_diff_ms = (current_time - self.last_reported_time) * 1000
return time_diff_ms >= self.poll_interval_ms
def is_increasing_sequence(self, progress_data):
"""Checks if the sequence number in progress_data is larger than the previously published progress.
Parameters
----------
progress_data: dictionary
The progress data to send.
Returns
-------
True if the sequence number in progress_data is larger than the previously published progress, False otherwise
"""
last_progress_data = self.last_progress_data_sent
last_progress_sequence = last_progress_data['progress-sequence'] if last_progress_data else -1
return progress_data['progress-sequence'] > last_progress_sequence
def send_progress_update(self, progress_data, force_send=False):
"""Sends a progress update if enough time has elapsed since the last progress update.
The force_send flag can be used to ignore the check for enough time having elapsed.
Using this method is thread-safe.
Parameters
----------
progress_data: dictionary
The progress data to send.
force_send: boolean, optional
Defaults to false.
Returns
-------
Nothing
"""
with self.lock:
# ensure we do not send outdated progress data due to parallel repeated calls to this method
if progress_data is None or not self.is_increasing_sequence(progress_data):
logging.info('Skipping invalid/outdated progress data {}'.format(progress_data))
elif not force_send and not self.has_enough_time_elapsed_since_last_update():
logging.debug('Not sending progress data as enough time has not elapsed since last update')
else:
logging.info('Sending progress message {}'.format(progress_data))
message_dict = dict(progress_data)
message_dict['task-id'] = self.task_id
raw_progress_message = progress_data['progress-message']
try:
progress_str = raw_progress_message.decode('ascii').strip()
except UnicodeDecodeError:
logging.info('Unable to decode progress message in ascii, using empty string instead')
progress_str = ''
if len(progress_str) <= self.max_message_length:
message_dict['progress-message'] = progress_str
else:
allowed_progress_message_length = max(self.max_message_length - 3, 0)
new_progress_str = progress_str[:allowed_progress_message_length].strip() + '...'
logging.info('Progress message trimmed to {}'.format(new_progress_str))
message_dict['progress-message'] = new_progress_str
send_success = self.send_progress_message(message_dict)
if send_success:
self.last_progress_data_sent = progress_data
self.last_reported_time = time.time()
else:
logging.info('Unable to send progress message {}'.format(message_dict))
class ProgressWatcher(object):
"""This class tails the output from the target file listening for progress messages.
The retrieve_progress_states generates all progress messages iteratively.
"""
def __init__(self, output_name, location_tag, sequence_counter, max_bytes_read_per_line, progress_regex_string,
stop_signal, task_completed_signal, progress_termination_signal):
"""The ProgressWatcher constructor.
Parameters
----------
progress_regex_string: string
The progress regex to match against, it must return one or two capture groups.
The first capture group represents the progress percentage.
The second capture group, if present, represents the progress message.
"""
self.target_file = output_name
self.location_tag = location_tag
self.sequence_counter = sequence_counter
self.max_bytes_read_per_line = max_bytes_read_per_line
self.progress_regex_string = progress_regex_string
self.progress_regex_pattern = re.compile(progress_regex_string.encode())
self.progress = None
self.stop_signal = stop_signal
self.task_completed_signal = task_completed_signal
self.progress_termination_signal = progress_termination_signal
def current_progress(self):
"""Returns the current progress dictionary."""
return self.progress
def tail(self, sleep_time_ms):
"""This method incrementally generates lines from a file by waiting for new content from a file.
It behaves like the 'tail -f' shell command.
Parameters
----------
sleep_time_ms: int
The unit of time in ms to repetitively sleep when the file has not been created or no new
content is available in the file being tailed.
Returns
-------
an incrementally generated list of lines in the file being tailed.
"""
try:
sleep_param = sleep_time_ms / 1000
if os.path.exists(self.target_file) and not os.path.isfile(self.target_file):
logging.info('Skipping progress monitoring on %s as it is not a file', self.target_file)
return
if not os.path.isfile(self.target_file):
logging.debug('Awaiting creation of file %s [tag=%s]', self.target_file, self.location_tag)
while not os.path.isfile(self.target_file) and not self.task_completed_signal.isSet():
time.sleep(sleep_param)
if not os.path.isfile(self.target_file):
logging.info('Progress output file has not been created [tag=%s]', self.location_tag)
return
if self.stop_signal.isSet():
logging.info('Parsing progress messages interrupted [tag=%s]', self.location_tag)
return
logging.info('File has been created, reading contents [tag=%s]', self.location_tag)
linesep_bytes = os.linesep.encode()
fragment_index = 0
line_index = 0
def log_tail_summary():
log_message = '%s fragments and %s lines read while processing progress messages [tag=%s]'
logging.info(log_message, fragment_index, line_index, self.location_tag)
with open(self.target_file, 'rb') as target_file_obj:
while not self.stop_signal.isSet():
if self.progress_termination_signal.isSet():
logging.info('tail short-circuiting due to progress termination [tag=%s]', self.location_tag)
log_tail_summary()
break
line = target_file_obj.readline(self.max_bytes_read_per_line)
if not line:
# exit if program has completed and there are no more lines to read
if self.task_completed_signal.isSet():
log_tail_summary()
break
# no new line available, sleep before trying again
time.sleep(sleep_param)
continue
fragment_index += 1
if line.endswith(linesep_bytes):
line_index += 1
yield line
if self.stop_signal.isSet() and not self.task_completed_signal.isSet():
logging.info('Task requested to be killed, may not have processed all progress messages')
except Exception as exception:
logging.exception('Error while tailing %s [tag=%s]', self.target_file, self.location_tag)
raise exception
def match_progress_update(self, input_data):
"""Returns the progress tuple when the input string matches the provided regex.
Parameters
----------
input_data: bytes
The input data.
Returns
-------
the tuple (percent, message) if the string matches the provided regex,
else return None.
"""
matches = self.progress_regex_pattern.findall(input_data)
return matches[0] if len(matches) >= 1 else None
def __update_progress(self, progress_report):
"""Updates the progress field with the data from progress_report if it is valid."""
if isinstance(progress_report, tuple) and len(progress_report) == 2:
percent_data, message_data = progress_report
elif isinstance(progress_report, tuple) and len(progress_report) == 1:
percent_data, message_data = progress_report[0], b''
else:
percent_data, message_data = progress_report, b''
percent_float = float(percent_data.decode())
if percent_float < 0 or percent_float > 100:
logging.info('Skipping "%s" as the percent is not in [0, 100]', progress_report)
return False
percent_int = int(round(percent_float))
logging.debug('Updating progress to %s percent [tag=%s]', percent_int, self.location_tag)
self.progress = {'progress-message': message_data,
'progress-percent': percent_int,
'progress-sequence': self.sequence_counter.increment_and_get()}
return True
def retrieve_progress_states(self):
"""Generates the progress states by tailing the target_file.
It tails a target file (using the tail() method) and uses the provided
regex to find a match for a progress message. The regex is expected to
generate two components in the match: the progress percent as an int and
a progress message string. When such a message is found, this method
yields the current progress as a dictionary.
Note: This function must rethrow any OSError exceptions that it encounters.
Returns
-------
An incrementally generated list of progress states.
"""
last_unprocessed_report = None
if self.progress_regex_string:
sleep_time_ms = 50
for line in self.tail(sleep_time_ms):
try:
progress_report = self.match_progress_update(line)
if progress_report is not None:
if self.task_completed_signal.isSet():
last_unprocessed_report = progress_report
elif self.__update_progress(progress_report):
yield self.progress
except Exception as exception:
if cu.is_out_of_memory_error(exception):
raise exception
else:
logging.exception('Skipping "%s" as a progress entry', line)
if last_unprocessed_report is not None:
if self.__update_progress(last_unprocessed_report):
yield self.progress
class ProgressTracker(object):
"""Helper class to track progress messages from the specified location."""
def __init__(self, config, stop_signal, task_completed_signal, counter, progress_updater,
progress_termination_signal, location, location_tag, os_error_handler):
"""Launches the threads that track progress and send progress updates to the driver.
Parameters
----------
config: cook.config.ExecutorConfig
The current executor config.
stop_signal: threading.Event
Event that determines if an interrupt was sent
task_completed_signal: threading.Event
Event that tracks task execution completion
progress_updater: ProgressUpdater
The progress updater used to send the progress messages
counter: ProgressSequenceCounter
The sequence counter
location: string
The target location to read for progress messages
location_tag: string
A tag to identify the target location.
os_error_handler: fn(os_error)
OSError exception handler for out of memory situations."""
self.location_tag = location_tag
self.os_error_handler = os_error_handler
self.progress_complete_event = Event()
self.watcher = ProgressWatcher(location, location_tag, counter, config.max_bytes_read_per_line,
config.progress_regex_string, stop_signal, task_completed_signal,
progress_termination_signal)
self.updater = progress_updater
def start(self):
"""Launches a thread that starts monitoring the progress location for progress messages."""
logging.info('Starting progress monitoring from [tag=%s]', self.location_tag)
tracker_thread = Thread(target=self.track_progress, args=())
tracker_thread.daemon = True
tracker_thread.start()
def wait(self, timeout=None):
"""Waits for the progress tracker thread to run to completion."""
self.progress_complete_event.wait(timeout=timeout)
if self.progress_complete_event.isSet():
logging.info('Progress monitoring complete [tag=%s]', self.location_tag)
else:
logging.info('Progress monitoring did not complete [tag=%s]', self.location_tag)
def track_progress(self):
"""Retrieves and sends progress updates using send_progress_update_fn.
It sets the progress_complete_event before returning."""
try:
for current_progress in self.watcher.retrieve_progress_states():
self.updater.send_progress_update(current_progress)
except Exception as exception:
if cu.is_out_of_memory_error(exception):
self.os_error_handler(exception)
else:
logging.exception('Exception while tracking progress [tag=%s]', self.location_tag)
finally:
self.progress_complete_event.set()
def force_send_progress_update(self):
"""Retrieves the latest progress message and attempts to force send it to the scheduler."""
latest_progress = self.watcher.current_progress()
self.updater.send_progress_update(latest_progress, force_send=True)
|
allure-python-commons-test/src/report.py
|
ammarnajjar/allure-python
| 558 |
68751
|
<reponame>ammarnajjar/allure-python
"""
>>> from hamcrest import assert_that
>>> class Report(object):
... def __init__(self):
... self.test_cases = [
... {
... 'fullName': 'package.module.test',
... 'id': '1'
... },
... {
... 'fullName': 'package.module.test[param]',
... 'id': '2'
... },
... {
... 'fullName': 'package.module.Class#test[param]',
... 'id': '3'
... }
... ]
>>> assert_that(Report(),
... has_test_case('test')
... )
>>> assert_that(Report(),
... has_test_case('test[param]')
... )
>>> assert_that(Report(),
... has_test_case('Class#test[param]')
... )
>>> assert_that(Report(),
... has_test_case('wrong_test_case_name')
... ) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: ...
Expected: ...
but: property 'test_cases' was <[{...}]>
<BLANKLINE>
>>> assert_that(Report(),
... has_test_case('test',
... has_entry('id', '1')
... )
... )
>>> assert_that(Report(),
... has_test_case('Class#test[param]',
... has_entry('id', '2')
... )
... ) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: ...
Expected: ...
but: property 'test_cases' was <[{...}]>
<BLANKLINE>
"""
import sys
import os
import json
import fnmatch
from hamcrest import all_of, any_of
from hamcrest import has_property
from hamcrest import has_item
from hamcrest import has_entry
from hamcrest import ends_with, starts_with
from hamcrest import only_contains
from hamcrest.core.base_matcher import BaseMatcher
if sys.version_info[0] < 3:
from io import open
class AllureReport(object):
def __init__(self, result):
self.result_dir = result
self.test_cases = [json.load(item) for item in self._report_items(result, '*result.json')]
self.test_containers = [json.load(item) for item in self._report_items(result, '*container.json')]
self.attachments = [item.read() for item in self._report_items(result, '*attachment.*')]
@staticmethod
def _report_items(report_dir, glob):
for _file in os.listdir(report_dir):
if fnmatch.fnmatch(_file, glob):
with open(os.path.join(report_dir, _file), encoding="utf-8") as report_file:
yield report_file
def has_test_case(name, *matchers):
return has_property('test_cases',
has_item(
all_of(
any_of(
has_entry('fullName', ends_with(name)),
has_entry('name', starts_with(name))
),
*matchers
)
)
)
class HasOnlyTetcases(BaseMatcher):
def __init__(self, *matchers):
self.matchers = matchers
def _matches(self, item):
return has_property('test_cases',
only_contains(any_of(*self.matchers))
).matches(item)
def describe_to(self, description):
pass
def has_only_testcases(*matchers):
return HasOnlyTetcases(*matchers)
class ContainsExactly(BaseMatcher):
def __init__(self, num, matcher):
self.matcher = matcher
self.count = 0
self.num = num
def _matches(self, item):
self.count = 0
for subitem in item:
if self.matcher.matches(subitem):
self.count += 1
if self.count == self.num:
return True
else:
return False
def describe_to(self, description):
description.append_text('exactly {} item(s) matching '.format(self.num)).append_text(self.matcher)
def has_only_n_test_cases(name, num, *matchers):
return has_property('test_cases',
ContainsExactly(num,
all_of(
any_of(
has_entry('fullName', ends_with(name)),
has_entry('name', ends_with(name))
),
*matchers
)
)
)
|
telegram_messages_dump/exporters/jsonl.py
|
emanuelegit/telegram-messages-dump
| 109 |
68762
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import json
from datetime import date, datetime
from .common import common
class jsonl(object):
""" jsonl exporter plugin.
    As opposed to the json exporter, jsonl serializes messages as one JSON object per line, not as
one giant array.
See http://jsonlines.org.
"""
# pylint: disable=no-self-use
def __init__(self):
""" constructor """
pass
# pylint: disable=unused-argument
def format(self, msg, exporter_context):
""" Formatter method. Takes raw msg and converts it to a *one-line* string.
:param msg: Raw message object :class:`telethon.tl.types.Message` and derivatives.
https://core.telegram.org/type/Message
:returns: *one-line* string containing one message data.
"""
# pylint: disable=line-too-long
name, _, content, re_id, is_sent_by_bot, is_contains_media, media_content = common.extract_message_data(msg)
msgDictionary = {
'message_id': msg.id,
'from_id': msg.from_id,
'reply_id': re_id,
'author': name,
'sent_by_bot': is_sent_by_bot,
'date': msg.date,
'content': content,
'contains_media': is_contains_media,
'media_content': media_content
}
msg_dump_str = json.dumps(
msgDictionary, default=self._json_serial, ensure_ascii=False)
return msg_dump_str
def begin_final_file(self, resulting_file, exporter_context):
""" Hook executes at the beginning of writing a resulting file.
(After BOM is written in case of --addbom)
"""
pass
def _json_serial(self, obj):
"""JSON serializer for objects not serializable by default json code
https://stackoverflow.com/questions/11875770/how-to-overcome-datetime-datetime-not-json-serializable
"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
|
utils/superpixel_projections.py
|
niqbal996/ViewAL
| 126 |
68772
|
<gh_stars>100-1000
import numpy as np
import os
import constants
from numpy.linalg import inv
from dataloader import indoor_scenes
import torch
from collections import OrderedDict, Counter
from tqdm import tqdm
def project_image_to_world(x, y, depth, cam2world, depth_intrinsic):
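    # Back-projection sketch: build homogeneous camera coordinates [x*d, y*d, d, 1]^T per pixel,
    # then map the whole batch to world space with cam2world @ inv(depth_intrinsic).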
I = torch.zeros(4, depth.shape[0]).type(torch.cuda.FloatTensor)
I[0, :] = x * depth
I[1, :] = y * depth
I[2, :] = depth
I[3, :] = 1.0
world_coordinates = torch.mm(torch.from_numpy(cam2world).type(torch.cuda.FloatTensor), torch.mm(
torch.from_numpy(inv(depth_intrinsic)).type(torch.cuda.FloatTensor), I))
del I, x, y, depth
torch.cuda.empty_cache()
return world_coordinates
def project_images_to_world(depths, cam2worlds, depth_intrinsic, superpixels, frames):
x = np.linspace(0, constants.DEPTH_WIDTH - 1, constants.DEPTH_WIDTH)
y = np.linspace(0, constants.DEPTH_HEIGHT - 1, constants.DEPTH_HEIGHT)
x_mesh, y_mesh = np.meshgrid(x, y)
world_coordinates = torch.zeros(4, len(depths) * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT).type(torch.cuda.FloatTensor)
frame_origins = torch.zeros(len(depths) * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT).type(torch.cuda.IntTensor)
superpixel_origins = torch.zeros(len(depths) * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT).type(torch.cuda.IntTensor)
for im_idx in range(len(depths)):
world_coordinates[:, im_idx * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT: (im_idx + 1) * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT] = project_image_to_world(torch.from_numpy(x_mesh).type(torch.cuda.FloatTensor).flatten(),
torch.from_numpy(y_mesh).type(torch.cuda.FloatTensor).flatten(), torch.from_numpy(depths[im_idx][:]).type(torch.cuda.FloatTensor).flatten(), cam2worlds[im_idx], depth_intrinsic)
frame_origins[im_idx * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT: (im_idx + 1) * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT] = torch.ones(
constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT).type(torch.cuda.IntTensor) * frames[im_idx]
superpixel_origins[im_idx * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT: (im_idx + 1) * constants.DEPTH_WIDTH * constants.DEPTH_HEIGHT] = torch.from_numpy(superpixels[im_idx].astype(np.int).flatten()).type(torch.cuda.IntTensor)
# visualize_point_cloud(world_coordinates)
return world_coordinates, frame_origins, superpixel_origins
def project_world_to_image(depth, superpixel_map, cam2world, depth_intrinsic, world_coordinates, frame_origins, superpixel_origins):
world_coordinates_copy = world_coordinates.transpose(0, 1)[:, :3]
projected_points = torch.mm(torch.mm(torch.from_numpy(depth_intrinsic).type(torch.cuda.FloatTensor),
torch.from_numpy(inv(cam2world)).type(torch.cuda.FloatTensor)), world_coordinates)
projected_points = projected_points.transpose(0, 1)[:, :3]
projected_points[:, 0] /= projected_points[:, 2]
projected_points[:, 1] /= projected_points[:, 2]
projected_points[:, 2] /= projected_points[:, 2]
selection_mask = ~torch.isnan(projected_points[:, 2])
projected_points = torch.round(projected_points[selection_mask])
frame_origins = frame_origins[selection_mask]
superpixel_origins = superpixel_origins[selection_mask]
world_coordinates_copy = world_coordinates_copy[selection_mask]
# remove out of frame bounds
selection_mask = (projected_points[:, 0] >= 0) & (projected_points[:, 0] < constants.DEPTH_WIDTH) & (
projected_points[:, 1] >= 0) & (projected_points[:, 1] < constants.DEPTH_HEIGHT)
projected_points = projected_points[selection_mask][:, :2]
frame_origins = frame_origins[selection_mask]
superpixel_origins = superpixel_origins[selection_mask]
world_coordinates_copy = world_coordinates_copy[selection_mask]
depth = torch.from_numpy(depth).type(torch.cuda.FloatTensor)
depth = depth[projected_points[:, 1].type(torch.cuda.LongTensor), projected_points[:, 0].type(torch.cuda.LongTensor)].flatten()
backprojected_points = project_image_to_world(projected_points[:, 0], projected_points[
:, 1], depth, cam2world, depth_intrinsic).transpose(0, 1)[:, :3]
selection_mask = (torch.norm(world_coordinates_copy - backprojected_points, dim=1) < constants.WORLD_DISTANCE_THRESHOLD)
projected_points = projected_points[selection_mask]
if projected_points.shape[0] == 0:
return None
frame_origins = frame_origins[selection_mask]
superpixel_origins = superpixel_origins[selection_mask]
superpixel_targets = superpixel_map[projected_points[:, 1].type(torch.cuda.LongTensor).cpu().numpy(), projected_points[:, 0].type(torch.cuda.LongTensor).cpu().numpy()].flatten()
t1, t2 = np.unique(superpixel_map, return_counts=True)
target_superpixel_sizes = dict(zip(t1, t2))
frame_spx = torch.zeros((frame_origins.shape[0], 3)).type(torch.cuda.IntTensor)
frame_spx[:, 0] = torch.from_numpy(superpixel_targets.astype(np.int)).type(torch.cuda.IntTensor)
frame_spx[:, 1] = frame_origins
frame_spx[:, 2] = superpixel_origins
uniques, counts = torch.unique(frame_spx, dim=0, return_counts=True)
frame_spx_counts = {}
for idx, u in enumerate(uniques.tolist()):
frame_spx_counts[tuple(u)] = float(counts[idx].cpu().item())
coverage_dict = {}
for i in frame_spx_counts:
coverage = frame_spx_counts[i] / target_superpixel_sizes[i[0]]
coverage_dict[(i[0], i[1], i[2])] = coverage
return coverage_dict # , projected_points
def find_superpixel_coverage(dataset_name, lmdb_handle, superpixel_dir, base_size, images):
dataset = indoor_scenes.IndoorScenesWithAllInfo(dataset_name, lmdb_handle, superpixel_dir, base_size, images)
scene_id_to_index = dataset.scene_id_to_index
image_paths = []
for scene_id in tqdm(scene_id_to_index, desc='Scene[Coverage]'):
all_frame_coverages = OrderedDict()
depths = []
poses = []
superpixels = []
intrinsic = None
for frame_id in scene_id_to_index[scene_id]:
sample = dataset[frame_id]
depths.append(sample['depth'])
poses.append(sample['pose'])
superpixels.append(sample['superpixel'])
intrinsic = sample['intrinsic']
world_coordinates, frame_origins, superpixel_origins = project_images_to_world(depths, poses, intrinsic, superpixels, scene_id_to_index[scene_id])
for frame_id in tqdm(scene_id_to_index[scene_id], desc='Scene[Project]'):
sample = dataset[frame_id]
frame_coverages = project_world_to_image(sample['depth'], sample['superpixel'], sample['pose'], sample['intrinsic'], world_coordinates, frame_origins, superpixel_origins)
            if frame_coverages is not None:
all_frame_coverages[frame_id] = frame_coverages
image_paths.append(images[frame_id])
#from pprint import pprint
#pprint(all_frame_coverages)
np.save(os.path.join(constants.SSD_DATASET_ROOT, dataset_name, "raw", "selections", "coverage_"+superpixel_dir, f'{scene_id}.npy'), all_frame_coverages)
del world_coordinates, frame_origins, superpixel_origins
del depths, poses, superpixels, all_frame_coverages
torch.cuda.empty_cache()
with open(os.path.join(constants.SSD_DATASET_ROOT, dataset_name, "raw", "selections", "coverage_"+superpixel_dir, "coverage_paths.txt"), "w") as fptr:
for p in image_paths:
fptr.write(p.decode() + "\n")
def test_coverage_scannet_sample():
import constants
import os
from dataloader import dataset_base
from dataloader.indoor_scenes import IndoorScenes
lmdb_handle = dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT, "scannet-sample", "dataset.lmdb"), False)
train_set = IndoorScenes('scannet-sample', lmdb_handle, (240, 320), 'train')
    # NOTE: find_superpixel_coverage also expects a superpixel_dir argument;
    # 'superpixel' below is an assumed directory name for the precomputed superpixels.
    find_superpixel_coverage('scannet-sample', lmdb_handle, 'superpixel', (240, 320), train_set.image_path_subset)
if __name__=='__main__':
test_coverage_scannet_sample()
|
corehq/blobs/migrations/0009_delete_blobexpiration.py
|
dimagilg/commcare-hq
| 471 |
68783
|
# Generated by Django 1.11.21 on 2019-06-12 15:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blobs', '0008_deletedblobmeta'),
]
operations = [
migrations.DeleteModel(
name='BlobExpiration',
),
]
|
chainer/functions/normalization/local_response_normalization.py
|
zaltoprofen/chainer
| 3,705 |
68800
|
import numpy
import six
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import type_check
def _cu_conv_sum(y, x, n):
# Convolutional sum
# TODO(beam2d): Use scan computation
rdim = x.size // (x.shape[0] * x.shape[1])
cuda.elementwise(
'raw T x, int32 rdim, int32 N, int32 n_', 'raw T y',
'''
int half_n = n_ / 2;
int offset = i / rdim * N * rdim + i % rdim;
float sum_part = 0;
for (int j = 0; j < N + half_n; ++j) {
if (j < N) {
sum_part += x[offset + j * rdim];
}
if (j >= n_) {
sum_part -= x[offset + (j - n_) * rdim];
}
if (j >= half_n) {
y[offset + (j - half_n) * rdim] = sum_part;
}
}
''', 'lrn_conv_sum')(x, rdim, x.shape[1], n, y,
size=x.shape[0] * rdim)
class LocalResponseNormalization(function_node.FunctionNode):
"""Cross-channel normalization function used in AlexNet."""
_use_ideep = False
def __init__(self, n=5, k=2, alpha=1e-4, beta=.75):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self.scale = None
self.indexes = None
self.unit_scale = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2,
)
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))):
self._use_ideep = True
return self.forward_ideep(inputs)
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
half_n = self.n // 2
x2 = numpy.square(x)
sum_part = x2.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += x2[:, :-i]
sum_part[:, :-i] += x2[:, i:]
self.unit_scale = self.k + self.alpha * sum_part
self.scale = self.unit_scale ** -self.beta
y = x * self.scale
return y,
def forward_ideep(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels)
y, indexes = intel64.ideep.localResponseNormalization.Forward(
intel64.ideep.array(x), param)
self.indexes = indexes
return y,
def forward_gpu(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
self.y = cuda.cupy.square(x) # temporary
self.scale = cuda.cupy.empty_like(self.y)
_cu_conv_sum(self.scale, self.y, self.n)
cuda.elementwise(
'T x, T k, T alpha, T beta',
'T y, T scale',
'''scale = k + alpha * scale;
y = x * pow(scale, -beta);''',
'lrn_fwd')(x, self.k, self.alpha, self.beta,
self.y, self.scale)
return self.y,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
y, = self.get_retained_outputs()
gy, = grad_outputs
f = LocalResponseNormalizationGrad(
self.n, self.k, self.alpha, self.beta, self._use_ideep,
self.scale, self.indexes, self.unit_scale,)
return f.apply((x, y, gy))
class LocalResponseNormalizationGrad(function_node.FunctionNode):
def __init__(self, n, k, alpha, beta, use_ideep,
scale=None, indexes=None, unit_scale=None):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self._use_ideep = use_ideep
self.scale = scale
self.indexes = indexes
self.unit_scale = unit_scale
def forward_cpu(self, inputs):
if self._use_ideep:
return self._backward_ideep(inputs)
x, y, gy = inputs
half_n = self.n // 2
summand = y * gy / self.unit_scale
sum_part = summand.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += summand[:, :-i]
sum_part[:, :-i] += summand[:, i:]
gx = gy * self.scale - 2 * self.alpha * self.beta * x * sum_part
return gx,
def _backward_ideep(self, inputs):
x, y, gy = inputs
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels
)
gx = intel64.ideep.localResponseNormalization.Backward(
intel64.ideep.array(x),
intel64.ideep.array(gy),
self.indexes,
param)
return gx,
def forward_gpu(self, inputs):
x, y, gy = inputs
summand = cuda.elementwise(
'T scale, T y, T gy', 'T summand',
'summand = y * gy / scale',
'lrn_bwd_summand')(self.scale, y, gy)
gx = cuda.cupy.empty_like(x)
_cu_conv_sum(gx, summand, self.n)
cuda.elementwise(
' T x, T gy, T scale, T beta, T coeff', 'T gx',
'gx = pow(scale, -beta) * gy - coeff * x * gx',
'lrn_bwd')(x, gy, self.scale,
self.beta, 2 * self.alpha * self.beta, gx)
return gx,
def backward(self, indexes, grad_outputs):
# No trivial way to implement double-backward for this function.
raise NotImplementedError
def local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=.75):
"""Local response normalization across neighboring channels.
    This function implements normalization across channels. Let :math:`x` be
    an input image with :math:`N` channels. Then, this function computes an
    output image :math:`y` by the following formula:
.. math::
y_i = {x_i \\over \\left( k + \\
\\alpha \\sum_{j=\\max{1, i - n/2}}^{\\min{N, i + n/2}} \\
x_j^2 \\right)^\\beta}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
n (int): Normalization window width.
k (float): Smoothing parameter.
alpha (float): Normalizer scaling parameter.
beta (float): Normalizer power parameter.
Returns:
~chainer.Variable: Output variable.
See: Section 3.3 of `ImageNet Classification with Deep Convolutional
Neural Networks <https://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_
"""
return LocalResponseNormalization(n, k, alpha, beta).apply((x,))[0]
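# A minimal usage sketch (illustrative only; input shape and values are assumed):
#
#     import numpy as np
#     import chainer
#
#     x = chainer.Variable(np.random.rand(2, 16, 8, 8).astype(np.float32))
#     y = chainer.functions.local_response_normalization(
#         x, n=5, k=2, alpha=1e-4, beta=0.75)
#     assert y.shape == x.shape  # normalization is applied across the channel axis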
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/dev.py
|
lendlsmith/wagtail-cookiecutter-foundation
| 182 |
68820
|
<gh_stars>100-1000
# flake8: noqa
from .base import *
from os.path import abspath, dirname, join
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
INSTALLED_APPS += (
'debug_toolbar',
'django_extensions',
)
INTERNAL_IPS = ('127.0.0.1',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
SECRET_KEY = env('DJANGO_SECRET_KEY',
default='<KEY>')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Use Dummy cache for development
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# Process all tasks synchronously.
# Helpful for local development and running tests
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_ALWAYS_EAGER = True
try:
from .local import *
except ImportError:
pass
|
third_party/mapreduce/mapreduce/base_handler.py
|
tingshao/catapult
| 2,151 |
68828
|
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base handler class for all mapreduce handlers."""
# pylint: disable=protected-access
# pylint: disable=g-bad-name
# pylint: disable=g-import-not-at-top
import httplib
import logging
try:
import json
except ImportError:
import simplejson as json
try:
from mapreduce import pipeline_base
except ImportError:
pipeline_base = None
try:
# Check if the full cloudstorage package exists. The stub part is in runtime.
import cloudstorage
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
cloudstorage = None
from google.appengine.ext import webapp
from mapreduce import errors
from mapreduce import json_util
from mapreduce import model
from mapreduce import parameters
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadRequestPathError(Error):
"""The request path for the handler is invalid."""
class TaskQueueHandler(webapp.RequestHandler):
"""Base class for handlers intended to be run only from the task queue.
Sub-classes should implement
  1. the 'handle' method for all POST requests.
2. '_preprocess' method for decoding or validations before handle.
3. '_drop_gracefully' method if _preprocess fails and the task has to
be dropped.
In Python27 runtime, webapp2 will automatically replace webapp.
"""
_DEFAULT_USER_AGENT = "App Engine Python MR"
def __init__(self, *args, **kwargs):
# webapp framework invokes initialize after __init__.
# webapp2 framework invokes initialize within __init__.
    # Python27 runtime swaps webapp with webapp2 underneath us.
# Since initialize will conditionally change this field,
# it needs to be set before calling super's __init__.
self._preprocess_success = False
super(TaskQueueHandler, self).__init__(*args, **kwargs)
if cloudstorage:
cloudstorage.set_default_retry_params(
cloudstorage.RetryParams(
min_retries=5,
max_retries=10,
urlfetch_timeout=parameters._GCS_URLFETCH_TIMEOUT_SEC,
save_access_token=True,
_user_agent=self._DEFAULT_USER_AGENT))
def initialize(self, request, response):
"""Initialize.
1. call webapp init.
2. check request is indeed from taskqueue.
3. check the task has not been retried too many times.
4. run handler specific processing logic.
    5. run error handling logic if preprocessing failed.
Args:
request: a webapp.Request instance.
response: a webapp.Response instance.
"""
super(TaskQueueHandler, self).initialize(request, response)
# Check request is from taskqueue.
if "X-AppEngine-QueueName" not in self.request.headers:
logging.error(self.request.headers)
logging.error("Task queue handler received non-task queue request")
self.response.set_status(
403, message="Task queue handler received non-task queue request")
return
# Check task has not been retried too many times.
if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:
logging.error(
"Task %s has been attempted %s times. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"],
self.task_retry_count() + 1)
self._drop_gracefully()
return
try:
self._preprocess()
self._preprocess_success = True
# pylint: disable=bare-except
except:
self._preprocess_success = False
logging.error(
"Preprocess task %s failed. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"])
self._drop_gracefully()
def post(self):
if self._preprocess_success:
self.handle()
def handle(self):
"""To be implemented by subclasses."""
raise NotImplementedError()
def _preprocess(self):
"""Preprocess.
This method is called after webapp initialization code has been run
successfully. It can thus access self.request, self.response and so on.
"""
pass
def _drop_gracefully(self):
"""Drop task gracefully.
When preprocess failed, this method is called before the task is dropped.
"""
pass
def task_retry_count(self):
"""Number of times this task has been retried."""
return int(self.request.headers.get("X-AppEngine-TaskExecutionCount", 0))
def retry_task(self):
"""Ask taskqueue to retry this task.
Even though raising an exception can cause a task retry, it
    will flood logs with highly visible ERROR logs. Handlers should use
    this method to perform controlled task retries. Only raise exceptions
    for failures that deserve ERROR log entries.
"""
self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
self.response.clear()
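# A minimal subclass sketch (illustrative only; names below are hypothetical),
# following the contract described in the TaskQueueHandler docstring:
#
#   class CountShardHandler(TaskQueueHandler):
#
#     def _preprocess(self):
#       # Validate/decode the request before handle() runs.
#       self._shard_id = self.request.get("shard_id")
#
#     def handle(self):
#       # All POST processing goes here; only runs if _preprocess succeeded.
#       logging.info("processing shard %s", self._shard_id)
#
#     def _drop_gracefully(self):
#       logging.warning("dropping malformed task")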
class JsonHandler(webapp.RequestHandler):
"""Base class for JSON handlers for user interface.
Sub-classes should implement the 'handle' method. They should put their
response data in the 'self.json_response' dictionary. Any exceptions raised
by the sub-class implementation will be sent in a JSON response with the
name of the error_class and the error_message.
"""
def __init__(self, *args):
"""Initializer."""
super(JsonHandler, self).__init__(*args)
self.json_response = {}
def base_path(self):
"""Base path for all mapreduce-related urls.
JSON handlers are mapped to /base_path/command/command_name thus they
require special treatment.
Raises:
BadRequestPathError: if the path does not end with "/command".
Returns:
The base path.
"""
path = self.request.path
base_path = path[:path.rfind("/")]
if not base_path.endswith("/command"):
raise BadRequestPathError(
"Json handlers should have /command path prefix")
return base_path[:base_path.rfind("/")]
def _handle_wrapper(self):
"""The helper method for handling JSON Post and Get requests."""
if self.request.headers.get("X-Requested-With") != "XMLHttpRequest":
logging.error("Got JSON request with no X-Requested-With header")
self.response.set_status(
403, message="Got JSON request with no X-Requested-With header")
return
self.json_response.clear()
try:
self.handle()
except errors.MissingYamlError:
logging.debug("Could not find 'mapreduce.yaml' file.")
self.json_response.clear()
self.json_response["error_class"] = "Notice"
self.json_response["error_message"] = "Could not find 'mapreduce.yaml'"
except Exception, e:
logging.exception("Error in JsonHandler, returning exception.")
# TODO(user): Include full traceback here for the end-user.
self.json_response.clear()
self.json_response["error_class"] = e.__class__.__name__
self.json_response["error_message"] = str(e)
self.response.headers["Content-Type"] = "text/javascript"
try:
output = json.dumps(self.json_response, cls=json_util.JsonEncoder)
# pylint: disable=broad-except
except Exception, e:
logging.exception("Could not serialize to JSON")
self.response.set_status(500, message="Could not serialize to JSON")
return
else:
self.response.out.write(output)
def handle(self):
"""To be implemented by sub-classes."""
raise NotImplementedError()
class PostJsonHandler(JsonHandler):
"""JSON handler that accepts POST requests."""
def post(self):
self._handle_wrapper()
class GetJsonHandler(JsonHandler):
"""JSON handler that accepts GET posts."""
def get(self):
self._handle_wrapper()
class HugeTaskHandler(TaskQueueHandler):
"""Base handler for processing HugeTasks."""
class _RequestWrapper(object):
"""Container of a request and associated parameters."""
def __init__(self, request):
self._request = request
self._params = model.HugeTask.decode_payload(request)
def get(self, name, default=""):
return self._params.get(name, default)
def set(self, name, value):
self._params[name] = value
def __getattr__(self, name):
return getattr(self._request, name)
def __init__(self, *args, **kwargs):
super(HugeTaskHandler, self).__init__(*args, **kwargs)
def _preprocess(self):
self.request = self._RequestWrapper(self.request)
if pipeline_base:
  # For backward compatibility.
PipelineBase = pipeline_base.PipelineBase
else:
PipelineBase = None
|
src/bepasty/tests/test_storage.py
|
Emojigit/bepasty-server
| 123 |
68842
|
import pytest
from bepasty.storage.filesystem import Storage
def test_contains(tmpdir):
storage = Storage(str(tmpdir))
name = "foo"
# check if it is not there yet
assert name not in storage
with storage.create(name, 0):
# we just want it created, no need to write sth into it
pass
# check if it is there
assert name in storage
storage.remove(name)
# check if it is gone
assert name not in storage
def test_iter(tmpdir):
storage = Storage(str(tmpdir))
# nothing there yet
assert list(storage) == []
names = ["foo", "bar", "baz", ]
for name in names:
with storage.create(name, 0):
# we just want it created, no need to write sth into it
pass
assert set(list(storage)) == set(names)
def test_invalid_name(tmpdir):
storage = Storage(str(tmpdir))
name = "../invalid"
with pytest.raises(RuntimeError):
storage.create(name, 0)
|
base/site-packages/authsub/urls.py
|
edisonlz/fastor
| 285 |
68844
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^login/$', 'authsub.views.login', name="authsub_login"),
)
|
vumi/transports/imimobile/imimobile_ussd.py
|
seidu626/vumi
| 199 |
68852
|
# -*- test-case-name: vumi.transports.imimobile.tests.test_imimobile_ussd -*-
import re
import json
from datetime import datetime, timedelta
from twisted.python import log
from twisted.web import http
from twisted.internet.defer import inlineCallbacks
from vumi.components.session import SessionManager
from vumi.message import TransportUserMessage
from vumi.transports.httprpc import HttpRpcTransport
class ImiMobileUssdTransport(HttpRpcTransport):
"""
HTTP transport for USSD with IMImobile in India.
Configuration parameters:
:param str transport_name:
The name this transport instance will use to create its queues
:param str web_path:
The HTTP path to listen on.
:param int web_port:
The HTTP port to listen on.
:param dict suffix_to_addrs:
Mappings between url suffixes and to addresses.
:param str user_terminated_session_message:
A regex used to identify user terminated session messages. Default is
'^Map Dialog User Abort User Reason'.
:param str user_terminated_session_response:
Response given back to the user if the user terminated the session.
Default is 'Session Ended'.
:param dict redis_manager:
The configuration parameters for connecting to Redis.
:param int ussd_session_timeout:
Number of seconds before USSD session information stored in Redis
expires. Default is 600s.
"""
transport_type = 'ussd'
ENCODING = 'utf-8'
EXPECTED_FIELDS = set(['msisdn', 'msg', 'code', 'tid', 'dcs'])
# errors
RESPONSE_FAILURE_ERROR = "Response to http request failed."
    INSUFFICIENT_MSG_FIELDS_ERROR = "Insufficient message fields provided."
def validate_config(self):
super(ImiMobileUssdTransport, self).validate_config()
# Mappings between url suffixes and the tags used as the to_addr for
# inbound messages (e.g. shortcodes or longcodes). This is necessary
        # since the requests from ImiMobile do not provide us with this.
self.suffix_to_addrs = self.config['suffix_to_addrs']
# IMImobile do not provide a parameter or header to signal termination
# of the session by the user, other than sending "Map Dialog User Abort
# User Reason: User specific reason" as the request's message content.
self.user_terminated_session_re = re.compile(
self.config.get('user_terminated_session_message',
'^Map Dialog User Abort User Reason'))
self.user_terminated_session_response = self.config.get(
'user_terminated_session_response', 'Session Ended')
@inlineCallbacks
def setup_transport(self):
super(ImiMobileUssdTransport, self).setup_transport()
# configure session manager
r_config = self.config.get('redis_manager', {})
r_prefix = "vumi.transports.imimobile_ussd:%s" % self.transport_name
session_timeout = int(self.config.get("ussd_session_timeout", 600))
self.session_manager = yield SessionManager.from_redis_config(
r_config, r_prefix, max_session_length=session_timeout)
@inlineCallbacks
def teardown_transport(self):
yield super(ImiMobileUssdTransport, self).teardown_transport()
yield self.session_manager.stop()
def get_to_addr(self, request):
"""
Extracts the request url path's suffix and uses it to obtain the tag
associated with the suffix. Returns a tuple consisting of the tag and
a dict of errors encountered.
"""
errors = {}
[suffix] = request.postpath
tag = self.suffix_to_addrs.get(suffix, None)
if tag is None:
errors['unknown_suffix'] = suffix
return tag, errors
@classmethod
def ist_to_utc(cls, timestamp):
"""
Accepts a timestamp in the format `[M]M/[D]D/YYYY HH:MM:SS (am|pm)` and
in India Standard Time, and returns a datetime object normalized to
UTC time.
"""
return (datetime.strptime(timestamp, '%m/%d/%Y %I:%M:%S %p')
- timedelta(hours=5, minutes=30))
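    # For example (hedged illustration): ist_to_utc('1/23/2014 2:30:00 pm')
    # returns datetime(2014, 1, 23, 9, 0, 0), i.e. 14:30 IST minus the 5h30m
    # IST offset.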
def user_has_terminated_session(self, content):
return self.user_terminated_session_re.match(content) is not None
@inlineCallbacks
def handle_raw_inbound_message(self, message_id, request):
errors = {}
to_addr, to_addr_errors = self.get_to_addr(request)
errors.update(to_addr_errors)
values, field_value_errors = self.get_field_values(request,
self.EXPECTED_FIELDS)
errors.update(field_value_errors)
if errors:
log.msg('Unhappy incoming message: %s' % (errors,))
yield self.finish_request(
message_id, json.dumps(errors), code=http.BAD_REQUEST)
return
from_addr = values['msisdn']
log.msg('ImiMobileTransport receiving inbound message from %s to %s.' %
(from_addr, to_addr))
content = values['msg']
if self.user_has_terminated_session(content):
yield self.session_manager.clear_session(from_addr)
session_event = TransportUserMessage.SESSION_CLOSE
# IMImobile use 0 for termination of a session
self.finish_request(
message_id,
self.user_terminated_session_response,
headers={'X-USSD-SESSION': ['0']})
else:
            # We use the msisdn (from_addr) to make a guess about whether
            # the session is new or not.
session = yield self.session_manager.load_session(from_addr)
if session:
session_event = TransportUserMessage.SESSION_RESUME
yield self.session_manager.save_session(from_addr, session)
else:
session_event = TransportUserMessage.SESSION_NEW
yield self.session_manager.create_session(
from_addr, from_addr=from_addr, to_addr=to_addr)
yield self.publish_message(
message_id=message_id,
content=content,
to_addr=to_addr,
from_addr=from_addr,
provider='imimobile',
session_event=session_event,
transport_type=self.transport_type,
transport_metadata={
'imimobile_ussd': {
'tid': values['tid'],
'code': values['code'],
'dcs': values['dcs'],
}
})
@inlineCallbacks
def handle_outbound_message(self, message):
error = None
message_id = message['message_id']
if message.payload.get('in_reply_to') and 'content' in message.payload:
# IMImobile use 1 for resume and 0 for termination of a session
session_header_value = '1'
if message['session_event'] == TransportUserMessage.SESSION_CLOSE:
yield self.session_manager.clear_session(message['to_addr'])
session_header_value = '0'
response_id = self.finish_request(
message['in_reply_to'],
message['content'].encode(self.ENCODING),
headers={'X-USSD-SESSION': [session_header_value]})
if response_id is None:
error = self.RESPONSE_FAILURE_ERROR
else:
error = self.INSUFFICIENT_MSG_FIELDS_ERROR
if error is not None:
yield self.publish_nack(message_id, error)
return
yield self.publish_ack(user_message_id=message_id,
sent_message_id=message_id)
|
tests/property/test_discrete_log.py
|
thommignot/electionguard-python
| 112 |
68867
|
<gh_stars>100-1000
import asyncio
from hypothesis import given
from hypothesis.strategies import integers
from tests.base_test_case import BaseTestCase
from electionguard.constants import get_generator, get_large_prime
from electionguard.discrete_log import (
compute_discrete_log,
discrete_log_async,
DiscreteLog,
)
from electionguard.group import (
ElementModP,
ElementModQ,
ONE_MOD_P,
ONE_MOD_Q,
mult_p,
g_pow_p,
)
def _discrete_log_uncached(e: ElementModP) -> int:
"""
A simpler implementation of discrete_log, only meant for comparison testing of the caching version.
"""
count = 0
g_inv = ElementModP(pow(get_generator(), -1, get_large_prime()), False)
while e != ONE_MOD_P:
e = mult_p(e, g_inv)
count = count + 1
return count
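# For example (hedged illustration): for e = g_pow_p(ElementModQ(5)), the loop
# above multiplies by g^-1 five times before reaching ONE_MOD_P and returns 5.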
class TestDiscreteLogFunctions(BaseTestCase):
"""Discrete log tests"""
@given(integers(0, 100))
def test_uncached(self, exp: int):
# Arrange
plaintext = ElementModQ(exp)
exp_plaintext = g_pow_p(plaintext)
# Act
plaintext_again = _discrete_log_uncached(exp_plaintext)
# Assert
self.assertEqual(plaintext, plaintext_again)
@given(integers(0, 1000))
def test_cached(self, exp: int):
# Arrange
cache = {ONE_MOD_P: 0}
plaintext = ElementModQ(exp)
exp_plaintext = g_pow_p(plaintext)
# Act
(plaintext_again, returned_cache) = compute_discrete_log(exp_plaintext, cache)
# Assert
self.assertEqual(plaintext, plaintext_again)
self.assertEqual(len(cache), len(returned_cache))
def test_cached_one(self):
cache = {ONE_MOD_P: 0}
plaintext = ONE_MOD_Q
ciphertext = g_pow_p(plaintext)
(plaintext_again, returned_cache) = compute_discrete_log(ciphertext, cache)
self.assertEqual(plaintext, plaintext_again)
self.assertEqual(len(cache), len(returned_cache))
def test_cached_one_async(self):
# Arrange
cache = {ONE_MOD_P: 0}
plaintext = ONE_MOD_Q
ciphertext = g_pow_p(plaintext)
# Act
loop = asyncio.new_event_loop()
(plaintext_again, returned_cache) = loop.run_until_complete(
discrete_log_async(ciphertext, cache)
)
loop.close()
# Assert
self.assertEqual(plaintext, plaintext_again)
self.assertEqual(len(cache), len(returned_cache))
class TestDiscreteLogClass(BaseTestCase):
"""Discrete log tests"""
@given(integers(0, 1000))
def test_cached(self, exp: int):
# Arrange
plaintext = ElementModQ(exp)
exp_plaintext = g_pow_p(plaintext)
# Act
plaintext_again = DiscreteLog().discrete_log(exp_plaintext)
# Assert
self.assertEqual(plaintext, plaintext_again)
def test_cached_one(self):
# Arrange
plaintext = ONE_MOD_Q
ciphertext = g_pow_p(plaintext)
# Act
plaintext_again = DiscreteLog().discrete_log(ciphertext)
# Assert
self.assertEqual(plaintext, plaintext_again)
def test_cached_one_async(self):
# Arrange
plaintext = ONE_MOD_Q
ciphertext = g_pow_p(plaintext)
# Act
loop = asyncio.new_event_loop()
plaintext_again = loop.run_until_complete(
DiscreteLog().discrete_log_async(ciphertext)
)
loop.close()
# Assert
self.assertEqual(plaintext, plaintext_again)
|
up/utils/general/registry_factory.py
|
ModelTC/EOD
| 196 |
68871
|
<gh_stars>100-1000
from .registry import Registry
# model
MODULE_ZOO_REGISTRY = Registry()
MODULE_PROCESS_REGISTRY = Registry()
MODULE_WRAPPER_REGISTRY = Registry()
MODEL_WRAPPER_REGISTRY = Registry()
EMA_REGISTRY = Registry()
# data
DATASET_REGISTRY = Registry()
DATALOADER_REGISTRY = Registry()
BATCH_SAMPLER_REGISTRY = Registry()
AUGMENTATION_REGISTRY = Registry()
BATCHING_REGISTRY = Registry()
# predictor
ROI_PREDICTOR_REGISTRY = Registry()
BBOX_PREDICTOR_REGISTRY = Registry()
MASK_PREDICTOR_REGISTRY = Registry()
# supervisor
ROI_SUPERVISOR_REGISTRY = Registry()
BBOX_SUPERVISOR_REGISTRY = Registry()
MASK_SUPERVISOR_REGISTRY = Registry()
# matcher
MATCHER_REGISTRY = Registry()
# sampler
ROI_SAMPLER_REGISTRY = Registry()
SAMPLER_REGISTRY = Registry()
# merger
ROI_MERGER_REGISTRY = Registry()
# lr
WARM_LR_REGISTRY = Registry()
LR_REGISTRY = Registry()
# evaluator
EVALUATOR_REGISTRY = Registry()
# loss
LOSSES_REGISTRY = Registry()
# image reader
IMAGE_READER_REGISTRY = Registry()
# hook
HOOK_REGISTRY = Registry()
# saver
SAVER_REGISTRY = Registry()
# anchor generate
ANCHOR_GENERATOR_REGISTRY = Registry()
# mask target generate
MASK_GENERATOR_REGISTRY = Registry()
# subcommand
SUBCOMMAND_REGISTRY = Registry()
# initializer
INITIALIZER_REGISTRY = Registry()
# runner
RUNNER_REGISTRY = Registry()
# inferencer
INFERENCER_REGISTRY = Registry()
VISUALIZER_REGISTRY = Registry()
# optimizer
OPTIMIZER_REGISTRY = Registry()
LR_SCHEDULER_REGISTY = Registry()
WARM_SCHEDULER_REGISTY = Registry()
DATA_BUILDER_REGISTY = Registry()
MODEL_HELPER_REGISTRY = Registry()
# distill
MIMIC_REGISTRY = Registry()
MIMIC_LOSS_REGISTRY = Registry()
# box_coder
BOX_CODER_REGISTRY = Registry()
|
code/certify.py
|
RuntianZ/smoothing-adversarial
| 213 |
68877
|
# evaluate a smoothed classifier on a dataset
import argparse
import datetime
import os
from time import time
from architectures import get_architecture
from core import Smooth
from datasets import get_dataset, DATASETS, get_num_classes
import torch
parser = argparse.ArgumentParser(description='Certify many examples')
parser.add_argument("dataset", choices=DATASETS, help="which dataset")
parser.add_argument("base_classifier", type=str, help="path to saved pytorch model of base classifier")
parser.add_argument("sigma", type=float, help="noise hyperparameter")
parser.add_argument("outfile", type=str, help="output file")
parser.add_argument("--batch", type=int, default=1000, help="batch size")
parser.add_argument("--skip", type=int, default=1, help="how many examples to skip")
parser.add_argument("--max", type=int, default=-1, help="stop after this many examples")
parser.add_argument("--split", choices=["train", "test"], default="test", help="train or test set")
parser.add_argument("--N0", type=int, default=100)
parser.add_argument("--N", type=int, default=100000, help="number of samples to use")
parser.add_argument("--alpha", type=float, default=0.001, help="failure probability")
args = parser.parse_args()
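# Example invocation (dataset name and paths are illustrative assumptions):
#
#   python code/certify.py cifar10 models/checkpoint.pth.tar 0.25 certify_out.tsv \
#       --batch 400 --skip 20 --N 100000 --alpha 0.001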
if __name__ == "__main__":
# load the base classifier
checkpoint = torch.load(args.base_classifier)
base_classifier = get_architecture(checkpoint["arch"], args.dataset)
base_classifier.load_state_dict(checkpoint['state_dict'])
    # create the smoothed classifier g
smoothed_classifier = Smooth(base_classifier, get_num_classes(args.dataset), args.sigma)
# prepare output file
f = open(args.outfile, 'w')
print("idx\tlabel\tpredict\tradius\tcorrect\ttime", file=f, flush=True)
# iterate through the dataset
dataset = get_dataset(args.dataset, args.split)
for i in range(len(dataset)):
# only certify every args.skip examples, and stop after args.max examples
if i % args.skip != 0:
continue
if i == args.max:
break
(x, label) = dataset[i]
before_time = time()
# certify the prediction of g around x
x = x.cuda()
prediction, radius = smoothed_classifier.certify(x, args.N0, args.N, args.alpha, args.batch)
after_time = time()
correct = int(prediction == label)
time_elapsed = str(datetime.timedelta(seconds=(after_time - before_time)))
print("{}\t{}\t{}\t{:.3}\t{}\t{}".format(
i, label, prediction, radius, correct, time_elapsed), file=f, flush=True)
f.close()
|
pyleus/__init__.py
|
dapuck/pyleus
| 166 |
68885
|
from __future__ import absolute_import
import pkg_resources
__version__ = '0.3.0'
BASE_JAR = "pyleus-base.jar"
BASE_JAR_PATH = pkg_resources.resource_filename('pyleus', BASE_JAR)
|
neural_structural_optimization/autograd_lib_test.py
|
yjxkwp/neural-structural-optimization
| 103 |
68909
|
<filename>neural_structural_optimization/autograd_lib_test.py
# lint as python3
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-complex-comprehension
import autograd.numpy
from autograd.test_util import check_grads
from neural_structural_optimization import autograd_lib
import numpy as np
from absl.testing import absltest
cone_filter = autograd_lib.cone_filter
gaussian_filter = autograd_lib.gaussian_filter
scatter1d = autograd_lib.scatter1d
solve_coo = autograd_lib.solve_coo
inverse_permutation = autograd_lib.inverse_permutation
find_root = autograd_lib.find_root
class AutogradLibTest(absltest.TestCase):
def test_gaussian_filter(self):
image = np.random.RandomState(0).rand(9, 9)
width = 4
np.testing.assert_allclose(gaussian_filter(image, width).sum(), image.sum())
check_grads(lambda x: gaussian_filter(x, width), modes=['rev'])(image)
def test_cone_filter(self):
image = np.random.RandomState(0).rand(5, 5)
width = 4
check_grads(lambda x: cone_filter(x, width), modes=['rev'])(image)
def test_inverse_permutation(self):
indices = np.array([4, 2, 1, 7, 9, 5, 6, 0, 3, 8])
inv_indices = inverse_permutation(indices)
np.testing.assert_array_equal(np.array([7, 2, 1, 8, 0, 5, 6, 3, 9, 4]),
inv_indices)
def test_scatter1d(self):
# also tests the `inverse_permutation` function
nonzero_values = [4, 2, 7, 9]
nonzero_indices = [2, 3, 7, 8]
array_len = 10
u = scatter1d(nonzero_values, nonzero_indices, array_len)
np.testing.assert_array_equal(
np.array([0., 0., 4., 2., 0., 0., 0., 7., 9., 0.]), u)
def test_coo_solve(self):
# test solve_coo gradients
indices = np.array([[i % 10, (i - j) % 10]
for i in range(10) for j in range(-3, 4)]).T
entries = np.random.RandomState(0).randn(indices.shape[-1])
b = np.random.RandomState(0).rand(10)
check_grads(lambda x: solve_coo(entries, indices, x), modes=['rev'])(b)
check_grads(lambda x: solve_coo(x, indices, b), modes=['rev'])(entries)
def test_find_root(self):
# solve for a literal square root
f = lambda x, y: y ** 2 - x
result = find_root(f, 2, lower_bound=0, upper_bound=2)
np.testing.assert_allclose(result, np.sqrt(2))
def test_find_root_grad(self):
f = lambda x, y: y ** 2 - abs(autograd.numpy.mean(x))
x0 = np.random.RandomState(0).randn(3)
check_grads(lambda x: find_root(f, x, 0, 10, 1e-12), modes=['rev'])(x0)
if __name__ == '__main__':
absltest.main()
|
third_party/boost.context/tools/build/src/tools/gcc.py
|
sunny-shu/libgo
| 2,831 |
68919
|
<reponame>sunny-shu/libgo
# Status: being ported by Steven Watanabe
# Base revision: 47077
# TODO: common.jam needs to be ported
# TODO: generators.jam needs to have register_c_compiler.
#
# Copyright 2001 <NAME>.
# Copyright 2002-2006 <NAME>.
# Copyright 2002-2003 <NAME>.
# Copyright (c) 2005 <NAME>.
# Copyright 2006 <NAME>.
# Copyright 2007 <NAME>
# Copyright 2007 <NAME>.
# Copyright 2008 <NAME>
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import subprocess
import re
import bjam
from b2.tools import unix, common, rc, pch, builtin
from b2.build import feature, type, toolset, generators, property_set
from b2.build.property import Property
from b2.util.utility import os_name, on_windows
from b2.manager import get_manager
from b2.build.generators import Generator
from b2.build.toolset import flags
from b2.util.utility import to_seq
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
feature.extend('toolset', ['gcc'])
toolset.inherit_generators('gcc', [], 'unix', ['unix.link', 'unix.link.dll'])
toolset.inherit_flags('gcc', 'unix')
toolset.inherit_rules('gcc', 'unix')
generators.override('gcc.prebuilt', 'builtin.prebuilt')
generators.override('gcc.searched-lib-generator', 'searched-lib-generator')
# Target naming is determined by types/lib.jam and the settings below this
# comment.
#
# On *nix:
# libxxx.a static library
# libxxx.so shared library
#
# On windows (mingw):
# libxxx.lib static library
# xxx.dll DLL
# xxx.lib import library
#
# On windows (cygwin) i.e. <target-os>cygwin
# libxxx.a static library
# xxx.dll DLL
# libxxx.dll.a import library
#
# Note: user can always override by using the <tag>@rule
# These settings have been chosen so that mingw
# is in line with msvc naming conventions. For
# cygwin the cygwin naming convention has been chosen.
# Make the "o" suffix used for gcc toolset on all
# platforms
type.set_generated_target_suffix('OBJ', ['<toolset>gcc'], 'o')
type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'a')
type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'dll.a')
type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'lib')
__machine_match = re.compile('^([^ ]+)')
__version_match = re.compile('^([0-9.]+)')
def init(version = None, command = None, options = None):
"""
Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
"""
options = to_seq(options)
command = to_seq(command)
# Information about the gcc command...
# The command.
command = to_seq(common.get_invocation_command('gcc', 'g++', command))
# The root directory of the tool install.
    root = feature.get_values('<root>', options)
# The bin directory where to find the command to execute.
bin = None
# The flavor of compiler.
flavor = feature.get_values('<flavor>', options)
# Autodetect the root and bin dir if not given.
if command:
if not bin:
bin = common.get_absolute_tool_path(command[-1])
if not root:
root = os.path.dirname(bin)
# Autodetect the version and flavor if not given.
if command:
machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
machine = __machine_match.search(machine_info).group(1)
version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
version = __version_match.search(version_info).group(1)
if not flavor and machine.find('mingw') != -1:
flavor = 'mingw'
condition = None
if flavor:
condition = common.check_init_parameters('gcc', None,
('version', version),
('flavor', flavor))
else:
condition = common.check_init_parameters('gcc', None,
('version', version))
if command:
command = command[0]
common.handle_options('gcc', condition, command, options)
linker = feature.get_values('<linker-type>', options)
if not linker:
if os_name() == 'OSF':
linker = 'osf'
elif os_name() == 'HPUX':
            linker = 'hpux'
else:
linker = 'gnu'
init_link_flags('gcc', linker, condition)
# If gcc is installed in non-standard location, we'd need to add
# LD_LIBRARY_PATH when running programs created with it (for unit-test/run
# rules).
if command:
# On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
# and all must be added to LD_LIBRARY_PATH. The linker will pick the
        # right ones. Note that we don't provide a clean way to build 32-bit
# binary with 64-bit compiler, but user can always pass -m32 manually.
lib_path = [os.path.join(root, 'bin'),
os.path.join(root, 'lib'),
os.path.join(root, 'lib32'),
os.path.join(root, 'lib64')]
if debug():
print 'notice: using gcc libraries ::', condition, '::', lib_path
toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)
# If it's not a system gcc install we should adjust the various programs as
# needed to prefer using the install specific versions. This is essential
# for correct use of MinGW and for cross-compiling.
# - The archive builder.
archiver = common.get_invocation_command('gcc',
'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.AR', condition, [archiver])
if debug():
print 'notice: using gcc archiver ::', condition, '::', archiver
# - Ranlib
ranlib = common.get_invocation_command('gcc',
'ranlib', feature.get_values('<ranlib>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.RANLIB', condition, [ranlib])
if debug():
        print 'notice: using gcc ranlib ::', condition, '::', ranlib
# - The resource compiler.
rc_command = common.get_invocation_command_nodefault('gcc',
'windres', feature.get_values('<rc>', options), [bin], path_last=True)
rc_type = feature.get_values('<rc-type>', options)
if not rc_type:
rc_type = 'windres'
if not rc_command:
# If we can't find an RC compiler we fallback to a null RC compiler that
# creates empty object files. This allows the same Jamfiles to work
# across the board. The null RC uses the assembler to create the empty
# objects, so configure that.
rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
rc_type = 'null'
rc.configure(rc_command, condition, '<rc-type>' + rc_type)
###if [ os.name ] = NT
###{
### # This causes single-line command invocation to not go through .bat files,
### # thus avoiding command-line length limitations.
### JAMSHELL = % ;
###}
#FIXME: when register_c_compiler is moved to
# generators, these should be updated
builtin.register_c_compiler('gcc.compile.c++', ['CPP'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c', ['C'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.asm', ['ASM'], ['OBJ'], ['<toolset>gcc'])
# pch support
# The compiler looks for a precompiled header in each directory just before it
# looks for the include file in that directory. The name searched for is the
# name specified in the #include directive with ".gch" suffix appended. The
# logic in gcc-pch-generator will make sure that BASE_PCH suffix is appended to
# full name of the header.
type.set_generated_target_suffix('PCH', ['<toolset>gcc'], 'gch')
# GCC-specific pch generator.
class GccPchGenerator(pch.PchGenerator):
# Inherit the __init__ method
def run_pch(self, project, name, prop_set, sources):
# Find the header in sources. Ignore any CPP sources.
header = None
for s in sources:
if type.is_derived(s.type(), 'H'):
header = s
# Error handling: Base header file name should be the same as the base
# precompiled header name.
header_name = header.name()
header_basename = os.path.basename(header_name).rsplit('.', 1)[0]
if header_basename != name:
location = project.project_module
###FIXME:
raise Exception()
### errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ;
pch_file = Generator.run(self, project, name, prop_set, [header])
# return result of base class and pch-file property as usage-requirements
# FIXME: what about multiple results from generator.run?
return (property_set.create([Property('pch-file', pch_file[0]),
Property('cflags', '-Winvalid-pch')]),
pch_file)
# Calls the base version specifying source's name as the name of the created
    # target. As a result, the PCH will be named whatever.hpp.gch, and not
# whatever.gch.
def generated_targets(self, sources, prop_set, project, name = None):
name = sources[0].name()
return Generator.generated_targets(self, sources,
prop_set, project, name)
# Note: the 'H' source type will catch both '.h' header and '.hpp' header. The
# latter have HPP type, but HPP type is derived from H. The type of compilation
# is determined entirely by the destination type.
generators.register(GccPchGenerator('gcc.compile.c.pch', False, ['H'], ['C_PCH'], ['<pch>on', '<toolset>gcc' ]))
generators.register(GccPchGenerator('gcc.compile.c++.pch', False, ['H'], ['CPP_PCH'], ['<pch>on', '<toolset>gcc' ]))
# Override default do-nothing generators.
generators.override('gcc.compile.c.pch', 'pch.default-c-pch-generator')
generators.override('gcc.compile.c++.pch', 'pch.default-cpp-pch-generator')
flags('gcc.compile', 'PCH_FILE', ['<pch>on'], ['<pch-file>'])
# Declare flags and action for compilation
flags('gcc.compile', 'OPTIONS', ['<optimization>off'], ['-O0'])
flags('gcc.compile', 'OPTIONS', ['<optimization>speed'], ['-O3'])
flags('gcc.compile', 'OPTIONS', ['<optimization>space'], ['-Os'])
flags('gcc.compile', 'OPTIONS', ['<inlining>off'], ['-fno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>on'], ['-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>full'], ['-finline-functions', '-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<warnings>off'], ['-w'])
flags('gcc.compile', 'OPTIONS', ['<warnings>on'], ['-Wall'])
flags('gcc.compile', 'OPTIONS', ['<warnings>all'], ['-Wall', '-pedantic'])
flags('gcc.compile', 'OPTIONS', ['<warnings-as-errors>on'], ['-Werror'])
flags('gcc.compile', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.compile', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.compile.c++', 'OPTIONS', ['<rtti>off'], ['-fno-rtti'])
flags('gcc.compile.c++', 'OPTIONS', ['<exception-handling>off'], ['-fno-exceptions'])
# On cygwin and mingw, gcc generates position independent code by default, and
# warns if -fPIC is specified. This might not be the right way of checking if
# we're using cygwin. For example, it's possible to run cygwin gcc from NT
# shell, or using crosscompiling. But we'll solve that problem when it's time.
# In that case we'll just add another parameter to 'init' and move this logic
# inside 'init'.
if not os_name () in ['CYGWIN', 'NT']:
# This logic will add -fPIC for all compilations:
#
# lib a : a.cpp b ;
# obj b : b.cpp ;
# exe c : c.cpp a d ;
# obj d : d.cpp ;
#
# This all is fine, except that 'd' will be compiled with -fPIC even though
# it's not needed, as 'd' is used only in exe. However, it's hard to detect
# where a target is going to be used. Alternative, we can set -fPIC only
# when main target type is LIB but than 'b' will be compiled without -fPIC.
# In x86-64 that will lead to link errors. So, compile everything with
# -fPIC.
#
# Yet another alternative would be to create propagated <sharedable>
# feature, and set it when building shared libraries, but that's hard to
# implement and will increase target path length even more.
flags('gcc.compile', 'OPTIONS', ['<link>shared'], ['-fPIC'])
if os_name() != 'NT' and os_name() != 'OSF' and os_name() != 'HPUX':
# OSF does have an option called -soname but it doesn't seem to work as
# expected, therefore it has been disabled.
HAVE_SONAME = ''
SONAME_OPTION = '-h'
flags('gcc.compile', 'USER_OPTIONS', [], ['<cflags>'])
flags('gcc.compile.c++', 'USER_OPTIONS',[], ['<cxxflags>'])
flags('gcc.compile', 'DEFINES', [], ['<define>'])
flags('gcc.compile', 'INCLUDES', [], ['<include>'])
engine = get_manager().engine()
engine.register_action('gcc.compile.c++.pch',
'"$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
engine.register_action('gcc.compile.c.pch',
'"$(CONFIG_COMMAND)" -x c-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
def gcc_compile_cpp(targets, sources, properties):
# Some extensions are compiled as C++ by default. For others, we need to
# pass -x c++. We could always pass -x c++ but distcc does not work with it.
extension = os.path.splitext (sources [0]) [1]
lang = ''
if not extension in ['.cc', '.cp', '.cxx', '.cpp', '.c++', '.C']:
lang = '-x c++'
get_manager().engine().set_target_variable (targets, 'LANG', lang)
engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
def gcc_compile_c(targets, sources, properties):
engine = get_manager().engine()
# If we use the name g++ then default file suffix -> language mapping does
    # not work. So we have to pass the -x option. Maybe we can work around this by
# allowing the user to specify both C and C++ compiler names.
#if $(>:S) != .c
#{
engine.set_target_variable (targets, 'LANG', '-x c')
#}
engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
engine.register_action(
'gcc.compile.c++',
'"$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-128 $(OPTIONS) ' +
'$(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" ' +
'-c -o "$(<:W)" "$(>:W)"',
function=gcc_compile_cpp,
bound_list=['PCH_FILE'])
engine.register_action(
'gcc.compile.c',
'"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) ' +
'-I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
function=gcc_compile_c,
bound_list=['PCH_FILE'])
def gcc_compile_asm(targets, sources, properties):
get_manager().engine().set_target_variable(targets, 'LANG', '-x assembler-with-cpp')
engine.register_action(
'gcc.compile.asm',
'"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
function=gcc_compile_asm)
class GccLinkingGenerator(unix.UnixLinkingGenerator):
"""
    The class which checks that we don't try to use the <runtime-link>static
    property while creating or using a shared library, since it's not supported by
gcc/libc.
"""
def run(self, project, name, ps, sources):
# TODO: Replace this with the use of a target-os property.
no_static_link = False
if bjam.variable('UNIX'):
            no_static_link = True
##FIXME: what does this mean?
## {
## switch [ modules.peek : JAMUNAME ]
## {
## case * : no-static-link = true ;
## }
## }
reason = None
if no_static_link and ps.get('runtime-link') == 'static':
if ps.get('link') == 'shared':
reason = "On gcc, DLL can't be build with '<runtime-link>static'."
elif type.is_derived(self.target_types[0], 'EXE'):
for s in sources:
source_type = s.type()
if source_type and type.is_derived(source_type, 'SHARED_LIB'):
reason = "On gcc, using DLLS together with the " +\
"<runtime-link>static options is not possible "
if reason:
print 'warning:', reason
print 'warning:',\
"It is suggested to use '<runtime-link>static' together",\
"with '<link>static'." ;
return
else:
generated_targets = unix.UnixLinkingGenerator.run(self, project,
name, ps, sources)
return generated_targets
if on_windows():
flags('gcc.link.dll', '.IMPLIB-COMMAND', [], ['-Wl,--out-implib,'])
generators.register(
GccLinkingGenerator('gcc.link', True,
['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
[ 'EXE' ],
[ '<toolset>gcc' ]))
generators.register(
GccLinkingGenerator('gcc.link.dll', True,
['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
['IMPORT_LIB', 'SHARED_LIB'],
['<toolset>gcc']))
else:
generators.register(
GccLinkingGenerator('gcc.link', True,
['LIB', 'OBJ'],
['EXE'],
['<toolset>gcc']))
generators.register(
GccLinkingGenerator('gcc.link.dll', True,
['LIB', 'OBJ'],
['SHARED_LIB'],
['<toolset>gcc']))
# Declare flags for linking.
# First, the common flags.
flags('gcc.link', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.link', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.link', 'USER_OPTIONS', [], ['<linkflags>'])
flags('gcc.link', 'LINKPATH', [], ['<library-path>'])
flags('gcc.link', 'FINDLIBS-ST', [], ['<find-static-library>'])
flags('gcc.link', 'FINDLIBS-SA', [], ['<find-shared-library>'])
flags('gcc.link', 'LIBRARIES', [], ['<library-file>'])
# For <runtime-link>static we made sure there are no dynamic libraries in the
# link. On HP-UX not all system libraries exist as archived libraries (for
# example, there is no libunwind.a), so, on this platform, the -static option
# cannot be specified.
if os_name() != 'HPUX':
flags('gcc.link', 'OPTIONS', ['<runtime-link>static'], ['-static'])
# Now, the vendor specific flags.
# The parameter linker can be either gnu, darwin, osf, hpux or sun.
def init_link_flags(toolset, linker, condition):
"""
Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun.
"""
toolset_link = toolset + '.link'
if linker == 'gnu':
# Strip the binary when no debugging is needed. We use --strip-all flag
# as opposed to -s since icc (intel's compiler) is generally
# option-compatible with and inherits from the gcc toolset, but does not
# support -s.
# FIXME: what does unchecked translate to?
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ;
flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ;
# gnu ld has the ability to change the search behaviour for libraries
# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic
# and change search for -l switches that follow them. The following list
# shows the tried variants.
# The search stops at the first variant that has a match.
# *nix: -Bstatic -lxxx
# libxxx.a
#
# *nix: -Bdynamic -lxxx
# libxxx.so
# libxxx.a
#
# windows (mingw,cygwin) -Bstatic -lxxx
# libxxx.a
# xxx.lib
#
# windows (mingw,cygwin) -Bdynamic -lxxx
# libxxx.dll.a
# xxx.dll.a
# libxxx.a
# xxx.lib
# cygxxx.dll (*)
# libxxx.dll
# xxx.dll
# libxxx.a
#
# (*) This is for cygwin
# Please note that -Bstatic and -Bdynamic are not a guarantee that a
# static or dynamic lib indeed gets linked in. The switches only change
# search patterns!
# On *nix mixing shared libs with static runtime is not a good idea.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bdynamic']) # : unchecked ;
# On windows allow mixing of static and dynamic libs with static
# runtime.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bdynamic']) # : unchecked ;
flags(toolset_link, 'OPTIONS',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
elif linker == 'darwin':
# On Darwin, the -s option to ld does not work unless we pass -static,
# and passing -static unconditionally is a bad idea. So, don't pass -s.
# at all, darwin.jam will use separate 'strip' invocation.
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
elif linker == 'osf':
# No --strip-all, just -s.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
        # This does not support -R.
flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ;
# -rpath-link is not supported at all.
elif linker == 'sun':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
        # The Solaris linker does not have a separate -rpath-link, but allows
        # using -L for the same purpose.
flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ;
# This permits shared libraries with non-PIC code on Solaris.
# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the
# following is not needed. Whether -fPIC should be hardcoded, is a
# separate question.
# AH, 2004/10/16: it is still necessary because some tests link against
# static libraries that were compiled without PIC.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
# : unchecked ;
elif linker == 'hpux':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
['-Wl,-s']) # : unchecked ;
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
['-fPIC']) # : unchecked ;
else:
# FIXME:
errors.user_error(
"$(toolset) initialization: invalid linker '$(linker)' " +
"The value '$(linker)' specified for <linker> is not recognized. " +
"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'")
# Declare actions for linking.
def gcc_link(targets, sources, properties):
engine = get_manager().engine()
engine.set_target_variable(targets, 'SPACE', ' ')
# Serialize execution of the 'link' action, since running N links in
# parallel is just slower. For now, serialize only gcc links; it might be a
# good idea to serialize all links.
engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
engine.register_action(
'gcc.link',
'"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
'-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
'-Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" ' +
'$(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
'-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
'$(OPTIONS) $(USER_OPTIONS)',
function=gcc_link,
bound_list=['LIBRARIES'])
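# Illustrative expansion (hypothetical values): with LINKPATH=/usr/local/lib,
# RPATH=/opt/lib and FINDLIBS-SA=m, the action above would come out roughly as
#   g++ -L"/usr/local/lib" -Wl,-R -Wl,"/opt/lib" -o "app" \
#       -Wl,--start-group "main.o" -Wl,-Bdynamic -lm -Wl,--end-group
# (the --start-group/--end-group and -B switches appear only for the 'gnu'
# linker configuration above; paths and names here are made up).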
# Default value. Mostly for the sake of intel-linux that inherits from gcc, but
# does not have the same logic to set the .AR variable. We can put the same
# logic in intel-linux, but that's hardly worth the trouble as on Linux, 'ar' is
# always available.
__AR = 'ar'
flags('gcc.archive', 'AROPTIONS', [], ['<archiveflags>'])
def gcc_archive(targets, sources, properties):
# Always remove archive and start again. Here's rationale from
#
# <NAME>:
#
# I had a file, say a1.c, that was included into liba.a. I moved a1.c to
# a2.c, updated my Jamfiles and rebuilt. My program was crashing with absurd
# errors. After some debugging I traced it back to the fact that a1.o was
# *still* in liba.a
#
# <NAME>:
#
# Originally removing the archive was done by splicing an RM onto the
# archive action. That makes archives fail to build on NT when they have
# many files because it will no longer execute the action directly and blow
# the line length limit. Instead we remove the file in a different action,
# just before building the archive.
clean = targets[0] + '(clean)'
bjam.call('TEMPORARY', clean)
bjam.call('NOCARE', clean)
engine = get_manager().engine()
engine.set_target_variable('LOCATE', clean, bjam.call('get-target-variable', targets, 'LOCATE'))
engine.add_dependency(clean, sources)
engine.add_dependency(targets, clean)
engine.set_update_action('common.RmTemps', clean, targets)
# Declare action for creating static libraries.
# The letter 'r' means to add files to the archive with replacement. Since we
# remove the archive, we don't care about replacement, but there's no option
# "add without replacement".
# The letter 'c' suppresses the warning in case the archive does not exist
# yet. That warning is produced only on some platforms, for whatever reason.
engine.register_action('gcc.archive',
'''"$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)"
"$(.RANLIB)" "$(<)"
''',
function=gcc_archive,
flags=['piecemeal'])
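# Illustrative expansion (hypothetical file names): archiving a.o and b.o into
# libfoo.a would run roughly
#   ar rc "libfoo.a" "a.o" "b.o"
#   ranlib "libfoo.a"
# assuming .AR and .RANLIB resolve to the plain 'ar' and 'ranlib' tools.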
def gcc_link_dll(targets, sources, properties):
engine = get_manager().engine()
engine.set_target_variable(targets, 'SPACE', ' ')
engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
engine.set_target_variable(targets, "HAVE_SONAME", HAVE_SONAME)
engine.set_target_variable(targets, "SONAME_OPTION", SONAME_OPTION)
engine.register_action(
'gcc.link.dll',
# Differ from 'link' above only by -shared.
'"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
'-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
'"$(.IMPLIB-COMMAND)$(<[1])" -o "$(<[-1])" ' +
'$(HAVE_SONAME)-Wl,$(SONAME_OPTION)$(SPACE)-Wl,$(<[-1]:D=) ' +
'-shared $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
'-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
'$(OPTIONS) $(USER_OPTIONS)',
function = gcc_link_dll,
bound_list=['LIBRARIES'])
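# Illustrative expansion (hypothetical values): on an ELF target with
# HAVE_SONAME set, the action above would produce roughly
#   g++ -L... -o "libfoo.so" -Wl,-soname -Wl,libfoo.so -shared "foo.o" ...
# i.e. the same command as gcc.link plus -shared and the soname option; on
# Windows/Cygwin the .IMPLIB-COMMAND part additionally emits an import
# library. The library and object names here are made up.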
# Set up threading support. It's somewhat contrived, so perform it at the end,
# to avoid cluttering other code.
if on_windows():
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-mthreads'])
elif bjam.variable('UNIX'):
jamuname = bjam.variable('JAMUNAME')
host_os_name = jamuname[0]
if host_os_name.startswith('SunOS'):
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthreads'])
flags('gcc', 'FINDLIBS-SA', [], ['rt'])
elif host_os_name == 'BeOS':
# BeOS has no threading options, don't set anything here.
pass
elif host_os_name == 'Haiku':
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-lroot'])
# there is no -lrt on Haiku, and -pthread is implicit
elif host_os_name.endswith('BSD'):
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
# there is no -lrt on BSD
elif host_os_name == 'DragonFly':
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
# there is no -lrt on BSD - DragonFly is a FreeBSD variant,
# which annoyingly doesn't say it's a *BSD.
elif host_os_name == 'IRIX':
# gcc on IRIX does not support multi-threading, don't set anything here.
pass
elif host_os_name == 'Darwin':
# Darwin has no threading options, don't set anything here.
pass
else:
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
flags('gcc', 'FINDLIBS-SA', [], ['rt'])
def cpu_flags(toolset, variable, architecture, instruction_set, values, default=None):
#FIXME: for some reason this fails. Probably out of date feature code
## if default:
## flags(toolset, variable,
## ['<architecture>' + architecture + '/<instruction-set>'],
## values)
flags(toolset, variable,
#FIXME: same as above
[##'<architecture>/<instruction-set>' + instruction_set,
'<architecture>' + architecture + '/<instruction-set>' + instruction_set],
values)
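# For example (given the helper above), the call
#   cpu_flags('gcc', 'OPTIONS', 'x86', 'core2', ['-march=core2'])
# is equivalent to
#   flags('gcc', 'OPTIONS', ['<architecture>x86/<instruction-set>core2'],
#         ['-march=core2'])
# and the 'default' argument is currently ignored because of the FIXME above.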
# Set architecture/instruction-set options.
#
# x86 and compatible
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'native', ['-march=native'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i486', ['-march=i486'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i586', ['-march=i586'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i686', ['-march=i686'], default=True)
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium', ['-march=pentium'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-mmx', ['-march=pentium-mmx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentiumpro', ['-march=pentiumpro'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium2', ['-march=pentium2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3', ['-march=pentium3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3m', ['-march=pentium3m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-m', ['-march=pentium-m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4', ['-march=pentium4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4m', ['-march=pentium4m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'prescott', ['-march=prescott'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nocona', ['-march=nocona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'core2', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe-l', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'allendale', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'wolfdale', ['-march=core2', '-msse4.1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'merom', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'merom-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'kentsfield', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'kentsfield-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'yorksfield', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'penryn', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'corei7', ['-march=corei7'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nehalem', ['-march=corei7'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'corei7-avx', ['-march=corei7-avx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'sandy-bridge', ['-march=corei7-avx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'core-avx-i', ['-march=core-avx-i'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'ivy-bridge', ['-march=core-avx-i'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'haswell', ['-march=core-avx-i', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6', ['-march=k6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-2', ['-march=k6-2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-3', ['-march=k6-3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon', ['-march=athlon'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-tbird', ['-march=athlon-tbird'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-4', ['-march=athlon-4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-xp', ['-march=athlon-xp'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-mp', ['-march=athlon-mp'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8', ['-march=k8'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron', ['-march=opteron'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64', ['-march=athlon64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-fx', ['-march=athlon-fx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8-sse3', ['-march=k8-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron-sse3', ['-march=opteron-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64-sse3', ['-march=athlon64-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'amdfam10', ['-march=amdfam10'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'barcelona', ['-march=barcelona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver1', ['-march=bdver1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver2', ['-march=bdver2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver3', ['-march=bdver3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'btver1', ['-march=btver1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'btver2', ['-march=btver2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip-c6', ['-march=winchip-c6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip2', ['-march=winchip2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3', ['-march=c3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3-2', ['-march=c3-2'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'atom', ['-march=atom'])
# Sparc
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v7', ['-mcpu=v7'], default=True)
cpu_flags('gcc', 'OPTIONS', 'sparc', 'cypress', ['-mcpu=cypress'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v8', ['-mcpu=v8'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'supersparc', ['-mcpu=supersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite', ['-mcpu=sparclite'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'hypersparc', ['-mcpu=hypersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite86x', ['-mcpu=sparclite86x'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f930', ['-mcpu=f930'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f934', ['-mcpu=f934'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclet', ['-mcpu=sparclet'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'tsc701', ['-mcpu=tsc701'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v9', ['-mcpu=v9'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc', ['-mcpu=ultrasparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc3', ['-mcpu=ultrasparc3'])
# RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'power', '403', ['-mcpu=403'])
cpu_flags('gcc', 'OPTIONS', 'power', '505', ['-mcpu=505'])
cpu_flags('gcc', 'OPTIONS', 'power', '601', ['-mcpu=601'])
cpu_flags('gcc', 'OPTIONS', 'power', '602', ['-mcpu=602'])
cpu_flags('gcc', 'OPTIONS', 'power', '603', ['-mcpu=603'])
cpu_flags('gcc', 'OPTIONS', 'power', '603e', ['-mcpu=603e'])
cpu_flags('gcc', 'OPTIONS', 'power', '604', ['-mcpu=604'])
cpu_flags('gcc', 'OPTIONS', 'power', '604e', ['-mcpu=604e'])
cpu_flags('gcc', 'OPTIONS', 'power', '620', ['-mcpu=620'])
cpu_flags('gcc', 'OPTIONS', 'power', '630', ['-mcpu=630'])
cpu_flags('gcc', 'OPTIONS', 'power', '740', ['-mcpu=740'])
cpu_flags('gcc', 'OPTIONS', 'power', '7400', ['-mcpu=7400'])
cpu_flags('gcc', 'OPTIONS', 'power', '7450', ['-mcpu=7450'])
cpu_flags('gcc', 'OPTIONS', 'power', '750', ['-mcpu=750'])
cpu_flags('gcc', 'OPTIONS', 'power', '801', ['-mcpu=801'])
cpu_flags('gcc', 'OPTIONS', 'power', '821', ['-mcpu=821'])
cpu_flags('gcc', 'OPTIONS', 'power', '823', ['-mcpu=823'])
cpu_flags('gcc', 'OPTIONS', 'power', '860', ['-mcpu=860'])
cpu_flags('gcc', 'OPTIONS', 'power', '970', ['-mcpu=970'])
cpu_flags('gcc', 'OPTIONS', 'power', '8540', ['-mcpu=8540'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power', ['-mcpu=power'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power2', ['-mcpu=power2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power3', ['-mcpu=power3'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power4', ['-mcpu=power4'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power5', ['-mcpu=power5'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc', ['-mcpu=powerpc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc64', ['-mcpu=powerpc64'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios', ['-mcpu=rios'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios1', ['-mcpu=rios1'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios2', ['-mcpu=rios2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rsc', ['-mcpu=rsc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rs64a', ['-mcpu=rs64'])
# AIX variant of RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32/<target-os>aix'], ['-maix32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-maix64'])
flags('gcc', 'AROPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-X64'])
|
codebraid/converters/base.py
|
musm/codebraid
| 270 |
68931
|
<reponame>musm/codebraid
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018-2019, <NAME>
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
import collections
from collections import OrderedDict as ODict
import hashlib
import io
import json
import os
import pathlib
import re
import sys
import textwrap
from typing import Dict, List, Optional, Sequence, Union
import zipfile
from .. import codeprocessors
from .. import err
from .. import util
class Include(dict):
'''
Store code chunk options related to including a file or other external
resource. Also perform the include and modify the code chunk as
necessary.
'''
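# Illustrative example (hypothetical file name): an include value such as
#   {'file': 'script.py', 'lines': '1-10,15'}
# reads script.py and keeps only lines 1-10 and 15; the supported keywords
# and selection options are listed in the keyword sets defined below.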
def __init__(self, code_chunk, include_options):
# Start by creating fallback values for attributes
self.code_lines = None
self.code_chunk = code_chunk
if not isinstance(include_options, dict):
code_chunk.source_errors.append('Invalid "include" value "{0}"'.format(include_options))
return
if not all(k in self.keywords for k in include_options):
unknown_keys = ', '.join("{0}".format(k) for k in include_options if k not in self.keywords)
code_chunk.source_errors.append('Unknown "include" keywords: {0}'.format(unknown_keys))
if not all(isinstance(v, str) and v for v in include_options.values()):
invalid_value_keys = ', '.join("{0}".format(k) for k, v in include_options.items() if not isinstance(v, str) or not v)
code_chunk.source_errors.append('Invalid values for "include" keywords: {0}'.format(invalid_value_keys))
start_keywords = tuple(k for k in include_options if k in self._start_keywords)
end_keywords = tuple(k for k in include_options if k in self._end_keywords)
range_keywords = tuple(k for k in include_options if k in self._range_keywords)
if ((range_keywords and (start_keywords or end_keywords)) or
len(range_keywords) > 1 or len(start_keywords) > 1 or len(end_keywords) > 1):
conflicting_keys = ', '.join("{0}".format(k) for k in include_options if k in self._selection_keywords)
code_chunk.source_errors.append('Too many keywords for selecting part of an "include" file: {0}'.format(conflicting_keys))
file = include_options.get('file', None)
encoding = include_options.get('encoding', 'utf8')
if file is None:
code_chunk.source_errors.append('Missing "include" keyword "file"')
if code_chunk.source_errors:
return
file_path = pathlib.Path(file).expanduser()
try:
text = file_path.read_text(encoding=encoding)
except FileNotFoundError:
code_chunk.source_errors.append('Cannot include nonexistent file "{0}"'.format(file))
except LookupError:
code_chunk.source_errors.append('Unknown encoding "{0}"'.format(encoding))
except PermissionError:
code_chunk.source_errors.append('Insufficient permissions to access file "{0}"'.format(file))
except UnicodeDecodeError:
code_chunk.source_errors.append('Cannot decode file "{0}" with encoding "{1}"'.format(file, encoding))
if code_chunk.source_errors:
return
selection_keywords = start_keywords + end_keywords + range_keywords
if selection_keywords:
for kw in selection_keywords:
text = getattr(self, '_option_'+kw)(include_options[kw], text)
if code_chunk.source_errors:
return
code_lines = util.splitlines_lf(text)
self.code_lines = code_lines
self.update(include_options)
keywords = set(['file', 'encoding', 'lines', 'regex',
'start_string', 'start_regex', 'after_string', 'after_regex',
'before_string', 'before_regex', 'end_string', 'end_regex'])
_start_keywords = set(['start_string', 'start_regex', 'after_string', 'after_regex'])
_end_keywords = set(['before_string', 'before_regex', 'end_string', 'end_regex'])
_range_keywords = set(['lines', 'regex'])
_selection_keywords = _start_keywords | _end_keywords | _range_keywords
def _option_lines(self, value, text,
pattern_re=re.compile(r'{n}(?:-(?:{n})?)?(?:,{n}(?:-(?:{n})?)?)*\Z'.format(n='[1-9][0-9]*'))):
value = value.replace(' ', '')
if not pattern_re.match(value):
self.code_chunk.source_errors.append('Invalid value for "include" option "lines"')
return
max_line_number = text.count('\n')
if text[-1:] != '\n':
max_line_number += 1
include_line_indices = set()
for line_range in value.split(','):
if '-' not in line_range:
include_line_indices.add(int(line_range)-1)
else:
start, end = line_range.split('-')
start = int(start) - 1
end = int(end) if end else max_line_number
include_line_indices.update(range(start, end))
text_lines = util.splitlines_lf(text)
return '\n'.join(text_lines[n] for n in sorted(include_line_indices))
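# For example, with the code above a 'lines' value of '2-4,7' selects the
# zero-based indices {1, 2, 3, 6}, and an open range like '5-' extends to the
# end of the file.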
def _option_regex(self, value, text):
try:
pattern_re = re.compile(value, re.MULTILINE | re.DOTALL)
except re.error:
self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "regex"')
return
match = pattern_re.search(text)
if match is None:
self.code_chunk.source_errors.append('The pattern given by "include" option "regex" was not found')
return
return match.group()
def _option_start_string(self, value, text):
index = text.find(value)
if index < 0:
self.code_chunk.source_errors.append('The pattern given by "include" option "start_string" was not found')
return
return text[index:]
def _option_start_regex(self, value, text):
try:
pattern_re = re.compile(value, re.MULTILINE | re.DOTALL)
except re.error:
self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "start_regex"')
return
match = pattern_re.search(text)
if match is None:
self.code_chunk.source_errors.append('The pattern given by "include" option "start_regex" was not found')
return
return text[match.start():]
def _option_after_string(self, value, text):
index = text.find(value)
if index < 0:
self.code_chunk.source_errors.append('The pattern given by "include" option "after_string" was not found')
return
return text[index+len(value):]
def _option_after_regex(self, value, text):
try:
pattern_re = re.compile(value, re.MULTILINE | re.DOTALL)
except re.error:
self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "after_regex"')
return
match = pattern_re.search(text)
if match is None:
self.code_chunk.source_errors.append('The pattern given by "include" option "after_regex" was not found')
return
return text[match.end():]
def _option_before_string(self, value, text):
index = text.find(value)
if index < 0:
self.code_chunk.source_errors.append('The pattern given by "include" option "before_string" was not found')
return
return text[:index]
def _option_before_regex(self, value, text):
try:
pattern_re = re.compile(value, re.MULTILINE | re.DOTALL)
except re.error:
self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "before_regex"')
return
match = pattern_re.search(text)
if match is None:
self.code_chunk.source_errors.append('The pattern given by "include" option "before_regex" was not found')
return
return text[:match.start()]
def _option_end_string(self, value, text):
index = text.find(value)
if index < 0:
self.code_chunk.source_errors.append('The pattern given by "include" option "end_string" was not found')
return
return text[:index+len(value)]
def _option_end_regex(self, value, text):
try:
pattern_re = re.compile(value, re.MULTILINE | re.DOTALL)
except re.error:
self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "end_regex"')
return
match = pattern_re.search(text)
if match is None:
self.code_chunk.source_errors.append('The pattern given by "include" option "end_regex" was not found')
return
return text[:match.end()]
class Options(dict):
'''
Store code chunk options. Also modify the code chunk as necessary based
on the options.
Option processing methods check options for validity and process them, but
do not perform any type conversions. Any desired type conversions must be
performed in format-specific subclasses of CodeChunk, which can take into
account the data types that a given document format allows for options.
Subclasses must also handle duplicate options, since at this point options
must have been reduced to a dict.
The effect of all options is independent of their order. When two options
would have an order-dependent effect, only one of them is permitted at a
time.
Invalid options related to presentation result in warnings, while invalid
options related to code execution result in errors. When possible, option
processing proceeds even after an error, to give a more complete error
message. There are two approaches to handling errors: Stop all code
execution, or stop all code execution related to the error. The latter
approach is currently taken. Processing as many options as possible makes
it easier to determine which code execution is related to an error. For
example, if the session option is processed for a code chunk with an
error, then only that session can be disabled, instead of the entire
language related to the error.
'''
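# Illustrative example (hypothetical values): for an executed code chunk,
# custom options such as {'session': 'plots', 'show': 'code+stdout'} name the
# session and override the default 'show' ordering; unknown keys are reported
# through code_chunk.source_errors in __init__ below.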
def __init__(self, code_chunk, custom_options):
self.code_chunk = code_chunk
if code_chunk.inline:
self.update(self._default_inline_options)
else:
self.update(self._default_block_options)
if code_chunk.execute:
self['session'] = None
else:
self['source'] = None
self['first_chunk_options'] = {}
if any(k not in self.keywords for k in custom_options):
unknown_keys = ', '.join('"{0}"'.format(k) for k in custom_options if k not in self.keywords)
# Raise an error for unknown options. There is no way to tell
# whether an execution or presentation option was intended, so
# take the safer approach.
code_chunk.source_errors.append('Unknown keywords: {0}'.format(unknown_keys))
# Treat received `custom_options` as immutable
custom_options = {k: v for k, v in custom_options.items() if k in self.keywords}
self.custom_options = custom_options
for k, v in custom_options.items():
if k not in self._after_copy_keywords:
getattr(self, '_option_'+k)(k, v)
if not code_chunk.source_errors and 'copy' not in self:
# Only handle 'show' and 'hide' if there are no errors so far and
# there is not a pending 'copy', which for some commands might
# change `.is_expr` or the defaults for 'show'. If there are
# errors, 'show' and 'hide' are never used.
if code_chunk.inline:
self['show'] = self._default_inline_show[code_chunk.command].copy()
else:
self['show'] = self._default_block_show[code_chunk.command].copy()
for k, v in custom_options.items():
if k in self._after_copy_keywords:
getattr(self, '_option_'+k)(k, v)
def finalize_after_copy(self):
'''
Complete any option processing that must wait until after copying.
For the paste command, 'show' can be inherited. For paste and code,
`.is_expr` can be inherited. 'lang' can also be inherited.
'''
code_chunk = self.code_chunk
custom_options = self.custom_options
if self['lang'] is None:
self['lang'] = code_chunk.copy_chunks[0].options['lang']
if code_chunk.inline:
if code_chunk.command == 'paste' and 'show' not in custom_options:
self['show'] = code_chunk.copy_chunks[0].options['show'].copy() # Inherit
else:
self['show'] = self._default_inline_show[code_chunk.command].copy()
else:
if code_chunk.command == 'paste' and 'show' not in custom_options:
self['show'] = code_chunk.copy_chunks[0].options['show'].copy() # Inherit
else:
self['show'] = self._default_block_show[code_chunk.command].copy()
for key in self._after_copy_keywords:
if key in custom_options:
getattr(self, '_option_'+key)(key, custom_options[key])
_base_keywords = set(['complete', 'copy', 'example', 'hide', 'hide_markup_keys', 'include',
'lang', 'name', 'outside_main', 'session', 'source', 'show'])
_layout_keywords = set(['{0}_{1}'.format(dsp, kw) if dsp else kw
for dsp in ('', 'markup', 'copied_markup', 'code', 'stdout', 'stderr')
for kw in ('first_number', 'line_numbers', 'rewrap_lines', 'rewrap_width', 'expand_tabs', 'tab_size')])
_first_chunk_execute_keywords = set(['executable', 'jupyter_kernel'])
_first_chunk_save_keywords = set(['save', 'save_as'])
_first_chunk_other_keywords = set(['jupyter_timeout', 'live_output'])
_first_chunk_keywords = _first_chunk_execute_keywords | _first_chunk_save_keywords | _first_chunk_other_keywords
keywords = _base_keywords | _layout_keywords | _first_chunk_keywords
_after_copy_keywords = set(['hide', 'show'])
# Default values for show and session/source are inserted later based on
# command and inline status
_default_inline_options = {'complete': True,
'example': False,
'lang': None,
'outside_main': False}
_default_block_options = _default_inline_options.copy()
_default_block_options.update({'code_first_number': 'next',
'code_line_numbers': True})
# The defaultdict handles unknown commands that are represented as None
_default_rich_output = 'latex|markdown|png|jpg|plain'.split('|')
_default_inline_show = collections.defaultdict(lambda: ODict(), # Unknown -> show nothing
{'code': ODict([('code', 'verbatim')]),
'expr': ODict([('expr', 'raw'),
('stderr', 'verbatim')]),
# expr and rich_output don't clash, because expr is only present
# with the built-in code execution system, while rich_output
# requires a Jupyter kernel. If the built-in system gains
# rich_output capabilities or there are other related changes,
# this may need refactoring.
'nb': ODict([('expr', 'verbatim'),
('rich_output', _default_rich_output),
('stderr', 'verbatim')]),
'paste': ODict(),
'run': ODict([('stdout', 'raw'),
('stderr', 'verbatim'),
('rich_output', _default_rich_output)])})
_default_block_show = collections.defaultdict(lambda: ODict(), # Unknown -> show nothing
{'code': ODict([('code', 'verbatim')]),
'nb': ODict([('code', 'verbatim'),
('stdout', 'verbatim'),
('stderr', 'verbatim'),
('rich_output', _default_rich_output)]),
'paste': ODict(),
'repl': ODict([('repl', 'verbatim')]),
'run': ODict([('stdout', 'raw'),
('stderr', 'verbatim'),
('rich_output', _default_rich_output)])})
def _option_bool_warning(self, key, value):
if not isinstance(value, bool):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self[key] = value
def _option_bool_error(self, key, value):
if not isinstance(value, bool):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self[key] = value
def _option_str_warning(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self[key] = value
def _option_str_error(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self[key] = value
def _option_positive_int_warning(self, key, value):
if not isinstance(value, int) or value <= 0:
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self[key] = value
def _option_positive_int_error(self, key, value):
if not isinstance(value, int) or value <= 0:
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self[key] = value
def _option_first_chunk_bool_error(self, key, value):
if not isinstance(value, bool):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self['first_chunk_options'][key] = value
def _option_first_chunk_string_error(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
first_chunk_options = self['first_chunk_options']
if (key in self._first_chunk_execute_keywords and
any(k in first_chunk_options for k in self._first_chunk_execute_keywords)):
conflicting_options = ', '.join('"{0}"'.format(k) for k in self._first_chunk_execute_keywords if k in first_chunk_options)
self.code_chunk.source_errors.append('Conflicting options: {0}'.format(conflicting_options))
else:
first_chunk_options[key] = value
def _option_first_chunk_int_warning(self, key, value):
if not isinstance(value, int):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
self['first_chunk_options'][key] = value
_option_executable = _option_first_chunk_string_error
_option_jupyter_kernel = _option_first_chunk_string_error
_option_jupyter_timeout = _option_first_chunk_int_warning
_option_save = _option_first_chunk_bool_error
_option_save_as = _option_first_chunk_string_error
_option_live_output = _option_first_chunk_bool_error
_option_example = _option_bool_warning
_option_lang = _option_str_error
def _option_complete(self, key, value):
if not isinstance(value, bool):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
elif not self.code_chunk.execute:
self.code_chunk.source_errors.append('Option "complete" is only compatible with executed code chunks')
elif self.code_chunk.command == 'repl':
self.code_chunk.source_errors.append('Option "complete" is not compatible with "repl" command')
elif self.code_chunk.is_expr and not value:
self.code_chunk.source_errors.append('Option "complete" value "false" is incompatible with expr command')
elif self['outside_main']:
# Technically, this is only required for complete=true, but
# prohibiting it in all cases is more consistent
self.code_chunk.source_errors.append('Option "complete" is incompatible with "outside_main" value "true"')
else:
self[key] = value
def _option_copy(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
elif 'include' in self:
self.code_chunk.source_errors.append('Option "copy" is incompatible with "include"')
else:
# Since non-identifier code chunk names can't be defined, there's
# no need to check for identifier-style names here
self[key] = [x.strip() for x in value.split('+')]
def _option_expand_tabs(self, key, value):
if key == 'expand_tabs':
key = 'code_expand_tabs'
self._option_bool_warning(key, value)
_option_markup_expand_tabs = _option_expand_tabs
_option_copied_markup_expand_tabs = _option_expand_tabs
_option_code_expand_tabs = _option_expand_tabs
_option_stdout_expand_tabs = _option_expand_tabs
_option_stderr_expand_tabs = _option_expand_tabs
def _option_first_number(self, key, value):
if not ((isinstance(value, int) and value > 0) or (isinstance(value, str) and value == 'next')):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
if key == 'first_number':
key = 'code_first_number'
self[key] = value
_option_markup_first_number = _option_first_number
_option_copied_markup_first_number = _option_first_number
_option_code_first_number = _option_first_number
_option_stdout_first_number = _option_first_number
_option_stderr_first_number = _option_first_number
def _option_rewrap_lines(self, key, value):
if key == 'rewrap_lines':
key = 'code_rewrap_lines'
self._option_bool_warning(key, value)
_option_markup_rewrap_lines = _option_rewrap_lines
_option_copied_markup_rewrap_lines = _option_rewrap_lines
_option_code_rewrap_lines = _option_rewrap_lines
_option_stdout_rewrap_lines = _option_rewrap_lines
_option_stderr_rewrap_lines = _option_rewrap_lines
def _option_rewrap_width(self, key, value):
if key == 'rewrap_width':
key = 'code_rewrap_width'
self._option_positive_int_warning(key, value)
_option_markup_rewrap_width = _option_rewrap_width
_option_copied_markup_rewrap_width = _option_rewrap_width
_option_code_rewrap_width = _option_rewrap_width
_option_stdout_rewrap_width = _option_rewrap_width
_option_stderr_rewrap_width = _option_rewrap_width
def _option_hide(self, key, value,
display_values=set(['markup', 'copied_markup', 'code', 'stdout', 'stderr', 'expr', 'rich_output'])):
# 'hide' may be processed during `finalize_after_copy()` to allow for
# 'show' and `.is_expr` inheritance.
if not isinstance(value, str):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
elif 'show' in self.custom_options:
# 'hide' checks for 'show' conflict, so 'show' does not. Check
# in `custom_options` since there's a default 'show' in `self`.
self.code_chunk.source_warnings.append('Option "hide" cannot be used with "show"')
elif value == 'all':
self['show'] = ODict()
else:
hide_values = value.replace(' ', '').split('+')
if not all(v in display_values for v in hide_values):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
for v in hide_values:
self['show'].pop(v, None)
def _option_hide_markup_keys(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
# No need to check keys for validity; this is a display option.
hide_keys = set(value.replace(' ', '').split('+'))
hide_keys.add('hide_markup_keys')
self[key] = hide_keys
def _option_include(self, key, value):
# Include() does its own value check, so this isn't technically needed
if not isinstance(value, dict):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
elif 'copy' in self:
self.code_chunk.source_errors.append('Option "include" is incompatible with "copy"')
else:
include = Include(self.code_chunk, value)
if include.code_lines is not None:
self[key] = include
def _option_line_numbers(self, key, value):
if not isinstance(value, bool):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
else:
if key == 'line_numbers':
key = 'code_line_numbers'
self[key] = value
_option_markup_line_numbers = _option_line_numbers
_option_copied_markup_line_numbers = _option_line_numbers
_option_code_line_numbers = _option_line_numbers
_option_stdout_line_numbers = _option_line_numbers
_option_stderr_line_numbers = _option_line_numbers
def _option_name(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
elif not value.isidentifier():
self.code_chunk.source_warnings.append('Option "{0}" has invalid, non-identifier value "{1}"'.format(key, value))
else:
self[key] = value
def _option_outside_main(self, key, value):
if not isinstance(value, bool):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
elif not self.code_chunk.execute:
self.code_chunk.source_errors.append('Option "outside_main" is only compatible with executed code chunks')
elif self.code_chunk.command == 'repl':
self.code_chunk.source_errors.append('Option "outside_main" is not compatible with "repl" command')
elif self.code_chunk.is_expr and value:
self.code_chunk.source_errors.append('Option "outside_main" value "true" is incompatible with expr command')
elif value and 'complete' in self.custom_options:
self.code_chunk.source_errors.append('Option "outside_main" value "true" is incompatible with "complete"')
else:
self['complete'] = False
self[key] = value
def _option_source(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
elif self.code_chunk.execute and self.code_chunk.command is not None:
# Always preserve sources for unknown commands, so that these
# sources can be marked as having potential errors later
self.code_chunk.source_errors.append('Option "source" is only compatible with non-executed code chunks; otherwise, use "session"')
elif not value.isidentifier():
self.code_chunk.source_errors.append('Option "{0}" has invalid, non-identifier value "{1}"'.format(key, value))
else:
self[key] = value
def _option_session(self, key, value):
if not isinstance(value, str):
self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key, value))
elif not self.code_chunk.execute and self.code_chunk.command is not None:
# Always preserve sessions for unknown commands, so that these
# sessions can be marked as having potential errors later
self.code_chunk.source_errors.append('Option "session" is only compatible with executed code chunks; otherwise, use "source"')
elif not value.isidentifier():
self.code_chunk.source_errors.append('Option "{0}" has invalid, non-identifier value "{1}"'.format(key, value))
else:
self[key] = value
mime_map = {'latex': 'text/latex',
'html': 'text/html',
'markdown': 'text/markdown',
'plain': 'text/plain',
'png': 'image/png',
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'svg': 'image/svg+xml',
'pdf': 'application/pdf'}
mime_map_with_text_display = {}
rich_text_default_display = {}
for k, v in mime_map.items():
mime_map_with_text_display[k] = v
if v.startswith('text/'):
mime_map_with_text_display[k+':raw'] = v
mime_map_with_text_display[k+':verbatim'] = v
mime_map_with_text_display[k+':verbatim_or_empty'] = v
if k == 'plain':
rich_text_default_display[k] = 'verbatim'
else:
rich_text_default_display[k] = 'raw'
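# As a result of the loop above, e.g. 'markdown:verbatim' maps to
# 'text/markdown', while image formats like 'png' get no ':raw'/':verbatim'
# variants; rich_text_default_display covers only the text formats, with
# 'verbatim' for 'plain' and 'raw' for the rest.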
def _option_show(self, key, value):
# 'show' may be processed during `finalize_after_copy()` to allow for
# 'show' and `.is_expr` inheritance. 'hide' checks for 'show'
# conflict, so 'show' does not.
if not (isinstance(value, str) or value is None):
self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key, value))
elif value in ('none', None):
self[key] = ODict()
else:
value_processed = ODict()
for output_and_format in value.replace(' ', '').split('+'):
if ':' not in output_and_format:
output = output_and_format
format = None
else:
output, format = output_and_format.split(':', 1)
if output in value_processed:
self.code_chunk.source_warnings.append('Option "{0}" value "{1}" contains duplicate "{2}"'.format(key, value, output))
continue
if output in ('markup', 'copied_markup', 'code', 'repl'):
if format is None:
format = 'verbatim'
elif format != 'verbatim':
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key, output_and_format))
continue
if output == 'copied_markup' and 'copy' not in self.custom_options:
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"; can only be used with "copy"'.format(key, output_and_format))
continue
elif output in ('stdout', 'stderr'):
if format is None:
format = 'verbatim'
elif format not in ('verbatim', 'verbatim_or_empty', 'raw'):
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key, output_and_format))
continue
elif output == 'expr':
if not self.code_chunk.is_expr:
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key, output_and_format))
continue
if format is None:
format = 'raw'
elif format not in ('verbatim', 'verbatim_or_empty', 'raw'):
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key, output_and_format))
continue
elif output == 'rich_output':
if format is None:
format = self._default_rich_output
else:
format = format.split('|')
if not all(fmt in self.mime_map_with_text_display for fmt in format):
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key, output_and_format))
continue
else:
self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key, output_and_format))
continue
value_processed[output] = format
self[key] = value_processed
def _option_tab_size(self, key, value):
if key == 'tab_size':
key = 'code_tab_size'
self._option_positive_int_warning(key, value)
_option_markup_tab_size = _option_tab_size
_option_copied_markup_tab_size = _option_tab_size
_option_code_tab_size = _option_tab_size
_option_stdout_tab_size = _option_tab_size
_option_stderr_tab_size = _option_tab_size
class CodeChunk(object):
'''
Base class for code chunks.
'''
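# Illustrative usage (hypothetical values, shown with the base-class
# signature): a format-specific subclass is typically constructed along the
# lines of
#   CodeChunk('run', 'print(1 + 1)', {'session': 'main'},
#             source_name='doc.md', source_start_line_number=5, inline=False)
# after which .options, .code_lines and the source error/warning lists are
# populated.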
def __init__(self,
command: str,
code: Union[str, List[str]],
custom_options: dict,
*,
source_name: Optional[str]=None,
source_start_line_number: Optional[int]=None,
inline: Optional[bool]=None):
self.__pre_init__()
if command not in self.commands:
if command is None:
self.source_errors.append('Missing valid Codebraid command')
else:
self.source_errors.append('Unknown Codebraid command "{0}"'.format(command))
self.command = None
else:
self.command = command
if command == 'expr' and not inline:
self.source_errors.append('Codebraid command "{0}" is only allowed inline'.format(command))
if command == 'repl' and inline:
self.source_errors.append('Codebraid command "{0}" is not supported inline'.format(command))
self.execute = self._default_execute[command]
if command == 'expr' or (inline and command == 'nb'):
self.is_expr = True
else:
# For the paste command, or code with 'copy', this value can
# change later due to inheritance
self.is_expr = False
self.source_name = source_name
self.source_start_line_number = source_start_line_number
self.inline = inline
# Check for len(code_lines) > 1 for inline later
self._code = None
if isinstance(code, list):
code_lines = code
else:
code_lines = util.splitlines_lf(code) or ['']
if 'copy' not in custom_options and 'include' not in custom_options:
if inline and len(code_lines) > 1:
self.source_errors.append('Inline code cannot be longer than 1 line')
self.code_lines = code_lines
self.placeholder_code_lines = None
else:
if inline:
if len(code_lines) > 1 or code_lines[0] not in ('', ' ', '_'):
self.source_errors.append('Invalid placeholder code for copy or include (need space or underscore)')
elif len(code_lines) > 1 or code_lines[0].rstrip(' ') not in ('', '_'):
self.source_errors.append('Invalid placeholder code for copy or include (need empty, space, or underscore)')
# Copying or including code could result in more than one line of
# code in an inline context. That is only an issue if the code is
# actually displayed. This is checked later when code is
# included/copied.
self.placeholder_code_lines = code_lines
self.code_lines = None
self.options = Options(self, custom_options)
if 'include' in self.options and not self.source_errors:
# Copy over include only if no source errors -- otherwise it isn't
# used and 'show' may not exist
include = self.options['include']
if inline and 'code' in self.options['show'] and len(include.code_lines) > 1:
self.source_errors.append('Cannot include and then display multiple lines of code in an inline context')
else:
self.code_lines = include.code_lines
if command == 'paste':
if 'copy' not in custom_options:
self.source_errors.append('Command "paste" cannot be used without specifying a target via "copy"')
self.has_output = False
else:
self.has_output = True # Whether need output from copying
if 'copy' in self.options:
self.copy_chunks = []
if self.execute:
self.session_obj = None
self.session_index = None
self.session_output_index = None
else:
self.source_obj = None
self.source_index = None
self.stdout_lines = None
self.stderr_lines = None
self.repl_lines = None
self.rich_output = None
if self.is_expr:
self.expr_lines = None
self.markup_start_line_number = None
self.code_start_line_number = None
self.stdout_start_line_number = None
self.stderr_start_line_number = None
def __pre_init__(self):
'''
Create lists of errors and warnings. Subclasses may need to register
errors or warnings during preprocessing, before they are ready
for `super().__init__()`
'''
if not hasattr(self, 'source_errors'):
self.source_errors = []
self.runtime_source_error = False
self.source_warnings = []
commands = set(['code', 'expr', 'nb', 'paste', 'repl', 'run'])
_default_execute = collections.defaultdict(lambda: False, # Unknown command -> do not run
{k: True for k in ('expr', 'nb', 'repl', 'run')})
@property
def code(self):
code = self._code
if code is not None:
return code
code = '\n'.join(self.code_lines)
self._code = code
return code
def finalize_after_copy(self):
'''
Finalize options. This can be redefined by subclasses so that they
can modify themselves based on inherited 'lang' or 'show'.
'''
self.options.finalize_after_copy()
def copy_code(self):
'''
Copy code for 'copy' option. Code is copied before execution, which
is more flexible. Output (stdout, stderr, expr) must be copied
separately after execution.
This should only be invoked for a code chunk with no source errors,
with copy targets that all exist and have no source errors.
'''
copy_chunks = self.copy_chunks
if any(cc.is_expr for cc in copy_chunks):
if len(copy_chunks) > 1:
invalid_cc_names = ', '.join(cc.options['name'] for cc in copy_chunks if cc.is_expr)
self.source_errors.append('Cannot copy multiple code chunks when some are expressions: {0}'.format(invalid_cc_names))
if self.command in ('paste', 'code'):
# Some commands inherit expression status. The code command
# inherits so that subsequent copying doesn't result in
# incorrectly concatenated expressions. Since the code
# command never has output, this has no display side effects.
self.is_expr = True
self.expr_lines = None
elif not self.is_expr:
self.source_errors.append('A non-expression command cannot copy an expression code chunk')
elif self.is_expr:
self.source_errors.append('An expression command cannot copy a non-expression code chunk')
if self.source_errors:
return
# Finalization must come after any potential `.is_expr` modifications
self.finalize_after_copy()
if self.inline and 'code' in self.options['show'] and (len(copy_chunks) > 1 or len(copy_chunks[0].code_lines) > 1):
self.source_errors.append('Cannot copy and then display multiple lines of code in an inline context')
return
if len(copy_chunks) == 1:
self.code_lines = copy_chunks[0].code_lines
else:
self.code_lines = [line for x in copy_chunks for line in x.code_lines]
if self.command == 'paste':
if all(cc.command == 'code' for cc in copy_chunks):
# When possible, simplify the copying resolution process
self.has_output = True
self.code_start_line_number = copy_chunks[0].code_start_line_number
def copy_output(self):
'''
Copy output (stdout, stderr, expr) for 'copy' option. This must be
copied separately from code, after execution.
This should only be invoked for a code chunk with no source errors,
with copy targets that all exist and have no source errors.
'''
if self.command != 'paste':
raise TypeError
copy_chunks = self.copy_chunks
# The case of all code chunks being code commands has already been
# handled in `copy_code()`
if any(cc.command == 'paste' for cc in copy_chunks):
if len(copy_chunks) > 1:
if all(cc.command == 'paste' for cc in copy_chunks):
self.source_errors.append('Can only copy a single paste code chunk; cannot combine multiple paste chunks')
else:
self.source_errors.append('Cannot copy a mixture of paste and other code chunks')
elif any(cc.execute for cc in copy_chunks):
if not all(cc.execute for cc in copy_chunks):
self.source_errors.append('Copying output of multiple code chunks requires that all or none are executed')
elif len(copy_chunks) > 1:
if len(set(cc.session_obj for cc in copy_chunks)) > 1:
self.source_errors.append('Cannot copy output from code chunks in multiple sessions')
elif any(ccx.session_index != ccy.session_index-1 for ccx, ccy in zip(copy_chunks[:-1], copy_chunks[1:])):
if any(ccx is ccy for ccx, ccy in zip(copy_chunks[:-1], copy_chunks[1:])):
self.source_errors.append('Cannot copy output of a single code chunk multiple times')
elif any(ccx.session_index > ccy.session_index for ccx, ccy in zip(copy_chunks[:-1], copy_chunks[1:])):
self.source_errors.append('Cannot copy output of code chunks out of order')
else:
self.source_errors.append('Cannot copy output of code chunks when some chunks in a sequence are omitted')
else:
raise ValueError
if self.source_errors:
# If errors, discard what has already been copied
self.code_lines = None
return
if len(copy_chunks) == 1:
self.stdout_lines = copy_chunks[0].stdout_lines
self.stderr_lines = copy_chunks[0].stderr_lines
self.repl_lines = copy_chunks[0].repl_lines
self.rich_output = copy_chunks[0].rich_output
else:
self.stdout_lines = [line for x in copy_chunks if x.stdout_lines is not None for line in x.stdout_lines] or None
self.stderr_lines = [line for x in copy_chunks if x.stderr_lines is not None for line in x.stderr_lines] or None
self.repl_lines = [line for x in copy_chunks if x.repl_lines is not None for line in x.repl_lines] or None
self.rich_output = [ro for x in copy_chunks if x.rich_output is not None for ro in x.rich_output] or None
if self.is_expr:
# expr compatibility has already been checked in `copy_code()`
self.expr_lines = copy_chunks[0].expr_lines
self.stdout_start_line_number = copy_chunks[0].stdout_start_line_number
self.stderr_start_line_number = copy_chunks[0].stderr_start_line_number
self.has_output = True
def layout_output(self, output_type, output_format, lines=None):
'''
Layout all forms of output, except for rich output that is not
text/plain, by performing operations such as line rewrapping and tab
expansion. If `lines` is supplied, it is used. Otherwise, the
default lines (if any) are accessed for the specified output type.
'''
if lines is not None:
if not lines and output_format == 'verbatim_or_empty':
lines = ['\xa0']
pass
elif output_type == 'code':
lines = self.code_lines
elif output_type == 'repl':
lines = self.repl_lines
elif output_type in ('expr', 'stdout', 'stderr'):
lines = getattr(self, output_type+'_lines')
if lines is None and output_format == 'verbatim_or_empty':
lines = ['\xa0']
elif output_type == 'markup':
lines = self.as_markup_lines
elif output_type == 'example_markup':
lines = self.as_example_markup_lines
elif output_type == 'copied_markup':
if len(self.copy_chunks) == 1:
lines = self.copy_chunks[0].as_markup_lines
elif self.inline:
lines = []
for cc in self.copy_chunks:
lines.extend(cc.as_markup_lines)
else:
lines = []
last_cc = self.copy_chunks[-1]
for cc in self.copy_chunks:
lines.extend(cc.as_markup_lines)
if cc is not last_cc:
lines.append('')
else:
raise ValueError
rewrap_lines = self.options.get(output_type+'_rewrap_lines', False)
rewrap_width = self.options.get(output_type+'_rewrap_width', 78)
expand_tabs = self.options.get(output_type+'_expand_tabs', False)
tab_size = self.options.get(output_type+'_tab_size', 8)
# This should be rewritten once rewrapping design is finalized, since
# textwrap doesn't necessarily do everything as might be desired, and
# the use of textwrap could be optimized if it continues to be used.
# Nothing is done yet with tabs.
if rewrap_lines:
new_lines = []
for line in lines:
if not line:
new_lines.append(line)
continue
line_stripped = line.lstrip(' \t')
indent = line[:len(line)-len(line_stripped)]
new_lines.extend(textwrap.wrap(line_stripped, width=rewrap_width-len(indent), initial_indent=indent, subsequent_indent=indent))
lines = new_lines
if self.inline:
return ' '.join(lines)
return '\n'.join(lines)
class MetaConverter(type):
'''
Metaclass for converters. Allows converters to register themselves
by name and by compatible formats.
'''
def __init__(cls, name, bases, dct):
if not hasattr(cls, '_registry'):
# Base Converter class
cls._registry = {}
else:
# Subclass
cls._registry[name.lower()] = cls
if not all(attr is None or
(isinstance(attr, set) and attr and all(isinstance(x, str) for x in attr))
for attr in [cls.from_formats, cls.multi_source_formats, cls.to_formats]):
raise TypeError
if (cls.from_formats is not None and cls.multi_source_formats is not None and
cls.multi_source_formats - cls.from_formats):
raise ValueError
super().__init__(name, bases, dct)
class Converter(object):
'''
Base class for converters.
'''
__metaclass__ = MetaConverter
def __init__(self, *,
strings: Optional[Union[str, Sequence[str]]]=None,
paths: Optional[Union[str, Sequence[str], pathlib.Path, Sequence[pathlib.Path]]]=None,
no_cache: Optional[bool]=False,
cache_path: Optional[Union[str, pathlib.Path]]=None,
cross_source_sessions: bool=True,
expanduser: bool=False,
expandvars: bool=False,
from_format: Optional[str]=None,
session_defaults: Optional[Dict[str, Union[bool, str]]]=None,
synctex: bool=False):
if not all(isinstance(x, bool) for x in (cross_source_sessions, expanduser, expandvars)):
raise TypeError
self.cross_source_sessions = cross_source_sessions
self.expanduser = expanduser
self.expandvars = expandvars
self.session_defaults = session_defaults
if paths is not None and strings is None:
if isinstance(paths, str):
paths = [pathlib.Path(paths)]
elif isinstance(paths, pathlib.Path):
paths = [paths]
elif isinstance(paths, collections.abc.Sequence) and paths:
if all(isinstance(x, str) for x in paths):
paths = [pathlib.Path(x) for x in paths]
elif not all(isinstance(x, pathlib.Path) for x in paths):
raise TypeError
else:
raise TypeError
self.raw_source_paths = paths
# Names are based on paths BEFORE any expansion
source_names = [p.as_posix() for p in paths]
if not all(isinstance(x, bool) for x in (expanduser, expandvars)):
raise TypeError
if expandvars:
paths = [pathlib.Path(os.path.expandvars(str(p))) for p in paths]
if expanduser:
paths = [p.expanduser() for p in paths]
self.expanded_source_paths = collections.OrderedDict(zip(source_names, paths))
source_strings = []
for p in paths:
try:
source_string = p.read_text(encoding='utf_8_sig')
except Exception as e:
if not p.is_file():
raise ValueError('File "{0}" does not exist'.format(p))
raise ValueError('Failed to read file "{0}":\n {1}'.format(p, e))
if not source_string:
source_string = '\n'
source_strings.append(source_string)
self.sources = collections.OrderedDict(zip(source_names, source_strings))
if self.from_formats is not None:
if from_format is None:
try:
source_formats = set([self._file_extension_to_format_dict[p.suffix] for p in paths])
except KeyError:
raise TypeError('Cannot determine document format from file extensions, or unsupported format')
from_format = source_formats.pop()
if source_formats:
raise TypeError('Cannot determine unambiguous document format from file extensions')
if from_format not in self.from_formats:
raise ValueError('Unsupported document format {0}'.format(from_format))
self.from_format = from_format
elif strings is not None and paths is None:
if not all(x is False for x in (expanduser, expandvars)):
if not all(isinstance(x, bool) for x in (expanduser, expandvars)):
raise TypeError
raise ValueError
if isinstance(strings, str):
strings = [strings]
elif not (isinstance(strings, collections.abc.Sequence) and
strings and all(isinstance(x, str) for x in strings)):
raise TypeError
# Normalize newlines, as if read from file with universal newlines
source_strings = [io.StringIO(s, newline=None).read() or '\n' for s in strings]
if len(strings) == 1:
source_names = ['<string>']
else:
source_names = ['<string({0})>'.format(n+1) for n in range(len(strings))]
self.sources = collections.OrderedDict(zip(source_names, source_strings))
self.raw_source_paths = None
self.expanded_source_paths = None
if from_format is None:
raise TypeError('Document format is required')
if self.from_formats is not None and from_format not in self.from_formats:
raise ValueError('Unsupported document format {0}'.format(from_format))
self.from_format = from_format
else:
raise TypeError
if len(self.sources) > 1 and from_format not in self.multi_source_formats:
raise TypeError('Multiple sources are not supported for format {0}'.format(from_format))
if not isinstance(no_cache, bool):
raise TypeError
self.no_cache = no_cache
if cache_path is None:
cache_path = pathlib.Path('_codebraid')
elif isinstance(cache_path, str):
cache_path = pathlib.Path(cache_path)
elif not isinstance(cache_path, pathlib.Path):
raise TypeError
if expandvars:
cache_path = pathlib.Path(os.path.expandvars(cache_path.as_posix()))
if expanduser:
cache_path = cache_path.expanduser()
self.cache_path = cache_path
if sys.version_info < (3, 6):
cache_key_hasher = hashlib.sha512()
else:
cache_key_hasher = hashlib.blake2b()
if self.expanded_source_paths is None:
cache_source_paths = None
cache_key_hasher.update(b'<string>')
else:
cache_source_paths = []
for p in self.expanded_source_paths.values():
try:
p_final = pathlib.Path('~') / p.absolute().relative_to(pathlib.Path.home())
except ValueError:
p_final = p.absolute()
cache_source_paths.append(p_final)
cache_key_hasher.update(p_final.as_posix().encode('utf8'))
cache_key_hasher.update(cache_key_hasher.digest())
self.cache_source_paths = cache_source_paths
self.cache_key = cache_key_hasher.hexdigest()[:16]
self._io_map = False
if not isinstance(synctex, bool):
raise TypeError
self.synctex = synctex
if synctex:
self._io_map = True
self.code_chunks = []
self.code_options = {}
from_formats = None
to_formats = None
multi_source_formats = None
_file_extension_to_format_dict = {'.md': 'markdown', '.markdown': 'markdown',
'.tex': 'latex', '.ltx': 'latex'}
def code_braid(self):
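        # Full pipeline: pull the code chunks out of the sources, execute them, then fold the results back into the document.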
self._extract_code_chunks()
self._process_code_chunks()
self._postprocess_code_chunks()
def _extract_code_chunks(self):
raise NotImplementedError
def _process_code_chunks(self):
cp = codeprocessors.CodeProcessor(code_chunks=self.code_chunks,
code_options=self.code_options,
cross_source_sessions=self.cross_source_sessions,
no_cache=self.no_cache,
cache_path=self.cache_path,
cache_key=self.cache_key,
cache_source_paths=self.cache_source_paths,
session_defaults=self.session_defaults)
cp.process()
def _postprocess_code_chunks(self):
raise NotImplementedError
def convert(self, *, to_format):
raise NotImplementedError
def _save_synctex_data(self, data):
zip_path = self.cache_path / 'synctex.zip'
with zipfile.ZipFile(str(zip_path), 'w', compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr('synctex.json', json.dumps(data))
|
test/distribution/test_label_smoothing.py
|
Xiaoxiong-Liu/gluon-ts
| 2,648 |
68943
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import mxnet as mx
import numpy as np
import pytest
from gluonts.mx.distribution import Binned, BinnedOutput
COMMON_KWARGS = {
"bin_log_probs": mx.nd.array([[0.1, 0.2, 0.1, 0.05, 0.2, 0.1, 0.25]])
.log()
.repeat(axis=0, repeats=2),
"bin_centers": mx.nd.array([[-5, -3, -1.2, -0.5, 0, 0.1, 0.2]]).repeat(
axis=0, repeats=2
),
}
@pytest.fixture
def labels():
return mx.random.uniform(low=-6, high=1, shape=(2,)) # T, N
@pytest.mark.parametrize(
"K,alpha", itertools.product([1000, 10000, 100000], [0.001, 0.01, 0.1])
)
def test_smooth_mask_adds_to_one(K, alpha):
bin_log_probs = mx.nd.log_softmax(mx.nd.ones(K))
bin_centers = mx.nd.arange(K)
dist = Binned(
bin_log_probs=bin_log_probs,
bin_centers=bin_centers,
label_smoothing=0.2,
)
labels = mx.random.uniform(low=0, high=K, shape=(12,)).expand_dims(-1)
mask = dist._get_mask(labels)
smooth_mask = dist._smooth_mask(mx.nd, mask, alpha=mx.nd.array([alpha]))
# check smooth mask adds to one
assert np.allclose(
smooth_mask.asnumpy().sum(axis=-1), np.ones(12), atol=1e-6
)
def test_get_smooth_mask_correct(labels):
dist = Binned(**COMMON_KWARGS, label_smoothing=0.2)
binned = Binned(**COMMON_KWARGS)
labels = labels.expand_dims(-1)
mask = dist._get_mask(labels)
assert np.allclose(mask.asnumpy(), binned._get_mask(labels).asnumpy())
smooth_mask = dist._smooth_mask(mx.nd, mask, alpha=mx.nd.array([0.2]))
# check smooth mask adds to one
assert np.allclose(smooth_mask.asnumpy().sum(axis=-1), np.ones(2))
# check smooth mask peaks same
assert np.allclose(
np.argmax(smooth_mask.asnumpy(), axis=-1),
np.argmax(mask.asnumpy(), axis=-1),
)
# check smooth mask mins correct
assert np.allclose(
smooth_mask.asnumpy().min(axis=-1), np.ones(2) * 0.2 / 7 # alpha / K
)
def test_loss_correct(labels):
smooth_alpha = Binned(**COMMON_KWARGS, label_smoothing=0.4)
smooth_noalpha = Binned(**COMMON_KWARGS, label_smoothing=0.0)
binned = Binned(**COMMON_KWARGS)
assert np.allclose(
binned.loss(labels).asnumpy(), smooth_noalpha.loss(labels).asnumpy()
)
assert not np.allclose(
binned.loss(labels).asnumpy(), smooth_alpha.loss(labels).asnumpy()
)
@pytest.mark.parametrize("hybridize", [True, False])
def test_output_sets_alpha(labels, hybridize):
binned_output = BinnedOutput(
bin_centers=COMMON_KWARGS["bin_centers"][0], label_smoothing=0.35
)
arg_proj = binned_output.get_args_proj()
if hybridize:
arg_proj.hybridize()
arg_proj.initialize()
assert (
binned_output.distribution(
arg_proj(mx.nd.random.uniform(2, 10))
).label_smoothing
== 0.35
)
|
lib/lambda.d/tunasync-handler/index.py
|
bovi/opentuna
| 136 |
68944
|
<gh_stars>100-1000
import json
import urllib3
import os
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
http = urllib3.PoolManager()
tunasync_manager_url = os.environ['TUNASYNC_MANAGER_URL']
def handler(event, context):
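    """Ask the tunasync manager to start a sync of the mirror named in the event's 'repo' field."""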
logger.info(event)
requestUrl = tunasync_manager_url + '/cmd'
requestBody = {}
requestBody['cmd'] = 'start'
requestBody['worker_id'] = 'tunasync-worker'
requestBody['mirror_id'] = event['repo']
body = json.dumps(requestBody)
logger.info("Request body:\n" + body)
headers = {
'content-type' : 'application/json',
'content-length' : str(len(body))
}
try:
response = http.request('POST',
requestUrl,
body=body,
headers=headers,
retries=False)
logger.info("Status code: " + str(response.status))
except Exception as e:
logger.error("Unable to send request to Tunasync manager")
logger.exception(e)
|
macadam/tc/t04_charcnn.py
|
yongzhuo/Macadam
| 290 |
68967
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/7 21:06
# @author : Mo
# @function: CharCNN [Character-level Convolutional Networks for Text Classification](https://arxiv.org/pdf/1509.01626.pdf)
from macadam.base.graph import graph
from macadam import K, L, M, O
class CharCNNGraph(graph):
def __init__(self, hyper_parameters):
        self.char_cnn_layers = hyper_parameters.get("graph", {}).get(
            'char_cnn_layers',
            [[256, 7, 3], [256, 7, 3], [256, 3, -1], [256, 3, -1], [256, 3, -1], [256, 3, 3]])
self.full_connect_layers = hyper_parameters.get("graph", {}).get('full_connect_layers', [1024, 1024], )
self.threshold = hyper_parameters.get("graph", {}).get('threshold', 1e-6)
super().__init__(hyper_parameters)
def build_model(self, inputs, outputs):
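        # Char-CNN: stacked conv blocks with thresholded ReLU and optional max-pooling, then fully connected layers and the label layer.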
x = None
# cnn + pool
for char_cnn_size in self.char_cnn_layers:
x = L.Convolution1D(filters=char_cnn_size[0],
kernel_size=char_cnn_size[1], )(outputs)
x = L.ThresholdedReLU(self.threshold)(x)
if char_cnn_size[2] != -1:
x = L.MaxPooling1D(pool_size=char_cnn_size[2], strides=1)(x)
x = L.Flatten()(x)
# full-connect 2
for full in self.full_connect_layers:
x = L.Dense(units=full, )(x)
x = L.ThresholdedReLU(self.threshold)(x)
x = L.Dropout(self.dropout)(x)
# dense label
self.outputs = L.Dense(units=self.label, activation=self.activate_end)(x)
self.model = M.Model(inputs=inputs, outputs=self.outputs)
self.model.summary(132)
|
test/UnitTest/data.py
|
jason-fox/fogflow
| 102 |
68992
|
subscription_data=\
{
"description": "A subscription to get info about Room1",
"subject": {
"entities": [
{
"id": "Room1",
"type": "Room",
}
],
"condition": {
"attrs": [
"p3"
]
}
},
"notification": {
"http": {
"url": "http://192.168.100.162:8888"
},
"attrs": [
"p1",
"p2",
"p3"
]
},
"expires": "2040-01-01T14:00:00.00Z",
"throttling": 5
}
# Data to test the following code in broker.thinBroker.go:946
'''
subReqv2 := SubscriptionRequest{}
err := r.DecodeJsonPayload(&subReqv2)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
'''
subscriptionWrongPaylaod=\
{
"description": "A subscription to get info about Room1",
"subject": {
"entities": [
{
"id": "Room1",
"type": "Room",
"ispattern":"false"
}
],
"condition": {
"attrs": [
"p3"
]
}
},
"notification": {
"http": {
"url": "http://192.168.100.162:8888"
},
"attrs": [
"p1",
"p2",
"p3"
]
},
"expires": "2040-01-01T14:00:00.00Z",
"throttling": 5
}
v1SubData=\
{
"entities": [
{
"id": "Room1",
"type": "Room",
}
],
"reference": "http://192.168.100.162:8668/ngsi10/updateContext"
}
updateDataWithupdateaction=\
{
"contextElements": [
{
"entityId": {
"id": "Room1",
"type": "Room"
},
"attributes": [
{
"name": "p1",
"type": "float",
"value": 60
},
{
"name": "p3",
"type": "float",
"value": 69
},
{
"name": "p2",
"type": "float",
"value": 32
}
],
"domainMetadata": [
{
"name": "location",
"type": "point",
"value": {
"latitude": 49.406393,
"longitude": 8.684208
}
}
]
}
],
"updateAction": "UPDATE"
}
createDataWithupdateaction=\
{
"contextElements": [
{
"entityId": {
"id": "Room1",
"type": "Room"
},
"attributes": [
{
"name": "p1",
"type": "float",
"value": 90
},
{
"name": "p3",
"type": "float",
"value": 70
},
{
"name": "p2",
"type": "float",
"value": 12
}
],
"domainMetadata": [
{
"name": "location",
"type": "point",
"value": {
"latitude": 49.406393,
"longitude": 8.684208
}
}
]
}
],
"updateAction": "CRETAE"
}
deleteDataWithupdateaction=\
{
"contextElements": [
{
"entityId": {
"id": "Room1",
"type": "Room"
},
"attributes": [
{
"name": "p1",
"type": "float",
"value": 12
},
{
"name": "p3",
"type": "float",
"value": 13
},
{
"name": "p2",
"type": "float",
"value": 14
}
],
"domainMetadata": [
{
"name": "location",
"type": "point",
"value": {
"latitude": 49.406393,
"longitude": 8.684208
}
}
]
}
],
"updateAction": "DELETE"
}
|
tests/test_userfields_with_plain_serializer.py
|
simiotics/djangorestframework-queryfields
| 195 |
69017
|
from rest_framework.test import APIClient
from tests.app.serializers import QuoteSerializer
from tests.utils import decode_content
def test_list_response_unfiltered():
response = APIClient().get('/quotes/')
expected = [
{
'character': 'Customer',
'line': "It's certainly uncontaminated by cheese",
'sketch': 'CHEESE SHOP',
},
{
'character': 'The Black Knight',
'line': "It's just a flesh wound",
'sketch': 'HOLY GRAIL',
},
]
content = decode_content(response)
assert content == expected
def test_detail_response_unfiltered():
response = APIClient().get('/quotes/parrot/')
expected = {
'character': 'Shopkeeper',
'line': "Well, he's...he's, ah...probably pining for the fjords",
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_list_response_filtered_includes():
response = APIClient().get('/quotes/?fields=character,line')
expected = [
{
'character': 'Customer',
'line': "It's certainly uncontaminated by cheese",
},
{
'character': 'The Black Knight',
'line': "It's just a flesh wound",
},
]
content = decode_content(response)
assert content == expected
def test_detail_response_filtered_includes():
response = APIClient().get('/quotes/parrot/?fields=character,line')
expected = {
'character': 'Shopkeeper',
'line': "Well, he's...he's, ah...probably pining for the fjords",
}
content = decode_content(response)
assert content == expected
def test_list_response_filtered_excludes():
response = APIClient().get('/quotes/?fields!=character')
expected = [
{
'line': "It's certainly uncontaminated by cheese",
'sketch': 'CHEESE SHOP',
},
{
'line': "It's just a flesh wound",
'sketch': 'HOLY GRAIL',
},
]
content = decode_content(response)
assert content == expected
def test_detail_response_filtered_excludes():
response = APIClient().get('/quotes/parrot/?fields!=character')
expected = {
'line': "Well, he's...he's, ah...probably pining for the fjords",
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_some_bogus_fields():
response = APIClient().get('/quotes/parrot/?fields=sketch,spam,eggs')
expected = {
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_only_bogus_fields():
response = APIClient().get('/quotes/parrot/?fields=blah')
expected = {}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_multiple_fields_in_separate_query_args():
response = APIClient().get('/quotes/parrot/?fields=character&fields=sketch')
expected = {
'character': 'Shopkeeper',
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_response_filtered_with_include_and_exclude():
response = APIClient().get('/quotes/parrot/?fields=character&fields=sketch&fields!=line')
expected = {
'character': 'Shopkeeper',
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_exclude_wins_for_ambiguous_filtering():
response = APIClient().get('/quotes/parrot/?fields=line,sketch&fields!=line')
expected = {
'sketch': 'PET SHOP',
}
content = decode_content(response)
assert content == expected
def test_post_ignores_queryfields():
# Ensures that fields aren't dropped for other types of request
response = APIClient().post('/quotes/?fields=line,sketch')
expected = {
'request_method': 'POST',
'serializer_instance_fields': ['character', 'line', 'sketch'],
'request_query': {'fields': 'line,sketch'},
}
content = decode_content(response)
assert content == expected
def test_instantiate_without_request_context():
# just test that it doesn't crash or b0rk the serializer to omit request context
data = {
'character': 'the character',
'line': 'the line',
'sketch': 'the sketch',
}
serializer = QuoteSerializer(data=data)
assert serializer.is_valid()
assert sorted(serializer.get_fields()) == ['character', 'line', 'sketch']
|
im2mesh/data/core.py
|
kitakou0313/differentiable_volumetric_rendering
| 595 |
69018
|
<reponame>kitakou0313/differentiable_volumetric_rendering<filename>im2mesh/data/core.py
import os
import logging
from torch.utils import data
import numpy as np
import yaml
logger = logging.getLogger(__name__)
# Fields
class Field(object):
''' Data fields class.
'''
def load(self, data_path, idx, category):
''' Loads a data point.
Args:
data_path (str): path to data file
idx (int): index of data point
category (int): index of category
'''
raise NotImplementedError
def check_complete(self, files):
''' Checks if set is complete.
Args:
files: files
'''
raise NotImplementedError
class Shapes3dDataset(data.Dataset):
''' 3D Shapes dataset class.
'''
def __init__(self, dataset_folder, fields, split=None,
categories=None, no_except=True, transform=None,
shared_dict={}, n_views=24, cache_fields=False,
split_model_for_images=False):
''' Initialization of the the 3D shape dataset.
Args:
dataset_folder (str): dataset folder
fields (dict): dictionary of fields
split (str): which split is used
categories (list): list of categories to use
no_except (bool): no exception
transform (callable): transformation applied to data points
shared_dict (dict): shared dictionary (used for field caching)
n_views (int): number of views (only relevant when using field
caching)
cache_fields(bool): whether to cache fields; this option can be
useful for small overfitting experiments
split_model_for_images (bool): whether to split a model by its
views (can be relevant for small overfitting experiments to
perform validation on all views)
'''
# Attributes
self.dataset_folder = dataset_folder
self.fields = fields
self.no_except = no_except
self.transform = transform
self.cache_fields = cache_fields
self.n_views = n_views
self.cached_fields = shared_dict
self.split_model_for_images = split_model_for_images
if split_model_for_images:
assert(n_views > 0)
print('You are splitting the models by images. Make sure that you entered the correct number of views.')
# If categories is None, use all subfolders
if categories is None:
categories = os.listdir(dataset_folder)
categories = [c for c in categories
if os.path.isdir(os.path.join(dataset_folder, c))]
categories.sort()
# Read metadata file
metadata_file = os.path.join(dataset_folder, 'metadata.yaml')
if os.path.exists(metadata_file):
with open(metadata_file, 'r') as f:
                self.metadata = yaml.safe_load(f)
else:
self.metadata = {
c: {'id': c, 'name': 'n/a'} for c in categories
}
# Set index
for c_idx, c in enumerate(categories):
self.metadata[c]['idx'] = c_idx
# Get all models
self.models = []
for c_idx, c in enumerate(categories):
subpath = os.path.join(dataset_folder, c)
if not os.path.isdir(subpath):
logger.warning('Category %s does not exist in dataset.' % c)
split_file = os.path.join(subpath, str(split) + '.lst')
if not os.path.exists(split_file):
models_c = [f for f in os.listdir(
subpath) if os.path.isdir(os.path.join(subpath, f))]
else:
with open(split_file, 'r') as f:
models_c = f.read().split('\n')
models_c = list(filter(lambda x: len(x) > 0, models_c))
if split_model_for_images:
for m in models_c:
for i in range(n_views):
self.models += [
{'category': c, 'model': m,
'category_id': c_idx, 'image_id': i}
]
else:
self.models += [
{'category': c, 'model': m, 'category_id': c_idx}
for m in models_c
]
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
category = self.models[idx]['category']
model = self.models[idx]['model']
c_idx = self.metadata[category]['idx']
model_path = os.path.join(self.dataset_folder, category, model)
data = {}
for field_name, field in self.fields.items():
try:
if self.cache_fields:
if self.split_model_for_images:
idx_img = self.models[idx]['image_id']
else:
idx_img = np.random.randint(0, self.n_views)
k = '%s_%s_%d' % (model_path, field_name, idx_img)
if k in self.cached_fields:
field_data = self.cached_fields[k]
else:
field_data = field.load(model_path, idx, c_idx,
input_idx_img=idx_img)
self.cached_fields[k] = field_data
else:
if self.split_model_for_images:
idx_img = self.models[idx]['image_id']
field_data = field.load(
model_path, idx, c_idx, idx_img)
else:
field_data = field.load(model_path, idx, c_idx)
except Exception:
if self.no_except:
                    logger.warning(
'Error occurred when loading field %s of model %s (%s)'
% (field_name, model, category)
)
return None
else:
raise
if isinstance(field_data, dict):
for k, v in field_data.items():
if k is None:
data[field_name] = v
else:
data['%s.%s' % (field_name, k)] = v
else:
data[field_name] = field_data
if self.transform is not None:
data = self.transform(data)
return data
def get_model_dict(self, idx):
return self.models[idx]
def test_model_complete(self, category, model):
''' Tests if model is complete.
Args:
model (str): modelname
'''
model_path = os.path.join(self.dataset_folder, category, model)
files = os.listdir(model_path)
for field_name, field in self.fields.items():
if not field.check_complete(files):
                logger.warning('Field "%s" is incomplete: %s'
                               % (field_name, model_path))
return False
return True
def collate_remove_none(batch):
''' Collater that puts each data field into a tensor with outer dimension
batch size.
Args:
batch: batch
'''
batch = list(filter(lambda x: x is not None, batch))
return data.dataloader.default_collate(batch)
def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
random_data = os.urandom(4)
base_seed = int.from_bytes(random_data, byteorder="big")
np.random.seed(base_seed + worker_id)
|
plugins/idaskins/objectinspector.py
|
fengjixuchui/IDASkins
| 934 |
69019
|
from __future__ import absolute_import, division, print_function
import os
from idaskins import UI_DIR
from PyQt5 import uic
from PyQt5.Qt import qApp
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor, QFont, QKeySequence
from PyQt5.QtWidgets import QShortcut, QWidget
Ui_ObjectInspector, ObjectInspectorBase = uic.loadUiType(
os.path.join(UI_DIR, 'ObjectInspector.ui')
)
class ObjectInspector(ObjectInspectorBase):
"""
Rudimentary Qt object inspector.
Allows for easier finding of object names and classes
for usage in QSS stylesheets.
"""
def __init__(self, *args, **kwargs):
super(ObjectInspector, self).__init__(*args, **kwargs)
self._selected_widget = None
self._ui = Ui_ObjectInspector()
self._ui.setupUi(self)
# Make everything monospace.
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
self._ui.teInspectionResults.setFont(font)
# Register signals.
self._update_key = QShortcut(QKeySequence(Qt.Key_F7), self)
self._ui.btnSelectParent.released.connect(self.select_parent)
self._update_key.activated.connect(self.update_inspection)
def update_inspection(self):
widget = qApp.widgetAt(QCursor.pos())
self.update_selected_widget(widget)
def select_parent(self):
if self._selected_widget:
parent = self._selected_widget.parent()
if parent and parent.inherits('QWidget'):
self.update_selected_widget(parent)
def update_selected_widget(self, widget):
if self._selected_widget:
self._selected_widget.destroyed.disconnect(
self.on_selected_widget_destroyed
)
self._selected_widget = widget
if widget:
self._ui.btnSelectParent.setEnabled(widget.parent() is not None)
self._ui.teInspectionResults.setText((
"Type: {}\n"
"Name: {}\n"
"Number of children: {}\n"
"QSS: {}"
).format(
widget.metaObject().className(),
widget.objectName() or '<none>',
len(widget.children()),
widget.styleSheet() or '<none>',
))
self._selected_widget.destroyed.connect(
self.on_selected_widget_destroyed
)
else:
self._ui.teInspectionResults.setText('<no object under cursor>')
def on_selected_widget_destroyed(self, obj):
self._selected_widget = None
|
tests/test_app.py
|
eliksir/Flask-RQ2
| 160 |
69048
|
<reponame>eliksir/Flask-RQ2<filename>tests/test_app.py
# -*- coding: utf-8 -*-
from redis import StrictRedis
from rq.queue import Queue
from rq.utils import import_attribute
from rq.worker import Worker
from rq_scheduler import Scheduler
import pytest
from flask_rq2 import RQ
def exception_handler(*args, **kwargs):
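    """No-op handler used only to verify that exception handlers get registered."""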
pass
def test_init_app(app, config):
rq = RQ()
assert 'rq2' not in getattr(app, 'extensions', {})
assert getattr(rq, 'module', None) is None
rq.init_app(app)
assert rq.redis_url == config.RQ_REDIS_URL
assert isinstance(rq.connection, StrictRedis)
assert 'rq2' in getattr(app, 'extensions', {})
def test_rq_outside_flask():
rq = RQ()
assert pytest.raises(RuntimeError, lambda: rq.connection)
def test_config_redis(config, rq):
assert rq.redis_url == config.RQ_REDIS_URL
assert isinstance(rq.connection, StrictRedis)
def test_config_queues(config, rq):
assert rq.queues == config.RQ_QUEUES
def test_config_async(app, config, rq):
assert rq._is_async == config.RQ_ASYNC
def test_config_async_override(app, config, rq):
rq2 = RQ(app, is_async=not config.RQ_ASYNC)
assert rq2._is_async != config.RQ_ASYNC
def test_config_default_timeout(app, config):
rq3 = RQ(app, default_timeout=911)
assert rq3.default_timeout != Queue.DEFAULT_TIMEOUT
assert rq3.default_timeout == 911
def test_config_scheduler_interval(config, rq):
    assert rq.scheduler_interval == config.RQ_SCHEDULER_INTERVAL
def test_config_scheduler_queue(config, rq):
    assert rq.scheduler_queue == config.RQ_SCHEDULER_QUEUE
def test_exception_handler(rq):
rq.exception_handler(exception_handler)
assert 'test_app.exception_handler' in rq._exception_handlers
def test_get_worker(rq):
worker = rq.get_worker()
assert isinstance(worker, Worker)
assert [queue.name for queue in worker.queues] == rq.queues
def test_get_worker_with_queues(rq):
worker = rq.get_worker('some-queue')
assert isinstance(worker, Worker)
queue_names = [queue.name for queue in worker.queues]
assert queue_names != rq.queues
assert 'some-queue' in queue_names
def test_get_worker_with_exception_handlers(rq):
rq.exception_handler(exception_handler)
worker = rq.get_worker()
assert exception_handler in worker._exc_handlers
def test_get_queue(rq):
assert rq._queue_instances == {}
queue = rq.get_queue()
assert rq._queue_instances != {}
assert queue in rq._queue_instances.values()
assert isinstance(queue, Queue)
assert isinstance(queue, import_attribute(rq.queue_class))
assert queue.name == rq.default_queue
assert queue._default_timeout == rq.default_timeout
assert queue._is_async == rq._is_async
assert queue.connection == rq.connection
def test_get_queue_with_name(rq):
queue = rq.get_queue('some-queue')
assert queue.name == 'some-queue'
assert queue.name in rq._queue_instances
name2 = 'some-other-queue'
assert name2 not in rq._queue_instances
queue2 = rq.get_queue(name2)
assert queue2.name == name2
assert name2 in rq._queue_instances
def test_get_scheduler(rq):
scheduler = rq.get_scheduler()
assert isinstance(scheduler, Scheduler)
assert isinstance(scheduler, import_attribute(rq.scheduler_class))
assert scheduler.queue_name == rq.scheduler_queue
assert scheduler._interval == rq.scheduler_interval
assert scheduler.connection == rq.connection
def test_get_scheduler_interval(rq):
scheduler = rq.get_scheduler(interval=23)
assert scheduler._interval != rq.scheduler_interval
assert scheduler._interval == 23
def test_get_scheduler_queue(rq):
scheduler = rq.get_scheduler(queue='other')
assert scheduler.queue_name == 'other'
def test_get_scheduler_importerror(rq):
# in case scheduler can't be imported
rq.scheduler_class = 'non.existing.Scheduler'
with pytest.raises(ImportError):
rq.get_scheduler()
|
tests/test_decoders.py
|
sandbornm/SteganoGAN
| 179 |
69049
|
<filename>tests/test_decoders.py
# -*- coding: utf-8 -*-
import copy
from unittest import TestCase
from unittest.mock import Mock, call, patch
import torch
from steganogan import decoders
from tests.utils import assert_called_with_tensors
class TestBasicDecoder(TestCase):
class TestDecoder(decoders.BasicDecoder):
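        # Stub subclass that skips BasicDecoder.__init__ so tests can attach mock layers directly.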
def __init__(self):
pass
def setUp(self):
self.test_decoder = self.TestDecoder()
@patch('steganogan.decoders.nn.Conv2d', autospec=True)
    def test__conv2d(self, conv2d_mock):
"""Conv2d must be called with given args and kernel_size=3 and padding=1"""
# run
result = self.test_decoder._conv2d(2, 4)
# asserts
assert result == conv2d_mock.return_value
conv2d_mock.assert_called_once_with(
in_channels=2,
out_channels=4,
kernel_size=3,
padding=1
)
@patch('steganogan.decoders.nn.Sequential')
@patch('steganogan.decoders.nn.Conv2d')
@patch('steganogan.decoders.nn.BatchNorm2d')
def test___init__(self, batchnorm_mock, conv2d_mock, sequential_mock):
"""Test the init params and that the layers are created correctly"""
# run
decoders.BasicDecoder(2, 5)
# assert
expected_batch_calls = [call(5), call(5), call(5)]
assert batchnorm_mock.call_args_list == expected_batch_calls
expected_conv_calls = [
call(in_channels=3, out_channels=5, kernel_size=3, padding=1),
call(in_channels=5, out_channels=5, kernel_size=3, padding=1),
call(in_channels=5, out_channels=5, kernel_size=3, padding=1),
call(in_channels=5, out_channels=2, kernel_size=3, padding=1),
]
assert conv2d_mock.call_args_list == expected_conv_calls
def test_upgrade_legacy_without_version(self):
"""Upgrade legacy must create self._models from conv1, conv2, conv3, conv4"""
# setup
self.test_decoder.layers = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
# run
self.test_decoder.upgrade_legacy()
# assert
assert self.test_decoder._models == [self.test_decoder.layers]
assert self.test_decoder.version == '1'
@patch('steganogan.decoders.nn.Sequential', autospec=True)
def test_upgrade_legacy_with_version_1(self, sequential_mock):
"""The object must be the same and not changed by the method"""
# setup
decoder = decoders.BasicDecoder(1, 1)
expected = copy.deepcopy(decoder)
# run
decoder.upgrade_legacy()
# assert
assert decoder.__dict__ == expected.__dict__
def test_forward_1_layer(self):
"""If there is only one layer it must be called with image as the only argument."""
# setup
layer1 = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
self.test_decoder._models = [layer1]
# run
image = torch.Tensor([[1, 2], [3, 4]])
result = self.test_decoder.forward(image)
# assert
assert (result == torch.Tensor([[5, 6], [7, 8]])).all()
call_1 = call(torch.Tensor([[1, 2], [3, 4]]))
assert_called_with_tensors(layer1, [call_1])
def test_forward_more_than_2_layers(self):
"""If there are more than 2 layers, they must be called adding data to each result"""
# setup
layer1 = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
layer2 = Mock(return_value=torch.Tensor([[9, 10], [11, 12]]))
layer3 = Mock(return_value=torch.Tensor([[13, 14], [15, 16]]))
self.test_decoder._models = [layer1, layer2, layer3]
# run
image = torch.Tensor([[1, 2], [3, 4]])
result = self.test_decoder.forward(image)
# asserts
call_layer_1 = call(torch.Tensor([[1, 2], [3, 4]]))
call_layer_2 = call(torch.Tensor([[5, 6], [7, 8]]))
call_layer_3 = call(torch.Tensor([[5, 6, 9, 10], [7, 8, 11, 12]]))
assert_called_with_tensors(layer1, [call_layer_1])
assert_called_with_tensors(layer2, [call_layer_2])
assert_called_with_tensors(layer3, [call_layer_3])
assert (result == torch.Tensor([[13, 14], [15, 16]])).all()
class TestDenseDecoder(TestCase):
class TestDecoder(decoders.DenseDecoder):
def __init__(self):
pass
def test_upgrade_legacy_without_version(self):
"""Upgrade legacy must create self._models from conv1, conv2, conv3, conv4"""
# setup
test_decoder = self.TestDecoder() # instance an empty decoder
test_decoder.conv1 = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
test_decoder.conv2 = Mock(return_value=torch.Tensor([[9, 10], [11, 12]]))
test_decoder.conv3 = Mock(return_value=torch.Tensor([[13, 14], [15, 16]]))
test_decoder.conv4 = Mock(return_value=torch.Tensor([[17, 18], [19, 20]]))
# run
test_decoder.upgrade_legacy()
# assert
expected_models = [
test_decoder.conv1,
test_decoder.conv2,
test_decoder.conv3,
test_decoder.conv4,
]
assert test_decoder._models == expected_models
assert test_decoder.version == '1'
@patch('steganogan.decoders.nn.Sequential', autospec=True)
def test_upgrade_legacy_with_version_1(self, sequential_mock):
"""The object must be the same and not changed by the method"""
# setup
decoder = decoders.DenseDecoder(1, 1)
expected = copy.deepcopy(decoder)
# run
decoder.upgrade_legacy()
# assert
assert decoder.__dict__ == expected.__dict__
@patch('steganogan.decoders.nn.Sequential')
@patch('steganogan.decoders.nn.Conv2d')
@patch('steganogan.decoders.nn.BatchNorm2d')
def test___init__(self, batchnorm_mock, conv2d_mock, sequential_mock):
"""Test the init params and that the layers are created correctly"""
# run
decoders.DenseDecoder(2, 5)
# assert
expected_batch_calls = [call(5), call(5), call(5)]
assert batchnorm_mock.call_args_list == expected_batch_calls
expected_conv_calls = [
call(in_channels=3, out_channels=5, kernel_size=3, padding=1),
call(in_channels=5, out_channels=5, kernel_size=3, padding=1),
call(in_channels=10, out_channels=5, kernel_size=3, padding=1),
call(in_channels=15, out_channels=2, kernel_size=3, padding=1),
]
assert conv2d_mock.call_args_list == expected_conv_calls
|
test_frame/test_xiaoxianrou/download_xiaoxianrou_pictures.py
|
DJMIN/funboost
| 120 |
69070
|
from funboost import boost
import re
import requests
from parsel import Selector
from pathlib import Path
"""
Download all celebrity pictures from http://www.5442tu.com/mingxing/list_2_1.html
"""
@boost('xiaoxianrou_list_page', qps=0.05)
def cralw_list_page(page_index):
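    """Crawl one listing page and push every detail (photo gallery) page it links to onto the detail queue."""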
url = f'http://www.5442tu.com/mingxing/list_2_{page_index}.html'
resp = requests.get(url)
sel = Selector(resp.content.decode('gbk'))
detail_sels = sel.xpath('//div[@class="imgList2"]/ul/li/a[1]')
for detail_sel in detail_sels:
crawl_detail_page.push(detail_sel.xpath('./@href').extract_first(), detail_sel.xpath('./@title').extract_first(), 1, is_first_picture=True)
@boost('xiaoxianrou_detail_page', qps=2, do_task_filtering=True)
def crawl_detail_page(url, title, picture_index, is_first_picture=False,):
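    """Download the picture on one gallery page; on the first page, also enqueue the remaining pages of the gallery."""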
resp = requests.get(url)
sel = Selector(resp.content.decode('gbk'))
    if is_first_picture:  # the gallery on the detail page is also paginated, so the remaining pages need to be queued
total_page_str = sel.xpath('//div[@class="page"]/ul/li/a/text()').extract_first()
total_page = int(re.search(r'共(\d+)页', total_page_str).group(1))
for p in range(2, total_page + 1):
next_pic_page_url = url[:-5] + f'_{p}.html'
crawl_detail_page.push(next_pic_page_url, title, picture_index=p)
pic_url = sel.xpath('//p[@align="center"]/a/img/@src').extract_first()
resp_pic = requests.get(pic_url)
Path(f'./pictures/{title}/').mkdir(parents=True, exist_ok=True)
    (Path(f'./pictures/{title}/') / Path(f'./{title}_{picture_index}.jpg')).write_bytes(resp_pic.content)  # save the picture
    print(f'''Picture saved successfully:\n {(Path(f'./pictures/{title}/') / Path(f'./{title}_{picture_index}.jpg')).absolute()} ''')
if __name__ == '__main__':
# cralw_list_page(1)
# crawl_detail_page('https://www.5442tu.com/mingxing/20181105/78924.html','范冰冰弟弟范丞丞阳光帅气明星壁纸图片高清',1,True)
cralw_list_page.clear()
crawl_detail_page.clear()
for p in range(1, 10):
cralw_list_page.push(p)
cralw_list_page.consume()
crawl_detail_page.consume()
|
blaze/expr/tests/test_slicing.py
|
quantopian-enterprise/blaze
| 2,106 |
69117
|
from blaze.expr import symbol
import numpy as np
from datashape import dshape, isscalar
def test_array_dshape():
x = symbol('x', '5 * 3 * float32')
assert x.shape == (5, 3)
assert x.schema == dshape('float32')
assert len(x) == 5
assert x.ndim == 2
def test_element():
x = symbol('x', '5 * 3 * float32')
assert isscalar(x[1, 2].dshape)
assert x[1, 2].dshape == dshape('float32')
assert str(x[1, 2]) == 'x[1, 2]'
x = symbol('x', '5 * float32')
assert isscalar(x[3].dshape)
def test_slice():
x = symbol('x', '5 * 3 * {name: string, amount: float32}')
assert x[2:, 0].dshape == dshape('3 * {name: string, amount: float32}')
assert x[2:].dshape == x[2:, :].dshape
# Make sure that these are hashable
hash(x[:2])
hash(x[0, :2])
assert str(x[1]) == 'x[1]'
assert str(x[:2]) == 'x[:2]'
assert str(x[0, :2]) == 'x[0, :2]'
assert str(x[1:4:2, :2]) == 'x[1:4:2, :2]'
def test_negative_slice():
x = symbol('x', '10 * 10 * int32')
assert x[:5, -3:].shape == (5, 3)
def test_None_slice():
x = symbol('x', '10 * 10 * int32')
assert x[:5, None, -3:].shape == (5, 1, 3)
def test_list_slice():
x = symbol('x', '10 * 10 * int32')
assert x[[1, 2, 3], [4, 5]].shape == (3, 2)
def test_list_slice_string():
x = symbol('x', '10 * 10 * int32')
assert str(x[[1, 2, 3]]) == "x[[1, 2, 3]]"
def test_slice_with_boolean_list():
x = symbol('x', '5 * int32')
expr = x[[True, False, False, True, False]]
assert expr.index == ([0, 3],)
def test_slice_with_numpy_array():
x = symbol('x', '2 * int32')
assert x[np.array([True, False])].isidentical(x[[True, False]])
|
nova/conf/base.py
|
zjzh/nova
| 1,874 |
69120
|
<filename>nova/conf/base.py
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
base_options = [
cfg.IntOpt(
'password_length',
default=12,
min=0,
help='Length of generated instance admin passwords.'),
cfg.StrOpt(
'instance_usage_audit_period',
default='month',
regex='^(hour|month|day|year)(@([0-9]+))?$',
help='''
Time period to generate instance usages for. An optional offset can be added to
the period by appending an @ character followed by a number defining the
offset.
Possible values:
* period, example: ``hour``, ``day``, ``month`` or ``year``
* period with offset, example: ``month@15`` will result in monthly audits
starting on 15th day of month.
'''),
cfg.BoolOpt(
'use_rootwrap_daemon',
default=False,
help='''
Start and use a daemon that can run the commands that need to be run with
root privileges. This option is usually enabled on nodes that run nova compute
processes.
'''),
cfg.StrOpt(
'rootwrap_config',
default="/etc/nova/rootwrap.conf",
help='''
Path to the rootwrap configuration file.
The goal of the root wrapper is to allow a service-specific unprivileged user to
run a number of actions as the root user in the safest manner possible.
The configuration file used here must match the one defined in the sudoers
entry.
'''),
cfg.StrOpt(
'tempdir',
help='Explicitly specify the temporary working directory.'),
]
def register_opts(conf):
conf.register_opts(base_options)
def list_opts():
return {'DEFAULT': base_options}
|
vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/oid_test.py
|
weltonrodrigo/origin
| 807 |
69124
|
<filename>vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/oid_test.py
#!/usr/bin/env python
import unittest
from ct.crypto import error
from ct.crypto.asn1 import oid
from ct.crypto.asn1 import type_test_base
class ObjectIdentifierTest(type_test_base.TypeTestBase):
asn1_type = oid.ObjectIdentifier
hashable = True
initializers = (
((0, 0), "0.0"),
((1, 2), "1.2"),
((2, 5), "2.5"),
((1, 2, 3, 4), "1.2.3.4"),
((1, 2, 840, 113549), "1.2.840.113549"),
((1, 2, 840, 113549, 1), "1.2.840.113549.1"),
)
bad_initializers = (
# Too short.
("0", ValueError),
((0,), ValueError),
(("1"), ValueError),
((1,), ValueError),
# Negative components.
("-1", ValueError),
((-1,), ValueError),
("1.2.3.-4", ValueError),
((1, 2, 3, -4), ValueError),
# Invalid components.
("3.2.3.4", ValueError),
((3, 2, 3, 4), ValueError),
("0.40.3.4", ValueError),
((0, 40, 3, 4), ValueError),
)
encode_test_vectors = (
# Example from ASN.1 spec.
("2.100.3", "0603813403"),
# More examples.
("0.0", "060100"),
("1.2", "06012a"),
("2.5", "060155"),
("1.2.3.4", "06032a0304"),
("1.2.840", "06032a8648"),
("1.2.840.113549", "06062a864886f70d"),
("1.2.840.113549.1", "06072a864886f70d01")
)
bad_encodings = (
# Empty OID.
("0600"),
# Last byte has high bit set.
("06020080"),
("06032a86c8"),
# Leading '80'-octets in component.
("06042a8086c8"),
# Indefinite length.
("06808134030000")
)
bad_strict_encodings = ()
def test_dictionary(self):
rsa = oid.ObjectIdentifier(value=oid.RSA_ENCRYPTION)
self.assertEqual("rsaEncryption", rsa.long_name)
self.assertEqual("RSA", rsa.short_name)
def test_unknown_oids(self):
unknown = oid.ObjectIdentifier(value="1.2.3.4")
self.assertEqual("1.2.3.4", unknown.long_name)
self.assertEqual("1.2.3.4", unknown.short_name)
if __name__ == '__main__':
unittest.main()
|
openpoiservice/server/categories/categories.py
|
larsrinn/openpoiservice
| 131 |
69133
|
<filename>openpoiservice/server/categories/categories.py
# openpoiservice/server/categories.py
import yaml
import os
import copy
class CategoryTools(object):
def __init__(self, categories_file):
self.basedir = os.path.abspath(os.path.dirname(__file__))
self.categories_object = yaml.safe_load(open(os.path.join(self.basedir, categories_file)))
self.category_group_ids = []
self.category_ids = []
self.group_index = {}
self.category_to_group_index = {}
self.category_index = {}
self.category_ids_index = {}
self.generate_category_indices()
def unify_categories(self, filters):
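        """Combine the category ids selected via 'category_group_ids' and 'category_ids' filters into one list without duplicates."""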
category_ids_of_group = []
if 'category_group_ids' in filters:
for group_id in filters['category_group_ids']:
if group_id in self.group_index:
category_ids_of_group.extend(self.group_index[group_id])
if 'category_ids' in filters:
in_first = set(category_ids_of_group)
in_second = set(filters['category_ids'])
in_second_but_not_in_first = in_second - in_first
result = category_ids_of_group + list(in_second_but_not_in_first)
return result
return category_ids_of_group
def generate_category_indices(self):
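        """Build the lookup tables (group index, category index, category id index) from the parsed categories YAML."""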
for k, v in copy.deepcopy(self.categories_object).items():
group_name = k
group_id = v['id']
self.group_index[group_id] = []
self.category_group_ids.append(int(group_id))
group_children = v['children']
for tag_name, pois in group_children.items():
if tag_name in self.category_index:
self.category_index[tag_name].update(pois)
else:
self.category_index[tag_name] = pois
for poi, cat_id in pois.items():
self.category_ids_index[cat_id] = {
'poi_name': poi,
'poi_group': group_name
}
self.category_ids.append(int(cat_id))
self.group_index[group_id].append(int(cat_id))
if cat_id not in self.category_to_group_index:
self.category_to_group_index[cat_id] = {'group_id': v['id'],
'group_name': k}
def get_category(self, tags):
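        """Return the category ids that match the given tag name/value pairs."""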
categories = []
if bool(tags):
for tag_name, tag_value in tags.items():
if tag_name and tag_name in self.category_index and tag_value in self.category_index[tag_name]:
category_id = self.category_index[tag_name][tag_value]
if category_id > 0:
categories.append(category_id)
return categories
|
pyfr/backends/opencl/types.py
|
rishit2307/PyFR
| 185 |
69148
|
<filename>pyfr/backends/opencl/types.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
from functools import cached_property
import numpy as np
import pyfr.backends.base as base
class _OpenCLMatrixCommon:
@cached_property
def _as_parameter_(self):
return int(self.data)
class OpenCLMatrixBase(_OpenCLMatrixCommon, base.MatrixBase):
def onalloc(self, basedata, offset):
self.basedata = basedata
self.offset = offset
# If necessary, slice the buffer
if offset:
self.data = basedata.slice(offset, self.nbytes)
else:
self.data = basedata
# Process any initial value
if self._initval is not None:
self._set(self._initval)
# Remove
del self._initval
def _get(self):
# Allocate an empty buffer
buf = np.empty((self.nrow, self.leaddim), dtype=self.dtype)
# Copy
self.backend.queue.barrier()
self.backend.cl.memcpy(self.backend.queue, buf, self.data, self.nbytes,
blocking=True)
# Unpack
return self._unpack(buf[None, :, :])
def _set(self, ary):
buf = self._pack(ary)
# Copy
self.backend.queue.barrier()
self.backend.cl.memcpy(self.backend.queue, self.data, buf, self.nbytes,
blocking=True)
class OpenCLMatrixSlice(_OpenCLMatrixCommon, base.MatrixSlice):
@cached_property
def data(self):
if self.offset:
nbytes = ((self.nrow - 1)*self.leaddim + self.ncol)*self.itemsize
return self.basedata.slice(self.offset, nbytes)
else:
return self.basedata
class OpenCLMatrix(OpenCLMatrixBase, base.Matrix): pass
class OpenCLConstMatrix(OpenCLMatrixBase, base.ConstMatrix): pass
class OpenCLView(base.View): pass
class OpenCLXchgView(base.XchgView): pass
class OpenCLXchgMatrix(OpenCLMatrix, base.XchgMatrix):
def __init__(self, backend, ioshape, initval, extent, aliases, tags):
super().__init__(backend, ioshape, initval, extent, aliases, tags)
# Allocate an empty buffer on the host for MPI to send/recv from
shape, dtype = (self.nrow, self.ncol), self.dtype
self.hdata = backend.cl.pagelocked_empty(shape, dtype)
class OpenCLGraph(base.Graph):
def commit(self):
super().commit()
# Map from kernels to event table locations
evtidxs = {}
# Kernel list complete with dependency information
self.klist = klist = []
for i, k in enumerate(self.knodes):
evtidxs[k] = i
# Resolve the event indices of kernels we depend on
wait_evts = [evtidxs[dep] for dep in self.kdeps[k]] or None
klist.append((k, wait_evts, k in self.depk))
# Dependent MPI request list
self.mreqlist = mreqlist = []
for req, deps in zip(self.mpi_reqs, self.mpi_req_deps):
if deps:
mreqlist.append((req, [evtidxs[dep] for dep in deps]))
def run(self, queue):
from mpi4py import MPI
events = [None]*len(self.klist)
wait_for_events = self.backend.cl.wait_for_events
# Submit the kernels to the queue
for i, (k, wait_for, ret_evt) in enumerate(self.klist):
if wait_for is not None:
wait_for = [events[j] for j in wait_for]
events[i] = k.run(queue, wait_for, ret_evt)
# Flush the queue to ensure the kernels have started
queue.flush()
# Start all dependency-free MPI requests
MPI.Prequest.Startall(self.mpi_root_reqs)
# Start any remaining requests once their dependencies are satisfied
for req, wait_for in self.mreqlist:
wait_for_events([events[j] for j in wait_for])
req.Start()
# Wait for all of the MPI requests to finish
MPI.Prequest.Waitall(self.mpi_reqs)
|
screenshots/Part-4/ext/table.py
|
haha517/mywriter
| 177 |
69150
|
<filename>screenshots/Part-4/ext/table.py
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Table(QtGui.QDialog):
def __init__(self,parent = None):
QtGui.QDialog.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
# Rows
rowsLabel = QtGui.QLabel("Rows: ",self)
self.rows = QtGui.QSpinBox(self)
# Columns
colsLabel = QtGui.QLabel("Columns",self)
self.cols = QtGui.QSpinBox(self)
# Cell spacing (distance between cells)
spaceLabel = QtGui.QLabel("Cell spacing",self)
self.space = QtGui.QSpinBox(self)
# Cell padding (distance between cell and inner text)
padLabel = QtGui.QLabel("Cell padding",self)
self.pad = QtGui.QSpinBox(self)
self.pad.setValue(10)
# Button
insertButton = QtGui.QPushButton("Insert",self)
insertButton.clicked.connect(self.insert)
# Layout
layout = QtGui.QGridLayout()
layout.addWidget(rowsLabel,0,0)
layout.addWidget(self.rows,0,1)
layout.addWidget(colsLabel,1,0)
layout.addWidget(self.cols,1,1)
layout.addWidget(padLabel,2,0)
layout.addWidget(self.pad,2,1)
layout.addWidget(spaceLabel,3,0)
layout.addWidget(self.space,3,1)
layout.addWidget(insertButton,4,0,1,2)
self.setWindowTitle("Insert Table")
self.setGeometry(300,300,200,100)
self.setLayout(layout)
def insert(self):
cursor = self.parent.text.textCursor()
# Get the configurations
rows = self.rows.value()
cols = self.cols.value()
if not rows or not cols:
popup = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
"Parameter error",
"Row and column numbers may not be zero!",
QtGui.QMessageBox.Ok,
self)
popup.show()
else:
padding = self.pad.value()
space = self.space.value()
# Set the padding and spacing
fmt = QtGui.QTextTableFormat()
fmt.setCellPadding(padding)
fmt.setCellSpacing(space)
            # Insert the new table
cursor.insertTable(rows,cols,fmt)
self.close()
|
models/network_srmd.py
|
WestCityInstitute/KAIR
| 1,521 |
69151
|
<gh_stars>1000+
import torch.nn as nn
import models.basicblock as B
import torch
"""
# --------------------------------------------
# SRMD (15 conv layers)
# --------------------------------------------
Reference:
@inproceedings{zhang2018learning,
title={Learning a single convolutional super-resolution network for multiple degradations},
author={<NAME> and <NAME> and <NAME>},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={3262--3271},
year={2018}
}
http://openaccess.thecvf.com/content_cvpr_2018/papers/Zhang_Learning_a_Single_CVPR_2018_paper.pdf
"""
# --------------------------------------------
# SRMD (SRMD, in_nc = 3+15+1 = 19)
# SRMD (SRMDNF, in_nc = 3+15 = 18)
# --------------------------------------------
class SRMD(nn.Module):
def __init__(self, in_nc=19, out_nc=3, nc=128, nb=12, upscale=4, act_mode='R', upsample_mode='pixelshuffle'):
"""
# ------------------------------------
in_nc: channel number of input, default: 3+15
out_nc: channel number of output
nc: channel number
nb: total number of conv layers
upscale: scale factor
act_mode: batch norm + activation function; 'BR' means BN+ReLU
upsample_mode: default 'pixelshuffle' = conv + pixelshuffle
# ------------------------------------
"""
super(SRMD, self).__init__()
assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
bias = True
if upsample_mode == 'upconv':
upsample_block = B.upsample_upconv
elif upsample_mode == 'pixelshuffle':
upsample_block = B.upsample_pixelshuffle
elif upsample_mode == 'convtranspose':
upsample_block = B.upsample_convtranspose
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
m_head = B.conv(in_nc, nc, mode='C'+act_mode[-1], bias=bias)
m_body = [B.conv(nc, nc, mode='C'+act_mode, bias=bias) for _ in range(nb-2)]
m_tail = upsample_block(nc, out_nc, mode=str(upscale), bias=bias)
self.model = B.sequential(m_head, *m_body, m_tail)
# def forward(self, x, k_pca):
# m = k_pca.repeat(1, 1, x.size()[-2], x.size()[-1])
# x = torch.cat((x, m), 1)
# x = self.body(x)
def forward(self, x):
x = self.model(x)
return x
if __name__ == '__main__':
from utils import utils_model
model = SRMD(in_nc=18, out_nc=3, nc=64, nb=15, upscale=4, act_mode='R', upsample_mode='pixelshuffle')
print(utils_model.describe_model(model))
    x = torch.randn((2, 3, 100, 100))
    k_pca = torch.randn(2, 15, 1, 1)
    # forward() takes a single tensor, so tile the kernel maps and concatenate them onto the image channels (3 + 15 = 18 = in_nc)
    k_map = k_pca.repeat(1, 1, x.size()[-2], x.size()[-1])
    x = model(torch.cat((x, k_map), dim=1))
print(x.shape)
# run models/network_srmd.py
|
tests/pybaseball/test_plotting.py
|
akern40/pybaseball
| 650 |
69186
|
<filename>tests/pybaseball/test_plotting.py
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from pybaseball.plotting import transform_coordinates
@pytest.fixture
def coords():
return pd.DataFrame({"x": [1.0, 2.0, -1.0], "y": [1.0, 0.0, 10.0]})
def test_transform_coordinates_identity_scale(coords):
transformed_coords = transform_coordinates(coords, scale=1)
assert_series_equal(coords.x, transformed_coords.x)
assert_series_equal(-coords.y, transformed_coords.y)
def test_transform_coordinates(coords):
transformed_coords = transform_coordinates(coords, scale=2, x_center=0, y_center=0)
assert_series_equal(2 * coords.x, transformed_coords.x)
assert_series_equal(-2 * coords.y, transformed_coords.y)
transformed_coords = transform_coordinates(coords, scale=2, x_center=1, y_center=1)
expected = pd.DataFrame({"x": [1.0, 3.0, -3.0], "y": [-1.0, 1.0, -19.0]})
assert_frame_equal(expected, transformed_coords)
xc = 123.4
yc = 432.1
transformed_coords = transform_coordinates(coords, scale=0, x_center=xc, y_center=yc)
assert_series_equal(pd.Series(name="x", data=3 * [xc]), transformed_coords.x)
assert_series_equal(pd.Series(name="y", data=3 * [yc]), -transformed_coords.y)
|
third_party/closure_linter/closure_linter/typeannotation_test.py
|
xzhan96/chromium.src
| 123 |
69188
|
<filename>third_party/closure_linter/closure_linter/typeannotation_test.py
#!/usr/bin/env python
"""Unit tests for the typeannotation module."""
import unittest as googletest
from closure_linter import testutil
from closure_linter.common import erroraccumulator
CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
'function(...(Object.<string>))>')
class TypeErrorException(Exception):
"""Exception for TypeErrors."""
def __init__(self, errors):
super(TypeErrorException, self).__init__()
self.errors = errors
class TypeParserTest(googletest.TestCase):
"""Tests for typeannotation parsing."""
def _ParseComment(self, script):
"""Parse a script that contains one comment and return it."""
accumulator = erroraccumulator.ErrorAccumulator()
_, comments = testutil.ParseFunctionsAndComments(script, accumulator)
if accumulator.GetErrors():
raise TypeErrorException(accumulator.GetErrors())
self.assertEquals(1, len(comments))
return comments[0]
def _ParseType(self, type_str):
"""Creates a comment to parse and returns the parsed type."""
comment = self._ParseComment('/** @type {%s} **/' % type_str)
return comment.GetDocFlags()[0].jstype
def assertProperReconstruction(self, type_str, matching_str=None):
"""Parses the type and asserts the its repr matches the type.
If matching_str is specified, it will assert that the repr matches this
string instead.
Args:
type_str: The type string to parse.
matching_str: A string the __repr__ of the parsed type should match.
Returns:
The parsed js_type.
"""
parsed_type = self._ParseType(type_str)
# Use listEqual assertion to more easily identify the difference
self.assertListEqual(list(matching_str or type_str),
list(repr(parsed_type)))
self.assertEquals(matching_str or type_str, repr(parsed_type))
# Newlines will be inserted by the file writer.
self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
return parsed_type
def assertNullable(self, type_str, nullable=True):
parsed_type = self.assertProperReconstruction(type_str)
self.assertEquals(nullable, parsed_type.GetNullability(),
'"%s" should %sbe nullable' %
(type_str, 'not ' if nullable else ''))
def assertNotNullable(self, type_str):
return self.assertNullable(type_str, nullable=False)
def testReconstruction(self):
self.assertProperReconstruction('*')
self.assertProperReconstruction('number')
self.assertProperReconstruction('(((number)))')
self.assertProperReconstruction('!number')
self.assertProperReconstruction('?!number')
self.assertProperReconstruction('number=')
self.assertProperReconstruction('number=!?', '?!number=')
self.assertProperReconstruction('number|?string')
self.assertProperReconstruction('(number|string)')
self.assertProperReconstruction('?(number|string)')
self.assertProperReconstruction('Object.<number,string>')
self.assertProperReconstruction('function(new:Object)')
self.assertProperReconstruction('function(new:Object):number')
self.assertProperReconstruction('function(new:Object,Element):number')
self.assertProperReconstruction('function(this:T,...)')
self.assertProperReconstruction('{a:?number}')
self.assertProperReconstruction('{a:?number,b:(number|string)}')
self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
self.assertProperReconstruction('{handleEvent:function(?):?}')
self.assertProperReconstruction('function():?|null')
self.assertProperReconstruction('null|function():?|bar')
def testOptargs(self):
self.assertProperReconstruction('number=')
self.assertProperReconstruction('number|string=')
self.assertProperReconstruction('(number|string)=')
self.assertProperReconstruction('(number|string=)')
self.assertProperReconstruction('(number=|string)')
self.assertProperReconstruction('function(...):number=')
def testIndepth(self):
    # Do a deeper check of the crazy identifier
crazy = self.assertProperReconstruction(CRAZY_TYPE)
self.assertEquals('Array.', crazy.identifier)
self.assertEquals(1, len(crazy.sub_types))
func1 = crazy.sub_types[0]
func2 = func1.return_type
self.assertEquals('function', func1.identifier)
self.assertEquals('function', func2.identifier)
self.assertEquals(3, len(func1.sub_types))
self.assertEquals(1, len(func2.sub_types))
self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
def testIterIdentifiers(self):
nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
for identifier in ('a', 'b', 'c', 'd', 'e'):
self.assertIn(identifier, nested_identifiers.IterIdentifiers())
def testIsEmpty(self):
self.assertTrue(self._ParseType('').IsEmpty())
self.assertFalse(self._ParseType('?').IsEmpty())
self.assertFalse(self._ParseType('!').IsEmpty())
self.assertFalse(self._ParseType('<?>').IsEmpty())
def testIsConstructor(self):
self.assertFalse(self._ParseType('').IsConstructor())
self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
def testIsVarArgsType(self):
self.assertTrue(self._ParseType('...number').IsVarArgsType())
self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
def testIsUnknownType(self):
self.assertTrue(self._ParseType('?').IsUnknownType())
self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
self.assertFalse(self._ParseType('?|!').IsUnknownType())
self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
self.assertFalse(self._ParseType('!').IsUnknownType())
long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
record = self._ParseType(long_type)
# First check that there's not just one type with 3 return types, but three
# top-level types.
self.assertEquals(3, len(record.sub_types))
# Now extract all unknown type instances and verify that they really are.
handle_event, sample = record.sub_types[1].sub_types
for i, sub_type in enumerate([
record.sub_types[0].return_type,
handle_event.return_type,
handle_event.sub_types[0],
sample,
record.sub_types[2]]):
self.assertTrue(sub_type.IsUnknownType(),
'Type %d should be the unknown type: %s\n%s' % (
i, sub_type.tokens, record.Dump()))
def testTypedefNames(self):
easy = self._ParseType('{a}')
self.assertTrue(easy.record_type)
easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
self.assertEquals('a', easy.key_type.identifier)
self.assertEquals('', easy.identifier)
easy = self.assertProperReconstruction('{a:b}').sub_types[0]
self.assertEquals('a', easy.key_type.identifier)
self.assertEquals('b', easy.identifier)
def assertTypeError(self, type_str):
"""Asserts that parsing the given type raises a linter error."""
self.assertRaises(TypeErrorException, self._ParseType, type_str)
def testParseBadTypes(self):
"""Tests that several errors in types don't break the parser."""
self.assertTypeError('<')
self.assertTypeError('>')
self.assertTypeError('Foo.<Bar')
self.assertTypeError('Foo.Bar>=')
self.assertTypeError('Foo.<Bar>>=')
self.assertTypeError('(')
self.assertTypeError(')')
self.assertTypeError('Foo.<Bar)>')
self._ParseType(':')
self._ParseType(':foo')
self.assertTypeError(':)foo')
self.assertTypeError('(a|{b:(c|function(new:d):e')
def testNullable(self):
self.assertNullable('null')
self.assertNullable('Object')
self.assertNullable('?string')
self.assertNullable('?number')
self.assertNotNullable('string')
self.assertNotNullable('number')
self.assertNotNullable('boolean')
self.assertNotNullable('function(Object)')
self.assertNotNullable('function(Object):Object')
self.assertNotNullable('function(?Object):?Object')
self.assertNotNullable('!Object')
self.assertNotNullable('boolean|string')
self.assertNotNullable('(boolean|string)')
self.assertNullable('(boolean|string|null)')
self.assertNullable('(?boolean)')
self.assertNullable('?(boolean)')
self.assertNullable('(boolean|Object)')
self.assertNotNullable('(boolean|(string|{a:}))')
def testSpaces(self):
"""Tests that spaces don't change the outcome."""
type_str = (' A < b | ( c | ? ! d e f ) > | '
'function ( x : . . . ) : { y : z = } ')
two_spaces = type_str.replace(' ', ' ')
no_spaces = type_str.replace(' ', '')
newlines = type_str.replace(' ', '\n * ')
self.assertProperReconstruction(no_spaces)
self.assertProperReconstruction(type_str, no_spaces)
self.assertProperReconstruction(two_spaces, no_spaces)
self.assertProperReconstruction(newlines, no_spaces)
if __name__ == '__main__':
googletest.main()
|
sympy/mpmath/tests/test_linalg.py
|
shipci/sympy
| 319 |
69189
|
<reponame>shipci/sympy
# TODO: don't use round
from __future__ import division
from sympy.mpmath import *
xrange = libmp.backend.xrange
# XXX: these shouldn't be visible(?)
LU_decomp = mp.LU_decomp
L_solve = mp.L_solve
U_solve = mp.U_solve
householder = mp.householder
improve_solution = mp.improve_solution
A1 = matrix([[3, 1, 6],
[2, 1, 3],
[1, 1, 1]])
b1 = [2, 7, 4]
A2 = matrix([[ 2, -1, -1, 2],
[ 6, -2, 3, -1],
[-4, 2, 3, -2],
[ 2, 0, 4, -3]])
b2 = [3, -3, -2, -1]
A3 = matrix([[ 1, 0, -1, -1, 0],
[ 0, 1, 1, 0, -1],
[ 4, -5, 2, 0, 0],
[ 0, 0, -2, 9,-12],
[ 0, 5, 0, 0, 12]])
b3 = [0, 0, 0, 0, 50]
A4 = matrix([[10.235, -4.56, 0., -0.035, 5.67],
[-2.463, 1.27, 3.97, -8.63, 1.08],
[-6.58, 0.86, -0.257, 9.32, -43.6 ],
[ 9.83, 7.39, -17.25, 0.036, 24.86],
[-9.31, 34.9, 78.56, 1.07, 65.8 ]])
b4 = [8.95, 20.54, 7.42, 5.60, 58.43]
A5 = matrix([[ 1, 2, -4],
[-2, -3, 5],
[ 3, 5, -8]])
A6 = matrix([[ 1.377360, 2.481400, 5.359190],
[ 2.679280, -1.229560, 25.560210],
[-1.225280+1.e6, 9.910180, -35.049900-1.e6]])
b6 = [23.500000, -15.760000, 2.340000]
A7 = matrix([[1, -0.5],
[2, 1],
[-2, 6]])
b7 = [3, 2, -4]
A8 = matrix([[1, 2, 3],
[-1, 0, 1],
[-1, -2, -1],
[1, 0, -1]])
b8 = [1, 2, 3, 4]
A9 = matrix([[ 4, 2, -2],
[ 2, 5, -4],
[-2, -4, 5.5]])
b9 = [10, 16, -15.5]
A10 = matrix([[1.0 + 1.0j, 2.0, 2.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
b10 = [1.0, 1.0 + 1.0j, 1.0]
def test_LU_decomp():
A = A3.copy()
b = b3
A, p = LU_decomp(A)
y = L_solve(A, b, p)
x = U_solve(A, y)
assert p == [2, 1, 2, 3]
assert [round(i, 14) for i in x] == [3.78953107960742, 2.9989094874591098,
-0.081788440567070006, 3.8713195201744801, 2.9171210468920399]
A = A4.copy()
b = b4
A, p = LU_decomp(A)
y = L_solve(A, b, p)
x = U_solve(A, y)
assert p == [0, 3, 4, 3]
assert [round(i, 14) for i in x] == [2.6383625899619201, 2.6643834462368399,
0.79208015947958998, -2.5088376454101899, -1.0567657691375001]
A = randmatrix(3)
bak = A.copy()
LU_decomp(A, overwrite=1)
assert A != bak
def test_inverse():
for A in [A1, A2, A5]:
inv = inverse(A)
assert mnorm(A*inv - eye(A.rows), 1) < 1.e-14
def test_householder():
mp.dps = 15
A, b = A8, b8
H, p, x, r = householder(extend(A, b))
assert H == matrix(
[[mpf('3.0'), mpf('-2.0'), mpf('-1.0'), 0],
[-1.0,mpf('3.333333333333333'),mpf('-2.9999999999999991'),mpf('2.0')],
[-1.0, mpf('-0.66666666666666674'),mpf('2.8142135623730948'),
mpf('-2.8284271247461898')],
[1.0, mpf('-1.3333333333333333'),mpf('-0.20000000000000018'),
mpf('4.2426406871192857')]])
assert p == [-2, -2, mpf('-1.4142135623730949')]
assert round(norm(r, 2), 10) == 4.2426406870999998
y = [102.102, 58.344, 36.463, 24.310, 17.017, 12.376, 9.282, 7.140, 5.610,
4.488, 3.6465, 3.003]
def coeff(n):
    # similar to the Hilbert matrix
A = []
for i in range(1, 13):
A.append([1. / (i + j - 1) for j in range(1, n + 1)])
return matrix(A)
residuals = []
refres = []
for n in range(2, 7):
A = coeff(n)
H, p, x, r = householder(extend(A, y))
x = matrix(x)
y = matrix(y)
residuals.append(norm(r, 2))
refres.append(norm(residual(A, x, y), 2))
assert [round(res, 10) for res in residuals] == [15.1733888877,
0.82378073210000002, 0.302645887, 0.0260109244,
0.00058653999999999998]
assert norm(matrix(residuals) - matrix(refres), inf) < 1.e-13
def test_factorization():
A = randmatrix(5)
P, L, U = lu(A)
assert mnorm(P*A - L*U, 1) < 1.e-15
def test_solve():
assert norm(residual(A6, lu_solve(A6, b6), b6), inf) < 1.e-10
assert norm(residual(A7, lu_solve(A7, b7), b7), inf) < 1.5
assert norm(residual(A8, lu_solve(A8, b8), b8), inf) <= 3 + 1.e-10
assert norm(residual(A6, qr_solve(A6, b6)[0], b6), inf) < 1.e-10
assert norm(residual(A7, qr_solve(A7, b7)[0], b7), inf) < 1.5
assert norm(residual(A8, qr_solve(A8, b8)[0], b8), 2) <= 4.3
assert norm(residual(A10, lu_solve(A10, b10), b10), 2) < 1.e-10
assert norm(residual(A10, qr_solve(A10, b10)[0], b10), 2) < 1.e-10
def test_solve_overdet_complex():
A = matrix([[1, 2j], [3, 4j], [5, 6]])
b = matrix([1 + j, 2, -j])
assert norm(residual(A, lu_solve(A, b), b)) < 1.0208
def test_singular():
mp.dps = 15
A = [[5.6, 1.2], [7./15, .1]]
B = repr(zeros(2))
b = [1, 2]
def _assert_ZeroDivisionError(statement):
try:
eval(statement)
assert False
except (ZeroDivisionError, ValueError):
pass
for i in ['lu_solve(%s, %s)' % (A, b), 'lu_solve(%s, %s)' % (B, b),
'qr_solve(%s, %s)' % (A, b), 'qr_solve(%s, %s)' % (B, b)]:
_assert_ZeroDivisionError(i)
def test_cholesky():
assert fp.cholesky(fp.matrix(A9)) == fp.matrix([[2, 0, 0], [1, 2, 0], [-1, -3/2, 3/2]])
x = fp.cholesky_solve(A9, b9)
assert fp.norm(fp.residual(A9, x, b9), fp.inf) == 0
def test_det():
assert det(A1) == 1
assert round(det(A2), 14) == 8
assert round(det(A3)) == 1834
assert round(det(A4)) == 4443376
assert det(A5) == 1
assert round(det(A6)) == 78356463
assert det(zeros(3)) == 0
def test_cond():
mp.dps = 15
A = matrix([[1.2969, 0.8648], [0.2161, 0.1441]])
assert cond(A, lambda x: mnorm(x,1)) == mpf('327065209.73817754')
assert cond(A, lambda x: mnorm(x,inf)) == mpf('327065209.73817754')
assert cond(A, lambda x: mnorm(x,'F')) == mpf('249729266.80008656')
@extradps(50)
def test_precision():
A = randmatrix(10, 10)
assert mnorm(inverse(inverse(A)) - A, 1) < 1.e-45
def test_interval_matrix():
mp.dps = 15
iv.dps = 15
a = iv.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']])
b = iv.matrix(['4','0.6','0.5'])
c = iv.lu_solve(a, b)
assert c[0].delta < 1e-13
assert c[1].delta < 1e-13
assert c[2].delta < 1e-13
assert 5.25823271130625686059275 in c[0]
assert -13.155049396267837541163 in c[1]
assert 7.42069154774972557628979 in c[2]
def test_LU_cache():
A = randmatrix(3)
LU = LU_decomp(A)
assert A._LU == LU_decomp(A)
A[0,0] = -1000
assert A._LU is None
def test_improve_solution():
A = randmatrix(5, min=1e-20, max=1e20)
b = randmatrix(5, 1, min=-1000, max=1000)
x1 = lu_solve(A, b) + randmatrix(5, 1, min=-1e-5, max=1.e-5)
x2 = improve_solution(A, x1, b)
assert norm(residual(A, x2, b), 2) < norm(residual(A, x1, b), 2)
def test_exp_pade():
for i in range(3):
dps = 15
extra = 15
mp.dps = dps + extra
dm = 0
N = 3
dg = range(1,N+1)
a = diag(dg)
expa = diag([exp(x) for x in dg])
        # choose a random matrix that is not close to being singular
# to avoid adding too much extra precision in computing
# m**-1 * M * m
while abs(dm) < 0.01:
m = randmatrix(N)
dm = det(m)
m = m/dm
a1 = m**-1 * a * m
e2 = m**-1 * expa * m
mp.dps = dps
e1 = expm(a1, method='pade')
mp.dps = dps + extra
d = e2 - e1
#print d
mp.dps = dps
assert norm(d, inf).ae(0)
mp.dps = 15
def test_qr():
    mp.dps = 15 # default value for dps
lowlimit = -9 # lower limit of matrix element value
    uplimit = 9 # upper limit of matrix element value
maxm = 4 # max matrix size
flg = False # toggle to create real vs complex matrix
zero = mpf('0.0')
for k in xrange(0,10):
exdps = 0
mode = 'full'
flg = bool(k % 2)
# generate arbitrary matrix size (2 to maxm)
num1 = nint(2 + (maxm-2)*rand())
num2 = nint(2 + (maxm-2)*rand())
m = int(max(num1, num2))
n = int(min(num1, num2))
# create matrix
A = mp.matrix(m,n)
# populate matrix values with arbitrary integers
if flg:
flg = False
dtype = 'complex'
for j in xrange(0,n):
for i in xrange(0,m):
val = nint(lowlimit + (uplimit-lowlimit)*rand())
val2 = nint(lowlimit + (uplimit-lowlimit)*rand())
A[i,j] = mpc(val, val2)
else:
flg = True
dtype = 'real'
for j in xrange(0,n):
for i in xrange(0,m):
val = nint(lowlimit + (uplimit-lowlimit)*rand())
A[i,j] = mpf(val)
# perform A -> QR decomposition
Q, R = qr(A, mode, edps = exdps)
#print('\n\n A = \n', nstr(A, 4))
#print('\n Q = \n', nstr(Q, 4))
#print('\n R = \n', nstr(R, 4))
#print('\n Q*R = \n', nstr(Q*R, 4))
maxnorm = mpf('1.0E-11')
n1 = norm(A - Q * R)
#print '\n Norm of A - Q * R = ', n1
if n1 > maxnorm:
raise ValueError('Excessive norm value')
if dtype == 'real':
n1 = norm(eye(m) - Q.T * Q)
#print ' Norm of I - Q.T * Q = ', n1
if n1 > maxnorm:
raise ValueError('Excessive norm value')
n1 = norm(eye(m) - Q * Q.T)
#print ' Norm of I - Q * Q.T = ', n1
if n1 > maxnorm:
raise ValueError('Excessive norm value')
if dtype == 'complex':
n1 = norm(eye(m) - Q.T * Q.conjugate())
#print ' Norm of I - Q.T * Q.conjugate() = ', n1
if n1 > maxnorm:
raise ValueError('Excessive norm value')
n1 = norm(eye(m) - Q.conjugate() * Q.T)
#print ' Norm of I - Q.conjugate() * Q.T = ', n1
if n1 > maxnorm:
raise ValueError('Excessive norm value')
|
src/test/py/bazel/query_test.py
|
jobechoi/bazel
| 16,989 |
69232
|
# pylint: disable=g-bad-file-header
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from src.test.py.bazel import test_base
class QueryTest(test_base.TestBase):
def testSimpleQuery(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'exports_files(["exported.txt"])',
'filegroup(name = "top-rule", srcs = [":dep-rule"])',
'filegroup(name = "dep-rule", srcs = ["src.txt"])',
])
self.ScratchFile('foo/src.txt')
self.ScratchFile('foo/exported.txt')
self.ScratchFile('foo/non-exported.txt')
self._AssertQueryOutput('//foo:top-rule', '//foo:top-rule')
self._AssertQueryOutput('//foo:*', '//foo:top-rule', '//foo:dep-rule',
'//foo:src.txt', '//foo:exported.txt',
'//foo:BUILD')
self._AssertQueryOutput('deps(//foo:top-rule)', '//foo:top-rule',
'//foo:dep-rule', '//foo:src.txt')
self._AssertQueryOutput('deps(//foo:top-rule, 1)', '//foo:top-rule',
'//foo:dep-rule')
def _AssertQueryOutput(self, query_expr, *expected_results):
exit_code, stdout, stderr = self.RunBazel(['query', query_expr])
self.AssertExitCode(exit_code, 0, stderr)
stdout = sorted(x for x in stdout if x)
self.assertEqual(len(stdout), len(expected_results))
self.assertListEqual(stdout, sorted(expected_results))
if __name__ == '__main__':
unittest.main()
|
src/sultan/echo/colorlog/notests/test_colorlog.py
|
curtismuntz/sultan
| 692 |
69250
|
"""Test the colorlog.colorlog module."""
import sys
import pytest
def test_colored_formatter(create_and_test_logger):
create_and_test_logger()
def test_custom_colors(create_and_test_logger):
"""Disable all colors and check no escape codes are output."""
create_and_test_logger(
log_colors={}, reset=False,
validator=lambda line: '\x1b[' not in line)
def test_reset(create_and_test_logger):
create_and_test_logger(
reset=True, validator=lambda l: l.endswith('\x1b[0m'))
def test_no_reset(create_and_test_logger):
create_and_test_logger(
fmt="%(reset)s%(log_color)s%(levelname)s:%(name)s:%(message)s",
reset=False,
# Check that each line does not end with an escape code
validator=lambda line: not line.endswith('\x1b[0m'))
def test_secondary_colors(create_and_test_logger):
expected = ':\x1b[31mtest_secondary_colors:\x1b[34m'
create_and_test_logger(
fmt=(
"%(log_color)s%(levelname)s:"
"%(name_log_color)s%(name)s:"
"%(message_log_color)s%(message)s"
),
secondary_log_colors={
'name': {
'DEBUG': 'red',
'INFO': 'red',
'WARNING': 'red',
'ERROR': 'red',
'CRITICAL': 'red',
},
'message': {
'DEBUG': 'blue',
'INFO': 'blue',
'WARNING': 'blue',
'ERROR': 'blue',
'CRITICAL': 'blue',
}
},
validator=lambda line: expected in line)
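# Editor's sketch (not part of the original test module): how the feature under
# test is typically configured, assuming colorlog exposes ColoredFormatter with
# a ``secondary_log_colors`` keyword as exercised above. Names here are
# illustrative only.
def _example_secondary_colors_logger():
    import logging
    import colorlog
    handler = logging.StreamHandler()
    handler.setFormatter(colorlog.ColoredFormatter(
        "%(log_color)s%(levelname)s:"
        "%(name_log_color)s%(name)s:"
        "%(message_log_color)s%(message)s",
        secondary_log_colors={
            'name': {'ERROR': 'red'},
            'message': {'ERROR': 'blue'},
        }))
    logger = logging.getLogger('example')
    logger.addHandler(handler)
    return logger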
def test_some_secondary_colors(create_and_test_logger):
lines = create_and_test_logger(
fmt="%(message_log_color)s%(message)s",
secondary_log_colors={
'message': {
'ERROR': 'red',
'CRITICAL': 'red'
}
})
# Check that only two lines are colored
assert len([l for l in lines if '\x1b[31m' in l]) == 2
@pytest.mark.skipif(sys.version_info < (3, 2), reason="requires python3.2")
def test_braces_style(create_and_test_logger):
create_and_test_logger(
fmt='{log_color}{levelname}:{name}:{message}', style='{')
@pytest.mark.skipif(sys.version_info < (3, 2), reason="requires python3.2")
def test_template_style(create_and_test_logger):
create_and_test_logger(
fmt='${log_color}${levelname}:${name}:${message}', style='$')
|
fsspec/compression.py
|
ernestoeperez88/filesystem_spec
| 1,738 |
69257
|
"""Helper functions for a standard streaming compression API"""
from bz2 import BZ2File
from gzip import GzipFile
from zipfile import ZipFile
import fsspec.utils
from fsspec.spec import AbstractBufferedFile
def noop_file(file, mode, **kwargs):
return file
# should be functions of the form func(infile, mode=, **kwargs) -> file-like
compr = {None: noop_file}
def register_compression(name, callback, extensions, force=False):
"""Register an "inferable" file compression type.
Registers transparent file compression type for use with fsspec.open.
Compression can be specified by name in open, or "infer"-ed for any files
ending with the given extensions.
Args:
name: (str) The compression type name. Eg. "gzip".
callback: A callable of form (infile, mode, **kwargs) -> file-like.
Accepts an input file-like object, the target mode and kwargs.
Returns a wrapped file-like object.
extensions: (str, Iterable[str]) A file extension, or list of file
extensions for which to infer this compression scheme. Eg. "gz".
force: (bool) Force re-registration of compression type or extensions.
Raises:
ValueError: If name or extensions already registered, and not force.
"""
if isinstance(extensions, str):
extensions = [extensions]
# Validate registration
if name in compr and not force:
raise ValueError("Duplicate compression registration: %s" % name)
for ext in extensions:
if ext in fsspec.utils.compressions and not force:
raise ValueError(
"Duplicate compression file extension: %s (%s)" % (ext, name)
)
compr[name] = callback
for ext in extensions:
fsspec.utils.compressions[ext] = name
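# Editor's sketch (not part of the original module): minimal use of the
# registration API above. The "raw" name and raw_file callback are hypothetical
# and only illustrate the expected callback shape
# func(infile, mode, **kwargs) -> file-like; wrapping the example in a helper
# keeps import of this module free of extra registrations.
def _example_register_passthrough():
    def raw_file(infile, mode="rb", **kwargs):
        # Passthrough "codec": hand back the underlying file object unchanged.
        return infile
    # force=True lets the example be re-run without a duplicate-registration error.
    register_compression("raw", raw_file, "raw", force=True)
    assert compr["raw"] is raw_file
    assert fsspec.utils.compressions["raw"] == "raw"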
def unzip(infile, mode="rb", filename=None, **kwargs):
if "r" not in mode:
filename = filename or "file"
z = ZipFile(infile, mode="w", **kwargs)
fo = z.open(filename, mode="w")
fo.close = lambda closer=fo.close: closer() or z.close()
return fo
z = ZipFile(infile)
if filename is None:
filename = z.namelist()[0]
return z.open(filename, mode="r", **kwargs)
register_compression("zip", unzip, "zip")
register_compression("bz2", BZ2File, "bz2")
register_compression("gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz")
try:
import lzma
register_compression("lzma", lzma.LZMAFile, "xz")
register_compression("xz", lzma.LZMAFile, "xz", force=True)
except ImportError:
pass
try:
import lzmaffi
register_compression("lzma", lzmaffi.LZMAFile, "xz", force=True)
register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
except ImportError:
pass
class SnappyFile(AbstractBufferedFile):
def __init__(self, infile, mode, **kwargs):
import snappy
self.details = {"size": 999999999} # not true, but OK if we don't seek
super().__init__(fs=None, path="snappy", mode=mode.strip("b") + "b", **kwargs)
self.infile = infile
if "r" in mode:
self.codec = snappy.StreamDecompressor()
else:
self.codec = snappy.StreamCompressor()
def _upload_chunk(self, final=False):
self.buffer.seek(0)
out = self.codec.add_chunk(self.buffer.read())
self.infile.write(out)
return True
def seek(self, loc, whence=0):
raise NotImplementedError("SnappyFile is not seekable")
def seekable(self):
return False
def _fetch_range(self, start, end):
"""Get the specified set of bytes from remote"""
data = self.infile.read(end - start)
return self.codec.decompress(data)
try:
import snappy
snappy.compress
# Snappy may use the .sz file extension, but this is not part of the
# standard implementation.
register_compression("snappy", SnappyFile, [])
except (ImportError, NameError):
pass
try:
import lz4.frame
register_compression("lz4", lz4.frame.open, "lz4")
except ImportError:
pass
try:
import zstandard as zstd
def zstandard_file(infile, mode="rb"):
if "r" in mode:
cctx = zstd.ZstdDecompressor()
return cctx.stream_reader(infile)
else:
cctx = zstd.ZstdCompressor(level=10)
return cctx.stream_writer(infile)
register_compression("zstd", zstandard_file, "zst")
except ImportError:
pass
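# Editor's sketch (not part of the original module): how the registered codecs
# are typically consumed. This assumes fsspec.open() accepts a ``compression``
# argument that is either a key of ``compr`` or "infer" (resolved from the file
# extension via fsspec.utils.compressions); "data.csv.gz" is a placeholder path.
#
#     import fsspec
#     with fsspec.open("data.csv.gz", mode="rt", compression="infer") as f:
#         first_line = f.readline()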
|
tuplex/python/tuplex/utils/framework.py
|
rahulyesantharao/tuplex
| 778 |
69265
|
<gh_stars>100-1000
#!/usr/bin/env python3
#----------------------------------------------------------------------------------------------------------------------#
# #
# Tuplex: Blazing Fast Python Data Science #
# #
# #
# (c) 2017 - 2021, Tuplex team #
# Created by <NAME> first on 8/3/2021 #
# License: Apache 2.0 #
#----------------------------------------------------------------------------------------------------------------------#
# this file contains Framework-specific exceptions
class TuplexException(Exception):
"""Base Exception class on which all Tuplex Framework specific exceptions are based"""
pass
class UDFCodeExtractionError(TuplexException):
"""thrown when UDF code extraction/reflection failed"""
pass
|