max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
corehq/util/workbook_reading/__init__.py | dimagilg/commcare-hq | 471 | 133832 |
<gh_stars>100-1000
from corehq.util.workbook_reading.exceptions import (
CellValueError,
SpreadsheetFileEncrypted,
SpreadsheetFileError,
SpreadsheetFileExtError,
SpreadsheetFileInvalidError,
SpreadsheetFileNotFound,
)
from .datamodels import Cell, Workbook, Worksheet
from .adapters import (
make_worksheet,
open_any_workbook,
open_csv_workbook,
open_xls_workbook,
open_xlsx_workbook,
valid_extensions,
)
__all__ = [
'open_csv_workbook',
'open_xls_workbook',
'open_xlsx_workbook',
'open_any_workbook',
'make_worksheet',
'valid_extensions',
'SpreadsheetFileError',
'SpreadsheetFileExtError',
'SpreadsheetFileInvalidError',
'SpreadsheetFileNotFound',
'SpreadsheetFileEncrypted',
'Workbook',
'Worksheet',
'Cell',
]
tests/ut/python/nn/probability/distribution/test_beta.py | PowerOlive/mindspore | 3,200 | 133842 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Gamma.
"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
from mindspore import context
skip_flag = context.get_context("device_target") != "Ascend"
def test_gamma_shape_error():
"""
Invalid shapes.
"""
with pytest.raises(ValueError):
msd.Gamma([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
def test_type():
with pytest.raises(TypeError):
msd.Gamma([0.], [1.], dtype=dtype.int32)
def test_name():
with pytest.raises(TypeError):
msd.Gamma([0.], [1.], name=1.0)
def test_seed():
with pytest.raises(TypeError):
msd.Gamma([0.], [1.], seed='seed')
def test_concentration1():
with pytest.raises(ValueError):
msd.Gamma([0.], [1.])
with pytest.raises(ValueError):
msd.Gamma([-1.], [1.])
def test_concentration0():
with pytest.raises(ValueError):
msd.Gamma([1.], [0.])
with pytest.raises(ValueError):
msd.Gamma([1.], [-1.])
def test_scalar():
with pytest.raises(TypeError):
msd.Gamma(3., [4.])
with pytest.raises(TypeError):
msd.Gamma([3.], -4.)
def test_arguments():
"""
args passing during initialization.
"""
g = msd.Gamma()
assert isinstance(g, msd.Distribution)
g = msd.Gamma([3.0], [4.0], dtype=dtype.float32)
assert isinstance(g, msd.Distribution)
class GammaProb(nn.Cell):
"""
Gamma distribution: initialize with concentration1/concentration0.
"""
def __init__(self):
super(GammaProb, self).__init__()
self.gamma = msd.Gamma([3.0, 4.0], [1.0, 1.0], dtype=dtype.float32)
def construct(self, value):
prob = self.gamma.prob(value)
log_prob = self.gamma.log_prob(value)
return prob + log_prob
@pytest.mark.skipif(skip_flag, reason="not supported on CPU or GPU")
def test_gamma_prob():
"""
Test probability functions: passing value through construct.
"""
net = GammaProb()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class GammaProb1(nn.Cell):
"""
Gamma distribution: initialize without concentration1/concentration0.
"""
def __init__(self):
super(GammaProb1, self).__init__()
self.gamma = msd.Gamma()
def construct(self, value, concentration1, concentration0):
prob = self.gamma.prob(value, concentration1, concentration0)
log_prob = self.gamma.log_prob(value, concentration1, concentration0)
return prob + log_prob
@pytest.mark.skipif(skip_flag, reason="not supported on CPU or GPU")
def test_gamma_prob1():
"""
Test probability functions: passing concentration1/concentration0, value through construct.
"""
net = GammaProb1()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
concentration1 = Tensor([2.0, 3.0], dtype=dtype.float32)
concentration0 = Tensor([1.0], dtype=dtype.float32)
ans = net(value, concentration1, concentration0)
assert isinstance(ans, Tensor)
class GammaKl(nn.Cell):
"""
Test class: kl_loss of Gamma distribution.
"""
def __init__(self):
super(GammaKl, self).__init__()
self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.g2 = msd.Gamma(dtype=dtype.float32)
def construct(self, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
kl1 = self.g1.kl_loss('Gamma', concentration1_b, concentration0_b)
kl2 = self.g2.kl_loss('Gamma', concentration1_b, concentration0_b, concentration1_a, concentration0_a)
return kl1 + kl2
@pytest.mark.skipif(skip_flag, reason="not supported on CPU or GPU")
def test_kl():
"""
Test kl_loss.
"""
net = GammaKl()
concentration1_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration0_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration1_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
concentration0_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(concentration1_b, concentration0_b, concentration1_a, concentration0_a)
assert isinstance(ans, Tensor)
class GammaCrossEntropy(nn.Cell):
"""
Test class: cross_entropy of Gamma distribution.
"""
def __init__(self):
super(GammaCrossEntropy, self).__init__()
self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.g2 = msd.Gamma(dtype=dtype.float32)
def construct(self, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
h1 = self.g1.cross_entropy('Gamma', concentration1_b, concentration0_b)
h2 = self.g2.cross_entropy('Gamma', concentration1_b, concentration0_b, concentration1_a, concentration0_a)
return h1 + h2
@pytest.mark.skipif(skip_flag, reason="not supported on CPU or GPU")
def test_cross_entropy():
"""
Test cross entropy between Gamma distributions.
"""
net = GammaCrossEntropy()
concentration1_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration0_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration1_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
concentration0_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(concentration1_b, concentration0_b, concentration1_a, concentration0_a)
assert isinstance(ans, Tensor)
class GammaBasics(nn.Cell):
"""
Test class: basic mean/sd function.
"""
def __init__(self):
super(GammaBasics, self).__init__()
self.g = msd.Gamma(np.array([3.0, 4.0]), np.array([4.0, 6.0]), dtype=dtype.float32)
def construct(self):
mean = self.g.mean()
sd = self.g.sd()
mode = self.g.mode()
return mean + sd + mode
@pytest.mark.skipif(skip_flag, reason="not supported on CPU or GPU")
def test_basics():
"""
Test mean/sd/mode/entropy functionality of Gamma.
"""
net = GammaBasics()
ans = net()
assert isinstance(ans, Tensor)
class GammaConstruct(nn.Cell):
"""
Gamma distribution: going through construct.
"""
def __init__(self):
super(GammaConstruct, self).__init__()
self.gamma = msd.Gamma([3.0], [4.0])
self.gamma1 = msd.Gamma()
def construct(self, value, concentration1, concentration0):
prob = self.gamma('prob', value)
prob1 = self.gamma('prob', value, concentration1, concentration0)
prob2 = self.gamma1('prob', value, concentration1, concentration0)
return prob + prob1 + prob2
@pytest.mark.skipif(skip_flag, reason="not supported on CPU or GPU")
def test_gamma_construct():
"""
Test probability function going through construct.
"""
net = GammaConstruct()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
concentration1 = Tensor([0.0], dtype=dtype.float32)
concentration0 = Tensor([1.0], dtype=dtype.float32)
ans = net(value, concentration1, concentration0)
assert isinstance(ans, Tensor)
SimG4CMS/Calo/python/APDSimu_cff.py | ckamtsikis/cmssw | 852 | 133845 |
import FWCore.ParameterSet.Config as cms
def customise(process):
# add ECAL and HCAL specific Geant4 hits objects
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
instanceLabel = cms.untracked.string('EcalValidInfo'),
type = cms.string('EcalSimHitsValidProducer'),
verbose = cms.untracked.bool(False)
))
# use directly the generator output, no Hector
process.g4SimHits.Generator.HepMCProductLabel = cms.string('generatorSmeared')
# user schedule: use only calorimeters digitization and local reconstruction
process.g4SimHits.ECalSD.StoreSecondary = True
process.g4SimHits.CaloTrkProcessing.PutHistory = True
process.simEcalUnsuppressedDigis.apdAddToBarrel = True
return(process)
core/ignore.py | RandomRhythm/maltrail | 4,758 | 133846 |
#!/usr/bin/env python
"""
Copyright (c) 2014-2021 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
# simple ignore rule mechanism configured by file 'misc/ignore_event.txt' and/or user defined `USER_IGNORELIST`
import re
from core.settings import config
from core.settings import IGNORE_EVENTS
def ignore_event(event_tuple):
retval = False
_, _, src_ip, src_port, dst_ip, dst_port, _, _, _, _, _ = event_tuple
if config.IGNORE_EVENTS_REGEX and re.search(config.IGNORE_EVENTS_REGEX, repr(event_tuple), re.I):
retval = True
for ignore_src_ip, ignore_src_port, ignore_dst_ip, ignore_dst_port in IGNORE_EVENTS:
if ignore_src_ip != '*' and ignore_src_ip != src_ip:
continue
if ignore_src_port != '*' and ignore_src_port != str(src_port):
continue
if ignore_dst_ip != '*' and ignore_dst_ip != dst_ip:
continue
if ignore_dst_port != '*' and ignore_dst_port != str(dst_port):
continue
retval = True
break
if retval and config.SHOW_DEBUG:
print("[i] ignore_event src_ip=%s, src_port=%s, dst_ip=%s, dst_port=%s" % (src_ip, src_port, dst_ip, dst_port))
return retval
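# Usage sketch: the tuple below follows the 11-field layout unpacked in ignore_event()
# above; all sample values are hypothetical, and a meaningful result assumes maltrail's
# configuration (core.settings) has already been initialized.
if __name__ == "__main__":
    sample_event = (
        "1609459200", "sensor1",        # timestamp and sensor name (placeholders)
        "192.168.1.10", 54321,          # src_ip, src_port
        "203.0.113.5", 80,              # dst_ip, dst_port
        "TCP", "IP", "example trail",   # protocol, type and trail (placeholders)
        "info", "reference",            # remaining fields (placeholders)
    )
    print(ignore_event(sample_event))   # True only if the regex or a rule in IGNORE_EVENTS matches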
dev/test/genrnd.py | Cam2337/snap-python | 242 | 133851 |
<reponame>Cam2337/snap-python<filename>dev/test/genrnd.py
import random
import sys
sys.path.append("../swig")
import snap
def GenRndGnm():
Graph = snap.GenRndGnm_PNGraph(1000,10000)
print "Graph", str(type(Graph)), Graph.GetNodes(), Graph.GetEdges()
# save the graph
FName = "test2.graph"
print "Save", FName
FOut = snap.TFOut(snap.TStr(FName))
Graph.Save(FOut)
FOut.Flush()
# load the graph
print "Read", FName
FIn = snap.TFIn(snap.TStr(FName))
#Graph2 = snap.TNGraph(FIn)
#Graph2 = snap.TNGraph.Load(FIn)
Graph2 = snap.PNGraph.New()
print "Graph2", str(type(Graph2))
print str(dir(Graph2))
Graph2.Load(FIn)
#Graph2 = snap.Load_PNGraph(FIn)
#print "Read end", FName
#print "Graph2", str(type(Graph2)), Graph2.GetNodes(), Graph2.GetEdges()
if __name__ == '__main__':
print "----- GenRndGnm -----"
GenRndGnm()
ambari-common/src/main/python/ambari_commons/os_windows.py | likenamehaojie/Apache-Ambari-ZH | 1,664 | 133856 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import getpass
import os
import random
import shlex
from ambari_commons import subprocess32
import sys
import tempfile
import time
import string
import ctypes
import msvcrt
import pywintypes
import win32api
import win32con
import win32event
import win32file
import win32net
import win32netcon
import win32process
import win32security
import win32service
import win32serviceutil
import winerror
import winioctlcon
import wmi
from ambari_commons.exceptions import FatalException
from ambari_commons.logging_utils import print_info_msg, print_warning_msg
SERVICE_STATUS_UNKNOWN = "unknown"
SERVICE_STATUS_STARTING = "starting"
SERVICE_STATUS_RUNNING = "running"
SERVICE_STATUS_STOPPING = "stopping"
SERVICE_STATUS_STOPPED = "stopped"
SERVICE_STATUS_NOT_INSTALLED = "not installed"
WHOAMI_GROUPS = "whoami /groups"
ADMIN_ACCOUNT = "BUILTIN\\Administrators"
#
# os.symlink is not implemented in Windows. Patch it.
#
__CSL = None
def symlink(source, link_name):
'''symlink(source, link_name)
Creates a symbolic link pointing to source named link_name'''
global __CSL
if __CSL is None:
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
__CSL = csl
flags = 0
if source is not None and os.path.isdir(source):
flags = 1
if __CSL(link_name, source, flags) == 0:
raise ctypes.WinError()
os.symlink = symlink
# Win32file doesn't seem to have this attribute.
FILE_ATTRIBUTE_REPARSE_POINT = 1024
# To make things easier.
REPARSE_FOLDER = (win32file.FILE_ATTRIBUTE_DIRECTORY | FILE_ATTRIBUTE_REPARSE_POINT)
# For the parse_reparse_buffer function
SYMBOLIC_LINK = 'symbolic'
MOUNTPOINT = 'mountpoint'
GENERIC = 'generic'
def islink(fpath):
""" Windows islink implementation. """
if win32file.GetFileAttributes(fpath) & REPARSE_FOLDER == REPARSE_FOLDER:
return True
return False
os.path.islink = islink
def _parse_reparse_buffer(original, reparse_type=SYMBOLIC_LINK):
""" Implementing the below in Python:
typedef struct _REPARSE_DATA_BUFFER {
ULONG ReparseTag;
USHORT ReparseDataLength;
USHORT Reserved;
union {
struct {
USHORT SubstituteNameOffset;
USHORT SubstituteNameLength;
USHORT PrintNameOffset;
USHORT PrintNameLength;
ULONG Flags;
WCHAR PathBuffer[1];
} SymbolicLinkReparseBuffer;
struct {
USHORT SubstituteNameOffset;
USHORT SubstituteNameLength;
USHORT PrintNameOffset;
USHORT PrintNameLength;
WCHAR PathBuffer[1];
} MountPointReparseBuffer;
struct {
UCHAR DataBuffer[1];
} GenericReparseBuffer;
} DUMMYUNIONNAME;
} REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;
"""
# Size of our data types
SZULONG = 4 # sizeof(ULONG)
SZUSHORT = 2 # sizeof(USHORT)
# Our structure.
# Probably a better way to iterate a dictionary in a particular order,
# but I was in a hurry, unfortunately, so I used pkeys.
buffer = {
'tag' : SZULONG,
'data_length' : SZUSHORT,
'reserved' : SZUSHORT,
SYMBOLIC_LINK : {
'substitute_name_offset' : SZUSHORT,
'substitute_name_length' : SZUSHORT,
'print_name_offset' : SZUSHORT,
'print_name_length' : SZUSHORT,
'flags' : SZULONG,
'buffer' : u'',
'pkeys' : [
'substitute_name_offset',
'substitute_name_length',
'print_name_offset',
'print_name_length',
'flags',
]
},
MOUNTPOINT : {
'substitute_name_offset' : SZUSHORT,
'substitute_name_length' : SZUSHORT,
'print_name_offset' : SZUSHORT,
'print_name_length' : SZUSHORT,
'buffer' : u'',
'pkeys' : [
'substitute_name_offset',
'substitute_name_length',
'print_name_offset',
'print_name_length',
]
},
GENERIC : {
'pkeys' : [],
'buffer': ''
}
}
# Header stuff
buffer['tag'] = original[:SZULONG]
buffer['data_length'] = original[SZULONG:SZULONG + SZUSHORT]
buffer['reserved'] = original[SZULONG + SZUSHORT:SZULONG + 2 * SZUSHORT]
original = original[8:]
# Parsing
k = reparse_type
for c in buffer[k]['pkeys']:
if type(buffer[k][c]) == int:
sz = buffer[k][c]
bytes = original[:sz]
buffer[k][c] = 0
for b in bytes:
n = ord(b)
if n:
buffer[k][c] += n
original = original[sz:]
# Using the offset and length's grabbed, we'll set the buffer.
buffer[k]['buffer'] = original
return buffer
def readlink(fpath):
""" Windows readlink implementation. """
# This wouldn't return true if the file didn't exist, as far as I know.
if not islink(fpath):
return None
try:
# Open the file correctly depending on the string type.
if type(fpath) == unicode:
handle = win32file.CreateFileW(fpath, win32file.GENERIC_READ, 0, None, win32file.OPEN_EXISTING, win32file.FILE_FLAG_OPEN_REPARSE_POINT | win32file.FILE_FLAG_BACKUP_SEMANTICS, 0)
else:
handle = win32file.CreateFile(fpath, win32file.GENERIC_READ, 0, None, win32file.OPEN_EXISTING, win32file.FILE_FLAG_OPEN_REPARSE_POINT | win32file.FILE_FLAG_BACKUP_SEMANTICS, 0)
# MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16384 = (16*1024)
buffer = win32file.DeviceIoControl(handle, winioctlcon.FSCTL_GET_REPARSE_POINT, None, 16*1024)
# Above will return an ugly string (byte array), so we'll need to parse it.
# But first, we'll close the handle to our file so we're not locking it anymore.
win32file.CloseHandle(handle)
# Minimum possible length (assuming that the length of the target is bigger than 0)
if len(buffer) < 9:
return None
# Parse and return our result.
result = _parse_reparse_buffer(buffer)
offset = result[SYMBOLIC_LINK]['substitute_name_offset']
ending = offset + result[SYMBOLIC_LINK]['substitute_name_length']
rpath = result[SYMBOLIC_LINK]['buffer'][offset:ending].replace('\x00','')
if len(rpath) > 4 and rpath[0:4] == '\\??\\':
rpath = rpath[4:]
return rpath
except pywintypes.error, e:
raise OSError(e.winerror, e.strerror, fpath)
os.readlink = readlink
class OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
('dwMajorVersion', ctypes.c_ulong),
('dwMinorVersion', ctypes.c_ulong),
('dwBuildNumber', ctypes.c_ulong),
('dwPlatformId', ctypes.c_ulong),
('szCSDVersion', ctypes.c_wchar*128),
('wServicePackMajor', ctypes.c_ushort),
('wServicePackMinor', ctypes.c_ushort),
('wSuiteMask', ctypes.c_ushort),
('wProductType', ctypes.c_byte),
('wReserved', ctypes.c_byte)]
def get_windows_version():
"""
Gets the OS major, minor and build versions. Returns a tuple of
(OS_MAJOR, OS_MINOR, OS_BUILD).
"""
os_version = OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
if retcode != 0:
raise Exception("Failed to get OS version")
return os_version.dwMajorVersion, os_version.dwMinorVersion, os_version.dwBuildNumber
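# For reference, a hypothetical caller would unpack all three values, e.g.
#   major, minor, build = get_windows_version()
#   print_info_msg("Running on Windows %d.%d (build %d)" % (major, minor, build))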
CHECK_FIREWALL_SCRIPT = """[string]$CName = $env:computername
$reg = [Microsoft.Win32.RegistryKey]::OpenRemoteBaseKey("LocalMachine",$CName)
$domain = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\DomainProfile").GetValue("EnableFirewall")
$standard = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\StandardProfile").GetValue("EnableFirewall")
$public = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\PublicProfile").GetValue("EnableFirewall")
Write-Host $domain
Write-Host $standard
Write-Host $public
"""
def _create_tmp_files():
out_file = tempfile.TemporaryFile(mode="r+b")
err_file = tempfile.TemporaryFile(mode="r+b")
return (msvcrt.get_osfhandle(out_file.fileno()),
msvcrt.get_osfhandle(err_file.fileno()),
out_file,
err_file)
def _get_files_output(out, err):
out.seek(0)
err.seek(0)
return out.read().strip(), err.read().strip()
def _safe_duplicate_handle(h):
try:
h = win32api.DuplicateHandle(win32process.GetCurrentProcess(),
h,
win32process.GetCurrentProcess(),
0,
True,
win32con.DUPLICATE_SAME_ACCESS)
return True, h
except Exception as exc:
if exc.winerror == winerror.ERROR_INVALID_HANDLE:
return True, None
return False, None
def run_os_command_impersonated(cmd, user, password, domain='.'):
si = win32process.STARTUPINFO()
out_handle, err_handle, out_file, err_file = _create_tmp_files()
ok, si.hStdInput = _safe_duplicate_handle(win32api.GetStdHandle(win32api.STD_INPUT_HANDLE))
if not ok:
raise Exception("Unable to create StdInput for child process")
ok, si.hStdOutput = _safe_duplicate_handle(out_handle)
if not ok:
raise Exception("Unable to create StdOut for child process")
ok, si.hStdError = _safe_duplicate_handle(err_handle)
if not ok:
raise Exception("Unable to create StdErr for child process")
si.dwFlags = win32process.STARTF_USESTDHANDLES
si.lpDesktop = ""
user_token = win32security.LogonUser(user, domain, password,
win32con.LOGON32_LOGON_SERVICE, win32con.LOGON32_PROVIDER_DEFAULT)
primary_token = win32security.DuplicateTokenEx(user_token,
win32security.SecurityImpersonation, 0, win32security.TokenPrimary)
info = win32process.CreateProcessAsUser(primary_token, None, cmd, None, None, 1, 0, None, None, si)
hProcess, hThread, dwProcessId, dwThreadId = info
hThread.Close()
try:
win32event.WaitForSingleObject(hProcess, win32event.INFINITE)
except KeyboardInterrupt:
pass
out, err = _get_files_output(out_file, err_file)
exitcode = win32process.GetExitCodeProcess(hProcess)
return exitcode, out, err
def os_run_os_command(cmd, env=None, shell=False, cwd=None):
if isinstance(cmd,basestring):
cmd = cmd.replace("\\", "\\\\")
cmd = shlex.split(cmd)
process = subprocess32.Popen(cmd,
stdout=subprocess32.PIPE,
stdin=subprocess32.PIPE,
stderr=subprocess32.PIPE,
env=env,
cwd=cwd,
shell=shell
)
(stdoutdata, stderrdata) = process.communicate()
return process.returncode, stdoutdata, stderrdata
# execute powershell script passed in script_content. Script will be in temporary file to avoid different escape
# and formatting problems.
def run_powershell_script(script_content):
tmp_dir = tempfile.gettempdir()
random_filename = ''.join(random.choice(string.lowercase) for i in range(10))
script_file = open(os.path.join(tmp_dir,random_filename+".ps1"),"w")
script_file.write(script_content)
script_file.close()
result = os_run_os_command("powershell -ExecutionPolicy unrestricted -File {0}".format(script_file.name))
os.remove(script_file.name)
return result
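# Hypothetical example: the firewall check script above writes one value per profile,
# so its output could be consumed roughly as follows (the parsing shown is an assumption):
#   retcode, out, err = run_powershell_script(CHECK_FIREWALL_SCRIPT)
#   if retcode == 0:
#       domain_on, standard_on, public_on = [v.strip() == "1" for v in out.splitlines()[:3]]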
def os_change_owner(filePath, user, recursive):
cmd = ['icacls', filePath, '/setowner', user]
if recursive:
cmd = ['icacls', filePath, '/t', '/setowner', user]
retcode, outdata, errdata = os_run_os_command(cmd)
return retcode
def os_is_root():
'''
Checks whether the current user is a member of the Administrators group
Returns True if yes, otherwise False
'''
retcode, out, err = os_run_os_command(WHOAMI_GROUPS)
if retcode != 0:
err_msg = "Unable to check the current user's group memberships. " \
"Command {0} returned exit code {1} with message: {2}".format(WHOAMI_GROUPS, retcode, err)
print_warning_msg(err_msg)
raise FatalException(retcode, err_msg)
#Check for Administrators group membership
if -1 != out.find('\n' + ADMIN_ACCOUNT):
return True
return False
def os_set_file_permissions(file, mod, recursive, user):
retcode = 0
#WARN_MSG = "Command {0} returned exit code {1} with message: {2}"
#if recursive:
# params = " -R "
#else:
# params = ""
#command = NR_CHMOD_CMD.format(params, mod, file)
#retcode, out, err = os_run_os_command(command)
#if retcode != 0:
# print_warning_msg(WARN_MSG.format(command, file, err))
#command = NR_CHOWN_CMD.format(params, user, file)
#retcode, out, err = os_run_os_command(command)
#if retcode != 0:
# print_warning_msg(WARN_MSG.format(command, file, err))
# rights = mod
# acls_remove_cmd = "icacls {0} /remove {1}".format(file, user)
# retcode, out, err = os_run_os_command(acls_remove_cmd)
# if retcode == 0:
# acls_modify_cmd = "icacls {0} /grant {1}:{2}".format(file, user, rights)
# retcode, out, err = os_run_os_command(acls_modify_cmd)
return retcode
def os_set_open_files_limit(maxOpenFiles):
# No open files limit in Windows. Not messing around with the System Resource Manager, at least for now.
pass
def os_getpass(prompt, stream=None):
"""Prompt for password with echo off, using Windows getch()."""
if sys.stdin is not sys.__stdin__:
return getpass.fallback_getpass(prompt, stream)
for c in prompt:
msvcrt.putch(c)
pw = ""
while True:
c = msvcrt.getch()
if c == '\r' or c == '\n':
break
if c == '\003':
raise KeyboardInterrupt
if c == '\b':
if pw == '':
pass
else:
pw = pw[:-1]
msvcrt.putch('\b')
msvcrt.putch(" ")
msvcrt.putch('\b')
else:
pw = pw + c
msvcrt.putch("*")
msvcrt.putch('\r')
msvcrt.putch('\n')
return pw
#[fbarca] Not used for now, keep it around just in case
def wait_for_pid_wmi(processName, parentPid, pattern, timeout):
"""
Check pid for existence during timeout
"""
tstart = time.time()
pid_live = 0
c = wmi.WMI(find_classes=False)
qry = "select * from Win32_Process where Name=\"%s\" and ParentProcessId=%d" % (processName, parentPid)
while int(time.time() - tstart) <= timeout:
for proc in c.query(qry):
cmdLine = proc.CommandLine
if cmdLine is not None and pattern in cmdLine:
return pid_live
time.sleep(1)
return 0
# Needed for redirecting output from the Python process to a file
class SyncStreamWriter(object):
def __init__(self, stream, hMutexWrite):
self.stream = stream
self.hMutexWrite = hMutexWrite
def write(self, data):
#Ensure that the output is thread-safe when writing from 2 separate streams into the same file
# (typical when redirecting both stdout and stderr to the same file).
win32event.WaitForSingleObject(self.hMutexWrite, win32event.INFINITE)
try:
self.stream.write(data)
self.stream.flush()
finally:
win32event.ReleaseMutex(self.hMutexWrite)
def __getattr__(self, attr):
return getattr(self.stream, attr)
class SvcStatusCallback(object):
def __init__(self, svc):
self.svc = svc
def reportStartPending(self):
self.svc.ReportServiceStatus(win32service.SERVICE_START_PENDING)
def reportStarted(self):
self.svc.ReportServiceStatus(win32service.SERVICE_RUNNING)
def reportStopPending(self):
self.svc.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
def reportStopped(self):
self.svc.ReportServiceStatus(win32service.SERVICE_STOPPED)
class WinServiceController:
@staticmethod
def Start(serviceName, waitSecs=30):
err = 0
msg = ''
try:
win32serviceutil.StartService(serviceName)
if waitSecs:
win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
except win32service.error, exc:
if exc.winerror != 1056:
msg = "Error starting service: %s" % exc.strerror
err = exc.winerror
return err, msg
@staticmethod
def Stop(serviceName, waitSecs=30):
err = 0
msg = ''
try:
if waitSecs:
win32serviceutil.StopServiceWithDeps(serviceName, waitSecs=waitSecs)
else:
win32serviceutil.StopService(serviceName)
if waitSecs:
win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_STOPPED, waitSecs)
except win32service.error, exc:
if exc.winerror != 1062:
msg = "Error stopping service: %s (%d)" % (exc.strerror, exc.winerror)
err = exc.winerror
return err, msg
@staticmethod
def QueryStatus(serviceName):
statusString = SERVICE_STATUS_UNKNOWN
try:
status = win32serviceutil.QueryServiceStatus(serviceName)[1]
if status == win32service.SERVICE_STOPPED:
statusString = SERVICE_STATUS_STOPPED
elif status == win32service.SERVICE_START_PENDING:
statusString = SERVICE_STATUS_STARTING
elif status == win32service.SERVICE_RUNNING:
statusString = SERVICE_STATUS_RUNNING
elif status == win32service.SERVICE_STOP_PENDING:
statusString = SERVICE_STATUS_STOPPING
except win32api.error:
statusString = SERVICE_STATUS_NOT_INSTALLED
pass
return statusString
@staticmethod
def EnsureServiceIsStarted(serviceName, waitSecs=30):
err = 0
try:
status = win32serviceutil.QueryServiceStatus(serviceName)[1]
if win32service.SERVICE_RUNNING != status:
if win32service.SERVICE_START_PENDING != status:
win32serviceutil.StartService(serviceName)
if waitSecs:
win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
except win32service.error, exc:
if exc.winerror != 1056:
err = exc.winerror
return err
class WinService(win32serviceutil.ServiceFramework):
# _svc_name_ = The service name
# _svc_display_name_ = The service display name
# _svc_description_ = The service description
_heventSvcStop = win32event.CreateEvent(None, 1, 0, None)
_hmtxOut = win32event.CreateMutex(None, False, None) #[fbarca] Python doesn't support critical sections
def __init__(self, *args):
win32serviceutil.ServiceFramework.__init__(self, *args)
def SvcDoRun(self):
try:
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.ServiceMain()
except Exception, x:
#TODO: Log exception
self.SvcStop()
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(WinService._heventSvcStop)
# Service code entry point. Override it to implement the intended functionality.
def ServiceMain(self):
#Default implementation, does nothing.
win32event.WaitForSingleObject(WinService._heventSvcStop, win32event.INFINITE)
pass
@staticmethod
def DefCtrlCHandler():
print_info_msg("Ctrl+C handler invoked. Stopping.")
win32event.SetEvent(WinService._heventSvcStop)
pass
#username domain\\username : The Username the service is to run under
#password password : The password for the username
#startup [manual|auto|disabled|delayed] : How the service starts, default = auto
#interactive : Allow the service to interact with the desktop.
#perfmonini file: .ini file to use for registering performance monitor data
#perfmondll file: .dll file to use when querying the service for performance data, default = perfmondata.dll
@classmethod
def Install(cls, classPath = None, startupMode = "auto", username = None, password = None, interactive = False,
perfMonIni = None, perfMonDll = None):
installArgs = [sys.argv[0], "--startup=" + startupMode]
if username is not None and username:
if username.find('\\') == -1:
username = '.\\' + username
installArgs.append("--username=" + username)
if password is not None and password:
installArgs.append("--password=" + password)
if interactive:
installArgs.append("--interactive")
if perfMonIni is not None and perfMonIni:
installArgs.append("--perfmonini=" + perfMonIni)
if perfMonDll is not None and perfMonDll:
installArgs.append("--perfmondll=" + perfMonDll)
installArgs.append("install")
return win32serviceutil.HandleCommandLine(cls, classPath, installArgs)
@classmethod
def Start(cls, waitSecs = 30):
return WinServiceController.Start(cls._svc_name_, waitSecs)
@classmethod
def Stop(cls, waitSecs = 30):
return WinServiceController.Stop(cls._svc_name_, waitSecs)
@classmethod
def QueryStatus(cls):
return WinServiceController.QueryStatus(cls._svc_name_)
@classmethod
def set_ctrl_c_handler(cls, ctrlHandler):
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
pass
def _RedirectOutputStreamsToFile(self, outFilePath):
outFileDir = os.path.dirname(outFilePath)
if not os.path.exists(outFileDir):
os.makedirs(outFileDir)
out_writer = SyncStreamWriter(file(outFilePath, "w"), self._hmtxOut)
sys.stderr = out_writer
sys.stdout = out_writer
pass
def CheckForStop(self):
#Check for stop event to be signaled
return win32event.WAIT_OBJECT_0 == win32event.WaitForSingleObject(WinService._heventSvcStop, 1)
def _StopOrWaitForChildProcessToFinish(self, childProcess):
#Wait for the child process to finish or for the stop event to be signaled
if(win32event.WAIT_OBJECT_0 == win32event.WaitForMultipleObjects([WinService._heventSvcStop, childProcess._handle],
False, win32event.INFINITE)):
# The OS only detaches the child process when the master process exits.
# We must kill it manually.
try:
#Sending signal.CTRL_BREAK_EVENT doesn't work. It only detaches the child process from the master.
# Must brutally terminate the child process. Sorry Java.
childProcess.terminate()
except OSError, e:
print_info_msg("Unable to stop Ambari Server - " + str(e))
return False
return True
class SystemWideLock(object):
def __init__(self, name):
self._mutex = win32event.CreateMutex(None, 0, name)
def lock(self, timeout=0):
result = win32event.WaitForSingleObject(self._mutex, timeout)
if result in [win32event.WAIT_TIMEOUT, win32event.WAIT_ABANDONED, win32event.WAIT_FAILED]:
return False
elif result == win32event.WAIT_OBJECT_0:
return True
def unlock(self):
try:
win32event.ReleaseMutex(self._mutex)
return True
except:
return False
def __del__(self):
win32api.CloseHandle(self._mutex)
class UserHelper(object):
ACTION_OK = 0
USER_EXISTS = 1
ACTION_FAILED = -1
def __init__(self, userName):
self.domainName, self.userName = UserHelper.parse_user_name(userName)
if self.domainName:
self.dcName = win32net.NetGetDCName(None, self.domainName)
else:
self.dcName = None
self._policy = win32security.LsaOpenPolicy(self.dcName,
win32security.POLICY_CREATE_ACCOUNT | win32security.POLICY_LOOKUP_NAMES)
@staticmethod
def parse_user_name(userName, defDomain=None):
domainName = defDomain
domainSepIndex = userName.find('\\')
if domainSepIndex != -1:
domainName = userName[0:domainSepIndex]
userName = userName[domainSepIndex + 1:]
if not domainName or domainName == '.' or domainName == win32api.GetComputerName():
domainName = defDomain
return (domainName, userName)
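# For example (assuming "EXAMPLE" is not the local computer name):
#   parse_user_name("EXAMPLE\\ambari") -> ("EXAMPLE", "ambari")
#   parse_user_name("ambari")          -> (None, "ambari")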
def create_user(self, password, comment="Ambari user"):
user_info = {}
user_info['name'] = self.userName
user_info['password'] = password
user_info['priv'] = win32netcon.USER_PRIV_USER
user_info['comment'] = comment
user_info['flags'] = win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT
try:
win32net.NetUserAdd(self.dcName, 1, user_info)
except pywintypes.error as e:
if e.winerror == 2224:
return UserHelper.USER_EXISTS, e.strerror
else:
return UserHelper.ACTION_FAILED, e.strerror
return UserHelper.ACTION_OK, "User created."
def find_user(self):
try:
user_info = win32net.NetUserGetInfo(self.dcName, self.userName, 0)
except pywintypes.error as e:
if e.winerror == 2221:
return False
else:
raise
return True
def add_user_privilege(self, privilege):
try:
acc_sid = win32security.LookupAccountName(self.dcName, self.userName)[0]
win32security.LsaAddAccountRights(self._policy, acc_sid, (privilege,))
except pywintypes.error as e:
return UserHelper.ACTION_FAILED, e.strerror
return UserHelper.ACTION_OK, "Privilege added."
def remove_user_privilege(self, name, privilege):
try:
acc_sid = win32security.LookupAccountName(self.dcName, self.userName)[0]
win32security.LsaRemoveAccountRights(self._policy, acc_sid, 0, (privilege,))
except pywintypes.error as e:
return UserHelper.ACTION_FAILED, e.strerror
return UserHelper.ACTION_OK, "Privilege removed."
raft/node.py | shunliz/raft | 113 | 133876 |
<filename>raft/node.py
import os
import json
import time
import random
import logging
from .log import Log
from .rpc import Rpc
from .config import config
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s %(levelname)s %(name)s %(funcName)s [line:%(lineno)d] %(message)s')
logger = logging.getLogger(__name__)
logger.propagate = False
env = os.environ.get("env")
conf = config[env] if env else config['DEV']
class Node(object):
"""
raft node
"""
def __init__(self, meta):
self.role = 'follower'
self.group_id = meta['group_id']
self.id = meta['id']
self.addr = meta['addr']
self.peers = meta['peers']
self.path = conf.node_path
if not os.path.exists(self.path):
os.makedirs(self.path)
# persistent state
self.current_term = 0
self.voted_for = None
# init persistent state
self.load()
logname = self.path+self.group_id + '_' + self.id + "_log.json"
self.log = Log(logname)
# volatile state
# rule 1, 2
self.commit_index = 0
self.last_applied = 0
# volatile state on leaders
# rule 1, 2
self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
self.match_index = {_id: -1 for _id in self.peers}
# append entries
self.leader_id = None
# request vote
self.vote_ids = {_id: 0 for _id in self.peers}
# client request
self.client_addr = None
# tick
self.wait_ms = (10, 20)
self.next_leader_election_time = time.time() + random.randint(*self.wait_ms)
self.next_heartbeat_time = 0
# rpc
self.rpc_endpoint = Rpc(self.addr, timeout=2)
# log
fmt = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(funcName)s [line:%(lineno)d] %(message)s')
handler = logging.FileHandler(self.path + self.group_id + '_' + self.id + '.log', 'a')
handler.setFormatter(fmt)
logger.addHandler(handler)
def load(self):
filename = self.path + self.group_id + "_" + self.id + '_persistent.json'
if os.path.exists(filename):
with open(filename, 'r') as f:
data = json.load(f)
self.current_term = data['current_term']
self.voted_for = data['voted_for']
else:
self.save()
def save(self):
data = {'current_term': self.current_term,
'voted_for': self.voted_for,
}
filename = self.path + self.group_id + "_" + self.id + '_persistent.json'
with open(filename, 'w') as f:
json.dump(data, f, indent=4)
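# With the defaults above, the persisted file is plain JSON of the form
# (values are illustrative):
#   {
#       "current_term": 0,
#       "voted_for": null
#   }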
def redirect(self, data, addr):
if data == None:
return None
if data['type'] == 'client_append_entries':
if self.role != 'leader':
if self.leader_id:
logger.info('redirect: client_append_entries to leader')
self.rpc_endpoint.send(data, self.peers[self.leader_id])
return None
else:
self.client_addr = (addr[0], conf.cport)
# logger.info("client addr " + self.client_addr[0] +'_' +str(self.client_addr[1]))
return data
if data['dst_id'] != self.id:
logger.info('redirect: to ' + data['dst_id'])
# logger.info('redirec to leader')
self.rpc_endpoint.send(data, self.peers[data['dst_id']])
return None
else:
return data
return data
def append_entries(self, data):
'''
append entries rpc
only used in follower state
'''
response = {'type': 'append_entries_response',
'src_id': self.id,
'dst_id': data['src_id'],
'term': self.current_term,
'success': False
}
# append_entries: rule 1
if data['term'] < self.current_term:
logger.info(' 2. smaller term')
logger.info(' 3. success = False: smaller term')
logger.info(' 4. send append_entries_response to leader ' + data['src_id'])
response['success'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
return
self.leader_id = data['leader_id']
# heartbeat
if data['entries'] == []:
logger.info(' 4. heartbeat')
return
prev_log_index = data['prev_log_index']
prev_log_term = data['prev_log_term']
tmp_prev_log_term = self.log.get_log_term(prev_log_index)
# append_entries: rule 2, 3
# append_entries: rule 3
if tmp_prev_log_term != prev_log_term:
logger.info(' 4. success = False: index not match or term not match')
logger.info(' 5. send append_entries_response to leader ' + data['src_id'])
logger.info(' 6. log delete_entries')
logger.info(' 6. log save')
response['success'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
self.log.delete_entries(prev_log_index)
# append_entries rule 4
else:
logger.info(' 4. success = True')
logger.info(' 5. send append_entries_response to leader ' + data['src_id'])
logger.info(' 6. log append_entries')
logger.info(' 7. log save')
response['success'] = True
self.rpc_endpoint.send(response, self.peers[data['src_id']])
self.log.append_entries(prev_log_index, data['entries'])
# append_entries rule 5
leader_commit = data['leader_commit']
if leader_commit > self.commit_index:
commit_index = min(leader_commit, self.log.last_log_index)
self.commit_index = commit_index
logger.info(' 8. commit_index = ' + str(commit_index))
return
def request_vote(self, data):
'''
request vote rpc
only used in follower state
'''
response = {'type': 'request_vote_response',
'src_id': self.id,
'dst_id': data['src_id'],
'term': self.current_term,
'vote_granted': False
}
# request vote: rule 1
if data['term'] < self.current_term:
logger.info(' 2. smaller term')
logger.info(' 3. success = False')
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
response['vote_granted'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
return
logger.info(' 2. same term')
candidate_id = data['candidate_id']
last_log_index = data['last_log_index']
last_log_term = data['last_log_term']
if self.voted_for == None or self.voted_for == candidate_id:
if last_log_index >= self.log.last_log_index and last_log_term >= self.log.last_log_term:
self.voted_for = data['src_id']
self.save()
response['vote_granted'] = True
self.rpc_endpoint.send(response, self.peers[data['src_id']])
logger.info(' 3. success = True: candidate log is newer')
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
else:
self.voted_for = None
self.save()
response['vote_granted'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
logger.info(' 3. success = False: candidate log is older')
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
else:
response['vote_granted'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
logger.info(' 3. success = False: has voted for ' + self.voted_for)
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
return
def all_do(self, data):
'''
all servers: rule 1, 2
'''
logger.info('-------------------------------all------------------------------------------')
t = time.time()
if self.commit_index > self.last_applied:
self.last_applied = self.commit_index
logger.info('all: 1. last_applied = ' + str(self.last_applied))
if data == None:
return
if data['type'] == 'client_append_entries':
return
if data['term'] > self.current_term:
logger.info( f'all: 1. bigger term: { data["term"]} > {self.current_term}' )
logger.info(' 2. become follower')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'follower'
self.current_term = data['term']
self.voted_for = None
self.save()
return
def follower_do(self, data):
'''
rules for servers: follower
'''
logger.info('-------------------------------follower-------------------------------------')
t = time.time()
# follower rules: rule 1
if data != None:
if data['type'] == 'append_entries':
logger.info('follower: 1. recv append_entries from leader ' + data['src_id'])
if data['term'] == self.current_term:
logger.info(' 2. same term')
logger.info(' 3. reset next_leader_election_time')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.append_entries(data)
elif data['type'] == 'request_vote':
logger.info('follower: 1. recv request_vote from candidate ' + data['src_id'])
self.request_vote(data)
# follower rules: rule 2
if t > self.next_leader_election_time:
logger.info('follower:1. become candidate')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'candidate'
self.current_term += 1
self.voted_for = self.id
self.save()
self.vote_ids = {_id: 0 for _id in self.peers}
return
def candidate_do(self, data):
'''
rules for servers: candidate
'''
logger.info('-------------------------------candidate------------------------------------')
t = time.time()
# candidate rules: rule 1
for dst_id in self.peers:
if self.vote_ids[dst_id] == 0:
logger.info('candidate: 1. send request_vote to peer ' + dst_id)
request = {
'type': 'request_vote',
'src_id': self.id,
'dst_id': dst_id,
'term': self.current_term,
'candidate_id': self.id,
'last_log_index': self.log.last_log_index,
'last_log_term': self.log.last_log_term
}
# logger.info(request)
self.rpc_endpoint.send(request, self.peers[dst_id])
# if data != None and data['term'] < self.current_term:
# logger.info('candidate: 1. smaller term from ' + data['src_id'])
# logger.info(' 2. ignore')
# return
if data != None and data['term'] == self.current_term:
# candidate rules: rule 2
if data['type'] == 'request_vote_response':
logger.info('candidate: 1. recv request_vote_response from follower ' + data['src_id'])
self.vote_ids[data['src_id']] = data['vote_granted']
vote_count = sum(list(self.vote_ids.values()))
if vote_count >= len(self.peers)//2:
logger.info(' 2. become leader')
self.role = 'leader'
self.voted_for = None
self.save()
self.next_heartbeat_time = 0
self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
self.match_index = {_id: 0 for _id in self.peers}
return
# candidate rules: rule 3
elif data['type'] == 'append_entries':
logger.info('candidate: 1. recv append_entries from leader ' + data['src_id'])
logger.info(' 2. become follower')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'follower'
self.voted_for = None
self.save()
return
# candidate rules: rule 4
if t > self.next_leader_election_time:
logger.info('candidate: 1. leader_election timeout')
logger.info(' 2. become candidate')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'candidate'
self.current_term += 1
self.voted_for = self.id
self.save()
self.vote_ids = {_id: 0 for _id in self.peers}
return
def leader_do(self, data):
'''
rules for servers: leader
'''
logger.info('-------------------------------leader---------------------------------------')
# leader rules: rule 1, 3
t = time.time()
if t > self.next_heartbeat_time:
self.next_heartbeat_time = t + random.randint(0, 5)
for dst_id in self.peers:
logger.info('leader:1. send append_entries to peer ' + dst_id)
request = {'type': 'append_entries',
'src_id': self.id,
'dst_id': dst_id,
'term': self.current_term,
'leader_id': self.id,
'prev_log_index': self.next_index[dst_id] - 1,
'prev_log_term': self.log.get_log_term(self.next_index[dst_id] - 1),
'entries': self.log.get_entries(self.next_index[dst_id]),
'leader_commit': self.commit_index
}
self.rpc_endpoint.send(request, self.peers[dst_id])
# leader rules: rule 2
if data != None and data['type'] == 'client_append_entries':
data['term'] = self.current_term
self.log.append_entries(self.log.last_log_index, [data])
logger.info('leader:1. recv append_entries from client')
logger.info(' 2. log append_entries')
logger.info(' 3. log save')
return
# leader rules: rule 3.1, 3.2
if data != None and data['term'] == self.current_term:
if data['type'] == 'append_entries_response':
logger.info('leader:1. recv append_entries_response from follower ' + data['src_id'])
if data['success'] == False:
self.next_index[data['src_id']] -= 1
logger.info(' 2. success = False')
logger.info(' 3. next_index - 1')
else:
self.match_index[data['src_id']] = self.next_index[data['src_id']]
self.next_index[data['src_id']] = self.log.last_log_index + 1
logger.info(' 2. success = True')
logger.info(' 3. match_index = ' + str(self.match_index[data['src_id']]) + ' next_index = ' + str(self.next_index[data['src_id']]))
# leader rules: rule 4
while True:
N = self.commit_index + 1
count = 0
for _id in self.match_index:
if self.match_index[_id] >= N:
count += 1
if count >= len(self.peers)//2:
self.commit_index = N
logger.info('leader:1. commit + 1')
if self.client_addr:
response = {'index': self.commit_index}
self.rpc_endpoint.send(response, self.client_addr)
break
else:
logger.info('leader:2. commit = ' + str(self.commit_index))
break
def run(self):
# data = {
# "type": "create_node_success",
# "group_id": self.group_id,
# "id": self.id
# }
# self.rpc_endpoint.send(data, (conf.ip, conf.cport))
# data = {
# "type": "create_group_node_success",
# "group_id": self.group_id,
# "id": self.id
# }
# self.rpc_endpoint.send(data, (conf.ip, conf.cport))
while True:
try:
try:
data, addr = self.rpc_endpoint.recv()
except Exception as e:
data, addr = None, None
data = self.redirect(data, addr)
self.all_do(data)
if self.role == 'follower':
self.follower_do(data)
if self.role == 'candidate':
self.candidate_do(data)
if self.role == 'leader':
self.leader_do(data)
except Exception as e:
logger.info(e)
# self.rpc_endpoint.close()
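# Usage sketch (hypothetical): the exact shape of `meta` and the address format depend
# on .config and .rpc, which are not shown here, but a node could plausibly be started as:
#   meta = {
#       "group_id": "g0",
#       "id": "node_1",
#       "addr": ("localhost", 10001),
#       "peers": {"node_2": ("localhost", 10002), "node_3": ("localhost", 10003)},
#   }
#   Node(meta).run()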
pkgs/notebook-4.1.0-py27_1/lib/python2.7/site-packages/notebook/tests/test_files.py | wangyum/anaconda | 652 | 133882 |
# coding: utf-8
"""Test the /files/ handler."""
import io
import os
from unicodedata import normalize
pjoin = os.path.join
import requests
import json
from nbformat import write
from nbformat.v4 import (new_notebook,
new_markdown_cell, new_code_cell,
new_output)
from notebook.utils import url_path_join
from .launchnotebook import NotebookTestBase
from ipython_genutils import py3compat
class FilesTest(NotebookTestBase):
def test_hidden_files(self):
not_hidden = [
u'å b',
u'å b/ç. d',
]
hidden = [
u'.å b',
u'å b/.ç d',
]
dirs = not_hidden + hidden
nbdir = self.notebook_dir.name
for d in dirs:
path = pjoin(nbdir, d.replace('/', os.sep))
if not os.path.exists(path):
os.mkdir(path)
with open(pjoin(path, 'foo'), 'w') as f:
f.write('foo')
with open(pjoin(path, '.foo'), 'w') as f:
f.write('.foo')
url = self.base_url()
for d in not_hidden:
path = pjoin(nbdir, d.replace('/', os.sep))
r = requests.get(url_path_join(url, 'files', d, 'foo'))
r.raise_for_status()
self.assertEqual(r.text, 'foo')
r = requests.get(url_path_join(url, 'files', d, '.foo'))
self.assertEqual(r.status_code, 404)
for d in hidden:
path = pjoin(nbdir, d.replace('/', os.sep))
for foo in ('foo', '.foo'):
r = requests.get(url_path_join(url, 'files', d, foo))
self.assertEqual(r.status_code, 404)
def test_contents_manager(self):
"make sure ContentsManager returns right files (ipynb, bin, txt)."
nbdir = self.notebook_dir.name
base = self.base_url()
nb = new_notebook(
cells=[
new_markdown_cell(u'Created by test ³'),
new_code_cell("print(2*6)", outputs=[
new_output("stream", text="12"),
])
]
)
with io.open(pjoin(nbdir, 'testnb.ipynb'), 'w',
encoding='utf-8') as f:
write(nb, f, version=4)
with io.open(pjoin(nbdir, 'test.bin'), 'wb') as f:
f.write(b'\xff' + os.urandom(5))
f.close()
with io.open(pjoin(nbdir, 'test.txt'), 'w') as f:
f.write(u'foobar')
f.close()
r = requests.get(url_path_join(base, 'files', 'testnb.ipynb'))
self.assertEqual(r.status_code, 200)
self.assertIn('print(2*6)', r.text)
json.loads(r.text)
r = requests.get(url_path_join(base, 'files', 'test.bin'))
self.assertEqual(r.status_code, 200)
self.assertEqual(r.headers['content-type'], 'application/octet-stream')
self.assertEqual(r.content[:1], b'\xff')
self.assertEqual(len(r.content), 6)
r = requests.get(url_path_join(base, 'files', 'test.txt'))
self.assertEqual(r.status_code, 200)
self.assertEqual(r.headers['content-type'], 'text/plain')
self.assertEqual(r.text, 'foobar')
def test_download(self):
nbdir = self.notebook_dir.name
base = self.base_url()
text = 'hello'
with open(pjoin(nbdir, 'test.txt'), 'w') as f:
f.write(text)
r = requests.get(url_path_join(base, 'files', 'test.txt'))
disposition = r.headers.get('Content-Disposition', '')
self.assertNotIn('attachment', disposition)
r = requests.get(url_path_join(base, 'files', 'test.txt') + '?download=1')
disposition = r.headers.get('Content-Disposition', '')
self.assertIn('attachment', disposition)
self.assertIn('filename="test.txt"', disposition)
def test_old_files_redirect(self):
"""pre-2.0 'files/' prefixed links are properly redirected"""
nbdir = self.notebook_dir.name
base = self.base_url()
os.mkdir(pjoin(nbdir, 'files'))
os.makedirs(pjoin(nbdir, 'sub', 'files'))
for prefix in ('', 'sub'):
with open(pjoin(nbdir, prefix, 'files', 'f1.txt'), 'w') as f:
f.write(prefix + '/files/f1')
with open(pjoin(nbdir, prefix, 'files', 'f2.txt'), 'w') as f:
f.write(prefix + '/files/f2')
with open(pjoin(nbdir, prefix, 'f2.txt'), 'w') as f:
f.write(prefix + '/f2')
with open(pjoin(nbdir, prefix, 'f3.txt'), 'w') as f:
f.write(prefix + '/f3')
url = url_path_join(base, 'notebooks', prefix, 'files', 'f1.txt')
r = requests.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, prefix + '/files/f1')
url = url_path_join(base, 'notebooks', prefix, 'files', 'f2.txt')
r = requests.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, prefix + '/files/f2')
url = url_path_join(base, 'notebooks', prefix, 'files', 'f3.txt')
r = requests.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, prefix + '/f3')
extract_ped_per_frame.py | versey-sherry/Towards-Realtime-MOT | 2,158 | 133886 |
import argparse
import json
import time
from pathlib import Path
from sklearn import metrics
from scipy import interpolate
import torch.nn.functional as F
from models import *
from utils.utils import *
from torchvision.transforms import transforms as T
from utils.datasets import LoadImages, JointDataset, collate_fn
def extract_ped_per_frame(
cfg,
input_root,
output_root,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
mkdir_if_missing(output_root)
# Initialize model
model = Darknet(cfg, img_size, nID)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
vlist = os.listdir(input_root)
vlist = [osp.join(input_root, v, 'img1') for v in vlist]
for vpath in vlist:
vroot = osp.join('/',*vpath.split('/')[:-1])
out_vroot = vroot.replace(input_root, output_root)
mkdir_if_missing(out_vroot)
dataloader = LoadImages(vpath, img_size)
for frame_id, (frame_path, frame, frame_ori) in enumerate(dataloader):
frame_ground_id = frame_path.split('/')[-1].split('.')[0]
if frame_id % 20 == 0:
print('Processing frame {} of video {}'.format(frame_id, frame_path))
blob = torch.from_numpy(frame).cuda().unsqueeze(0)
pred = model(blob)
pred = pred[pred[:,:,4] > conf_thres]
if len(pred) > 0:
dets = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)[0].cpu()
scale_coords(img_size, dets[:, :4], frame_ori.shape).round()
frame_dir = osp.join(out_vroot, frame_ground_id)
mkdir_if_missing(frame_dir)
dets = dets[:, :5]
for ped_id, det in enumerate(dets):
box = det[:4].int()
conf = det[4]
ped = frame_ori[box[1]:box[3], box[0]:box[2]]
ped_path = osp.join(frame_dir, ('{:04d}_'+ '{:d}_'*4 + '{:.2f}.jpg').format(ped_id, *box, conf))
cv2.imwrite(ped_path, ped)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=40, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/mot_64/latest.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.3, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.3, help='iou threshold for non-maximum suppression')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--print-interval', type=int, default=10, help='size of each image dimension')
parser.add_argument('--input-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/frame', help='path to input frames')
parser.add_argument('--output-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/ped_per_frame', help='path to output frames')
opt = parser.parse_args()
print(opt, end='\n\n')
with torch.no_grad():
extract_ped_per_frame(
opt.cfg,
opt.input_root,
opt.output_root,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
projects/DensePose/densepose/modeling/__init__.py | mmabrouk/detectron2 | 21,274 | 133906 |
# Copyright (c) Facebook, Inc. and its affiliates.
from .confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
from .filter import DensePoseDataFilter
from .inference import densepose_inference
from .utils import initialize_module_params
from .build import (
build_densepose_data_filter,
build_densepose_embedder,
build_densepose_head,
build_densepose_losses,
build_densepose_predictor,
)
bin/pylama/lint/pylama_mccabe.py | ShadowLNC/linter-pylama | 463 | 133913 |
"""Code complexity checking."""
from mccabe import McCabeChecker
from pylama.lint import Linter as Abstract
import ast
class Linter(Abstract):
"""Run complexity checking."""
@staticmethod
def run(path, code=None, params=None, **meta):
"""MCCabe code checking.
:return list: List of errors.
"""
tree = compile(code, path, "exec", ast.PyCF_ONLY_AST)
McCabeChecker.max_complexity = int(params.get('complexity', 10))
return [
{'lnum': lineno, 'offset': offset, 'text': text, 'type': McCabeChecker._code}
for lineno, offset, text, _ in McCabeChecker(tree, path).run()
]
# pylama:ignore=W0212
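# Hypothetical invocation (file name and threshold are examples only):
#   source = open("module.py").read()
#   issues = Linter.run("module.py", code=source, params={"complexity": 8})
#   # -> a list of dicts with 'lnum', 'offset', 'text' and 'type' keys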
tests/syntax/async_def_missing_parens.py | matan-h/friendly | 287 | 133925 |
<filename>tests/syntax/async_def_missing_parens.py
async def name:
pass
sanic_jwt_extended/decorators.py | devArtoria/Sanic-JWT-extended | 465 | 133930 |
from functools import wraps
from typing import Callable, List, Optional, Tuple
from sanic.request import Request
from sanic_jwt_extended.exceptions import (
AccessDeniedError,
ConfigurationConflictError,
CSRFError,
FreshTokenRequiredError,
InvalidHeaderError,
NoAuthorizationError,
RevokedTokenError,
WrongTokenError,
)
from sanic_jwt_extended.jwt_manager import JWT
from sanic_jwt_extended.tokens import Token
try:
from hmac import compare_digest
except ImportError: # pragma: no cover
def compare_digest(a, b):
if isinstance(a, str):
a = a.encode("utf-8")
if isinstance(b, str):
b = b.encode("utf-8")
if len(a) != len(b):
return False
r = 0
for x, y in zip(a, b):
r |= x ^ y
return not r
jwt_get_function = Callable[[Request, bool], Tuple[str, Optional[str]]]
def _get_request(args) -> Request:
if isinstance(args[0], Request):
request = args[0]
else:
request = args[1]
return request
def _get_raw_jwt_from_request(request, is_access=True):
functions: List[jwt_get_function] = []
for eligible_location in JWT.config.token_location:
if eligible_location == "header":
functions.append(_get_raw_jwt_from_headers)
if eligible_location == "query":
functions.append(_get_raw_jwt_from_query_params)
if eligible_location == "cookies":
functions.append(_get_raw_jwt_from_cookies)
raw_jwt = None
csrf_value = None
errors = []
for f in functions:
try:
raw_jwt, csrf_value = f(request, is_access)
break
except NoAuthorizationError as e:
errors.append(str(e))
if not raw_jwt:
raise NoAuthorizationError(', '.join(errors))
return raw_jwt, csrf_value
def _get_raw_jwt_from_headers(request, is_access):
header_key = (
JWT.config.jwt_header_key if is_access else JWT.config.refresh_jwt_header_key
)
header_prefix = JWT.config.jwt_header_prefix
token_header = request.headers.get(header_key)
if not token_header:
raise NoAuthorizationError(f'Missing header "{header_key}"')
parts: List[str] = token_header.split()
if parts[0] != header_prefix or len(parts) != 2:
raise InvalidHeaderError(
f"Bad {header_key} header. Expected value '{header_prefix} <JWT>'"
)
encoded_token: str = parts[1]
return encoded_token, None
def _get_raw_jwt_from_query_params(request, _):
encoded_token = request.args.get(JWT.config.jwt_query_param_name)
if not encoded_token:
raise NoAuthorizationError(
f'Missing query parameter "{JWT.config.jwt_query_param_name}"'
)
return encoded_token, None
def _get_raw_jwt_from_cookies(request, is_access):
cookie_key = JWT.config.jwt_cookie if is_access else JWT.config.refresh_jwt_cookie
csrf_header_key = (
JWT.config.jwt_csrf_header if is_access else JWT.config.refresh_jwt_csrf_header
)
encoded_token = request.cookies.get(cookie_key)
csrf_value = None
if not encoded_token:
raise NoAuthorizationError(f'Missing cookie "{cookie_key}"')
if JWT.config.csrf_protect and request.method in JWT.config.csrf_request_methods:
csrf_value = request.headers.get(csrf_header_key)
if not csrf_value:
raise CSRFError("Missing CSRF token")
return encoded_token, csrf_value
def _csrf_check(csrf_from_request, csrf_from_jwt):
if not csrf_from_jwt or not isinstance(csrf_from_jwt, str):
raise CSRFError('Can not find valid CSRF data from token')
if not compare_digest(csrf_from_request, csrf_from_jwt):
raise CSRFError('CSRF double submit tokens do not match')
def jwt_required(
function=None, *, allow=None, deny=None, fresh_required=False,
):
def real(fn):
@wraps(fn)
async def wrapper(*args, **kwargs):
request = _get_request(args)
raw_jwt, csrf_value = _get_raw_jwt_from_request(request)
token_obj = Token(raw_jwt)
if csrf_value:
_csrf_check(csrf_value, token_obj.csrf)
if token_obj.type != "access":
raise WrongTokenError("Only access tokens are allowed")
if fresh_required and not token_obj.fresh:
raise FreshTokenRequiredError("Only fresh access tokens are allowed")
if allow and token_obj.role not in allow:
raise AccessDeniedError("You are not allowed to access here")
if deny and token_obj.role in deny:
raise AccessDeniedError("You are not allowed to access here")
if JWT.config.use_blacklist and await JWT.blacklist.is_blacklisted(
token_obj
):
raise RevokedTokenError("Token has been revoked")
kwargs["token"] = token_obj
return await fn(*args, **kwargs)
return wrapper
if function:
return real(function)
else:
if allow and deny:
raise ConfigurationConflictError(
"Can not use 'deny' and 'allow' option together."
)
return real
def jwt_optional(function):
@wraps(function)
async def wrapper(*args, **kwargs):
request = _get_request(args)
token_obj: Optional[Token] = None
try:
raw_jwt, csrf_value = _get_raw_jwt_from_request(request)
token_obj = Token(raw_jwt)
if csrf_value:
_csrf_check(csrf_value, token_obj.csrf)
if token_obj.type != "access":
raise WrongTokenError("Only access tokens are allowed")
except (NoAuthorizationError, InvalidHeaderError):
pass
kwargs["token"] = token_obj
return await function(*args, **kwargs)
return wrapper
def refresh_jwt_required(function=None, *, allow=None, deny=None):
def real(fn):
@wraps(fn)
async def wrapper(*args, **kwargs):
request = _get_request(args)
raw_jwt, csrf_value = _get_raw_jwt_from_request(request, is_access=False)
token_obj = Token(raw_jwt)
if csrf_value:
_csrf_check(csrf_value, token_obj.csrf)
if token_obj.type != "refresh":
raise WrongTokenError("Only refresh tokens are allowed")
if allow and token_obj.role not in allow:
raise AccessDeniedError("You are not allowed to refresh in here")
if deny and token_obj.role in deny:
raise AccessDeniedError("You are not allowed to refresh in here")
if JWT.config.use_blacklist and await JWT.blacklist.is_blacklisted(
token_obj
):
raise RevokedTokenError("Token has been revoked")
kwargs["token"] = token_obj
return await fn(*args, **kwargs)
return wrapper
if function:
return real(function)
else:
if allow and deny:
raise ConfigurationConflictError(
"Can not use 'deny' and 'allow' option together."
)
return real
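# Usage sketch (illustrative; the Sanic `app` wiring, route paths and handler
# names below are assumptions, not part of this module). Each decorator
# injects the parsed Token object into the handler as the "token" kwarg:
#
# from sanic.response import json as json_response
#
# @app.route("/protected")
# @jwt_required
# async def protected(request, token: Token):
#     return json_response({"role": token.role})
#
# @app.route("/refresh")
# @refresh_jwt_required
# async def refresh(request, token: Token):
#     return json_response({"type": token.type})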
|
dojo/rules/views.py
|
mtcolman/django-DefectDojo
| 1,772 |
133955
|
# Standard library imports
import json
import logging
# Third party imports
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
# Local application/library imports
from dojo.models import Rule,\
System_Settings, Finding, Test, Test_Type, Engagement, \
Product, Product_Type, Child_Rule
from dojo.forms import RuleFormSet, DeleteRuleForm, RuleForm
from dojo.utils import add_breadcrumb
logger = logging.getLogger(__name__)
# Fields for each model ruleset
finding_fields = [f.name for f in Finding._meta.fields]
test_fields = [f.name for f in Test._meta.fields]
test_type_fields = [f.name for f in Test_Type._meta.fields]
engagement_fields = [f.name for f in Engagement._meta.fields]
product_fields = [f.name for f in Product._meta.fields]
product_type_fields = [f.name for f in Product_Type._meta.fields]
field_dictionary = {}
field_dictionary['Finding'] = finding_fields
field_dictionary['Test Type'] = test_type_fields
field_dictionary['Test'] = test_fields
field_dictionary['Engagement'] = engagement_fields
field_dictionary['Product'] = product_fields
field_dictionary['Product Type'] = product_type_fields
@user_passes_test(lambda u: u.is_superuser)
def rules(request):
initial_queryset = Rule.objects.all().order_by('name')
add_breadcrumb(title="Rules", top_level=True, request=request)
return render(request, 'dojo/rules.html', {
'name': 'Rules List',
'metric': False,
'user': request.user,
'rules': initial_queryset})
@user_passes_test(lambda u: u.is_superuser)
def new_rule(request):
if request.method == 'POST':
form = RuleForm(request.POST)
if form.is_valid():
rule = form.save()
messages.add_message(request,
messages.SUCCESS,
'Rule created successfully.',
extra_tags='alert-success')
if "_Add Child" in request.POST:
return HttpResponseRedirect(reverse('Add Child', args=(rule.id,)))
return HttpResponseRedirect(reverse('rules'))
form = RuleForm()
add_breadcrumb(title="New Dojo Rule", top_level=False, request=request)
return render(request, 'dojo/new_rule2.html',
{'form': form,
'finding_fields': finding_fields,
'test_fields': test_fields,
'engagement_fields': engagement_fields,
'product_fields': product_fields,
'product_type_fields': product_type_fields,
'field_dictionary': json.dumps(field_dictionary)})
@user_passes_test(lambda u: u.is_superuser)
def add_child(request, pid):
rule = get_object_or_404(Rule, pk=pid)
if request.method == 'POST':
forms = RuleFormSet(request.POST)
for form in forms:
if form.is_valid():
cr = form.save(commit=False)
cr.parent_rule = rule
cr.save()
messages.add_message(request,
messages.SUCCESS,
'Rule created successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('rules'))
form = RuleFormSet(queryset=Child_Rule.objects.filter(parent_rule=rule))
add_breadcrumb(title="New Dojo Rule", top_level=False, request=request)
return render(request, 'dojo/new_rule.html',
{'form': form,
'pid': pid,
'finding_fields': finding_fields,
'test_fields': test_fields,
'engagement_fields': engagement_fields,
'product_fields': product_fields,
'product_type_fields': product_type_fields,
'field_dictionary': json.dumps(field_dictionary)})
@user_passes_test(lambda u: u.is_superuser)
def edit_rule(request, pid):
pt = get_object_or_404(Rule, pk=pid)
children = Rule.objects.filter(parent_rule=pt)
all_rules = children | Rule.objects.filter(pk=pid)
form = RuleForm(instance=pt)
if request.method == 'POST':
form = RuleForm(request.POST, instance=pt)
if form.is_valid():
pt = form.save()
messages.add_message(request,
messages.SUCCESS,
'Rule updated successfully.',
extra_tags='alert-success')
if "_Add Child" in request.POST:
return HttpResponseRedirect(reverse('Add Child', args=(pt.id,)))
return HttpResponseRedirect(reverse('rules'))
add_breadcrumb(title="Edit Rule", top_level=False, request=request)
return render(request, 'dojo/edit_rule.html', {
'name': 'Edit Rule',
'metric': False,
'user': request.user,
'form': form,
'field_dictionary': json.dumps(field_dictionary),
'pt': pt, })
@user_passes_test(lambda u: u.is_superuser)
def delete_rule(request, tid):
rule = get_object_or_404(Rule, pk=tid)
form = DeleteRuleForm(instance=rule)
if request.method == 'POST':
# print('id' in request.POST, file=sys.stderr)
# print(str(rule.id) == request.POST['id'], file=sys.stderr)
# print(str(rule.id) == request.POST['id'], file=sys.stderr)
# if 'id' in request.POST and str(rule.id) == request.POST['id']:
form = DeleteRuleForm(request.POST, instance=rule)
# print(form.is_valid(), file=sys.stderr)
# print(form.errors, file=sys.stderr)
# print(form.non_field_errors(), file=sys.stderr)
# print('id' in request.POST, file=sys.stderr)
if form.is_valid():
rule.delete()
messages.add_message(request,
messages.SUCCESS,
'Rule deleted.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('rules'))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([rule])
rels = collector.nested()
add_breadcrumb(parent=rule, title="Delete", top_level=False, request=request)
system_settings = System_Settings.objects.get()
return render(request, 'dojo/delete_rule.html',
{'rule': rule,
'form': form,
'active_tab': 'findings',
'system_settings': system_settings,
'rels': rels,
})
|
GeneratorInterface/GenFilters/test/test_EMJetHeep_cfg.py
|
ckamtsikis/cmssw
| 852 |
133983
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
from Configuration.GenProduction.PythiaUESettings_cfi import *
process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("GeneratorInterface.Pythia6Interface.pythiaDefault_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
generator = cms.PSet(
initialSeed = cms.untracked.uint32(123456789),
engineName = cms.untracked.string('HepJamesRandom')
)
)
# The following three lines reduce the clutter of repeated printouts
# of the same exception message.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.enableStatistics = False
process.MessageLogger.cerr.threshold = "Warning"
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1000))
process.source = cms.Source("EmptySource")
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(3),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(10000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL=1 ! QCD events',
'CKIN(3)= 80.0 ! pt_hat_min',
'CKIN(4)=120.0 ! pt_hat_max'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
process.selection = cms.EDFilter("PythiaFilterEMJetHeep",
#
moduleLabel = cms.untracked.string('VtxSmeared'),
Minbias = cms.untracked.bool(False),
MinEventPt = cms.untracked.double(80.),
MaxPhotonEta= cms.untracked.double(2.7),
ConeClust = cms.untracked.double(0.10),
ConeIso = cms.untracked.double(0.50),
NumPartMin = cms.untracked.uint32(2),
dRMin = cms.untracked.double(0.40),
MaxEvents = cms.untracked.int32(1000),
Debug = cms.untracked.bool(False)
)
process.GEN = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('EMJetHeep_diem.root'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('p')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN')
)
)
process.p = cms.Path(process.generator * process.selection)
process.outpath = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.p, process.outpath)
|
diskimage_builder/block_device/level0/localloop.py
|
fooxlj07/diskimage-builder
| 262 |
133996
|
# Copyright 2016 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from diskimage_builder.block_device.exception import \
BlockDeviceSetupException
from diskimage_builder.block_device.plugin import NodeBase
from diskimage_builder.block_device.plugin import PluginBase
from diskimage_builder.block_device.utils import exec_sudo
from diskimage_builder.block_device.utils import parse_abs_size_spec
logger = logging.getLogger(__name__)
def image_create(filename, size):
logger.info("Create image file [%s]", filename)
with open(filename, "w") as fd:
fd.seek(size - 1)
fd.write("\0")
def image_delete(filename):
logger.info("Remove image file [%s]", filename)
os.remove(filename)
def loopdev_attach(filename):
logger.info("loopdev attach")
logger.debug("Calling [sudo losetup --show -f %s]", filename)
block_device = exec_sudo(["losetup", "--show", "-f", filename])
    # [:-1]: Cut off the newline
block_device = block_device[:-1]
logger.info("New block device [%s]", block_device)
return block_device
def loopdev_detach(loopdev):
logger.info("loopdev detach")
# loopback dev may be tied up a bit by udev events triggered
# by partition events
for try_cnt in range(10, 1, -1):
try:
exec_sudo(["losetup", "-d", loopdev])
return
except BlockDeviceSetupException as e:
# Do not raise an error - maybe other cleanup methods
# can at least do some more work.
logger.error("loopdev detach failed (%s)", e.returncode)
logger.debug("Gave up trying to detach [%s]", loopdev)
return 1
class LocalLoopNode(NodeBase):
"""Level0: Local loop image device handling.
This class handles local loop devices that can be used
for VM image installation.
"""
def __init__(self, config, default_config, state):
logger.debug("Creating LocalLoop object; config [%s] "
"default_config [%s]", config, default_config)
super(LocalLoopNode, self).__init__(config['name'], state)
if 'size' in config:
self.size = parse_abs_size_spec(config['size'])
logger.debug("Image size [%s]", self.size)
else:
self.size = parse_abs_size_spec(default_config['image-size'])
logger.debug("Using default image size [%s]", self.size)
if 'directory' in config:
self.image_dir = config['directory']
else:
self.image_dir = default_config['image-dir']
self.filename = os.path.join(self.image_dir, self.name + ".raw")
def get_edges(self):
"""Because this is created without base, there are no edges."""
return ([], [])
def create(self):
logger.debug("[%s] Creating loop on [%s] with size [%d]",
self.name, self.filename, self.size)
self.add_rollback(image_delete, self.filename)
image_create(self.filename, self.size)
block_device = loopdev_attach(self.filename)
self.add_rollback(loopdev_detach, block_device)
if 'blockdev' not in self.state:
self.state['blockdev'] = {}
self.state['blockdev'][self.name] = {"device": block_device,
"image": self.filename}
logger.debug("Created loop name [%s] device [%s] image [%s]",
self.name, block_device, self.filename)
return
def umount(self):
loopdev_detach(self.state['blockdev'][self.name]['device'])
def delete(self):
image_delete(self.state['blockdev'][self.name]['image'])
class LocalLoop(PluginBase):
def __init__(self, config, defaults, state):
super(LocalLoop, self).__init__()
self.node = LocalLoopNode(config, defaults, state)
def get_nodes(self):
return [self.node]
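# Rough lifecycle sketch (illustrative; the config/state dicts shown are
# assumptions inferred from how this plugin reads them, not a documented API):
# state = {}
# node = LocalLoopNode({'name': 'image0', 'size': '1GiB', 'directory': '/tmp'},
#                      {}, state)
# node.create()    # creates the raw file and attaches it to a free /dev/loopN
# ...              # higher levels (partitioning, fs) build on state['blockdev']
# node.umount()    # detaches the loop device
# node.delete()    # removes the raw image file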
|
eggs/Cheetah-2.2.2-py2.7-linux-x86_64-ucs4.egg/Cheetah/Tools/RecursiveNull.py
|
bopopescu/phyG
| 115 |
134006
|
"""
Nothing, but in a friendly way. Good for filling in for objects you want to
hide. If $form.f1 is a RecursiveNull object, then
$form.f1.anything["you"].might("use") will resolve to the empty string.
This module was contributed by <NAME>.
"""
class RecursiveNull(object):
def __getattr__(self, attr):
return self
def __getitem__(self, item):
return self
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return ''
def __repr__(self):
return ''
def __nonzero__(self):
return 0
def __eq__(self, x):
if x:
return False
return True
def __ne__(self, x):
return x and True or False
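# Minimal demonstration (illustrative, not part of the original module): any
# attribute access, item lookup or call collapses back to the same
# RecursiveNull instance, and rendering it yields the empty string.
if __name__ == '__main__':
    nothing = RecursiveNull()
    assert str(nothing.anything["you"].might("use")) == ''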
|
src/genie/libs/parser/iosxe/tests/ShowSpanningTreeMstDetail/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
| 204 |
134033
|
<filename>src/genie/libs/parser/iosxe/tests/ShowSpanningTreeMstDetail/cli/equal/golden_output_1_expected.py
expected_output = {
'mst_instances': {
6: {
'bridge_address': '5897.bdff.3b3a',
'bridge_priority': 20486,
'interfaces': {
'GigabitEthernet1/7': {
'cost': 20000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 836828,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.7',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 1,
'message_expires': 0,
'name': 'GigabitEthernet1/7',
'port_id': '128.7',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/10': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285480,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.138',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/10',
'port_id': '128.138',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/3': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285495,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.131',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/3',
'port_id': '128.131',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/4': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285500,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.132',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/4',
'port_id': '128.132',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/5': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285475,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.133',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/5',
'port_id': '128.133',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/6': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285487,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.134',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/6',
'port_id': '128.134',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/7': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285497,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.135',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/7',
'port_id': '128.135',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/8': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285497,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.136',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/8',
'port_id': '128.136',
'port_priority': 128,
'status': 'designated forwarding',
},
'TenGigabitEthernet2/9': {
'cost': 2000,
'counters': {
'bpdu_received': 0,
'bpdu_sent': 1285494,
},
'designated_bridge_address': '5897.bdff.3b3a',
'designated_bridge_port_id': '128.137',
'designated_bridge_priority': 20486,
'designated_root_address': '58ac.78ff.c3f5',
'designated_root_cost': 2000,
'designated_root_priority': 8198,
'forward_delay': 0,
'forward_transitions': 2,
'message_expires': 0,
'name': 'TenGigabitEthernet2/9',
'port_id': '128.137',
'port_priority': 128,
'status': 'designated forwarding',
},
},
'mst_id': 6,
'root_address': '58ac.78ff.c3f5',
'root_priority': 8198,
'sysid': 6,
'vlan': '500-501,504-505,507-554,556-599',
},
},
}
|
lib/galaxy_test/api/test_workflows_cwl.py
|
quacksawbones/galaxy-1
| 1,085 |
134049
|
<filename>lib/galaxy_test/api/test_workflows_cwl.py
"""Test CWL workflow functionality."""
from galaxy_test.api.test_workflows import BaseWorkflowsApiTestCase
from galaxy_test.base.populators import CwlPopulator
class BaseCwlWorkflowTestCase(BaseWorkflowsApiTestCase):
allow_path_paste = True
require_admin_user = True
def setUp(self):
super().setUp()
self.cwl_populator = CwlPopulator(
self.dataset_populator, self.workflow_populator
)
|
examples/oauth_sqlite3_app_org_level.py
|
hirosassa/bolt-python
| 504 |
134050
|
import logging
logging.basicConfig(level=logging.DEBUG)
from slack_bolt import App, BoltContext
from slack_bolt.oauth import OAuthFlow
from slack_sdk import WebClient
app = App(oauth_flow=OAuthFlow.sqlite3(database="./slackapp.db"))
@app.use
def dump(context, next, logger):
logger.info(context)
next()
@app.use
def call_apis_with_team_id(context: BoltContext, client: WebClient, next):
# client.users_list()
client.bots_info(bot=context.bot_id)
next()
@app.event("app_mention")
def handle_app_mentions(body, say, logger):
logger.info(body)
say("What's up?")
@app.command("/org-level-command")
def command(ack):
ack("I got it!")
@app.shortcut("org-level-shortcut")
def shortcut(ack):
ack()
@app.event("team_access_granted")
def team_access_granted(event):
pass
@app.event("team_access_revoked")
def team_access_revoked(event):
pass
if __name__ == "__main__":
app.start(3000)
# pip install slack_bolt
# export SLACK_SIGNING_SECRET=***
# export SLACK_BOT_TOKEN=<PASSWORD>-***
# export SLACK_CLIENT_ID=111.111
# export SLACK_CLIENT_SECRET=***
# export SLACK_SCOPES=app_mentions:read,channels:history,im:history,chat:write
# python oauth_app.py
|
xt/agent/muzero/mcts.py
|
TianQi-777/xingtian
| 240 |
134057
|
"""MCTS module: where MuZero thinks inside the tree."""
import math
import random
import numpy as np
from xt.agent.muzero.default_config import PB_C_BASE, PB_C_INIT
from xt.agent.muzero.default_config import ROOT_DIRICHLET_ALPHA
from xt.agent.muzero.default_config import ROOT_EXPLORATION_FRACTION
from xt.agent.muzero.default_config import GAMMA
from xt.agent.muzero.util import MinMaxStats, Node, soft_max_sample
from xt.model.muzero.muzero_model import NetworkOutput
class Mcts(object):
"""MCTS operation."""
def __init__(self, agent, root_state):
self.network = agent.alg.actor
self.action_dim = agent.alg.action_dim
self.num_simulations = agent.num_simulations
self.min_max_stats = MinMaxStats(None)
self.discount = GAMMA
self.actions = range(self.action_dim)
self.pb_c_base = PB_C_BASE
self.pb_c_init = PB_C_INIT
self.root_dirichlet_alpha = ROOT_DIRICHLET_ALPHA
self.root_exploration_fraction = ROOT_EXPLORATION_FRACTION
self.root = Node(0)
root_state = root_state.reshape((1, ) + root_state.shape)
network_output = self.network.initial_inference(root_state)
self.init_node(self.root, network_output)
def init_node(self, node, network_output):
node.hidden_state = network_output.hidden_state
node.reward = network_output.reward
policy = [p for p in network_output.policy]
for action in self.actions:
node.children[action] = Node(policy[action])
def backpropagate(self, search_path, value):
"""Propagate the evaluation all the way up the tree to the root at the end of a simulation."""
for node in search_path[::-1]:
node.value_sum += value
node.visit_count += 1
self.min_max_stats.update(node.value())
value = node.reward + self.discount * value
def run_mcts(self):
"""
        Run the core Monte Carlo Tree Search algorithm.
To decide on an action, we run N simulations, always starting at the root of
the search tree and traversing the tree according to the UCB formula until we
reach a leaf node.
"""
for _ in range(self.num_simulations):
node = self.root
search_path = [node]
history = []
while node.expanded():
action, node = self.select_child(node)
search_path.append(node)
history.append(action)
# Inside the search tree we use the dynamics function to obtain the next
# hidden state given an action and the previous hidden state.
parent = search_path[-2]
network_output = self.network.recurrent_inference(parent.hidden_state, history[-1])
self.init_node(node, network_output)
self.backpropagate(search_path, network_output.value)
def select_action(self, mode='softmax'):
"""
Select action.
        After running simulations inside the MCTS tree, we select an action based on the visit counts of the root's children.
During training we use a softmax sample for exploration.
During evaluation we select the most visited child.
"""
node = self.root
visit_counts = [child.visit_count for child in node.children.values()]
actions = self.actions
action = None
if mode == 'softmax':
action = soft_max_sample(visit_counts, actions, 1)
elif mode == 'max':
action = np.argmax(visit_counts)
return action
def ucb_score(self, parent, child):
"""
Calculate UCB score.
The score for a node is based on its value, plus an exploration bonus based on the prior.
"""
pb_c = math.log((parent.visit_count + self.pb_c_base + 1) / self.pb_c_base) + self.pb_c_init
pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
prior_score = pb_c * child.prior
if child.visit_count > 0:
value_score = self.min_max_stats.normalize(child.value())
else:
value_score = 0
return prior_score + value_score
def add_exploration_noise(self, node):
actions = self.actions
noise = np.random.dirichlet([self.root_dirichlet_alpha] * self.action_dim)
frac = self.root_exploration_fraction
for i, _noise in zip(actions, noise):
node.children[i].prior = node.children[i].prior * (1 - frac) + _noise * frac
def get_info(self):
"""Get train info from mcts tree."""
child_visits = [self.root.children[a].visit_count for a in self.actions]
sum_visits = sum(child_visits)
child_visits = [visits / sum_visits for visits in child_visits]
return {"child_visits": child_visits, "root_value": self.root.value()}
def select_child(self, node):
"""Select the child with the highest UCB score."""
_, action, child = max((self.ucb_score(node, child), action, child) for action, child in node.children.items())
return action, child
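# Driving-loop sketch (illustrative; `agent` and `root_state` are assumed to
# come from the surrounding xingtian runtime, which supplies alg.actor,
# alg.action_dim and num_simulations):
# mcts = Mcts(agent, root_state)
# mcts.add_exploration_noise(mcts.root)        # root Dirichlet noise (training)
# mcts.run_mcts()                              # num_simulations tree traversals
# action = mcts.select_action(mode='softmax')  # 'max' at evaluation time
# train_info = mcts.get_info()                 # visit distribution + root value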
|
plugins/base64_plugin/lib/Base64.py
|
MrT3acher/Android-Malware-Sandbox
| 218 |
134069
|
<filename>plugins/base64_plugin/lib/Base64.py<gh_stars>100-1000
import datetime
from lib.model.database.Database import Database
from sqlalchemy import Column, Integer, String, Date, DateTime, ForeignKey
Base = Database.get_declarative_base()
class Base64(Base):
__tablename__ = 'base64'
id = Column(Integer, primary_key=True)
date = Column(DateTime, default=datetime.datetime.utcnow)
method = Column(String)
value = Column(String)
stack = Column(String)
application_id = Column(Integer, ForeignKey('application.id'))
def __init__(self, p_method, base64Value, stack):
self.method = p_method
self.value = base64Value
self.stack = stack
def __repr__(self):
return f'<Base64(id={self.id},method="{self.method}",date="{self.date}")>'
|
extensions/cauchy/test_cauchy.py
|
NathanYanJing/state-spaces
| 513 |
134094
|
<filename>extensions/cauchy/test_cauchy.py
import math
import torch
import pytest
from einops import rearrange
from cauchy import cauchy_mult_torch, cauchy_mult_keops, cauchy_mult
def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
if not symmetric:
v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
z = torch.randn(L, dtype=torch.complex64, device=device)
else:
assert N % 2 == 0
v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
return v, z, w
def grad_to_half_grad(dx):
dx_half, dx_half_conj = dx.chunk(2, dim=-1)
return dx_half + dx_half_conj.conj()
# @pytest.mark.parametrize('L', [1024])
# @pytest.mark.parametrize('N', [64])
# def test_cauchy_mult_nonsymmetric(N, L):
# device = 'cuda'
# batch_size = 4
# torch.random.manual_seed(2357)
# v, z, w = generate_data(batch_size, N, L, symmetric=False, device=device)
# out_torch = cauchy_mult_torch(v, z, w, symmetric=False)
# out_keops = cauchy_mult_keops(v, z, w)
# out = cauchy_mult(v, z, w, symmetric=False)
# assert torch.allclose(out, out_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(out, out_keops, rtol=1e-4, atol=1e-4)
# dout = torch.randn_like(out)
# dv_torch, dw_torch = torch.autograd.grad(out_torch, (v, w), dout, retain_graph=True)
# dv_keops, dw_keops = torch.autograd.grad(out_keops, (v, w), dout, retain_graph=True)
# dv, dw = torch.autograd.grad(out, (v, w), dout, retain_graph=True)
# assert torch.allclose(dv, dv_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dv, dv_keops, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dw, dw_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dw, dw_keops, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('L', [3, 17, 489, 2**10, 1047, 2**11, 2**12, 2**13, 2**14, 2**18])
@pytest.mark.parametrize('N', [4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048])
def test_cauchy_mult_symmetric(N, L):
# rtol, atol = (1e-4, 1e-4) if N <= 64 and L <= 1024 else(1e-3, 1e-3)
atol = 1e-4
tol_factor = 10.0 # Our error shouldn't be this much higher than Keops' error
device = 'cuda'
batch_size = 4
torch.random.manual_seed(2357)
v, z, w = generate_data(batch_size, N, L, symmetric=True, device=device)
v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
w_half = w[:, :N // 2].clone().detach().requires_grad_(True)
# out_torch = cauchy_mult_torch(v, z, w, symmetric=True)
out_torch = cauchy_mult_torch(v.cdouble(), z.cdouble(), w.cdouble(), symmetric=True).cfloat()
out_keops = cauchy_mult_keops(v, z, w)
out = cauchy_mult(v_half, z, w_half, symmetric=True)
relerr_out_keops = (out_keops - out_torch).abs() / out_torch.abs()
relerr_out = (out - out_torch).abs() / out_torch.abs()
dout = torch.randn_like(out)
dv_torch, dw_torch = torch.autograd.grad(out_torch, (v, w), dout, retain_graph=True)
dv_torch, dw_torch = dv_torch[:, :N // 2], dw_torch[:, :N // 2]
dv_keops, dw_keops = torch.autograd.grad(out_keops, (v, w), dout, retain_graph=True)
dv_keops, dw_keops = grad_to_half_grad(dv_keops), grad_to_half_grad(dw_keops)
dv, dw = torch.autograd.grad(out, (v_half, w_half), dout, retain_graph=True)
relerr_dv_keops = (dv_keops - dv_torch).abs() / dv_torch.abs()
relerr_dv = (dv - dv_torch).abs() / dv_torch.abs()
relerr_dw_keops = (dw_keops - dw_torch).abs() / dw_torch.abs()
relerr_dw = (dw - dw_torch).abs() / dw_torch.abs()
    print(f'Keops out relative error: max {relerr_out_keops.amax().item():.6f}, mean {relerr_out_keops.mean().item():.6f}')
print(f'out relative error: max {relerr_out.amax().item():.6f}, mean {relerr_out.mean().item():.6f}')
    print(f'Keops dv relative error: max {relerr_dv_keops.amax().item():.6f}, mean {relerr_dv_keops.mean().item():.6f}')
print(f'dv relative error: max {relerr_dv.amax().item():.6f}, mean {relerr_dv.mean().item():.6f}')
    print(f'Keops dw relative error: max {relerr_dw_keops.amax().item():.6f}, mean {relerr_dw_keops.mean().item():.6f}')
print(f'dw relative error: max {relerr_dw.amax().item():.6f}, mean {relerr_dw.mean().item():.6f}')
assert (relerr_out.amax() <= relerr_out_keops.amax() * tol_factor + atol)
assert (relerr_out.mean() <= relerr_out_keops.mean() * tol_factor + atol)
# assert torch.allclose(out, out_torch, rtol=rtol, atol=atol)
# assert torch.allclose(out, out_keops, rtol=rtol, atol=atol)
assert (relerr_dv.amax() <= relerr_dv_keops.amax() * tol_factor + atol)
assert (relerr_dv.mean() <= relerr_dv_keops.mean() * tol_factor + atol)
assert (relerr_dw.amax() <= relerr_dw_keops.amax() * tol_factor + atol)
assert (relerr_dw.mean() <= relerr_dw_keops.mean() * tol_factor + atol)
# assert torch.allclose(dv, dv_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dv, dv_keops, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dw, dw_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dw, dw_keops, rtol=1e-4, atol=1e-4)
|
akshare/air/air_zhenqi.py
|
NovelResearchInvestment/akshare
| 721 |
134111
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 14:50
Desc: Zhenqi (zq12369.com) air quality
https://www.zq12369.com/environment.php
Air quality data from the online air quality monitoring and analysis platform
https://www.aqistudy.cn/
"""
import json
import os
import re
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from akshare.utils import demjson
def _get_js_path(name: str = None, module_file: str = None) -> str:
"""
    Get the path of a JS file, looked up relative to the module's directory
    :param name: file name
    :type name: str
    :param module_file: module file path
    :type module_file: str
    :return: path
:rtype: str
"""
module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
module_json_path = os.path.join(module_folder, "air", name)
return module_json_path
def _get_file_content(file_name: str = "crypto.js") -> str:
"""
    Get the content of a JS file
    :param file_name: JS file name
    :type file_name: str
    :return: file content
:rtype: str
"""
setting_file_name = file_name
setting_file_path = _get_js_path(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def has_month_data(href):
"""
Deal with href node
:param href: href
:type href: str
:return: href result
:rtype: str
"""
return href and re.compile("monthdata.php").search(href)
def air_city_table() -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) historical air quality query - list of all cities
    https://www.zq12369.com/environment.php?date=2019-06-05&tab=rank&order=DESC&type=DAY#rank
    :return: city mapping table
:rtype: pandas.DataFrame
"""
url = "https://www.zq12369.com/environment.php"
date = "2020-05-01"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text)[1].iloc[1:, :]
del temp_df['降序']
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = ['序号', '省份', '城市', 'AQI', '空气质量', 'PM2.5浓度', '首要污染物']
temp_df['AQI'] = pd.to_numeric(temp_df['AQI'])
return temp_df
def air_quality_watch_point(
city: str = "杭州", start_date: str = "20220408", end_date: str = "20220409"
) -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) air quality at monitoring points, broken down to each monitoring point of a given city
    Air quality data for the specified date range
    https://www.zq12369.com/
    :param city: obtain the available cities via the ak.air_city_table() interface
    :type city: str
    :param start_date: e.g., "20190327"
    :type start_date: str
    :param end_date: e.g., "20200327"
    :type end_date: str
    :return: air quality at the monitoring points of the given city over the given date range
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/zhenqiapi.php"
file_data = _get_file_content(file_name="crypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
method = "GETCITYPOINTAVG"
ctx.call("encode_param", method)
ctx.call("encode_param", start_date)
ctx.call("encode_param", end_date)
city_param = ctx.call("encode_param", city)
ctx.call("encode_secret", method, city_param, start_date, end_date)
payload = {
"appId": "a01901d3caba1f362d69474674ce477f",
"method": ctx.call("encode_param", method),
"city": city_param,
"startTime": ctx.call("encode_param", start_date),
"endTime": ctx.call("encode_param", end_date),
"secret": ctx.call("encode_secret", method, city_param, start_date, end_date),
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
}
r = requests.post(url, data=payload, headers=headers)
data_text = r.text
data_json = demjson.decode(ctx.call("decode_result", data_text))
temp_df = pd.DataFrame(data_json["rows"])
return temp_df
def air_quality_hist(
city: str = "杭州",
period: str = "day",
start_date: str = "20190327",
end_date: str = "20200427",
) -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) historical air quality data
    https://www.zq12369.com/
    :param city: obtain the full city list via the ak.air_city_table() interface
    :type city: str
    :param period: "hour": one record per hour (large volume, slow to download); "day": one record per day; "month": one record per month
:type period: str
:param start_date: e.g., "20190327"
:type start_date: str
:param end_date: e.g., "20200327"
:type end_date: str
    :return: air quality data for the given city and frequency over the given period
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/newzhenqiapi.php"
file_data = _get_file_content(file_name="outcrypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
appId = "4f0e3a273d547ce6b7147bfa7ceb4b6e"
method = "CETCITYPERIOD"
timestamp = ctx.eval("timestamp = new Date().getTime()")
p_text = json.dumps(
{
"city": city,
"endTime": f"{end_date} 23:45:39",
"startTime": f"{start_date} 00:00:00",
"type": period.upper(),
},
ensure_ascii=False,
indent=None,
).replace(' "', '"')
secret = ctx.call("hex_md5", appId + method + str(timestamp) + "WEB" + p_text)
payload = {
"appId": "4f0e3a273d547ce6b7147bfa7ceb4b6e",
"method": "CETCITYPERIOD",
"timestamp": int(timestamp),
"clienttype": "WEB",
"object": {
"city": city,
"type": period.upper(),
"startTime": f"{start_date} 00:00:00",
"endTime": f"{end_date} 23:45:39",
},
"secret": secret,
}
need = (
json.dumps(payload, ensure_ascii=False, indent=None, sort_keys=False)
.replace(' "', '"')
.replace("\\", "")
.replace('p": ', 'p":')
.replace('t": ', 't":')
)
headers = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'Cache-Control': 'no-cache',
# 'Connection': 'keep-alive',
# 'Content-Length': '1174',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Cookie': 'UM_distinctid=1800e5142c5b85-04b8f11aa852f3-1a343370-1fa400-1800e5142c6b7e; CNZZDATA1254317176=1502593570-1649496979-%7C1649507817; city=%E6%9D%AD%E5%B7%9E; SECKEY_ABVK=eSrbUhd28Mjo7jf8Rfh+uY5E9C+tAhQ8mOfYJHSjSfY%3D; BMAP_SECKEY=N5fGcwdWpeJW46eZ<KEY>',
# 'Host': 'www.zq12369.com',
# 'Origin': 'https://www.zq12369.com',
# 'Pragma': 'no-cache',
# 'Referer': 'https://www.zq12369.com/environment.php?catid=4',
# 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-ch-ua-platform': '"Windows"',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
params = {"param": ctx.call("AES.encrypt", need)}
params = {"param": ctx.call("encode_param", need)}
r = requests.post(url, data=params, headers=headers)
temp_text = ctx.call("decryptData", r.text)
data_json = demjson.decode(ctx.call("b.decode", temp_text))
temp_df = pd.DataFrame(data_json["result"]["data"]["rows"])
temp_df.index = temp_df["time"]
del temp_df["time"]
temp_df = temp_df.astype(float, errors="ignore")
return temp_df
def air_quality_rank(date: str = "") -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) AQI ranking of 168 cities
    https://www.zq12369.com/environment.php?date=2020-03-12&tab=rank&order=DESC&type=DAY#rank
    :param date: "": real-time air quality ranking; "20200312": ranking for that day; "202003": ranking for that month; "2019": ranking for that year
    :type date: str
    :return: air quality ranking data for the given date granularity
:rtype: pandas.DataFrame
"""
if len(date) == 4:
date = date
elif len(date) == 6:
date = "-".join([date[:4], date[4:6]])
elif date == '':
date = '实时'
else:
date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://www.zq12369.com/environment.php"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[1].iloc[1:, :]
elif len(date.split("-")) == 2:
params = {
"month": date,
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[2].iloc[1:, :]
elif len(date.split("-")) == 1 and date != "实时":
params = {
"year": date,
"tab": "rank",
"order": "DESC",
"type": "YEAR",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[3].iloc[1:, :]
if date == "实时":
params = {
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[0].iloc[1:, :]
if __name__ == "__main__":
air_city_table_df = air_city_table()
print(air_city_table_df)
air_quality_watch_point_df = air_quality_watch_point(
city="杭州", start_date="20220408", end_date="20220409"
)
print(air_quality_watch_point_df)
air_quality_hist_df = air_quality_hist(
city="北京",
period="day",
start_date="20200320",
end_date="20200427",
)
print(air_quality_hist_df)
air_quality_rank_df = air_quality_rank()
print(air_quality_rank_df)
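    # Other ranking granularities (illustrative, per the docstring of
    # air_quality_rank):
    # print(air_quality_rank(date="20200312"))  # ranking for that day
    # print(air_quality_rank(date="202003"))    # ranking for that month
    # print(air_quality_rank(date="2019"))      # ranking for that year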
|
build/scripts/build_java_with_error_prone.py
|
jochenater/catboost
| 6,989 |
134121
|
<gh_stars>1000+
import sys
import os
ERROR_PRONE_FLAGS = [
'-Xep:FunctionalInterfaceMethodChanged:WARN',
'-Xep:ReturnValueIgnored:WARN',
]
JAVA10_EXPORTS = [
'--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED'
]
def just_do_it(argv):
java, error_prone_tool, javac_cmd = argv[0], argv[1], argv[2:]
if java.endswith('javac') or java.endswith('javac.exe'):
for f in javac_cmd:
if f.startswith('-Xep'):
ERROR_PRONE_FLAGS.append(f)
for f in ERROR_PRONE_FLAGS:
if f in javac_cmd:
javac_cmd.remove(f)
os.execv(java, [java] + JAVA10_EXPORTS + ['-processorpath', error_prone_tool, '-XDcompilePolicy=byfile'] + [(' '.join(['-Xplugin:ErrorProne'] + ERROR_PRONE_FLAGS))] + javac_cmd)
else:
os.execv(java, [java, '-Xbootclasspath/p:' + error_prone_tool, 'com.google.errorprone.ErrorProneCompiler'] + ERROR_PRONE_FLAGS + javac_cmd)
if __name__ == '__main__':
just_do_it(sys.argv[1:])
|
bpython/autocomplete.py
|
gpotter2/bpython
| 2,168 |
134145
|
# The MIT License
#
# Copyright (c) 2009-2015 the bpython authors.
# Copyright (c) 2015-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To gradually migrate to mypy we aren't setting these globally yet
# mypy: disallow_untyped_defs=True
# mypy: disallow_untyped_calls=True
import __main__
import abc
import glob
import keyword
import logging
import os
import re
import rlcompleter
import builtins
from enum import Enum
from typing import (
Any,
cast,
Dict,
Iterator,
List,
Optional,
Set,
Tuple,
Sequence,
)
from . import inspection
from . import line as lineparts
from .line import LinePart
from .lazyre import LazyReCompile
from .simpleeval import safe_eval, evaluate_current_expression, EvaluationError
from .importcompletion import ModuleGatherer
# Autocomplete modes
class AutocompleteModes(Enum):
NONE = "none"
SIMPLE = "simple"
SUBSTRING = "substring"
FUZZY = "fuzzy"
@classmethod
def from_string(cls, value: str) -> Optional[Any]:
if value.upper() in cls.__members__:
return cls.__members__[value.upper()]
return None
MAGIC_METHODS = tuple(
f"__{s}__"
for s in (
"new",
"init",
"del",
"repr",
"str",
"bytes",
"format",
"lt",
"le",
"eq",
"ne",
"gt",
"ge",
"hash",
"bool",
"getattr",
"getattribute",
"setattr",
"delattr",
"dir",
"get",
"set",
"delete",
"set_name",
"init_subclass",
"instancecheck",
"subclasscheck",
"class_getitem",
"call",
"len",
"length_hint",
"getitem",
"setitem",
"delitem",
"missing",
"iter",
"reversed",
"contains",
"add",
"sub",
"mul",
"matmul",
"truediv",
"floordiv",
"mod",
"divmod",
"pow",
"lshift",
"rshift",
"and",
"xor",
"or",
"radd",
"rsub",
"rmul",
"rmatmul",
"rtruediv",
"rfloordiv",
"rmod",
"rdivmod",
"rpow",
"rlshift",
"rrshift",
"rand",
"rxor",
"ror",
"iadd",
"isub",
"imul",
"imatmul",
"itruediv",
"ifloordiv",
"imod",
"ipow",
"ilshift",
"irshift",
"iand",
"ixor",
"ixor",
"neg",
"pos",
"abs",
"invert",
"complex",
"int",
"float",
"index",
"round",
"trunc",
"floor",
"ceil",
"enter",
"exit",
"await",
"aiter",
"anext",
"aenter",
"aexit",
)
)
KEYWORDS = frozenset(keyword.kwlist)
def after_last_dot(name: str) -> str:
return name.rstrip(".").rsplit(".")[-1]
def few_enough_underscores(current: str, match: str) -> bool:
"""Returns whether match should be shown based on current
if current is _, True if match starts with 0 or 1 underscore
if current is __, True regardless of match
otherwise True if match does not start with any underscore
"""
if current.startswith("__"):
return True
elif current.startswith("_") and not match.startswith("__"):
return True
return not match.startswith("_")
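# Illustrative cases, matching the rules above:
#   few_enough_underscores("", "_private")     -> False
#   few_enough_underscores("_", "_private")    -> True
#   few_enough_underscores("_", "__dunder__")  -> False
#   few_enough_underscores("__", "__dunder__") -> True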
def _method_match_none(word: str, size: int, text: str) -> bool:
return False
def _method_match_simple(word: str, size: int, text: str) -> bool:
return word[:size] == text
def _method_match_substring(word: str, size: int, text: str) -> bool:
return text in word
def _method_match_fuzzy(word: str, size: int, text: str) -> bool:
s = r".*{}.*".format(".*".join(c for c in text))
return re.search(s, word) is not None
_MODES_MAP = {
AutocompleteModes.NONE: _method_match_none,
AutocompleteModes.SIMPLE: _method_match_simple,
AutocompleteModes.SUBSTRING: _method_match_substring,
AutocompleteModes.FUZZY: _method_match_fuzzy,
}
class BaseCompletionType:
"""Describes different completion types"""
def __init__(
self,
shown_before_tab: bool = True,
mode: AutocompleteModes = AutocompleteModes.SIMPLE,
) -> None:
self._shown_before_tab = shown_before_tab
self.method_match = _MODES_MAP[mode]
@abc.abstractmethod
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set[str]]:
"""Returns a list of possible matches given a line and cursor, or None
if this completion type isn't applicable.
        i.e., import completion doesn't make sense if the cursor isn't after
        an import or from statement, so it ought to return None.
Completion types are used to:
* `locate(cur, line)` their initial target word to replace given a
line and cursor
* find `matches(cur, line)` that might replace that word
* `format(match)` matches to be displayed to the user
* determine whether suggestions should be `shown_before_tab`
* `substitute(cur, line, match)` in a match for what's found with
`target`
"""
raise NotImplementedError
@abc.abstractmethod
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
"""Returns a Linepart namedtuple instance or None given cursor and line
A Linepart namedtuple contains a start, stop, and word. None is
returned if no target for this type of completion is found under
the cursor."""
raise NotImplementedError
def format(self, word: str) -> str:
return word
def substitute(
self, cursor_offset: int, line: str, match: str
) -> Tuple[int, str]:
"""Returns a cursor offset and line with match swapped in"""
lpart = self.locate(cursor_offset, line)
assert lpart
offset = lpart.start + len(match)
changed_line = line[: lpart.start] + match + line[lpart.stop :]
return offset, changed_line
@property
def shown_before_tab(self) -> bool:
"""Whether suggestions should be shown before the user hits tab, or only
once that has happened."""
return self._shown_before_tab
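# Rough flow sketch (illustrative): given a concrete completer `c`, callers
# roughly chain the pieces described in the docstrings above; the extra
# keyword arguments to matches() vary per completer (locals_, history, ...):
#   lpart = c.locate(cursor_offset, line)             # word under the cursor
#   suggestions = c.matches(cursor_offset, line, locals_=namespace)
#   shown = [c.format(m) for m in suggestions or ()]
#   cursor_offset, line = c.substitute(cursor_offset, line, chosen_match)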
class CumulativeCompleter(BaseCompletionType):
"""Returns combined matches from several completers"""
def __init__(
self,
completers: Sequence[BaseCompletionType],
mode: AutocompleteModes = AutocompleteModes.SIMPLE,
) -> None:
if not completers:
raise ValueError(
"CumulativeCompleter requires at least one completer"
)
self._completers: Sequence[BaseCompletionType] = completers
super().__init__(True, mode)
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
for completer in self._completers:
return_value = completer.locate(cursor_offset, line)
if return_value is not None:
return return_value
return None
def format(self, word: str) -> str:
return self._completers[0].format(word)
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
return_value = None
all_matches = set()
for completer in self._completers:
matches = completer.matches(
cursor_offset=cursor_offset, line=line, **kwargs
)
if matches is not None:
all_matches.update(matches)
return_value = all_matches
return return_value
class ImportCompletion(BaseCompletionType):
def __init__(
self,
module_gatherer: ModuleGatherer,
mode: AutocompleteModes = AutocompleteModes.SIMPLE,
):
super().__init__(False, mode)
self.module_gatherer = module_gatherer
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
return self.module_gatherer.complete(cursor_offset, line)
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_word(cursor_offset, line)
def format(self, word: str) -> str:
return after_last_dot(word)
class FilenameCompletion(BaseCompletionType):
def __init__(self, mode: AutocompleteModes = AutocompleteModes.SIMPLE):
super().__init__(False, mode)
def safe_glob(self, pathname: str) -> Iterator[str]:
return glob.iglob(glob.escape(pathname) + "*")
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
cs = lineparts.current_string(cursor_offset, line)
if cs is None:
return None
matches = set()
username = cs.word.split(os.path.sep, 1)[0]
user_dir = os.path.expanduser(username)
for filename in self.safe_glob(os.path.expanduser(cs.word)):
if os.path.isdir(filename):
filename += os.path.sep
if cs.word.startswith("~"):
filename = username + filename[len(user_dir) :]
matches.add(filename)
return matches
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_string(cursor_offset, line)
def format(self, filename: str) -> str:
if os.sep in filename[:-1]:
return filename[filename.rindex(os.sep, 0, -1) + 1 :]
else:
return filename
class AttrCompletion(BaseCompletionType):
attr_matches_re = LazyReCompile(r"(\w+(\.\w+)*)\.(\w*)")
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "locals_" not in kwargs:
return None
locals_ = cast(Dict[str, Any], kwargs["locals_"])
r = self.locate(cursor_offset, line)
if r is None:
return None
if locals_ is None: # TODO add a note about why
locals_ = __main__.__dict__
assert "." in r.word
for i in range(1, len(r.word) + 1):
if r.word[-i] == "[":
i -= 1
break
methodtext = r.word[-i:]
matches = {
"".join([r.word[:-i], m])
for m in self.attr_matches(methodtext, locals_)
}
return {
m
for m in matches
if few_enough_underscores(r.word.split(".")[-1], m.split(".")[-1])
}
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_dotted_attribute(cursor_offset, line)
def format(self, word: str) -> str:
return after_last_dot(word)
def attr_matches(self, text: str, namespace: Dict[str, Any]) -> List:
"""Taken from rlcompleter.py and bent to my will."""
m = self.attr_matches_re.match(text)
if not m:
return []
expr, attr = m.group(1, 3)
if expr.isdigit():
# Special case: float literal, using attrs here will result in
# a SyntaxError
return []
try:
obj = safe_eval(expr, namespace)
except EvaluationError:
return []
matches = self.attr_lookup(obj, expr, attr)
return matches
def attr_lookup(self, obj: Any, expr: str, attr: str) -> List:
"""Second half of attr_matches."""
words = self.list_attributes(obj)
if inspection.hasattr_safe(obj, "__class__"):
words.append("__class__")
klass = inspection.getattr_safe(obj, "__class__")
words = words + rlcompleter.get_class_members(klass)
if not isinstance(klass, abc.ABCMeta):
try:
words.remove("__abstractmethods__")
except ValueError:
pass
matches = []
n = len(attr)
for word in words:
if self.method_match(word, n, attr) and word != "__builtins__":
matches.append(f"{expr}.{word}")
return matches
def list_attributes(self, obj: Any) -> List[str]:
# TODO: re-implement dir using getattr_static to avoid using
# AttrCleaner here?
with inspection.AttrCleaner(obj):
return dir(obj)
class DictKeyCompletion(BaseCompletionType):
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "locals_" not in kwargs:
return None
locals_ = kwargs["locals_"]
r = self.locate(cursor_offset, line)
if r is None:
return None
current_dict_parts = lineparts.current_dict(cursor_offset, line)
if current_dict_parts is None:
return None
_, _, dexpr = current_dict_parts
try:
obj = safe_eval(dexpr, locals_)
except EvaluationError:
return None
if isinstance(obj, dict) and obj.keys():
matches = {
f"{k!r}]" for k in obj.keys() if repr(k).startswith(r.word)
}
return matches if matches else None
else:
return None
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_dict_key(cursor_offset, line)
def format(self, match: str) -> str:
return match[:-1]
class MagicMethodCompletion(BaseCompletionType):
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "current_block" not in kwargs:
return None
current_block = kwargs["current_block"]
r = self.locate(cursor_offset, line)
if r is None:
return None
if "class" not in current_block:
return None
return {name for name in MAGIC_METHODS if name.startswith(r.word)}
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_method_definition_name(cursor_offset, line)
class GlobalCompletion(BaseCompletionType):
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
if "locals_" not in kwargs:
return None
locals_ = kwargs["locals_"]
r = self.locate(cursor_offset, line)
if r is None:
return None
matches = set()
n = len(r.word)
for word in KEYWORDS:
if self.method_match(word, n, r.word):
matches.add(word)
for nspace in (builtins.__dict__, locals_):
for word, val in nspace.items():
# if identifier isn't ascii, don't complete (syntax error)
if word is None:
continue
if (
self.method_match(word, n, r.word)
and word != "__builtins__"
):
matches.add(_callable_postfix(val, word))
return matches if matches else None
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_single_word(cursor_offset, line)
class ParameterNameCompletion(BaseCompletionType):
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "argspec" not in kwargs:
return None
argspec = kwargs["argspec"]
if not argspec:
return None
r = self.locate(cursor_offset, line)
if r is None:
return None
if argspec:
matches = {
f"{name}="
for name in argspec[1][0]
if isinstance(name, str) and name.startswith(r.word)
}
matches.update(
name + "=" for name in argspec[1][4] if name.startswith(r.word)
)
return matches if matches else None
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_word(cursor_offset, line)
class ExpressionAttributeCompletion(AttrCompletion):
# could replace attr completion as a more general case with some work
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return lineparts.current_expression_attribute(cursor_offset, line)
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "locals_" not in kwargs:
return None
locals_ = kwargs["locals_"]
if locals_ is None:
locals_ = __main__.__dict__
attr = self.locate(cursor_offset, line)
assert attr, "locate was already truthy for the same call"
try:
obj = evaluate_current_expression(cursor_offset, line, locals_)
except EvaluationError:
return set()
# strips leading dot
matches = (m[1:] for m in self.attr_lookup(obj, "", attr.word))
return {m for m in matches if few_enough_underscores(attr.word, m)}
try:
import jedi
except ImportError:
class MultilineJediCompletion(BaseCompletionType): # type: ignore [no-redef]
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
return None
def locate(self, cursor_offset: int, line: str) -> Optional[LinePart]:
return None
else:
class JediCompletion(BaseCompletionType):
_orig_start: Optional[int]
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "history" not in kwargs:
return None
history = kwargs["history"]
if not lineparts.current_word(cursor_offset, line):
return None
history = "\n".join(history) + "\n" + line
try:
script = jedi.Script(history, path="fake.py")
completions = script.complete(
len(history.splitlines()), cursor_offset
)
except (jedi.NotFoundError, IndexError, KeyError):
# IndexError for #483
# KeyError for #544
self._orig_start = None
return None
if completions:
diff = len(completions[0].name) - len(completions[0].complete)
self._orig_start = cursor_offset - diff
else:
self._orig_start = None
return None
assert isinstance(self._orig_start, int)
first_letter = line[self._orig_start : self._orig_start + 1]
matches = [c.name for c in completions]
if any(
not m.lower().startswith(matches[0][0].lower()) for m in matches
):
# Too general - giving completions starting with multiple
# letters
return None
else:
# case-sensitive matches only
return {m for m in matches if m.startswith(first_letter)}
def locate(self, cursor_offset: int, line: str) -> LinePart:
assert isinstance(self._orig_start, int)
start = self._orig_start
end = cursor_offset
return LinePart(start, end, line[start:end])
class MultilineJediCompletion(JediCompletion): # type: ignore [no-redef]
def matches(
self, cursor_offset: int, line: str, **kwargs: Any
) -> Optional[Set]:
if "current_block" not in kwargs or "history" not in kwargs:
return None
current_block = kwargs["current_block"]
history = kwargs["history"]
if "\n" in current_block:
assert cursor_offset <= len(line), "{!r} {!r}".format(
cursor_offset,
line,
)
results = super().matches(cursor_offset, line, history=history)
return results
else:
return None
def get_completer(
completers: Sequence[BaseCompletionType],
cursor_offset: int,
line: str,
**kwargs: Any,
) -> Tuple[List[str], Optional[BaseCompletionType]]:
"""Returns a list of matches and an applicable completer
If no matches available, returns a tuple of an empty list and None
cursor_offset is the current cursor column
line is a string of the current line
kwargs (all optional):
locals_ is a dictionary of the environment
argspec is an inspect.ArgSpec instance for the current function where
the cursor is
current_block is the possibly multiline not-yet-evaluated block of
code which the current line is part of
complete_magic_methods is a bool of whether we ought to complete
double underscore methods like __len__ in method signatures
"""
for completer in completers:
try:
matches = completer.matches(cursor_offset, line, **kwargs)
except Exception as e:
# Instead of crashing the UI, log exceptions from autocompleters.
logger = logging.getLogger(__name__)
logger.debug(
"Completer {} failed with unhandled exception: {}".format(
completer, e
)
)
continue
if matches is not None:
return sorted(matches), (completer if matches else None)
return [], None
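# Illustrative usage sketch (not part of the original module; the argument
# values are hypothetical). Given the default completer stack, a caller such
# as the repl would typically do something like:
#
#   completers = get_default_completer(AutocompleteModes.SIMPLE, module_gatherer)
#   matches, completer = get_completer(
#       completers, cursor_offset=len("import o"), line="import o", locals_={})
#
# and then render ``matches`` (formatted via ``completer.format``) in the UI.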
def get_default_completer(
mode: AutocompleteModes, module_gatherer: ModuleGatherer
) -> Tuple[BaseCompletionType, ...]:
return (
(
DictKeyCompletion(mode=mode),
ImportCompletion(module_gatherer, mode=mode),
FilenameCompletion(mode=mode),
MagicMethodCompletion(mode=mode),
MultilineJediCompletion(mode=mode),
CumulativeCompleter(
(
GlobalCompletion(mode=mode),
ParameterNameCompletion(mode=mode),
),
mode=mode,
),
AttrCompletion(mode=mode),
ExpressionAttributeCompletion(mode=mode),
)
if mode != AutocompleteModes.NONE
else tuple()
)
def _callable_postfix(value: Any, word: str) -> str:
"""rlcompleter's _callable_postfix done right."""
if callable(value):
word += "("
return word
|
tests/st/ops/gpu/test_in_top_k.py
|
PowerOlive/mindspore
| 3,200 |
134178
|
<filename>tests/st/ops/gpu/test_in_top_k.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
class InTopKNet(nn.Cell):
def __init__(self, k):
super(InTopKNet, self).__init__()
self.in_top_k = P.InTopK(k)
def construct(self, predictions, targets):
return self.in_top_k(predictions, targets)
def in_top_k(nptype):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
predictions = Tensor(np.array([[4, 1, 2, 0, 0, 0, 0, 0, 0],
[7, 9, 9, 0, 0, 0, 0, 0, 0],
[3, 3, 3, 0, 0, 0, 0, 0, 0]]).astype(nptype))
k = 165
in_top_k_net = InTopKNet(k)
targets = Tensor(np.array([0, 1, 0]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
k = -2
in_top_k_net = InTopKNet(k)
targets = Tensor(np.array([0, 1, 0]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([False, False, False])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
k = 1
in_top_k_net = InTopKNet(k)
targets = Tensor(np.array([0, 1, 0]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
targets = Tensor(np.array([1, 0, 2]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([False, False, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
targets = Tensor(np.array([2, 2, 1]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([False, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
k = 2
in_top_k_net = InTopKNet(k)
targets = Tensor(np.array([0, 1, 2]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
targets = Tensor(np.array([2, 2, 0]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
targets = Tensor(np.array([1, 0, 1]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([False, False, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
k = 3
in_top_k_net = InTopKNet(k)
targets = Tensor(np.array([2, 2, 2]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
targets = Tensor(np.array([1, 1, 0]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
targets = Tensor(np.array([0, 0, 1]).astype(np.int32))
output = in_top_k_net(predictions, targets)
expected_output = np.array([True, True, True])
np.testing.assert_array_equal(output.asnumpy(), expected_output)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_in_top_k_float16():
in_top_k(np.float16)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_in_top_k_float32():
in_top_k(np.float32)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_in_top_k_invalid_input():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
# predictions must be 2d
with pytest.raises(ValueError):
in_top_k_net = InTopKNet(1)
predictions = Tensor(np.zeros(4).astype(np.float32))
targets = Tensor(np.zeros(4).astype(np.int32))
_ = in_top_k_net(predictions, targets)
# targets must be 1d
with pytest.raises(ValueError):
in_top_k_net = InTopKNet(1)
predictions = Tensor(np.zeros(4).astype(np.float32))
targets = Tensor(np.zeros(4).reshape(2, 2).astype(np.int32))
_ = in_top_k_net(predictions, targets)
    # predictions.shape[0] must be equal to targets.shape[0]
with pytest.raises(ValueError):
in_top_k_net = InTopKNet(1)
predictions = Tensor(np.zeros(4).reshape(2, 2).astype(np.float32))
targets = Tensor(np.zeros(4).astype(np.int32))
_ = in_top_k_net(predictions, targets)
|
leet/sorting/rankTeams.py
|
peterlamar/python-cp-cheatsheet
| 140 |
134181
|
<reponame>peterlamar/python-cp-cheatsheet
class Solution:
def rankTeams(self, votes: List[str]) -> str:
        # Build one score row per team, e.g. {'A': [0, 0, 0, 'A'], 'B': [0, 0, 0, 'B'], ...}
rnk = {v:[0] * len(votes[0]) + [v] for v in votes[0]}
        # Tally votes as negative counts so the default ascending sort puts higher totals first
for v in votes:
for i, c in enumerate(v):
rnk[c][i] -= 1
        # Sort by the score rows; the appended team letter breaks ties alphabetically
return "".join(sorted(rnk, key = lambda x: rnk[x]))
|
source/code/troubleshooter/modules/syslog_tst/syslog.py
|
akenamon/OMS-Agent-for-Linux
| 252 |
134192
|
<filename>source/code/troubleshooter/modules/syslog_tst/syslog.py<gh_stars>100-1000
from error_codes import *
from errors import error_info, is_error, print_errors
from helpers import geninfo_lookup, check_service_controller
from install.check_oms import get_oms_version
from install.install import check_installation
from connect.check_endpts import check_log_analytics_endpts
from connect.connect import check_connection
from heartbeat.heartbeat import start_omsagent, check_omsagent_running, check_heartbeat
from .check_conf import check_conf_files
from .check_rsysng import check_services
OMSADMIN_PATH = "/etc/opt/microsoft/omsagent/conf/omsadmin.conf"
def check_syslog(interactive, prev_success=NO_ERROR):
print("CHECKING FOR SYSLOG ISSUES...")
success = prev_success
# check if installed / connected / running correctly
print("Checking if omsagent installed and running...")
# check installation
if (get_oms_version() == None):
print_errors(ERR_OMS_INSTALL)
print("Running the installation part of the troubleshooter in order to find the issue...")
print("================================================================================")
return check_installation(interactive, err_codes=False, prev_success=ERR_FOUND)
# check connection
checked_la_endpts = check_log_analytics_endpts()
if (checked_la_endpts != NO_ERROR):
print_errors(checked_la_endpts)
print("Running the connection part of the troubleshooter in order to find the issue...")
print("================================================================================")
return check_connection(interactive, err_codes=False, prev_success=ERR_FOUND)
# check running
workspace_id = geninfo_lookup('WORKSPACE_ID')
if (workspace_id == None):
error_info.append(('Workspace ID', OMSADMIN_PATH))
return ERR_INFO_MISSING
checked_omsagent_running = check_omsagent_running(workspace_id)
if (checked_omsagent_running != NO_ERROR):
print_errors(checked_omsagent_running)
print("Running the general health part of the troubleshooter in order to find the issue...")
print("================================================================================")
return check_heartbeat(interactive, prev_success=ERR_FOUND)
# check for service controller
print("Checking if machine has a valid service controller...")
checked_sc = check_service_controller()
if (is_error(checked_sc)):
return checked_sc
else:
success = print_errors(checked_sc)
# check rsyslog / syslogng running
print("Checking if machine has rsyslog or syslog-ng running...")
checked_services = check_services()
if (is_error(checked_services)):
return print_errors(checked_services)
else:
success = print_errors(checked_services)
# check for syslog.conf and syslog destination file
print("Checking for syslog configuration files...")
checked_conf_files = check_conf_files()
if (is_error(checked_conf_files)):
if (checked_conf_files in [ERR_OMS_INSTALL, ERR_FILE_MISSING]):
print_errors(checked_conf_files)
print("Running the installation part of the troubleshooter in order to find the issue...")
print("================================================================================")
return check_installation(interactive, err_codes=False, prev_success=ERR_FOUND)
else:
return print_errors(checked_conf_files)
else:
success = print_errors(checked_conf_files)
return success
|
packages/pyright-internal/src/tests/samples/final5.py
|
Jasha10/pyright
| 3,934 |
134200
|
<reponame>Jasha10/pyright
# This sample tests that instance variables declared as Final within
# a dataclass do not need to have an explicit assignment because
# the generated __init__ method will assign them.
from dataclasses import dataclass
from typing import Final
class Foo1:
x: Final[int]
def __init__(self, x: int) -> None:
self.x = x
@dataclass
class Foo2:
x: Final[int]
|
cctbx/geometry_restraints/flags.py
|
dperl-sol/cctbx_project
| 155 |
134239
|
<reponame>dperl-sol/cctbx_project<gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from libtbx import adopt_init_args
import sys
class flags(object):
def __init__(self,
bond=None,
nonbonded=None,
angle=None,
dihedral=None,
reference_coordinate=None,
reference_dihedral=None,
ncs_dihedral=None,
den_restraints=None,
chirality=None,
planarity=None,
parallelity=None,
bond_similarity=None,
ramachandran_restraints=None,
default=False):
if (bond is None): bond = default
if (nonbonded is None): nonbonded = default
if (angle is None): angle = default
if (dihedral is None): dihedral = default
if (reference_coordinate is None): reference_coordinate = default
if (reference_dihedral is None): reference_dihedral = default
if (ncs_dihedral is None): ncs_dihedral = default
if den_restraints is None: den_restraints = default
if (chirality is None): chirality = default
if (planarity is None): planarity = default
if (parallelity is None): parallelity = default
if (bond_similarity is None): bond_similarity = default
if ramachandran_restraints is None: ramachandran_restraints = default
adopt_init_args(self, locals())
def show(self, f=None):
if (f is None): f = sys.stdout
print("geometry_restraints.manager.flags:", file=f)
print(" bond:", self.bond, file=f)
print(" nonbonded:", self.nonbonded, file=f)
print(" angle:", self.angle, file=f)
print(" dihedral:", self.dihedral, file=f)
print(" reference coordinate:", self.reference_coordinate, file=f)
print(" reference dihedral:", self.reference_dihedral, file=f)
print(" chirality:", self.chirality, file=f)
print(" planarity:", self.planarity, file=f)
print(" parallelity:", self.parallelity, file=f)
print(" bond similarity:", self.bond_similarity, file=f)
print(" ramachandran:", self.ramachandran_restraints, file=f)
print(" DEN:", self.den_restraints, file=f)
|
src/research/CIM/cim_to_matpower/test/test.py
|
mzy2240/GridCal
| 284 |
134250
|
import CIM2Matpower
# from scipy.io import savemat
cim_to_matpower_filename = 'CIM_to_Matpower_import'
cimfiles = ['./UCTE10_20090319_modified_EQ.xml',
'./UCTE10_20090319_modified_TP.xml',
'./UCTE10_20090319_modified_SV.xml']
boundary_profiles = []
mpc = CIM2Matpower.cim_to_mpc(cimfiles, boundary_profiles) #, 'imported_CIM.log')
# savemat(cim_to_matpower_filename+'.mat', mpc)
|
PDF/check_type.py
|
saneravi/ML_Stuff
| 209 |
134261
|
import pprint
import click
import fitz # pip install pymupdf
@click.command()
@click.argument("filepath", type=click.Path(exists=True))
def entrypoint(filepath):
pp = pprint.PrettyPrinter(indent=4)
with fitz.open(filepath) as doc:
pp.pprint(doc.metadata)
print(f"Scanned pages: {get_scanned_pages_percentage(filepath) * 100:0.1f}%")
class NoTextPagesException(RuntimeError):
pass
def get_scanned_pages_percentage(filepath: str) -> float:
"""
Return the percentage of pages with text which were scanned.
Note that this could raise a NoTextPagesException.
"""
total_pages = 0
total_scanned_pages = 0
with fitz.open(filepath) as doc:
for page in doc:
text = page.getText().strip()
if len(text) == 0:
# Ignore "empty" pages
continue
total_pages += 1
pix1 = page.getPixmap(alpha=False) # render page to an image
pix1.writePNG(f"page-{page.number}.png") # store image as a PNG
remove_all_text(doc, page)
pix2 = page.getPixmap(alpha=False)
pix2.writePNG(f"page-{page.number}-no-text.png")
img1 = pix1.getImageData("png")
img2 = pix2.getImageData("png")
if img1 == img2:
print(f"{page.number} was scanned or has no text")
if len(text) > 0:
print(f"\tHas text of length {len(text):,} characters")
total_scanned_pages += 1
else:
print(f"{page.number} was NOT scanned")
if total_pages == 0:
raise NoTextPagesException
return total_scanned_pages / total_pages
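# Illustrative usage sketch (the file name is hypothetical):
#   get_scanned_pages_percentage("report.pdf") returns a ratio in [0.0, 1.0];
#   a page counts as "scanned" when rendering it with and without its text
#   layer produces identical images, i.e. the text does not affect the pixels.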
def remove_all_text(doc, page):
page.cleanContents() # syntax cleaning of page appearance commands
# xref of the cleaned command source (bytes object)
xref = page.getContents()[0]
cont = doc.xrefStream(xref) # read it
    ba_cont = bytearray(cont)  # a modifiable version
pos = 0
changed = False # switch indicates changes
while pos < len(cont) - 1:
pos = ba_cont.find(b"BT\n", pos) # begin text object
if pos < 0:
break # not (more) found
pos2 = ba_cont.find(b"ET\n", pos) # end text object
if pos2 <= pos:
break # major error in PDF page definition!
ba_cont[pos : pos2 + 2] = b"" # remove text object
changed = True
if changed: # we have indeed removed some text
doc.updateStream(xref, ba_cont) # write back command stream w/o text
if __name__ == "__main__":
entrypoint()
|
Chapter13/webapp/babel/controllers.py
|
jayakumardhananjayan/pythonwebtut
| 135 |
134272
|
from flask import Blueprint, session, redirect, url_for
babel_blueprint = Blueprint(
'babel',
__name__,
url_prefix="/babel"
)
@babel_blueprint.route('/<string:locale>')
def index(locale):
session['locale'] = locale
return redirect(url_for('blog.home'))
|
extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/contourf_demo.py
|
dolboBobo/python3_ios
| 130 |
134287
|
<gh_stars>100-1000
"""
=============
Contourf Demo
=============
How to use the :meth:`.axes.Axes.contourf` method to create filled contour plots.
"""
import numpy as np
import matplotlib.pyplot as plt
origin = 'lower'
delta = 0.025
x = y = np.arange(-3.0, 3.01, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
nr, nc = Z.shape
# put NaNs in one corner:
Z[-nr // 6:, -nc // 6:] = np.nan
# contourf will convert these to masked
Z = np.ma.array(Z)
# mask another corner:
Z[:nr // 6, :nc // 6] = np.ma.masked
# mask a circle in the middle:
interior = np.sqrt((X**2) + (Y**2)) < 0.5
Z[interior] = np.ma.masked
# We are using automatic selection of contour levels;
# this is usually not such a good idea, because they don't
# occur on nice boundaries, but we do it here for purposes
# of illustration.
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(X, Y, Z, 10, cmap=plt.cm.bone, origin=origin)
# Note that in the following, we explicitly pass in a subset of
# the contour levels used for the filled contours. Alternatively,
# We could pass in additional levels to provide extra resolution,
# or leave out the levels kwarg to use all of the original levels.
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='r', origin=origin)
ax2.set_title('Nonsense (3 masked regions)')
ax2.set_xlabel('word length anomaly')
ax2.set_ylabel('sentence length anomaly')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('verbosity coefficient')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
fig2, ax2 = plt.subplots(constrained_layout=True)
# Now make a contour plot with the levels specified,
# and with the colormap generated automatically from a list
# of colors.
levels = [-1.5, -1, -0.5, 0, 0.5, 1]
CS3 = ax2.contourf(X, Y, Z, levels,
colors=('r', 'g', 'b'),
origin=origin,
extend='both')
# Our data range extends outside the range of levels; make
# data below the lowest contour level yellow, and above the
# highest level cyan:
CS3.cmap.set_under('yellow')
CS3.cmap.set_over('cyan')
CS4 = ax2.contour(X, Y, Z, levels,
colors=('k',),
linewidths=(3,),
origin=origin)
ax2.set_title('Listed colors (3 masked regions)')
ax2.clabel(CS4, fmt='%2.1f', colors='w', fontsize=14)
# Notice that the colorbar command gets all the information it
# needs from the ContourSet object, CS3.
fig2.colorbar(CS3)
# Illustrate all 4 possible "extend" settings:
extends = ["neither", "both", "min", "max"]
cmap = plt.cm.get_cmap("winter")
cmap.set_under("magenta")
cmap.set_over("yellow")
# Note: contouring simply excludes masked or nan regions, so
# instead of using the "bad" colormap value for them, it draws
# nothing at all in them. Therefore the following would have
# no effect:
# cmap.set_bad("red")
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax, extend in zip(axs.ravel(), extends):
cs = ax.contourf(X, Y, Z, levels, cmap=cmap, extend=extend, origin=origin)
fig.colorbar(cs, ax=ax, shrink=0.9)
ax.set_title("extend = %s" % extend)
ax.locator_params(nbins=4)
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods and classes is shown
# in this example:
import matplotlib
matplotlib.axes.Axes.contour
matplotlib.pyplot.contour
matplotlib.axes.Axes.contourf
matplotlib.pyplot.contourf
matplotlib.axes.Axes.clabel
matplotlib.pyplot.clabel
matplotlib.figure.Figure.colorbar
matplotlib.pyplot.colorbar
matplotlib.colors.Colormap
matplotlib.colors.Colormap.set_bad
matplotlib.colors.Colormap.set_under
matplotlib.colors.Colormap.set_over
|
Algo and DSA/LeetCode-Solutions-master/Python/binary-tree-coloring-game.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
134289
|
# Time: O(n)
# Space: O(h)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def btreeGameWinningMove(self, root, n, x):
"""
:type root: TreeNode
:type n: int
:type x: int
:rtype: bool
"""
def count(node, x, left_right):
if not node:
return 0
left, right = count(node.left, x, left_right), count(node.right, x, left_right)
if node.val == x:
left_right[0], left_right[1] = left, right
return left + right + 1
left_right = [0, 0]
count(root, x, left_right)
blue = max(max(left_right), n-(sum(left_right)+1))
return blue > n-blue
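# Illustrative example (hypothetical tree): with n = 11 nodes and x at the
# root whose left/right subtrees hold 5 and 5 nodes, the remaining region has
# 11 - (5 + 5 + 1) = 0 nodes, so blue = max(5, 5, 0) = 5, which is not more
# than 11 - 5 = 6, and the function returns False.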
|
examples/hello_coroutine.py
|
haypo/trollius
| 175 |
134290
|
"""Print 'Hello World' every two seconds, using a coroutine."""
import trollius
from trollius import From
@trollius.coroutine
def greet_every_two_seconds():
while True:
print('Hello World')
yield From(trollius.sleep(2))
if __name__ == '__main__':
loop = trollius.get_event_loop()
try:
loop.run_until_complete(greet_every_two_seconds())
finally:
loop.close()
|
dashboard/dashboard/edit_anomaly_configs.py
|
Martijnve23/catapult
| 1,894 |
134293
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for editing anomaly threshold configurations."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from dashboard import edit_config_handler
from dashboard.common import request_handler
from dashboard.models import anomaly_config
class EditAnomalyConfigsHandler(edit_config_handler.EditConfigHandler):
"""Handles editing the info about anomaly threshold configurations.
The post method is inherited from EditConfigHandler. It takes the request
parameters documented there, as well as the following parameter, which
is a property of AnomalyConfig:
config: A JSON dictionary mapping config parameters to values.
"""
def __init__(self, request, response):
super(EditAnomalyConfigsHandler,
self).__init__(request, response, anomaly_config.AnomalyConfig)
def get(self):
"""Renders the UI with the form."""
    # Note: this is similar to edit_sheriffs, and there may be some common
    # logic that could be extracted to EditConfigHandler.
def ConfigData(config):
return {
'config': json.dumps(config.config, indent=2, sort_keys=True),
'patterns': '\n'.join(sorted(config.patterns)),
}
anomaly_configs = {
config.key.string_id(): ConfigData(config)
for config in anomaly_config.AnomalyConfig.query()
}
self.RenderHtml(
'edit_anomaly_configs.html', {
'anomaly_config_json': json.dumps(anomaly_configs),
'anomaly_config_names': sorted(anomaly_configs.keys()),
})
def _UpdateFromRequestParameters(self, anomaly_config_entity):
"""Updates the given AnomalyConfig based on query parameters."""
# This overrides the method in the superclass.
anomaly_config_entity.config = self._GetAndValidateConfigContents()
def _GetAndValidateConfigContents(self):
"""Returns a config dict if one could be gotten, or None otherwise."""
config = self.request.get('config')
if not config:
raise request_handler.InvalidInputError('No config contents given.')
try:
config_dict = json.loads(config)
except (ValueError, TypeError) as json_parse_error:
raise request_handler.InvalidInputError(str(json_parse_error))
if not isinstance(config_dict, dict):
raise request_handler.InvalidInputError('Config was not a dict.')
return config_dict
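# Illustrative 'config' request payload (keys and values are hypothetical):
#   {"max_window_size": 50, "multiple_of_std_dev": 2.5, "min_relative_change": 0.01}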
|
python/oneflow/compatible/single_client/layers.py
|
wangyuyue/oneflow
| 3,285 |
134294
|
<gh_stars>1000+
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible.single_client.ops.categorical_ordinal_encode_op import (
categorical_ordinal_encoder,
)
from oneflow.compatible.single_client.ops.layers import (
batch_normalization,
batch_normalization_add_relu,
batch_normalization_relu,
conv1d,
conv2d,
conv3d,
dense,
layer_norm,
layer_norm_grad,
layer_norm_param_grad,
)
from oneflow.compatible.single_client.ops.layers import upsample as upsample_2d
from oneflow.compatible.single_client.ops.prelu import prelu
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/operations/__init__.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
134299
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._availability_sets_operations import AvailabilitySetsOperations
from ._virtual_machine_extension_images_operations import VirtualMachineExtensionImagesOperations
from ._virtual_machine_extensions_operations import VirtualMachineExtensionsOperations
from ._virtual_machine_images_operations import VirtualMachineImagesOperations
from ._usage_operations import UsageOperations
from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations
from ._virtual_machines_operations import VirtualMachinesOperations
from ._virtual_machine_scale_sets_operations import VirtualMachineScaleSetsOperations
from ._virtual_machine_scale_set_vms_operations import VirtualMachineScaleSetVMsOperations
__all__ = [
'AvailabilitySetsOperations',
'VirtualMachineExtensionImagesOperations',
'VirtualMachineExtensionsOperations',
'VirtualMachineImagesOperations',
'UsageOperations',
'VirtualMachineSizesOperations',
'VirtualMachinesOperations',
'VirtualMachineScaleSetsOperations',
'VirtualMachineScaleSetVMsOperations',
]
|
test/torchaudio_unittest/datasets/vctk_test.py
|
popcornell/audio
| 1,718 |
134328
|
<reponame>popcornell/audio<gh_stars>1000+
import os
from pathlib import Path
from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
# Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [
"Please call Stella",
"Ask her to bring these things",
"with her from the store",
"Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob",
"We also need a small plastic snake and a big toy frog for the kids",
"She can scoop these things into three red bags, and we will go meet her Wednesday at the train station",
"When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow",
"The rainbow is a division of white light into many beautiful colors",
"These take the shape of a long round arch, with its path high above, and its two ends \
apparently beyond the horizon",
"There is, according to legend, a boiling pot of gold at one end",
]
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = []
dataset_dir = os.path.join(root_dir, "VCTK-Corpus-0.92")
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 48000
seed = 0
for speaker in range(225, 230):
speaker_id = "p" + str(speaker)
audio_dir = os.path.join(dataset_dir, "wav48_silence_trimmed", speaker_id)
os.makedirs(audio_dir, exist_ok=True)
file_dir = os.path.join(dataset_dir, "txt", speaker_id)
os.makedirs(file_dir, exist_ok=True)
for utterance_id in range(1, 11):
filename = f"{speaker_id}_{utterance_id:03d}_mic2"
audio_file_path = os.path.join(audio_dir, filename + ".wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(audio_file_path, data, sample_rate)
txt_file_path = os.path.join(file_dir, filename[:-5] + ".txt")
transcript = _TRANSCRIPT[utterance_id - 1]
with open(txt_file_path, "w") as f:
f.write(transcript)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, utterance_id)
mocked_samples.append(sample)
seed += 1
return mocked_samples
class TestVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_vctk(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert int(utterance_id) == self.samples[i][4]
num_samples += 1
assert num_samples == len(self.samples)
def test_vctk_str(self):
dataset = vctk.VCTK_092(self.root_dir, audio_ext=".wav")
self._test_vctk(dataset)
def test_vctk_path(self):
dataset = vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav")
self._test_vctk(dataset)
|
utils/update_mir_test_checks.py
|
arunkumarbhattar/llvm
| 4,812 |
134332
|
#!/usr/bin/env python
"""Updates FileCheck checks in MIR tests.
This script is a utility to update MIR based tests with new FileCheck
patterns.
The checks added by this script will cover the entire body of each
function it handles. Virtual registers used are given names via
FileCheck patterns, so if you do want to check a subset of the body it
should be straightforward to trim out the irrelevant parts. None of
the YAML metadata will be checked, other than function names.
If there are multiple llc commands in a test, the full set of checks
will be repeated for each different check pattern. Checks for patterns
that are common between different commands will be left as-is by
default, or removed if the --remove-common-prefixes flag is provided.
"""
from __future__ import print_function
import argparse
import collections
import glob
import os
import re
import subprocess
import sys
from UpdateTestChecks import common
MIR_FUNC_NAME_RE = re.compile(r' *name: *(?P<func>[A-Za-z0-9_.-]+)')
MIR_BODY_BEGIN_RE = re.compile(r' *body: *\|')
MIR_BASIC_BLOCK_RE = re.compile(r' *bb\.[0-9]+.*:$')
VREG_RE = re.compile(r'(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?')
MI_FLAGS_STR = (
r'(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn '
r'|reassoc |nuw |nsw |exact |fpexcept )*')
VREG_DEF_RE = re.compile(
r'^ *(?P<vregs>{0}(?:, {0})*) = '
r'{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern, MI_FLAGS_STR))
MIR_PREFIX_DATA_RE = re.compile(r'^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)')
IR_FUNC_NAME_RE = re.compile(
r'^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\(')
IR_PREFIX_DATA_RE = re.compile(r'^ *(;|$)')
MIR_FUNC_RE = re.compile(
r'^---$'
r'\n'
r'^ *name: *(?P<func>[A-Za-z0-9_.-]+)$'
r'.*?'
r'^ *body: *\|\n'
r'(?P<body>.*?)\n'
r'^\.\.\.$',
flags=(re.M | re.S))
class LLC:
def __init__(self, bin):
self.bin = bin
def __call__(self, args, ir):
if ir.endswith('.mir'):
args = '{} -x mir'.format(args)
with open(ir) as ir_file:
stdout = subprocess.check_output('{} {}'.format(self.bin, args),
shell=True, stdin=ir_file)
if sys.version_info[0] > 2:
stdout = stdout.decode()
# Fix line endings to unix CR style.
stdout = stdout.replace('\r\n', '\n')
return stdout
class Run:
def __init__(self, prefixes, cmd_args, triple):
self.prefixes = prefixes
self.cmd_args = cmd_args
self.triple = triple
def __getitem__(self, index):
return [self.prefixes, self.cmd_args, self.triple][index]
def log(msg, verbose=True):
if verbose:
print(msg, file=sys.stderr)
def find_triple_in_ir(lines, verbose=False):
for l in lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
return m.group(1)
return None
def find_run_lines(test, lines, verbose=False):
raw_lines = [m.group(1)
for m in [common.RUN_LINE_RE.match(l) for l in lines] if m]
run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
for l in raw_lines[1:]:
if run_lines[-1].endswith("\\"):
run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
else:
run_lines.append(l)
if verbose:
log('Found {} RUN lines:'.format(len(run_lines)))
for l in run_lines:
log(' RUN: {}'.format(l))
return run_lines
def build_run_list(test, run_lines, verbose=False):
run_list = []
all_prefixes = []
for l in run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
commands = [cmd.strip() for cmd in l.split('|', 1)]
llc_cmd = commands[0]
filecheck_cmd = commands[1] if len(commands) > 1 else ''
common.verify_filecheck_prefixes(filecheck_cmd)
if not llc_cmd.startswith('llc '):
common.warn('Skipping non-llc RUN line: {}'.format(l), test_file=test)
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: {}'.format(l),
test_file=test)
continue
triple = None
m = common.TRIPLE_ARG_RE.search(llc_cmd)
if m:
triple = m.group(1)
# If we find -march but not -mtriple, use that.
m = common.MARCH_ARG_RE.search(llc_cmd)
if m and not triple:
triple = '{}--'.format(m.group(1))
cmd_args = llc_cmd[len('llc'):].strip()
cmd_args = cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [
item
for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
all_prefixes += check_prefixes
run_list.append(Run(check_prefixes, cmd_args, triple))
# Remove any common prefixes. We'll just leave those entirely alone.
common_prefixes = set([prefix for prefix in all_prefixes
if all_prefixes.count(prefix) > 1])
for run in run_list:
run.prefixes = [p for p in run.prefixes if p not in common_prefixes]
return run_list, common_prefixes
def find_functions_with_one_bb(lines, verbose=False):
result = []
cur_func = None
bbs = 0
for line in lines:
m = MIR_FUNC_NAME_RE.match(line)
if m:
if bbs == 1:
result.append(cur_func)
cur_func = m.group('func')
bbs = 0
m = MIR_BASIC_BLOCK_RE.match(line)
if m:
bbs += 1
if bbs == 1:
result.append(cur_func)
return result
def build_function_body_dictionary(test, raw_tool_output, triple, prefixes,
func_dict, verbose):
for m in MIR_FUNC_RE.finditer(raw_tool_output):
func = m.group('func')
body = m.group('body')
if verbose:
log('Processing function: {}'.format(func))
for l in body.splitlines():
log(' {}'.format(l))
for prefix in prefixes:
if func in func_dict[prefix] and func_dict[prefix][func] != body:
common.warn('Found conflicting asm for prefix: {}'.format(prefix),
test_file=test)
func_dict[prefix][func] = body
def add_checks_for_function(test, output_lines, run_list, func_dict, func_name,
single_bb, verbose=False):
printed_prefixes = set()
for run in run_list:
for prefix in run.prefixes:
if prefix in printed_prefixes:
continue
if not func_dict[prefix][func_name]:
continue
# if printed_prefixes:
# # Add some space between different check prefixes.
# output_lines.append('')
printed_prefixes.add(prefix)
log('Adding {} lines for {}'.format(prefix, func_name), verbose)
add_check_lines(test, output_lines, prefix, func_name, single_bb,
func_dict[prefix][func_name].splitlines())
break
return output_lines
def add_check_lines(test, output_lines, prefix, func_name, single_bb,
func_body):
if single_bb:
# Don't bother checking the basic block label for a single BB
func_body.pop(0)
if not func_body:
common.warn('Function has no instructions to check: {}'.format(func_name),
test_file=test)
return
first_line = func_body[0]
indent = len(first_line) - len(first_line.lstrip(' '))
# A check comment, indented the appropriate amount
check = '{:>{}}; {}'.format('', indent, prefix)
output_lines.append('{}-LABEL: name: {}'.format(check, func_name))
vreg_map = {}
for func_line in func_body:
if not func_line.strip():
continue
m = VREG_DEF_RE.match(func_line)
if m:
for vreg in VREG_RE.finditer(m.group('vregs')):
name = mangle_vreg(m.group('opcode'), vreg_map.values())
vreg_map[vreg.group(1)] = name
func_line = func_line.replace(
vreg.group(1), '[[{}:%[0-9]+]]'.format(name), 1)
for number, name in vreg_map.items():
func_line = re.sub(r'{}\b'.format(number), '[[{}]]'.format(name),
func_line)
check_line = '{}: {}'.format(check, func_line[indent:]).rstrip()
output_lines.append(check_line)
def mangle_vreg(opcode, current_names):
base = opcode
# Simplify some common prefixes and suffixes
if opcode.startswith('G_'):
base = base[len('G_'):]
if opcode.endswith('_PSEUDO'):
        base = base[:-len('_PSEUDO')]
# Shorten some common opcodes with long-ish names
base = dict(IMPLICIT_DEF='DEF',
GLOBAL_VALUE='GV',
CONSTANT='C',
FCONSTANT='C',
MERGE_VALUES='MV',
UNMERGE_VALUES='UV',
INTRINSIC='INT',
INTRINSIC_W_SIDE_EFFECTS='INT',
INSERT_VECTOR_ELT='IVEC',
EXTRACT_VECTOR_ELT='EVEC',
SHUFFLE_VECTOR='SHUF').get(base, base)
# Avoid ambiguity when opcodes end in numbers
if len(base.rstrip('0123456789')) < len(base):
base += '_'
i = 0
for name in current_names:
if name.rstrip('0123456789') == base:
i += 1
if i:
return '{}{}'.format(base, i)
return base
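# Illustrative behaviour (a sketch based on the table above):
#   mangle_vreg('G_CONSTANT', []) -> 'C'; a second G_CONSTANT in the same
#   function body, i.e. mangle_vreg('G_CONSTANT', ['C']), becomes 'C1'.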
def should_add_line_to_output(input_line, prefix_set):
# Skip any check lines that we're handling.
m = common.CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
return False
return True
def update_test_file(args, test):
log('Scanning for RUN lines in test file: {}'.format(test), args.verbose)
with open(test) as fd:
input_lines = [l.rstrip() for l in fd]
script_name = os.path.basename(__file__)
first_line = input_lines[0] if input_lines else ""
if 'autogenerated' in first_line and script_name not in first_line:
common.warn("Skipping test which wasn't autogenerated by " +
script_name + ": " + test)
return
if args.update_only:
if not first_line or 'autogenerated' not in first_line:
common.warn("Skipping test which isn't autogenerated: " + test)
return
triple_in_ir = find_triple_in_ir(input_lines, args.verbose)
run_lines = find_run_lines(test, input_lines, args.verbose)
run_list, common_prefixes = build_run_list(test, run_lines, args.verbose)
simple_functions = find_functions_with_one_bb(input_lines, args.verbose)
func_dict = {}
for run in run_list:
for prefix in run.prefixes:
func_dict.update({prefix: dict()})
for prefixes, llc_args, triple_in_cmd in run_list:
log('Extracted LLC cmd: llc {}'.format(llc_args), args.verbose)
log('Extracted FileCheck prefixes: {}'.format(prefixes), args.verbose)
raw_tool_output = args.llc(llc_args, test)
if not triple_in_cmd and not triple_in_ir:
common.warn('No triple found: skipping file', test_file=test)
return
build_function_body_dictionary(test, raw_tool_output,
triple_in_cmd or triple_in_ir,
prefixes, func_dict, args.verbose)
state = 'toplevel'
func_name = None
prefix_set = set([prefix for run in run_list for prefix in run.prefixes])
log('Rewriting FileCheck prefixes: {}'.format(prefix_set), args.verbose)
if args.remove_common_prefixes:
prefix_set.update(common_prefixes)
elif common_prefixes:
common.warn('Ignoring common prefixes: {}'.format(common_prefixes),
test_file=test)
comment_char = '#' if test.endswith('.mir') else ';'
autogenerated_note = ('{} NOTE: Assertions have been autogenerated by '
'utils/{}'.format(comment_char, script_name))
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if input_line == autogenerated_note:
continue
if state == 'toplevel':
m = IR_FUNC_NAME_RE.match(input_line)
if m:
state = 'ir function prefix'
func_name = m.group('func')
if input_line.rstrip('| \r\n') == '---':
state = 'document'
output_lines.append(input_line)
elif state == 'document':
m = MIR_FUNC_NAME_RE.match(input_line)
if m:
state = 'mir function metadata'
func_name = m.group('func')
if input_line.strip() == '...':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'mir function metadata':
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
m = MIR_BODY_BEGIN_RE.match(input_line)
if m:
if func_name in simple_functions:
# If there's only one block, put the checks inside it
state = 'mir function prefix'
continue
state = 'mir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=False,
verbose=args.verbose)
elif state == 'mir function prefix':
m = MIR_PREFIX_DATA_RE.match(input_line)
if not m:
state = 'mir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=True,
verbose=args.verbose)
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'mir function body':
if input_line.strip() == '...':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'ir function prefix':
m = IR_PREFIX_DATA_RE.match(input_line)
if not m:
state = 'ir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=False,
verbose=args.verbose)
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'ir function body':
if input_line.strip() == '}':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
log('Writing {} lines to {}...'.format(len(output_lines), test), args.verbose)
with open(test, 'wb') as fd:
fd.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show verbose output')
parser.add_argument('--llc-binary', dest='llc', default='llc', type=LLC,
help='The "llc" binary to generate the test case with')
parser.add_argument('--remove-common-prefixes', action='store_true',
help='Remove existing check lines whose prefixes are '
'shared between multiple commands')
parser.add_argument('-u', '--update-only', action='store_true',
help='Only update test if it was already autogened')
parser.add_argument('tests', nargs='+')
args = parser.parse_args()
test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
for test in test_paths:
try:
update_test_file(args, test)
except Exception:
common.warn('Error processing file', test_file=test)
raise
if __name__ == '__main__':
main()
|
src/tests/migration/test_bookkeeping_migration.py
|
mv-idatalytics/jenkins-byro
| 114 |
134337
|
import pytest
from django.db.models import Q
from helper import TestMigrations
class TestWithShackdataBase(TestMigrations):
app = "bookkeeping"
migrate_fixtures = ["tests/fixtures/test_shackspace_transactions.json"]
migrate_from = "0012_auto_20180617_1926"
@pytest.mark.xfail
@pytest.mark.django_db
class TestBookkeepingMigrationsFirst(TestWithShackdataBase):
migrate_to = "0013_new_data_model"
def setUpBeforeMigration(self, apps):
RealTransaction = apps.get_model("bookkeeping", "RealTransaction")
VirtualTransaction = apps.get_model("bookkeeping", "VirtualTransaction")
# For test comparison
self.real_transaction_count = RealTransaction.objects.count()
self.virtual_transaction_w_src_count = VirtualTransaction.objects.filter(
source_account__isnull=False
).count()
self.virtual_transaction_w_dst_count = VirtualTransaction.objects.filter(
destination_account__isnull=False
).count()
self.virtual_transaction_member_fees_count = VirtualTransaction.objects.filter(
Q(
source_account__isnull=True,
destination_account__account_category="member_fees",
real_transaction__isnull=True,
)
| Q(
destination_account__isnull=True,
source_account__account_category="member_fees",
real_transaction__isnull=True,
)
).count()
self.orphan_virtual_transaction_count = VirtualTransaction.objects.filter(
real_transaction=None
).count()
self.reversed_transactions = {
rt: rt.reverses
for rt in RealTransaction.objects.filter(reverses__isnull=False).all()
}
def test_accounts_migrated(self):
from byro.bookkeeping.models import Account
assert Account.objects.filter(tags__name="bank").count() == 1
assert Account.objects.filter(tags__name="fees").count() == 1
assert Account.objects.filter(tags__name="fees_receivable").count() == 1
def test_transactions_migrated(self):
from byro.bookkeeping.models import Booking, Transaction
# All RealTransaction lead to one Transaction, as do VirtualTransaction with no RealTransaction
assert (
Transaction.objects.count()
== self.real_transaction_count + self.orphan_virtual_transaction_count
)
# All VirtualTransaction lead to one Booking per direction, as does each RealTransaction
# VirtualTransaction referencing 'member_fees' have an additional implicit direction
assert (
Booking.objects.count()
== self.virtual_transaction_w_src_count
+ self.virtual_transaction_w_dst_count
+ self.real_transaction_count
+ self.virtual_transaction_member_fees_count
)
def test_reverses_migrated(self):
assert len(self.reversed_transactions) > 0
from byro.bookkeeping.models import Transaction
for rt, rt_reverses in self.reversed_transactions.items():
t = Transaction.objects.filter(
Q(memo=rt.purpose) | Q(bookings__memo=rt.purpose)
).first()
t_reverses = Transaction.objects.filter(
Q(memo=rt_reverses.purpose) | Q(bookings__memo=rt_reverses.purpose)
).first()
assert t
assert t_reverses
assert t.reverses == t_reverses
def test_amounts_migrated(self):
from byro.bookkeeping.models import Booking
assert Booking.objects.filter(amount__lt=0).count() == 0
@pytest.mark.xfail
@pytest.mark.django_db
class TestBookkeepingMigrationsFinal(TestWithShackdataBase):
migrate_to = "0014_auto_20180707_1410"
def test_accounts_migrated_fully(self):
from byro.bookkeeping.models import Account, AccountCategory
assert (
Account.objects.exclude(
account_category__in=[
AccountCategory.ASSET,
AccountCategory.LIABILITY,
AccountCategory.INCOME,
AccountCategory.EXPENSE,
AccountCategory.EQUITY,
]
).count()
== 0
)
|
Autocoders/Python/src/fprime_ac/generators/writers/GTestWriterBase.py
|
SSteve/fprime
| 9,182 |
134339
|
<filename>Autocoders/Python/src/fprime_ac/generators/writers/GTestWriterBase.py<gh_stars>1000+
# ===============================================================================
# NAME: GTestWriterBase.py
#
# DESCRIPTION: A base class for GTest writers
#
# AUTHOR: <NAME>
# EMAIL: <EMAIL>
# DATE CREATED : July 8, 2019
#
# Copyright 2015, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
from fprime_ac.generators.writers import ComponentWriterBase
class GTestWriterBase(ComponentWriterBase.ComponentWriterBase):
"""
A base class for GTest writers
"""
def transformEnumType(self, c, type, typeinfo):
return c.component_base + "::" + type if typeinfo == "enum" else type
def getTlmType(self, c):
def f(type, typeinfo):
if type == "string":
type = "Fw::TlmString"
return self.transformEnumType(c, type, typeinfo)
return f
def transformEventParams(self, c, params):
def transformEventParam(param):
name, type, comment, size, typeinfo = param
if typeinfo == "string":
return (name, "const char *const", comment, typeinfo)
elif typeinfo == "enum":
return (name, c.component_base + "::" + type, comment, typeinfo)
else:
return (name, "const " + type, comment, typeinfo)
return list(map(transformEventParam, params))
def getEventParams(self, c):
def f(eventName):
params = c.event_params[eventName]
return self.transformEventParams(c, params)
return f
def getParamValTlm(self, c):
def f(type, typeinfo):
if type == "string":
type = "const char *const"
else:
type = self.transformEnumType(c, type, typeinfo)
type = "const " + type + "&"
return ("val", type, "The channel value")
return f
def initGTest(self, obj, c):
self.init(obj, c)
c.gtest_base = c.name() + "GTestBase"
c.tester_base = c.name() + "TesterBase"
c.get_event_params = self.getEventParams(c)
c.get_param_val_Tlm = self.getParamValTlm(c)
c.param_maxHistorySize = (
"maxHistorySize",
"const U32",
"The maximum size of each history",
)
c.param_fileName = (
"__callSiteFileName",
"const char *const",
"The name of the file containing the call site",
"",
)
c.param_lineNumber = (
"__callSiteLineNumber",
"const U32",
"The line number of the call site",
"",
)
c.param_size = ("size", "const U32", "The asserted size", "")
c.param_index = ("__index", "const U32", "The index", "")
c.params_assert_size = [c.param_fileName, c.param_lineNumber, c.param_size]
c.params_assert_cmd_response = [
c.param_fileName,
c.param_lineNumber,
c.param_index,
c.param_opCode,
c.param_cmdSeq,
c.param_response,
]
c.params_assert_event = [c.param_fileName, c.param_lineNumber, c.param_index]
c.params_assert_from_port = c.params_assert_event
|
python/helpers/pydev/pydevd_attach_to_process/linux/lldb_threads_settrace.py
|
truthiswill/intellij-community
| 229 |
134355
|
<gh_stars>100-1000
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
# print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
options = lldb.SBExpressionOptions()
options.SetFetchDynamicValue()
options.SetTryAllThreads(run_others=False)
options.SetTimeoutInMicroSeconds(timeout=10000000)
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
# print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
if internal_dict.get('_thread_%d' % thread.GetThreadID(), False):
process.SetSelectedThread(thread)
if not thread.IsStopped():
# thread.Suspend()
error = process.Stop()
frame = thread.GetSelectedFrame()
if frame.GetFunctionName() == '__select':
# print('We are in __select')
# Step over select, otherwise evaluating expression there can terminate thread
thread.StepOver()
frame = thread.GetSelectedFrame()
print('Will settrace in: %s' % (frame,))
for f in thread:
print(f)
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), options)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
esp32/frozen/_boot.py
|
peterson79/pycom-micropython-sigfox
| 198 |
134363
|
<reponame>peterson79/pycom-micropython-sigfox
# _boot.py -- always run on boot-up, even during safe boot
import os
from machine import UART
os.dupterm(UART(0, 115200))
|
common/ops/gather_ops.py
|
vahidk/TensorflowFramework
| 129 |
134375
|
<filename>common/ops/gather_ops.py
"""Gather ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from common.ops import shape_ops
def batch_gather(tensor, indices):
"""Gather in batch from a tensor of arbitrary size.
  In pseudocode this module will produce the following:
output[i] = tf.gather(tensor[i], indices[i])
Args:
tensor: Tensor of arbitrary size.
indices: Vector of indices.
Returns:
output: A tensor of gathered values.
"""
shape = shape_ops.get_shape(tensor)
flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])
indices = tf.convert_to_tensor(indices)
offset_shape = [shape[0]] + [1] * (indices.shape.ndims - 1)
offset = tf.reshape(tf.range(shape[0]) * shape[1], offset_shape)
output = tf.gather(flat_first, indices + offset)
return output
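# Illustrative shape example (a sketch, not part of the original module):
#   with tensor of shape [batch=2, time=3, depth=4] and indices of shape [2],
#   batch_gather(tensor, indices) has shape [2, 4] and
#   output[i] == tensor[i, indices[i]].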
|
PYTHON/pattern.py
|
ayushyado/HACKTOBERFEST2021-2
| 125 |
134399
|
# Python program to draw square
# using Turtle Programming
import turtle
skk = turtle.Turtle()
for i in range(4):
skk.forward(50)
skk.right(90)
turtle.done()
# Python program to draw star
# using Turtle Programming
import turtle
star = turtle.Turtle()
star.right(75)
star.forward(100)
for i in range(4):
star.right(144)
star.forward(100)
turtle.done()
|
Allura/allura/tests/scripts/test_delete_projects.py
|
rohankumardubey/allura
| 113 |
134400
|
<filename>Allura/allura/tests/scripts/test_delete_projects.py
# coding=utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
from ming.odm import session, Mapper, ThreadLocalODMSession
from mock import patch
from tg import app_globals as g
from alluratest.tools import assert_equal
from alluratest.controller import TestController
from allura.tests.decorators import audits, out_audits, with_user_project
from allura import model as M
from allura.scripts import delete_projects
from allura.lib import plugin
class TestDeleteProjects(TestController):
def setUp(self):
super(TestDeleteProjects, self).setUp()
n = M.Neighborhood.query.get(name='Projects')
admin = M.User.by_username('test-admin')
self.p_shortname = 'test-delete'
self.proj = n.register_project(self.p_shortname, admin)
def run_script(self, options):
cls = delete_projects.DeleteProjects
opts = cls.parser().parse_args(options)
cls.execute(opts)
def things_related_to_project(self, pid):
result = []
ac_ids = [ac._id for ac in M.AppConfig.query.find(dict(project_id=pid))]
for m in Mapper.all_mappers():
cls = m.mapped_class
things = None
if 'project_id' in m.property_index:
things = cls.query.find(dict(project_id=pid)).all()
elif 'app_config_id' in m.property_index:
things = cls.query.find(dict(app_config_id={'$in': ac_ids})).all()
if things:
result.extend(things)
return result
def test_project_is_deleted(self):
p = M.Project.query.get(shortname=self.p_shortname)
assert p is not None, 'Can not find project to delete'
self.run_script(['p/{}'.format(p.shortname)])
session(p).expunge(p)
p = M.Project.query.get(shortname=p.shortname)
assert p is None, 'Project is not deleted'
def test_artifacts_are_deleted(self):
pid = M.Project.query.get(shortname=self.p_shortname)._id
things = self.things_related_to_project(pid)
assert len(things) > 0, 'No things related to project to begin with'
self.run_script(['p/{}'.format(self.p_shortname)])
things = self.things_related_to_project(pid)
assert len(things) == 0, 'Not all things are deleted: %s' % things
def test_subproject_is_deleted(self):
p = M.Project.query.get(shortname='test/sub1')
assert p is not None, 'Can not find subproject to delete'
self.run_script(['p/test/sub1'])
session(p).expunge(p)
p = M.Project.query.get(shortname='test/sub1')
assert p is None, 'Project is not deleted'
p = M.Project.query.get(shortname='test')
assert p is not None, 'Parent project should not be deleted'
def test_subproject_artifacts_are_deleted(self):
parent_pid = M.Project.query.get(shortname='test')._id
pid = M.Project.query.get(shortname='test/sub1')._id
things = self.things_related_to_project(pid)
assert len(things) > 0, 'No things related to subproject to begin with'
parent_things_before = self.things_related_to_project(parent_pid)
self.run_script(['p/test/sub1'])
things = self.things_related_to_project(pid)
assert len(things) == 0, 'Not all things are deleted: %s' % things
parent_things_after = self.things_related_to_project(parent_pid)
assert_equal(len(parent_things_before), len(parent_things_after))
@patch('allura.lib.plugin.solr_del_project_artifacts', autospec=True)
def test_solr_index_is_deleted(self, del_solr):
pid = M.Project.query.get(shortname=self.p_shortname)._id
self.run_script(['p/{}'.format(self.p_shortname)])
del_solr.post.assert_called_once_with(pid)
@with_user_project('test-user')
@patch('allura.model.auth.request')
@patch('allura.lib.helpers.request')
def test_userproject_does_disable(self, req, req2):
req.remote_addr = None
req.user_agent = 'MozFoo'
req2.url = None
self.run_script(['u/test-user'])
assert M.User.by_username('test-user').disabled
@patch.object(plugin.g, 'post_event', autospec=True)
def test_event_is_fired(self, post_event):
pid = M.Project.query.get(shortname=self.p_shortname)._id
self.run_script(['p/{}'.format(self.p_shortname)])
post_event.assert_called_once_with('project_deleted', project_id=pid, reason=None)
@patch.object(plugin.g, 'post_event', autospec=True)
@patch('allura.scripts.delete_projects.log', autospec=True)
def test_delete_with_reason(self, log, post_event):
p = M.Project.query.get(shortname=self.p_shortname)
pid = p._id
assert p is not None, 'Can not find project to delete'
self.run_script(['-r', 'The Reason¢¢', 'p/{}'.format(p.shortname)])
session(p).expunge(p)
p = M.Project.query.get(shortname=p.shortname)
assert p is None, 'Project is not deleted'
log.info.assert_called_once_with('Purging %s Reason: %s', '/p/test-delete/', 'The Reason¢¢')
post_event.assert_called_once_with('project_deleted', project_id=pid, reason='The Reason¢¢')
def _disable_users(self, disable):
dev = M.User.by_username('test-user')
self.proj.add_user(dev, ['Developer'])
ThreadLocalODMSession.flush_all()
g.credentials.clear()
proj = 'p/{}'.format(self.p_shortname)
msg = 'Account disabled because project /{} is deleted. Reason: The Reason'.format(proj)
opts = ['-r', 'The Reason', proj]
if disable:
opts.insert(0, '--disable-users')
_audit = audits if disable else out_audits
with _audit(msg, user=True):
self.run_script(opts)
admin = M.User.by_username('test-admin')
dev = M.User.by_username('test-user')
assert admin.disabled is disable
assert dev.disabled is disable
@patch('allura.model.auth.request')
@patch('allura.lib.helpers.request')
def test_disable_users(self, req, req2):
req.remote_addr = None
req.user_agent = 'MozFoo'
req2.url = None
self._disable_users(disable=True)
def test_not_disable_users(self):
self._disable_users(disable=False)
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_serialize.py
|
Mannan2812/azure-cli-extensions
| 207 |
134418
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from azure.core import MatchConditions
from ._parser import _datetime_to_str, _get_file_permission
from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo
_SUPPORTED_API_VERSIONS = [
'2019-02-02',
'2019-07-07',
'2019-12-12',
'2020-02-10',
]
def _get_match_headers(kwargs, match_param, etag_param):
# type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str])
    # TODO: extract this method to a shared folder and add some comments, so that share, datalake and blob can use it.
if_match = None
if_none_match = None
match_condition = kwargs.pop(match_param, None)
if match_condition == MatchConditions.IfNotModified:
if_match = kwargs.pop(etag_param, None)
if not if_match:
raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
elif match_condition == MatchConditions.IfPresent:
if_match = '*'
elif match_condition == MatchConditions.IfModified:
if_none_match = kwargs.pop(etag_param, None)
if not if_none_match:
raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
elif match_condition == MatchConditions.IfMissing:
if_none_match = '*'
elif match_condition is None:
if etag_param in kwargs:
raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
else:
raise TypeError("Invalid match condition: {}".format(match_condition))
return if_match, if_none_match
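# Illustrative sketch (added for clarity; not part of the original module),
# assuming azure-core's MatchConditions enum:
#
#   kwargs = {'match_condition': MatchConditions.IfNotModified, 'etag': '"0x1"'}
#   _get_match_headers(kwargs, 'match_condition', 'etag')  # -> ('"0x1"', None)
#
#   kwargs = {'match_condition': MatchConditions.IfModified, 'etag': '"0x1"'}
#   _get_match_headers(kwargs, 'match_condition', 'etag')  # -> (None, '"0x1"')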
def get_source_conditions(kwargs):
# type: (Dict[str, Any]) -> SourceModifiedAccessConditions
if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
return SourceModifiedAccessConditions(
source_if_modified_since=kwargs.pop('source_if_modified_since', None),
source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
source_if_match=if_match or kwargs.pop('source_if_match', None),
source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
)
def get_access_conditions(lease):
# type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
try:
lease_id = lease.id # type: ignore
except AttributeError:
lease_id = lease # type: ignore
return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
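# Illustrative sketch (added for clarity; not part of the original module):
# a ShareLeaseClient-like object (anything with an .id) or a plain lease id
# string both work, and a falsy lease yields None.
#
#   get_access_conditions('my-lease-id')  # -> LeaseAccessConditions(lease_id='my-lease-id')
#   get_access_conditions(None)           # -> None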
def get_smb_properties(kwargs):
# type: (Dict[str, Any]) -> Dict[str, Any]
ignore_read_only = kwargs.pop('ignore_read_only', None)
set_archive_attribute = kwargs.pop('set_archive_attribute', None)
file_permission = kwargs.pop('file_permission', None)
file_permission_key = kwargs.pop('permission_key', None)
file_attributes = kwargs.pop('file_attributes', None)
file_creation_time = kwargs.pop('file_creation_time', None) or ""
file_last_write_time = kwargs.pop('file_last_write_time', None) or ""
file_permission_copy_mode = None
file_permission = _get_file_permission(file_permission, file_permission_key, None)
if file_permission:
if file_permission.lower() == "source":
file_permission = None
file_permission_copy_mode = "source"
else:
file_permission_copy_mode = "override"
elif file_permission_key:
if file_permission_key.lower() == "source":
file_permission_key = None
file_permission_copy_mode = "source"
else:
file_permission_copy_mode = "override"
return {
'file_permission': file_permission,
'file_permission_key': file_permission_key,
'copy_file_smb_info': CopyFileSmbInfo(
file_permission_copy_mode=file_permission_copy_mode,
ignore_read_only=ignore_read_only,
file_attributes=file_attributes,
file_creation_time=_datetime_to_str(file_creation_time),
file_last_write_time=_datetime_to_str(file_last_write_time),
set_archive_attribute=set_archive_attribute
)
}
def get_api_version(kwargs, default):
# type: (Dict[str, Any]) -> str
api_version = kwargs.pop('api_version', None)
if api_version and api_version not in _SUPPORTED_API_VERSIONS:
versions = '\n'.join(_SUPPORTED_API_VERSIONS)
raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions))
return api_version or default
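# Illustrative sketch (added for clarity; not part of the original module):
#
#   get_api_version({}, '2020-02-10')                             # -> '2020-02-10' (default)
#   get_api_version({'api_version': '2019-07-07'}, '2020-02-10')  # -> '2019-07-07'
#   get_api_version({'api_version': '2010-01-01'}, '2020-02-10')  # raises ValueError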
|
examples/wmf_example.py
|
carmanzhang/cornac
| 597 |
134432
|
<gh_stars>100-1000
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example for Collaborative Filtering for Implicit Feedback Datasets (Citeulike)"""
import cornac
from cornac.data import Reader
from cornac.datasets import citeulike
from cornac.eval_methods import RatioSplit
# Load user-item feedback
_, item_ids = citeulike.load_text()
data = citeulike.load_feedback(reader=Reader(item_set=item_ids))
# Instantiate an evaluation method to split data into train and test sets.
ratio_split = RatioSplit(
data=data,
test_size=0.2,
exclude_unknowns=True,
verbose=True,
seed=123,
rating_threshold=0.5,
)
# Instantiate the WMF model
wmf = cornac.models.WMF(
k=50,
max_iter=50,
learning_rate=0.001,
lambda_u=0.01,
lambda_v=0.01,
verbose=True,
seed=123,
)
# Use Recall@300 for evaluation
rec_300 = cornac.metrics.Recall(k=300)
# Instantiate and run an experiment
cornac.Experiment(
eval_method=ratio_split, models=[wmf], metrics=[rec_300], user_based=True
).run()
|
train.py
|
JJinIT/som-dst
| 139 |
134435
|
<filename>train.py
"""
SOM-DST
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
from model import SomDST
from pytorch_transformers import BertTokenizer, AdamW, WarmupLinearSchedule, BertConfig
from utils.data_utils import prepare_dataset, MultiWozDataset
from utils.data_utils import make_slot_meta, domain2id, OP_SET, make_turn_label, postprocessing
from utils.eval_utils import compute_prf, compute_acc, per_domain_join_accuracy
from utils.ckpt_utils import download_ckpt, convert_ckpt_compatible
from evaluation import model_evaluation
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import numpy as np
import argparse
import random
import os
import json
import time
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def masked_cross_entropy_for_value(logits, target, pad_idx=0):
mask = target.ne(pad_idx)
logits_flat = logits.view(-1, logits.size(-1))
log_probs_flat = torch.log(logits_flat)
target_flat = target.view(-1, 1)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size())
losses = losses * mask.float()
loss = losses.sum() / (mask.sum().float())
return loss
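# Illustrative toy example (added for clarity; not part of the original script).
# `logits` is expected to already hold probabilities (torch.log is applied
# directly), and pad_idx marks positions excluded from the average:
#
#   probs = torch.tensor([[[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]]])  # [batch=1, seq=2, vocab=3]
#   target = torch.tensor([[1, 0]])                             # 0 is the [PAD] id
#   masked_cross_entropy_for_value(probs, target, pad_idx=0)
#   # -> -log(0.2), averaged over the single non-pad position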
def main(args):
def worker_init_fn(worker_id):
np.random.seed(args.random_seed + worker_id)
n_gpu = 0
if torch.cuda.is_available():
n_gpu = torch.cuda.device_count()
np.random.seed(args.random_seed)
random.seed(args.random_seed)
rng = random.Random(args.random_seed)
torch.manual_seed(args.random_seed)
if n_gpu > 0:
torch.cuda.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
ontology = json.load(open(args.ontology_data))
slot_meta, ontology = make_slot_meta(ontology)
op2id = OP_SET[args.op_code]
print(op2id)
tokenizer = BertTokenizer(args.vocab_path, do_lower_case=True)
train_data_raw = prepare_dataset(data_path=args.train_data_path,
tokenizer=tokenizer,
slot_meta=slot_meta,
n_history=args.n_history,
max_seq_length=args.max_seq_length,
op_code=args.op_code)
train_data = MultiWozDataset(train_data_raw,
tokenizer,
slot_meta,
args.max_seq_length,
rng,
ontology,
args.word_dropout,
args.shuffle_state,
args.shuffle_p)
print("# train examples %d" % len(train_data_raw))
dev_data_raw = prepare_dataset(data_path=args.dev_data_path,
tokenizer=tokenizer,
slot_meta=slot_meta,
n_history=args.n_history,
max_seq_length=args.max_seq_length,
op_code=args.op_code)
print("# dev examples %d" % len(dev_data_raw))
test_data_raw = prepare_dataset(data_path=args.test_data_path,
tokenizer=tokenizer,
slot_meta=slot_meta,
n_history=args.n_history,
max_seq_length=args.max_seq_length,
op_code=args.op_code)
print("# test examples %d" % len(test_data_raw))
model_config = BertConfig.from_json_file(args.bert_config_path)
model_config.dropout = args.dropout
model_config.attention_probs_dropout_prob = args.attention_probs_dropout_prob
model_config.hidden_dropout_prob = args.hidden_dropout_prob
model = SomDST(model_config, len(op2id), len(domain2id), op2id['update'], args.exclude_domain)
if not os.path.exists(args.bert_ckpt_path):
args.bert_ckpt_path = download_ckpt(args.bert_ckpt_path, args.bert_config_path, 'assets')
ckpt = torch.load(args.bert_ckpt_path, map_location='cpu')
model.encoder.bert.load_state_dict(ckpt)
# re-initialize added special tokens ([SLOT], [NULL], [EOS])
model.encoder.bert.embeddings.word_embeddings.weight.data[1].normal_(mean=0.0, std=0.02)
model.encoder.bert.embeddings.word_embeddings.weight.data[2].normal_(mean=0.0, std=0.02)
model.encoder.bert.embeddings.word_embeddings.weight.data[3].normal_(mean=0.0, std=0.02)
model.to(device)
num_train_steps = int(len(train_data_raw) / args.batch_size * args.n_epochs)
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
enc_param_optimizer = list(model.encoder.named_parameters())
enc_optimizer_grouped_parameters = [
{'params': [p for n, p in enc_param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in enc_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
enc_optimizer = AdamW(enc_optimizer_grouped_parameters, lr=args.enc_lr)
enc_scheduler = WarmupLinearSchedule(enc_optimizer, int(num_train_steps * args.enc_warmup),
t_total=num_train_steps)
dec_param_optimizer = list(model.decoder.parameters())
dec_optimizer = AdamW(dec_param_optimizer, lr=args.dec_lr)
dec_scheduler = WarmupLinearSchedule(dec_optimizer, int(num_train_steps * args.dec_warmup),
t_total=num_train_steps)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data,
sampler=train_sampler,
batch_size=args.batch_size,
collate_fn=train_data.collate_fn,
num_workers=args.num_workers,
worker_init_fn=worker_init_fn)
loss_fnc = nn.CrossEntropyLoss()
best_score = {'epoch': 0, 'joint_acc': 0, 'op_acc': 0, 'final_slot_f1': 0}
for epoch in range(args.n_epochs):
batch_loss = []
model.train()
for step, batch in enumerate(train_dataloader):
batch = [b.to(device) if not isinstance(b, int) else b for b in batch]
input_ids, input_mask, segment_ids, state_position_ids, op_ids,\
domain_ids, gen_ids, max_value, max_update = batch
if rng.random() < args.decoder_teacher_forcing: # teacher forcing
teacher = gen_ids
else:
teacher = None
domain_scores, state_scores, gen_scores = model(input_ids=input_ids,
token_type_ids=segment_ids,
state_positions=state_position_ids,
attention_mask=input_mask,
max_value=max_value,
op_ids=op_ids,
max_update=max_update,
teacher=teacher)
loss_s = loss_fnc(state_scores.view(-1, len(op2id)), op_ids.view(-1))
loss_g = masked_cross_entropy_for_value(gen_scores.contiguous(),
gen_ids.contiguous(),
tokenizer.vocab['[PAD]'])
loss = loss_s + loss_g
if args.exclude_domain is not True:
loss_d = loss_fnc(domain_scores.view(-1, len(domain2id)), domain_ids.view(-1))
loss = loss + loss_d
batch_loss.append(loss.item())
loss.backward()
enc_optimizer.step()
enc_scheduler.step()
dec_optimizer.step()
dec_scheduler.step()
model.zero_grad()
if step % 100 == 0:
if args.exclude_domain is not True:
print("[%d/%d] [%d/%d] mean_loss : %.3f, state_loss : %.3f, gen_loss : %.3f, dom_loss : %.3f" \
% (epoch+1, args.n_epochs, step,
len(train_dataloader), np.mean(batch_loss),
loss_s.item(), loss_g.item(), loss_d.item()))
else:
print("[%d/%d] [%d/%d] mean_loss : %.3f, state_loss : %.3f, gen_loss : %.3f" \
% (epoch+1, args.n_epochs, step,
len(train_dataloader), np.mean(batch_loss),
loss_s.item(), loss_g.item()))
batch_loss = []
if (epoch+1) % args.eval_epoch == 0:
eval_res = model_evaluation(model, dev_data_raw, tokenizer, slot_meta, epoch+1, args.op_code)
if eval_res['joint_acc'] > best_score['joint_acc']:
best_score = eval_res
model_to_save = model.module if hasattr(model, 'module') else model
save_path = os.path.join(args.save_dir, 'model_best.bin')
torch.save(model_to_save.state_dict(), save_path)
print("Best Score : ", best_score)
print("\n")
print("Test using best model...")
best_epoch = best_score['epoch']
ckpt_path = os.path.join(args.save_dir, 'model_best.bin')
model = SomDST(model_config, len(op2id), len(domain2id), op2id['update'], args.exclude_domain)
ckpt = torch.load(ckpt_path, map_location='cpu')
model.load_state_dict(ckpt)
model.to(device)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=False, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=False, is_gt_gen=True)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=True, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=True, is_gt_gen=True)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=False, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=True, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=False, is_gt_gen=True)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=True, is_gt_gen=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_root", default='data/mwz2.1', type=str)
parser.add_argument("--train_data", default='train_dials.json', type=str)
parser.add_argument("--dev_data", default='dev_dials.json', type=str)
parser.add_argument("--test_data", default='test_dials.json', type=str)
parser.add_argument("--ontology_data", default='ontology.json', type=str)
parser.add_argument("--vocab_path", default='assets/vocab.txt', type=str)
parser.add_argument("--bert_config_path", default='assets/bert_config_base_uncased.json', type=str)
parser.add_argument("--bert_ckpt_path", default='assets/bert-base-uncased-pytorch_model.bin', type=str)
parser.add_argument("--save_dir", default='outputs', type=str)
parser.add_argument("--random_seed", default=42, type=int)
parser.add_argument("--num_workers", default=4, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--enc_warmup", default=0.1, type=float)
parser.add_argument("--dec_warmup", default=0.1, type=float)
parser.add_argument("--enc_lr", default=4e-5, type=float)
parser.add_argument("--dec_lr", default=1e-4, type=float)
parser.add_argument("--n_epochs", default=30, type=int)
parser.add_argument("--eval_epoch", default=1, type=int)
parser.add_argument("--op_code", default="4", type=str)
parser.add_argument("--slot_token", default="[SLOT]", type=str)
parser.add_argument("--dropout", default=0.1, type=float)
parser.add_argument("--hidden_dropout_prob", default=0.1, type=float)
parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float)
parser.add_argument("--decoder_teacher_forcing", default=0.5, type=float)
parser.add_argument("--word_dropout", default=0.1, type=float)
parser.add_argument("--not_shuffle_state", default=False, action='store_true')
parser.add_argument("--shuffle_p", default=0.5, type=float)
parser.add_argument("--n_history", default=1, type=int)
parser.add_argument("--max_seq_length", default=256, type=int)
parser.add_argument("--msg", default=None, type=str)
parser.add_argument("--exclude_domain", default=False, action='store_true')
args = parser.parse_args()
args.train_data_path = os.path.join(args.data_root, args.train_data)
args.dev_data_path = os.path.join(args.data_root, args.dev_data)
args.test_data_path = os.path.join(args.data_root, args.test_data)
args.ontology_data = os.path.join(args.data_root, args.ontology_data)
args.shuffle_state = False if args.not_shuffle_state else True
print('pytorch version: ', torch.__version__)
print(args)
main(args)
|
intro/numpy/solutions/2_2_data_statistics.py
|
rossbar/scipy-lecture-notes
| 2,538 |
134458
|
<filename>intro/numpy/solutions/2_2_data_statistics.py<gh_stars>1000+
import numpy as np
data = np.loadtxt('../../../data/populations.txt')
year, hares, lynxes, carrots = data.T
populations = data[:,1:]
print(" Hares, Lynxes, Carrots")
print("Mean:", populations.mean(axis=0))
print("Std:", populations.std(axis=0))
j_max_years = np.argmax(populations, axis=0)
print("Max. year:", year[j_max_years])
max_species = np.argmax(populations, axis=1)
species = np.array(['Hare', 'Lynx', 'Carrot'])
print("Max species:")
print(year)
print(species[max_species])
above_50000 = np.any(populations > 50000, axis=1)
print("Any above 50000:", year[above_50000])
j_top_2 = np.argsort(populations, axis=0)[:2]
print("Top 2 years with lowest populations for each:")
print(year[j_top_2])
hare_grad = np.gradient(hares, 1.0)
print("diff(Hares) vs. Lynxes correlation", np.corrcoef(hare_grad, lynxes)[0,1])
import matplotlib.pyplot as plt
plt.plot(year, hare_grad, year, -lynxes)
plt.savefig('plot.png')
|
tests/test006.py
|
takipsizad/pyjs
| 739 |
134471
|
import DOM
class UIObject:
def getElement(self):
return self.element
def setElement(self, element):
self.element = element
def setStyleName(self, style):
DOM.setAttribute(self.element, "className", style)
class Widget(UIObject):
def setParent(self, parent):
self.parent = parent
class FocusWidget(Widget):
def __init__(self, element):
self.setElement(element)
class ButtonBase(FocusWidget):
def __init__(self, element):
FocusWidget.__init__(self, element)
def setHTML(self, html):
DOM.setInnerHTML(self.getElement(), html)
class Button(ButtonBase):
def __init__(self, html=None):
ButtonBase.__init__(self, DOM.createButton())
self.setStyleName("gwt-Button")
if html:
self.setHTML(html)
|
src/tests/test_mas.py
|
francesco-p/FACIL
| 243 |
134489
|
from tests import run_main_and_assert
FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
" --network LeNet --num-tasks 3 --seed 1 --batch-size 32" \
" --nepochs 3" \
" --num-workers 0" \
" --approach mas"
def test_mas_without_exemplars():
run_main_and_assert(FAST_LOCAL_TEST_ARGS)
def test_mas_with_exemplars():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --num-exemplars 200"
run_main_and_assert(args_line)
def test_mas_with_warmup():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --warmup-nepochs 5"
args_line += " --warmup-lr-factor 0.5"
args_line += " --num-exemplars 200"
run_main_and_assert(args_line)
|
tests/apitests/python/test_push_cnab_bundle.py
|
PushkarJ/harbor
| 12,706 |
134524
|
from __future__ import absolute_import
import sys
import unittest
from testutils import ADMIN_CLIENT, suppress_urllib3_warning
from testutils import harbor_server
from testutils import TEARDOWN
import library.repository
import library.cnab
from library.project import Project
from library.user import User
from library.repository import Repository
from library.artifact import Artifact
from library.scan import Scan
class TestCNAB(unittest.TestCase):
@suppress_urllib3_warning
def setUp(self):
print("Setup")
@unittest.skipIf(TEARDOWN == False, "Test data won't be erased.")
def do_tearDown(self):
"""
Tear down:
1. Delete repository(RA) by user(UA);
2. Delete project(PA);
3. Delete user(UA);
"""
#1. Delete repository(RA) by user(UA);
TestCNAB.repo.delete_repository(TestCNAB.project_name, TestCNAB.cnab_repo_name, **TestCNAB.USER_CLIENT)
#2. Delete project(PA);
TestCNAB.project.delete_project(TestCNAB.project_id, **TestCNAB.USER_CLIENT)
#3. Delete user(UA).
TestCNAB.user.delete_user(TestCNAB.user_id, **ADMIN_CLIENT)
def test_01_PushBundleByCnab(self):
"""
Test case:
Push Bundle By Cnab
Test step and expected result:
1. Create a new user(UA);
2. Create a new project(PA) by user(UA);
3. Push bundle to harbor as repository(RA);
4. Get repository from Harbor successfully;
        5. Verify bundle name;
6. Get artifact by sha256;
7. Verify artifact information.
"""
TestCNAB.project= Project()
TestCNAB.user= User()
TestCNAB.artifact = Artifact()
TestCNAB.repo= Repository()
TestCNAB.scan = Scan()
TestCNAB.url = ADMIN_CLIENT["endpoint"]
TestCNAB.user_push_cnab_password = "<PASSWORD>"
TestCNAB.cnab_repo_name = "test_cnab"
TestCNAB.cnab_tag = "test_cnab_tag"
TestCNAB.project_name = None
TestCNAB.artifacts_config_ref_child_list = None
TestCNAB.artifacts_ref_child_list = None
#1. Create a new user(UA);
        TestCNAB.user_id, TestCNAB.user_name = TestCNAB.user.create_user(user_password = TestCNAB.user_push_cnab_password, **ADMIN_CLIENT)
        TestCNAB.USER_CLIENT=dict(endpoint = TestCNAB.url, username = TestCNAB.user_name, password = TestCNAB.user_push_cnab_password, with_scan_overview = True)
#2. Create a new project(PA) by user(UA);
TestCNAB.project_id, TestCNAB.project_name = TestCNAB.project.create_project(metadata = {"public": "false"}, **TestCNAB.USER_CLIENT)
#3. Push bundle to harbor as repository(RA);
target = harbor_server + "/" + TestCNAB.project_name + "/" + TestCNAB.cnab_repo_name + ":" + TestCNAB.cnab_tag
TestCNAB.reference_sha256 = library.cnab.push_cnab_bundle(harbor_server, TestCNAB.user_name, TestCNAB.user_push_cnab_password, "goharbor/harbor-log:v1.10.0", "kong:latest", target)
#4. Get repository from Harbor successfully;
TestCNAB.cnab_bundle_data = TestCNAB.repo.get_repository(TestCNAB.project_name, TestCNAB.cnab_repo_name, **TestCNAB.USER_CLIENT)
print(TestCNAB.cnab_bundle_data)
#4.1 Get refs of CNAB bundle;
TestCNAB.artifacts = TestCNAB.artifact.list_artifacts(TestCNAB.project_name, TestCNAB.cnab_repo_name, **TestCNAB.USER_CLIENT)
print("artifacts:", TestCNAB.artifacts)
TestCNAB.artifacts_ref_child_list = []
TestCNAB.artifacts_config_ref_child_list = []
for ref in TestCNAB.artifacts[0].references:
if ref.annotations["io.cnab.manifest.type"] != 'config':
TestCNAB.artifacts_ref_child_list.append(ref.child_digest)
else:
TestCNAB.artifacts_config_ref_child_list.append(ref.child_digest)
self.assertEqual(len(TestCNAB.artifacts_ref_child_list), 2, msg="Image artifact count should be 2.")
self.assertEqual(len(TestCNAB.artifacts_config_ref_child_list), 1, msg="Bundle count should be 1.")
print(TestCNAB.artifacts_ref_child_list)
#4.2 Cnab bundle can be pulled by ctr successfully;
        # This step might not be successful since ctr doesn't support CNAB fully; it might be uncommented sometime in the future.
# Please keep them in comment!
#library.containerd.ctr_images_pull(TestCNAB.user_name, TestCNAB.user_push_cnab_password, target)
#library.containerd.ctr_images_list(oci_ref = target)
        #5. Verify bundle name;
self.assertEqual(TestCNAB.cnab_bundle_data.name, TestCNAB.project_name + "/" + TestCNAB.cnab_repo_name)
#6. Get artifact by sha256;
artifact = TestCNAB.artifact.get_reference_info(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.reference_sha256, **TestCNAB.USER_CLIENT)
#7. Verify artifact information;
self.assertEqual(artifact.type, 'CNAB')
self.assertEqual(artifact.digest, TestCNAB.reference_sha256)
def test_02_ScanCNAB(self):
"""
Test case:
Scan CNAB
Test step and expected result:
1. Scan config artifact, it should be failed with 400 status code;
2. Scan 1st child artifact, it should be scanned, the other should be not scanned, repository should not be scanned;
3. Scan 2cn child artifact, it should be scanned, repository should not be scanned;
4. Scan repository, it should be scanned;
Tear down:
"""
#1. Scan config artifact, it should be failed with 400 status code;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_config_ref_child_list[0], expect_status_code = 400, **TestCNAB.USER_CLIENT)
#2. Scan 1st child artifact, it should be scanned, the other should be not scanned, repository should not be scanned;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[0], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[0], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[1], expected_scan_status = "Not Scanned", **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_config_ref_child_list[0], expected_scan_status = "No Scan Overview", **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, expected_scan_status = "Not Scanned", **TestCNAB.USER_CLIENT)
        #3. Scan 2nd child artifact, it should be scanned, repository should not be scanned;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[1], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[1], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_config_ref_child_list[0], expected_scan_status = "No Scan Overview", **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, expected_scan_status = "Not Scanned", **TestCNAB.USER_CLIENT)
#4. Scan repository, it should be scanned;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, **TestCNAB.USER_CLIENT)
self.do_tearDown()
if __name__ == '__main__':
suite = unittest.TestSuite(unittest.makeSuite(TestCNAB))
result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception(r"CNAB test failed: {}".format(result))
|
libs/numpy/ma/tests/test_deprecations.py
|
rocketbot-cl/recognition
| 353 |
134534
|
<filename>libs/numpy/ma/tests/test_deprecations.py
"""Test deprecation and future warnings.
"""
import numpy as np
from numpy.testing import assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
class TestArgsort:
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
argsort(arr_0d)
arr_1d = np.array([1, 2, 3]).view(cls)
argsort(arr_1d)
# argsort has a bad default for >1d arrays
arr_2d = np.array([[1, 2], [3, 4]]).view(cls)
result = assert_warns(
np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d)
assert_equal(result, argsort(arr_2d, axis=None))
# should be no warnings for explicitly specifying it
argsort(arr_2d, axis=None)
argsort(arr_2d, axis=-1)
def test_function_ndarray(self):
return self._test_base(np.ma.argsort, np.ndarray)
def test_function_maskedarray(self):
return self._test_base(np.ma.argsort, np.ma.MaskedArray)
def test_method(self):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
class TestMinimumMaximum:
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
def test_maximum(self):
assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2]))
def test_axis_default(self):
# NumPy 1.13, 2017-05-06
data1d = np.ma.arange(6)
data2d = data1d.reshape(2, 3)
ma_min = np.ma.minimum.reduce
ma_max = np.ma.maximum.reduce
# check that the default axis is still None, but warns on 2d arrays
result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d)
assert_equal(result, ma_max(data2d, axis=None))
result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d)
assert_equal(result, ma_min(data2d, axis=None))
# no warnings on 1d, as both new and old defaults are equivalent
result = ma_min(data1d)
assert_equal(result, ma_min(data1d, axis=None))
assert_equal(result, ma_min(data1d, axis=0))
result = ma_max(data1d)
assert_equal(result, ma_max(data1d, axis=None))
assert_equal(result, ma_max(data1d, axis=0))
|
src/ploomber/sources/interact.py
|
MarcoJHB/ploomber
| 2,141 |
134553
|
"""
One of Ploomber's main goals is to allow writing robust/reliable code in an
interactive way. Interactive workflows make people more productive but they
might come at the expense of writing high quality code (e.g. developing a
pipeline in a single ipynb file). The basic idea for this module is to provide
a way to transparently go back and forth between a Task in a DAG and a
temporary Jupyter notebook. Currently, we only provide this for PythonCallable
and NotebookRunner but the idea is to expand to other tasks, so we have to
decide on a common behavior for this; here are a few rules:
1) Temporary Jupyter notebooks are usually destroyed when the user closes the
Jupyter application. But there are extraordinary cases where we don't want to
remove them, as that might cause code loss. e.g. if the user calls
PythonCallable.develop() and, while editing the notebook, modifies the module
where the source function is defined, we risk corrupting the module file, so we
abort overriding changes but still keep the temporary notebook. For this reason,
we save temporary notebooks in the same location as the source being edited,
to make it easier to recognize which file each notebook relates to.
2) The current working directory (cwd) in the session where Task.develop() is
called can be different from the cwd in the Jupyter application. This happens
because Jupyter sets the cwd to the notebook's parent folder; this means that
any relative path defined in the DAG will break if the cwd in the Jupyter app
is not the same as in the DAG declaration. To fix this, we always add a top
cell in temporary notebooks to make the cwd the same folder where
Task.develop() was called.
3) [TODO] all temporary cells must have a tmp- prefix
TODO: move the logic that implements NotebookRunner.{develop, debug} to this
module
"""
import importlib
from itertools import chain
from pathlib import Path
import inspect
import warnings
import jupyter_client
# papermill is importing a deprecated module from pyarrow
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
from papermill.translators import PythonTranslator
import parso
import nbformat
from ploomber.util import chdir_code
from ploomber.sources.nb_utils import find_cell_with_tag
from ploomber.static_analysis.python import PythonCallableExtractor
from ploomber.sources.inspect import getfile
# TODO: test for locally defined objects
# TODO: reloading the fn causes trouble if it enters into an inconsistent
# state, e.g. a module that does not exist is saved; next time it is reloaded,
# it will fail because it has to import such module
# TODO: if we remove upstream references from the function's body from jupyter
# the parameter is deleted from the signature but on reload (dag.render())
# signature validation fails bc it still loads the old signature, two options:
# either force reload all modules from all pythoncallables, or re-implement
# the signature check to get the signature using static analysis, not sure
# which is best
class CallableInteractiveDeveloper:
"""Convert callables to notebooks, edit and save back
Parameters
----------
fn : callable
Function to edit
params : dict
Parameters to call the function
Examples
--------
    >>> with CallableInteractiveDeveloper(fn, {'param': 1}) as path_to_nb:
... # do stuff with the notebook file
... pass
"""
def __init__(self, fn, params):
self.fn = fn
self.path_to_source = Path(inspect.getsourcefile(fn))
self.params = params
self.tmp_path = self.path_to_source.with_name(
self.path_to_source.with_suffix('').name + '-tmp.ipynb')
self._source_code = None
def _reload_fn(self):
# force to reload module to get the right information in case the
# original source code was modified and the function is no longer in
# the same position
# NOTE: are there any problems with this approach?
# we could also read the file directly and use ast/parso to get the
# function's information we need
mod = importlib.reload(inspect.getmodule(self.fn))
self.fn = getattr(mod, self.fn.__name__)
def to_nb(self, path=None):
"""
        Converts the function to its notebook representation. Returns a
        notebook object; if a path is passed, it saves the notebook as well.
        Returns the function's body in a notebook (tmp location) and inserts
        params as variables at the top
"""
self._reload_fn()
body_elements, _ = parse_function(self.fn)
top, local, bottom = extract_imports(self.fn)
return function_to_nb(body_elements, top, local, bottom, self.params,
self.fn, path)
def overwrite(self, obj):
"""
Overwrite the function's body with the notebook contents, excluding
injected parameters and cells whose first line is "#". obj can be
either a notebook object or a path
"""
self._reload_fn()
if isinstance(obj, (str, Path)):
nb = nbformat.read(obj, as_version=nbformat.NO_CONVERT)
else:
nb = obj
nb.cells = nb.cells[:last_non_empty_cell(nb.cells)]
# remove cells that are only needed for the nb but not for the function
code_cells = [c['source'] for c in nb.cells if keep_cell(c)]
# add 4 spaces to each code cell, exclude white space lines
code_cells = [indent_cell(code) for code in code_cells]
# get the original file where the function is defined
content = self.path_to_source.read_text()
content_lines = content.splitlines()
trailing_newline = content[-1] == '\n'
# an upstream parameter
fn_starts, fn_ends = function_lines(self.fn)
# keep the file the same until you reach the function definition plus
# an offset to account for the signature (which might span >1 line)
_, body_start = parse_function(self.fn)
keep_until = fn_starts + body_start
header = content_lines[:keep_until]
# the footer is everything below the end of the original definition
footer = content_lines[fn_ends:]
# if there is anything at the end, we have to add an empty line to
# properly end the function definition, if this is the last definition
# in the file, we don't have to add this
if footer:
footer = [''] + footer
new_content = '\n'.join(header + code_cells + footer)
# replace old top imports with new ones
new_content_lines = new_content.splitlines()
_, line = extract_imports_top(parso.parse(new_content),
new_content_lines)
imports_top_cell, _ = find_cell_with_tag(nb, 'imports-top')
# ignore trailing whitespace in top imports cell but keep original
# amount of whitespace separating the last import and the first name
# definition
content_to_write = (imports_top_cell['source'].rstrip() + '\n' +
'\n'.join(new_content_lines[line - 1:]))
# if the original file had a trailing newline, keep it
if trailing_newline:
content_to_write += '\n'
# NOTE: this last part parses the code several times, we can improve
# performance by only parsing once
m = parso.parse(content_to_write)
fn_def = find_function_with_name(m, self.fn.__name__)
fn_code = fn_def.get_code()
has_upstream_dependencies = PythonCallableExtractor(
fn_code).extract_upstream()
upstream_in_func_sig = upstream_in_func_signature(fn_code)
if not upstream_in_func_sig and has_upstream_dependencies:
fn_code_new = add_upstream_to_func_signature(fn_code)
content_to_write = _replace_fn_source(content_to_write, fn_def,
fn_code_new)
elif upstream_in_func_sig and not has_upstream_dependencies:
fn_code_new = remove_upstream_to_func_signature(fn_code)
content_to_write = _replace_fn_source(content_to_write, fn_def,
fn_code_new)
self.path_to_source.write_text(content_to_write)
def __enter__(self):
self._source_code = self.path_to_source.read_text()
self.to_nb(path=self.tmp_path)
return str(self.tmp_path)
def __exit__(self, exc_type, exc_val, exc_tb):
current_source_code = self.path_to_source.read_text()
if self._source_code != current_source_code:
raise ValueError(f'File "{self.path_to_source}" (where '
f'callable "{self.fn.__name__}" is defined) '
'changed while editing the function in the '
'notebook app. This might lead to corrupted '
'source files. Changes from the notebook were '
'not saved back to the module. Notebook '
                             f'available at "{self.tmp_path}"')
self.overwrite(self.tmp_path)
Path(self.tmp_path).unlink()
def __del__(self):
tmp = Path(self.tmp_path)
if tmp.exists():
tmp.unlink()
def last_non_empty_cell(cells):
"""Returns the index + 1 for the last non-empty cell
"""
idx = len(cells)
for cell in cells[::-1]:
if cell.source:
return idx
idx -= 1
return idx
def keep_cell(cell):
"""
Rule to decide whether to keep a cell or not. This is executed before
converting the notebook back to a function
"""
cell_tags = set(cell['metadata'].get('tags', {}))
# remove cell with this tag, they are not part of the function body
tags_to_remove = {
'injected-parameters',
'imports-top',
'imports-local',
'imports-bottom',
'debugging-settings',
}
has_tags_to_remove = len(cell_tags & tags_to_remove)
return (cell['cell_type'] == 'code' and not has_tags_to_remove
and cell['source'][:2] != '#\n')
def indent_line(lline):
return ' ' + lline if lline else ''
def indent_cell(code):
return '\n'.join([indent_line(line) for line in code.splitlines()])
def body_elements_from_source(source):
    # getsource adds a new line at the end of the function; we don't need
# this
body = parso.parse(source).children[0].children[-1]
# parso is adding a new line as first element, not sure if this
# happens always though
if isinstance(body.children[0], parso.python.tree.Newline):
body_elements = body.children[1:]
else:
body_elements = body.children
return body_elements, body.start_pos[0] - 1
def parse_function(fn):
"""
    Extract the function's source code, parse it, and return the function body
    elements along with the line number where the signature ends (which
    marks the beginning of the function's body)
"""
# TODO: exclude return at the end, what if we find more than one?
# maybe do not support functions with return statements for now
source = inspect.getsource(fn).rstrip()
body_elements, start_pos = body_elements_from_source(source)
return body_elements, start_pos
def extract_imports(fn):
source = Path(getfile(fn)).read_text()
module = parso.parse(source)
lines = source.splitlines()
imports_top, line = extract_imports_top(module, lines)
# any imports below the top imports
lines_bottom = '\n'.join(lines[line - 1:])
imports_bottom = '\n'.join(
imp.get_code() for imp in parso.parse(lines_bottom).iter_imports())
# generate imports from local definitions
imports_local = make_import_from_definitions(module, fn)
return (
imports_top,
imports_local,
imports_bottom if imports_bottom else None,
)
def extract_imports_top(module, lines):
ch = module.children[0]
while True:
if ch:
if not has_import(ch):
break
else:
break
ch = ch.get_next_sibling()
line, _ = ch.start_pos
# line numbers start at 1...
imports_top = '\n'.join(lines[:line - 1])
new_lines = trailing_newlines(imports_top)
return imports_top[:-new_lines], line - new_lines
def has_import(stmt):
"""
Check if statement contains an import
"""
for ch in stmt.children:
if ch.type in {'import_name', 'import_from'}:
return True
return False
def trailing_newlines(s):
n = 0
for char in reversed(s):
if char != '\n':
break
n += 1
return n
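# Illustrative sketch (added for clarity; not part of the original module):
#
#   trailing_newlines('import os\n\n')  # -> 2
#   trailing_newlines('import os')      # -> 0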
def function_lines(fn):
lines, start = inspect.getsourcelines(fn)
end = start + len(lines)
return start, end
def get_func_and_class_names(module):
return [
defs.name.get_code().strip()
for defs in chain(module.iter_funcdefs(), module.iter_classdefs())
]
def make_import_from_definitions(module, fn):
module_name = inspect.getmodule(fn).__name__
names = [
name for name in get_func_and_class_names(module)
if name != fn.__name__
]
if names:
names_all = ', '.join(names)
return f'from {module_name} import {names_all}'
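# Illustrative sketch (added for clarity; not part of the original module),
# assuming a hypothetical module "my_tasks" that defines functions clean() and plot():
#
#   module = parso.parse(Path(getfile(clean)).read_text())
#   make_import_from_definitions(module, clean)
#   # -> 'from my_tasks import plot'  (the edited function itself is excluded)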
def function_to_nb(body_elements, imports_top, imports_local, imports_bottom,
params, fn, path):
"""
Save function body elements to a notebook
"""
    # TODO: Params should implement an option to call to_json_serializable
    # on product to avoid repetition; I'm using this same code in the
    # notebook runner. Also raise an error if any of the params is not
    # json serializable
try:
params = params.to_json_serializable()
params['product'] = params['product'].to_json_serializable()
except AttributeError:
pass
nb_format = nbformat.versions[nbformat.current_nbformat]
nb = nb_format.new_notebook()
# get the module where the function is declared
tokens = inspect.getmodule(fn).__name__.split('.')
module_name = '.'.join(tokens[:-1])
# add cell that chdirs for the current working directory
# add __package__, we need this for relative imports to work
# see: https://www.python.org/dev/peps/pep-0366/ for details
source = """
# Debugging settings (this cell will be removed before saving)
# change the current working directory to the one when .debug() happen
# to make relative paths work
import os
{}
__package__ = "{}"
""".format(chdir_code(Path('.').resolve()), module_name)
cell = nb_format.new_code_cell(source,
metadata={'tags': ['debugging-settings']})
nb.cells.append(cell)
# then add params passed to the function
cell = nb_format.new_code_cell(PythonTranslator.codify(params),
metadata={'tags': ['injected-parameters']})
nb.cells.append(cell)
# first three cells: imports
for code, tag in ((imports_top, 'imports-top'),
(imports_local, 'imports-local'), (imports_bottom,
'imports-bottom')):
if code:
nb.cells.append(
nb_format.new_code_cell(source=code,
metadata=dict(tags=[tag])))
for statement in body_elements:
lines, newlines = split_statement(statement)
# find indentation # of characters using the first line
idx = indentation_idx(lines[0])
# remove indentation from all function body lines
lines = [line[idx:] for line in lines]
# add one empty cell per leading new line
nb.cells.extend(
[nb_format.new_code_cell(source='') for _ in range(newlines)])
# add actual code as a single string
cell = nb_format.new_code_cell(source='\n'.join(lines))
nb.cells.append(cell)
k = jupyter_client.kernelspec.get_kernel_spec('python3')
nb.metadata.kernelspec = {
"display_name": k.display_name,
"language": k.language,
"name": 'python3'
}
if path:
nbformat.write(nb, path)
return nb
def split_statement(statement):
code = statement.get_code()
newlines = 0
for char in code:
if char != '\n':
break
newlines += 1
lines = code.strip('\n').split('\n')
return lines, newlines
def indentation_idx(line):
idx = len(line) - len(line.lstrip())
return idx
def upstream_in_func_signature(source):
_, params = _get_func_def_and_params(source)
return 'upstream' in set(p.name.get_code().strip() for p in params
if p.type == 'param')
def add_upstream_to_func_signature(source):
fn, params = _get_func_def_and_params(source)
# add a "," if there is at least one param
params.insert(-1, ', upstream' if len(params) > 2 else 'upstream')
signature = try_get_code(params)
fn.children[2] = signature
# delete leading newline code, to avoid duplicating it
return try_get_code(fn.children).lstrip('\n')
def remove_upstream_to_func_signature(source):
fn, params = _get_func_def_and_params(source)
params_names = (p.get_code().strip(', ') for p in params[1:-1])
params_list = ', '.join(p for p in params_names if p != 'upstream')
signature = f'({params_list})'
fn.children[2] = signature
# delete leading newline code, to avoid duplicating it
return try_get_code(fn.children).lstrip('\n')
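# Illustrative sketch (added for clarity; not part of the original module) of the
# signature rewrites performed above:
#
#   add_upstream_to_func_signature('def task(product):\n    pass\n')
#   # -> 'def task(product, upstream):\n    pass\n'
#
#   remove_upstream_to_func_signature('def task(product, upstream):\n    pass\n')
#   # -> 'def task(product):\n    pass\n'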
def _get_func_def_and_params(source):
fn = parso.parse(source).children[0]
if fn.type != 'funcdef':
raise ValueError('Expected first element from parse source'
f' code to be "funcdef", got {fn.type!r}')
return fn, fn.children[2].children
def _replace_fn_source(content_to_write, fn_def, fn_code_new):
line_from, line_to = fn_def.start_pos[0], fn_def.end_pos[0]
lines = content_to_write.splitlines()
lines_new = (lines[:line_from - 1] + [fn_code_new] + lines[line_to - 1:])
return '\n'.join(lines_new)
def try_get_code(elements):
code = []
for p in elements:
try:
s = p.get_code()
except AttributeError:
s = p
code.append(s)
return ''.join(code)
def find_function_with_name(module, fn_name):
for fn_def in module.iter_funcdefs():
if fn_def.name.get_code().strip() == fn_name:
return fn_def
|
python/tvm/relay/op/nn/utils.py
|
XiaoSong9905/tvm
| 4,640 |
134557
|
<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""NN operator common utilities"""
from tvm.ir import container
def get_pad_tuple1d(padding):
"""Common code to get the 1 dimensional pad option
Parameters
----------
padding : Union[int, Tuple[int, ...]]
Padding size
Returns
-------
pad_left : int
Padding size on left
pad_right : int
Padding size on right.
"""
# compute the padding size
if isinstance(padding, container.Array):
padding = list(padding)
if isinstance(padding, (tuple, list)):
if len(padding) == 1:
pad_w = padding[0] * 2
elif len(padding) == 2:
return padding[0], padding[1]
else:
raise ValueError("Size of padding can only be 1 or 2")
elif isinstance(padding, int):
pad_w = padding * 2
else:
raise ValueError("Unknown padding option %s" % padding)
pad_left = (pad_w + 1) // 2
return pad_left, pad_w - pad_left
def get_pad_tuple2d(padding):
"""Common code to get the pad option
Parameters
----------
padding : Union[int, Tuple[int, ...]]
Padding size
Returns
-------
pad_top : int
Padding size on top
pad_left : int
Padding size on left
pad_down : int
Padding size on down.
pad_right : int
Padding size on right.
"""
# compute the padding size
if isinstance(padding, container.Array):
padding = list(padding)
if isinstance(padding, (tuple, list)):
if len(padding) == 2:
pad_h = padding[0] * 2
pad_w = padding[1] * 2
elif len(padding) == 4:
return padding[0], padding[1], padding[2], padding[3]
else:
raise ValueError("Size of padding can only be 2 or 4")
elif isinstance(padding, int):
pad_h = pad_w = padding * 2
else:
raise ValueError("Unknown padding option %s" % padding)
pad_top = (pad_h + 1) // 2
pad_left = (pad_w + 1) // 2
return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left
def get_pad_tuple3d(padding):
"""Common code to get the pad option
Parameters
----------
padding : Union[int, Tuple[int, ...]]
Padding size
Returns
-------
pad_front : int
Padding size on front
pad_top : int
Padding size on top
pad_left : int
Padding size on left
pad_back : int
Padding size on back
pad_down : int
Padding size on down.
pad_right : int
Padding size on right.
"""
# compute the padding size
if isinstance(padding, container.Array):
padding = list(padding)
if isinstance(padding, (tuple, list)):
if len(padding) == 3:
pad_d = padding[0] * 2
pad_h = padding[1] * 2
pad_w = padding[2] * 2
elif len(padding) == 6:
return padding[0], padding[1], padding[2], padding[3], padding[4], padding[5]
else:
raise ValueError("Size of padding can only be 3 or 6")
elif isinstance(padding, int):
pad_d = pad_h = pad_w = padding * 2
else:
raise ValueError("Unknown padding option %s" % padding)
pad_front = (pad_d + 1) // 2
pad_top = (pad_h + 1) // 2
pad_left = (pad_w + 1) // 2
return pad_front, pad_top, pad_left, pad_d - pad_front, pad_h - pad_top, pad_w - pad_left
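# Illustrative sketch (added for clarity; not part of the original module):
#
#   get_pad_tuple1d(3)             # -> (3, 3)
#   get_pad_tuple2d((1, 2))        # -> (1, 2, 1, 2)  (top, left, bottom, right)
#   get_pad_tuple2d((1, 2, 3, 4))  # -> (1, 2, 3, 4)  passed through unchanged
#   get_pad_tuple3d(1)             # -> (1, 1, 1, 1, 1, 1)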
|
app.py
|
nickk/awesome-panel
| 179 |
134585
|
# pylint: disable=redefined-outer-name,protected-access,missing-function-docstring
"""In this module we configure our awesome-panel.org app and serve it using the
awesome_panel.application framework.
The awesome_panel.application framework provides
- Templates: One or more Templates to layout your app(s). A template might provide `main`,
`sidebar`, `topbar` layouts where you can put your content.
- Components: Smaller constitutents used to create the Template or PageComponents
- Views: Layout+Styling of Components
- Services: Services that can be used by the Template and components. For example a progress_service
- Models: Like Application, Page, Author, Tag, Progress etc.
"""
import os
import platform
import panel as pn
# We need to configure the site before we import the pages
from application.config import site # isort: split
# We need to import the application module to get the applications added to the site
from application import pages # pylint: disable=unused-import
if __name__ == "__main__":
address = os.getenv("BOKEH_ADDRESS", "0.0.0.0")
APP_ROUTES = {app.url: app.view for app in site.applications}
if platform.system() == "Windows":
pn.serve(APP_ROUTES, port=80, dev=False, address=address)
else:
pn.serve(APP_ROUTES, port=80, dev=False, address=address, num_procs=4)
|
idaes/dmf/commands.py
|
eslickj/idaes-pse
| 112 |
134604
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Perform all logic, input, output of commands that is
particular to the CLI.
Call functions defined in 'api' module to handle logic
that is common to the API and CLI.
"""
# stdlib
from datetime import datetime
import glob
import json
import logging
import math
import os
import re
import sys
# Third-party
import jsonschema
# Local
from .dmfbase import DMF, DMFConfig
from .util import strlist
from .util import is_jupyter_notebook, is_python, is_resource_json
from .util import ColorTerm
from .errors import (
ParseError,
CommandError,
WorkspaceNotFoundError,
WorkspaceConfNotFoundError,
WorkspaceError,
BadResourceError,
)
from . import resource
from .workspace import Workspace, find_workspaces
__author__ = "<NAME>"
_log = logging.getLogger(__name__)
def workspace_init(dirname, metadata):
# type: (str, dict) -> None
"""Initialize from root at `dirname`, set environment variable
for other commands, and parse config file.
"""
try:
ws = Workspace(dirname, create=True, add_defaults=True)
except OSError as err:
raise CommandError("init", "initialize workspace", str(err))
except ParseError as err:
raise CommandError("init", "parse config", str(err))
except WorkspaceError as err:
raise CommandError("init", "initialize workspace", str(err))
_log.info("Created new workspace in: {}".format(dirname))
if metadata:
ws.set_meta(metadata)
_log.info("Set metadata for: {}".format(strlist(list(metadata))))
def workspace_info(dirname):
# type: (str) -> None
t = ColorTerm()
try:
ws = Workspace(dirname, create=False)
except WorkspaceNotFoundError:
print("Workspace not found at path: {}".format(dirname))
raise CommandError("info", "find workspace", "not found at: {}".format(dirname))
except WorkspaceConfNotFoundError:
print("No configuration found for workspace for path: {}".format(dirname))
raise CommandError(
"info", "find workspace configuration", "not found at: {}".format(dirname)
)
num_obj = DMF(path=ws.root).count()
bullet = " - "
print(f"\n{t.blue}Workspace")
if ws.name and (ws.name != "none"):
if ws.description and (ws.description != "none"):
print(f" {t.blue}[{ws.name}] - {ws.description}")
else:
print(" {t.blue}{ws.name} - (no description)")
elif ws.description and (ws.description != "none"):
print(f" {t.blue}(no name) - {ws.description}")
else:
print(f" {t.blue}(no name or description)")
print("\nGeneral information")
print(f"{bullet}{t.blue}Location = {ws.root}")
info = ws.meta.copy()
if "_id" in info:
print(f"{bullet}{t.blue}Workspace identifier (_id) = {info['_id']}")
del info["_id"]
else:
print(f"{bullet}{t.blue}Workspace identifier (_id) = unknown")
if "created" in info:
print(f"{bullet}{t.blue}Created = {info[ws.CONF_CREATED]}")
else:
print(f"{bullet}{t.blue}Created = unknown")
if "modified" in info:
print(f"{bullet}{t.blue}Modified = {info[ws.CONF_MODIFIED]}")
else:
print(f"{bullet}{t.blue}Modified = unknown")
print(f"{bullet}{t.blue}Num. resources = {num_obj}")
print(f"\n{t.magenta}{t.bold}Configuration")
already_shown = (ws.CONF_MODIFIED, ws.CONF_CREATED, ws.CONF_NAME, ws.CONF_DESC)
for k in info.keys():
if k in already_shown:
continue
v = info[k]
print(f"{bullet}{t.blue}{k} = {v}")
print("")
def init_conf(workspace):
# type: (str) -> int
"""Initialize the workspace.
"""
t = ColorTerm()
# Open/create configuration file
try:
conf = DMFConfig()
except IOError as err:
print(f"Failed to open global configuration: {err}")
try:
open(DMFConfig._filename, "w")
except IOError:
print("Failed to create new configuration file")
return -1
print("Created new configuration file")
conf = DMFConfig()
# If a workspace argument is given, save this value,
# as the default workspace, in the configuration file
if workspace:
fullpath = os.path.abspath(workspace)
conf.c[conf.WORKSPACE] = fullpath
conf.save()
# Print contents of configuration file to standard output
print(
f"{t.magenta}{t.bold}DMF global configuration{t.reset} "
f"<{t.green}{conf._filename}>"
)
keys = conf.c.keys()
if keys:
for k in sorted(keys):
print(f" > {t.blue}{k}{t.reset} = {t.bold}{conf.c[k]}]")
else:
print(f"{t.blue}(empty)")
return 0
def workspace_import(path, patterns, exit_on_error):
# type: (str, list, bool) -> int
"""Import files into workspace.
Args:
path (str): Target workspace directory
patterns (list): List of Unix-style glob for files to import.
Files are expected to be resource JSON or a
Jupyter Notebook.
exit_on_error (bool): If False, continue trying to import resources
even if one or more fail.
Returns:
int: Number of things imported
Raises:
BadResourceError, if there is a problem
"""
d = DMF(path)
count = 0
for pattern in patterns:
for filename in glob.glob(pattern):
# Skip directories
if os.path.isdir(filename):
_log.warning('Not importing directory "{}"'.format(filename))
continue
# For Jupyter Notebooks, first create a (temporary)
# JSON resource from the original data.
if is_jupyter_notebook(filename):
try:
rsrc = _import_jupyternb(filename)
except ValueError as e:
msg = (
"Cannot create resource from Jupyter Notebook "
'"{}": {}'.format(filename, e)
)
if exit_on_error:
raise BadResourceError(msg)
_log.error(msg)
continue
# For Python files, first create a (temporary)
# JSON resource from the original data.
elif is_python(filename):
try:
rsrc = _import_python(filename)
except ValueError as e:
msg = "Cannot create resource from Python file " '"{}": {}'.format(
filename, e
)
if exit_on_error:
raise BadResourceError(msg)
_log.error(msg)
continue
# JSON resource file
elif is_resource_json(filename):
try:
rsrc = _import_resource(filename)
except ValueError as e:
msg = 'Bad resource from file "{}": {}'.format(filename, e)
if exit_on_error:
raise BadResourceError(msg)
_log.error(msg)
continue
# Generic file import
else:
try:
rsrc = _import_file(filename)
except ValueError as e:
msg = "Cannot create resource from file " '"{}": {}'.format(
filename, e
)
if exit_on_error:
raise BadResourceError(msg)
_log.error(msg)
continue
# Resource in hand. Now add it.
d.add(rsrc)
count += 1
return count
def list_workspaces(root, stream=None):
"""List workspaces found from a given root path.
Args:
root: root path
stream: Output stream (must have .write() method)
"""
workspaces = find_workspaces(root)
if stream is None or stream == sys.stdout:
colors = True
else:
colors = False
t = ColorTerm(enabled=colors)
if colors:
output_table = [("Path", "Name")]
else:
output_table = [("Path", "Name"), ("----", "----")]
widths = [4, 4]
any_good_workspaces = False
for w in sorted(workspaces):
try:
ws = Workspace(w)
output_table.append((w, ws.name))
widths = [max(len(w), widths[0]), max(len(ws.name), widths[1])]
any_good_workspaces = True
except WorkspaceError:
pass # XXX: Should we print a warning?
if not any_good_workspaces:
# either no paths, or all paths raised an error
stream.write("ERROR: No valid workspaces found\n")
else:
colfmts = ["{{:{:d}s}}".format(width) for width in widths]
first_row = True
for row in output_table:
for i in (0, 1):
if colors:
if first_row:
fmt = f"{t.bold}{colfmts[i]}"
else:
fmt = f"{[t.blue, t.white][i]}{colfmts[i]}"
fmt += t.reset
else:
fmt = colfmts[i]
stream.write(fmt.format(row[i]))
stream.write("\n" if i == 1 else " ")
first_row = False
def list_resources(path, long_format=None, relations=False):
"""List resources in a given DMF workspace.
Args:
path (str): Path to the workspace
long_format (bool): List in long format flag
relations (bool): Show relationships, in long format
Returns:
None
"""
t = ColorTerm()
d = DMF(path)
if long_format:
resources = list(d.find())
uuid_pfx = _uuid_prefix([r.uuid for r in resources])
fields = ("uuid", "name", "type", "modified", "created")
widths = (uuid_pfx, 30, 20, 19, 19)
colors = (t.green, t.white, t.yellow, t.white, t.white)
fmts = [f"{{:{w}s}}" for w in widths]
left_gutter = "| " if relations else ""
# table header
print(
" " * len(left_gutter)
+ t.bold
+ " ".join([f.format(v) for f, v in zip(fmts, fields)])
+ t.reset
)
def datestr(t):
return datetime.isoformat(datetime.fromtimestamp(t))
# table body
for r in resources:
values = list(getattr(r, k) for k in fields[:-2])
values.append(datestr(r.modified))
values.append(datestr(r.created))
if not values[1] and r.desc:
values[1] = r.desc[: widths[1]]
else:
values[1] = values[1][: widths[1]]
if uuid_pfx < 32:
values[0] = values[0][:uuid_pfx]
print(
left_gutter
+ " ".join([c + f.format(v) for c, f, v in zip(colors, fmts, values)])
+ t.reset
)
if relations and len(r.relations) > 0:
relitems = []
for rel in r.relations:
if rel.subject == r.uuid:
fmt = f"{t.white}{{p}}->{t.blue}{{o}}"
else:
fmt = f"{t.blue}{{s}}->{t.white}{{p}}"
item = fmt.format(
s=rel.subject[:uuid_pfx],
p=rel.predicate,
o=rel.object[:uuid_pfx],
)
relitems.append(item)
print(f"+-- {' / '.join(relitems)}")
else:
items = []
for r in d.find():
name_color = "w"
if r.name:
name = r.name
elif r.desc:
name = r.desc[:40]
name_color = t.blue
else:
name = r.uuid
name_color = t.green
item = f"{name_color}{name}{t.yellow}:{r.type}"
items.append(item)
if items:
columnized = _display_in_columns(items, max_line=t.width)
print(columnized + t.reset)
def _uuid_prefix(uuids, step=4, maxlen=32):
"""Get smallest multiple of `step` len prefix that gives unique values.
"""
full = set(uuids)
for n in range(step, maxlen, step):
prefixes = {u[:n] for u in uuids}
if len(prefixes) == len(full):
return n
return maxlen
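# Editor's note (hedged example): with UUIDs that first differ at their fifth
# character, a 4-character prefix is ambiguous, so the next multiple of `step`
# is returned:
#   _uuid_prefix(["abcd1111", "abcd2222"]) -> 8
#   _uuid_prefix(["aaaa1111", "bbbb2222"]) -> 4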
def cat_resources(path, objects=(), color=True):
d = DMF(path=path)
t = ColorTerm(enabled=color)
unmatched = set(objects)
first = True
# get all resources,
# display any that match an object as a prefix
for r in d.find():
for oid in unmatched:
if r.uuid.startswith(oid):
unmatched.remove(oid) # don't show twice
if not first:
_cat_resource_sep(t)
_cat_resource_show(t, r)
first = False
break
def _cat_resource_sep(t):
print(f"{t.blue}{'-' * 60}")
def _cat_resource_show(cp, r):
d = r.as_dict()
json.dump(d, cp, indent=2)
print()
# regular expression to find VT100 color escape codes
_noprint_re = re.compile(r"\033\[[0-9]+m")
def _display_in_columns(items, max_line=80, col_sep=" ", row_sep="\n"):
"""Take a list of items and max line width, and calculate display
of the items in columns.
The algorithm is simple, just trying increasing numbers of columns and
picking the largest number that did not result in a row that was too wide.
The input items are not re-ordered.
Args:
items (List[str]): String items
max_line (int): Maximum width for any displayed line (row)
col_sep (str): Separator between columns, after each item
row_sep (str): Separator between rows, at the end of each line
Returns:
str:
"""
if not items:
return ""
# Calculate item lengths, stripping terminal escapes
lengths, nplengths = [], []
for item in items:
clean = _noprint_re.sub("", item)
lengths.append(len(clean))
nplengths.append(len(item) - len(clean))
col_sep_len = len(col_sep) # useful later
# Give up immediately, putting everything in one column,
# if any single item doesn't fit
if max_line <= max(lengths) + col_sep_len:
return row_sep.join(items)
# Determine maximum number of columns
max_columns = 1 # number of columns
max_widths = [max_line] # width of each column
n = len(lengths)
# Determine number of columns.
# Start at 2 columns, stop when cannot fit items side-by-side
for i in range(2, n):
# initialize calculated widths of each column
widths = [0] * i
# for display where all columns are same length except last,
# number of items per column is ceiling of items/#col
nitems = int(math.ceil(n / i))
# put items in each column
for col in range(i):
pad = 0 if col == (i - 1) else col_sep_len # sep. between columns
# put items in the current column, adjusting the column
# max width to widest item
maxj = min(n, (col + 1) * nitems) # don't overshoot on last col
for j in range(col * nitems, maxj):
widths[col] = max(widths[col], lengths[j] + pad)
# total width is sum of column widths
line_len = sum(widths)
# if we went over, then stop
if line_len > max_line:
break
# otherwise, this is valid -- save and continue
max_columns, max_widths = i, widths[:]
# Put items into rows of max. number of columns determined above
nrows, rows = int(math.ceil(n / max_columns)), []
for row in range(nrows):
col, row_items = 0, []
# skip through items by nrows at a time, to move across the columns,
# and fill in the items for the current row (which acts as an offset)
for i in range(row, len(items), nrows):
# create format string with width = col. max including esc chars,
# but without padding since we will add that when we join
# the row items together
pad = 0 if col == (max_columns - 1) else col_sep_len
fmt = "{{:{n}s}}".format(n=max_widths[col] + nplengths[i] - pad)
# format row item for column
row_items.append(fmt.format(items[i]))
col += 1 # move to next column
# append the row items as big string
rows.append(col_sep.join(row_items))
# Final result is a big string of the rows joined together
return row_sep.join(rows)
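# Editor's note (hedged example): four one-character items with max_line=5 fit
# two columns, and rows are filled by stepping through the items `nrows` apart,
# so the list reads down each column:
#   _display_in_columns(["a", "b", "c", "d"], max_line=5) -> "a  c\nb  d"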
def _import_resource(filename):
"""Import a resource from 'filename'. Raises a ValueError if that
fails. Most of the code is simply generating error messages.
"""
if not os.path.exists(filename):
raise ValueError('File "{}" not found'.format(filename))
try:
f = open(filename)
except Exception as e:
raise ValueError('Cannot open file "{}": {}'.format(filename, e))
try:
j = json.load(f)
except json.JSONDecodeError as e:
raise ValueError('Cannot parse JSON file "{}": {}'.format(filename, e))
try:
r = resource.Resource(value=j)
r.validate()
except (ValueError, jsonschema.ValidationError) as err:
raise ValueError("Invalid resource: {}".format(err))
return r
def _import_jupyternb(path):
"""Create & import a resource from a Jupyter Notebook file at `path`.
Assume that `path` exists and is a Jupyter Notebook.
Args:
path (str): Jupyter Notebook file.
Returns:
(Resource) DMF Resource representing the notebook.
"""
r = resource.Resource(type_=resource.ResourceTypes.notebook)
filename = os.path.splitext(os.path.split(path)[1])[0]
# XXX: add notebook 'metadata' as FilePath metadata attr
r.v["datafiles"].append({"desc": filename, "path": path})
r.v["desc"] = filename
r.validate()
return r
def _import_python(path):
"""Create & import a resource from a Python file at `path`.
Assume that `path` exists and is a valid Python file.
Args:
path (str): Python file name.
Returns:
        (Resource) DMF Resource representing the Python file.
"""
r = resource.Resource(type_=resource.ResourceTypes.code)
filename = os.path.splitext(os.path.split(path)[1])[0]
r.v["codes"].append({"name": filename, "language": "python", "type": "module"})
r.v["datafiles"].append({"desc": filename, "path": path})
r.validate()
return r
def _import_file(path):
"""Create & import a resource from a generic file at `path`.
Assume that `path` exists.
Args:
path (str): File name.
Returns:
        (Resource) DMF Resource representing the file.
"""
r = resource.Resource(type_=resource.ResourceTypes.data)
filename = os.path.split(path)[1]
r.v["datafiles"].append({"desc": filename, "path": path})
r.v["desc"] = filename
r.validate()
return r
|
cpmpy/jobs_puzzle.py
|
tias/hakank
| 279 |
134645
|
<gh_stars>100-1000
"""
Jobs puzzle in cpmpy.
(This is a standard problem in Automatic Reasoning.)
From http://www-unix.mcs.anl.gov/~wos/mathproblems/jobs.html
'''
Jobs Puzzle
There are four people: Roberta, Thelma, Steve, and Pete.
Among them, they hold eight different jobs.
Each holds exactly two jobs.
The jobs are chef, guard, nurse, clerk, police officer (gender not implied),
teacher, actor, and boxer.
The job of nurse is held by a male.
The husband of the chef is the clerk.
Roberta is not a boxer.
Pete has no education past the ninth grade.
Roberta, the chef, and the police officer went golfing together.
Question: Who holds which jobs?
'''
The answer:
Chef Thelma
Guard Roberta
Nurse Steve
Clerk Pete
Police Steve
Teacher Roberta
Actor Pete
Boxer Thelma
Model created by <NAME>, <EMAIL>
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def jobs_puzzle():
# data
n = 4
persons = range(n)
[Roberta, Thelma, Steve, Pete] = persons
person_names = ["Roberta", "Thelma", "Steve", "Pete"]
# variables
Jobs = intvar(0,n-1,shape=2*n,name="Jobs")
chef, guard, nurse, clerk, police_officer, teacher, actor, boxer = Jobs
Jobs_s = ["chef", "guard", "nurse", "clerk", "police_officer", "teacher", "actor", "boxer"]
# constraints
model = Model( [
# Each person holds exactly two jobs.
global_cardinality_count(Jobs,[2,2,2,2]),
# The job of nurse is held by a male.
((nurse == Steve) | (nurse == Pete)),
# The husband of the chef is the clerk.
((clerk == Steve) | (clerk == Pete)),
((chef == Roberta) | (chef == Thelma)),
chef != clerk,
# Roberta is not a boxer.
Roberta != boxer,
# Pete has no education past the ninth grade.
Pete != teacher,
Pete != police_officer,
Pete != nurse,
# Roberta, [and] the chef, and the police officer went golfing together.
Roberta != chef,
chef != police_officer,
Roberta != police_officer,
# From the name of the job
((actor == Steve) | (actor == Pete)),
])
ss = CPM_ortools(model)
num_solutions = 0
while ss.solve():
num_solutions += 1
print("jobs:",Jobs.value())
print(["%s:%s" % (Jobs_s[i],person_names[Jobs[i].value()]) for i in range(2*n)])
get_different_solution(ss,Jobs)
print("num_solutions:", num_solutions)
jobs_puzzle()
|
ch2-hello-world-app/pages/urls.py
|
balazskiss1985/djangoforbeginners
| 781 |
134649
|
<gh_stars>100-1000
from django.urls import path
from .views import homePageView
urlpatterns = [
path('', homePageView, name='home'),
]
|
src/dispatch/plugins/dispatch_core/config.py
|
roor0/dispatch
| 3,417 |
134655
|
<reponame>roor0/dispatch
import logging
from starlette.config import Config
log = logging.getLogger(__name__)
config = Config(".env")
|
furniture/scripts/download_demos.py
|
KejiaChen/assembly
| 364 |
134671
|
import gdown
import os
from zipfile import ZipFile
demos = {
"Sawyer_chair_agne_0007_00XX.zip": "1-lVTCH4oPq22cLC4Mmia9AKqzDIIVDO0",
'Sawyer_table_dockstra_0279_00XX': '1QAchFmYpQGqa6zaZ2QeZH5ET-iuyerU0',
"Sawyer_bench_bjursta_0210_00XX.zip": "12b8_j1mC8-pgotjARF1aTcqH2T7FNHNF",
"Sawyer_table_bjorkudden_0207_00XX.zip": "19DA5M2iPvOYa9KG54uIxOhNF0r2zXClK",
"Sawyer_table_lack_0825_00XX.zip": "1BrgbaE9Wx-Si7VtXpUJSHRrRnyqdLJA7",
"Sawyer_toy_table_00XX.zip": "1Wg6oxkiiOX8DsYVdr7sYNmYnSdaxIskc",
"Sawyer_chair_ingolf_0650_00XX.zip": "1i9A9CVPys7LiUnePRn4OkVgczRjqT4kZ",
"Sawyer_chair_bernhard_0146_00XX.zip": "1nWnHDSQq33INXdOmIAL_28wrd6BKEUr-",
}
# url = 'https://drive.google.com/uc?id=' + unique google drive ID
# compression format = '.zip'
for key, value in demos.items():
url = "https://drive.google.com/uc?id=" + value
outfile = os.path.join("demos", key)
if os.path.exists(outfile):
print("already downloaded", outfile)
else:
gdown.download(url, outfile, quiet=False)
answer = input("Do you want to unzip demos? [y/n] ")
if answer == "y":
for key in demos.keys():
furniture_name = key.rsplit("_", 1)[0]
demo_path = os.path.join("demos", furniture_name)
os.makedirs(demo_path, exist_ok=True)
zip_file = os.path.join("demos", key)
with ZipFile(zip_file, "r") as zf:
zf.extractall(demo_path)
|
tests/test_dataset/test_ocr_dataset.py
|
yuexy/mmocr
| 2,261 |
134681
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import os.path as osp
import tempfile
from mmocr.datasets.ocr_dataset import OCRDataset
def _create_dummy_ann_file(ann_file):
ann_info1 = 'sample1.jpg hello'
ann_info2 = 'sample2.jpg world'
with open(ann_file, 'w') as fw:
for ann_info in [ann_info1, ann_info2]:
fw.write(ann_info + '\n')
def _create_dummy_loader():
loader = dict(
type='HardDiskLoader',
repeat=1,
parser=dict(type='LineStrParser', keys=['file_name', 'text']))
return loader
def test_detect_dataset():
tmp_dir = tempfile.TemporaryDirectory()
# create dummy data
ann_file = osp.join(tmp_dir.name, 'fake_data.txt')
_create_dummy_ann_file(ann_file)
# test initialization
loader = _create_dummy_loader()
dataset = OCRDataset(ann_file, loader, pipeline=[])
tmp_dir.cleanup()
# test pre_pipeline
img_info = dataset.data_infos[0]
results = dict(img_info=img_info)
dataset.pre_pipeline(results)
assert results['img_prefix'] == dataset.img_prefix
assert results['text'] == img_info['text']
    # test evaluation
metric = 'acc'
results = [{'text': 'hello'}, {'text': 'worl'}]
eval_res = dataset.evaluate(results, metric)
assert math.isclose(eval_res['word_acc'], 0.5, abs_tol=1e-4)
assert math.isclose(eval_res['char_precision'], 1.0, abs_tol=1e-4)
assert math.isclose(eval_res['char_recall'], 0.9, abs_tol=1e-4)
|
textworld/gym/envs/utils.py
|
JohnnySun8/TextWorld
| 307 |
134689
|
from typing import Iterable, Any
from numpy.random import RandomState
def shuffled_cycle(iterable: Iterable[Any], rng: RandomState, nb_loops: int = -1) -> Iterable[Any]:
"""
Yield each element of `iterable` one by one, then shuffle the elements
and start yielding from the start. Stop after `nb_loops` loops.
Arguments:
iterable: Iterable containing the elements to yield.
rng: Random generator used to shuffle the elements after each loop.
nb_loops: Number of times to go through all the elements. If set to -1,
loop an infinite number of times.
"""
elements = []
for e in iterable:
elements.append(e)
yield e
cpt = nb_loops
while cpt != 0:
cpt -= 1
rng.shuffle(elements)
for e in elements:
yield e
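# Editor's note: a small, hedged usage sketch (not part of the original module).
# The first pass preserves the input order; every later pass yields a reshuffle
# of the same elements. islice just truncates the infinite default.
if __name__ == "__main__":
    from itertools import islice
    out = list(islice(shuffled_cycle([1, 2, 3], RandomState(0)), 6))
    assert out[:3] == [1, 2, 3]
    assert sorted(out[3:]) == [1, 2, 3]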
|
code/iccp.py
|
timgates42/pypng
| 329 |
134696
|
<gh_stars>100-1000
#!/usr/bin/env python
# iccp
#
# International Color Consortium Profile
#
# Tools for manipulating ICC profiles.
#
# An ICC profile can be extracted from a PNG image (iCCP chunk).
#
#
# Non-standard ICCP tags.
#
# Apple use some (widespread but) non-standard tags. These can be
# displayed in Apple's ColorSync Utility.
# - 'vcgt' (Video Card Gamma Tag). Table to load into video
# card LUT to apply gamma.
# - 'ndin' Apple display native information.
# - 'dscm' Apple multi-localized description strings.
# - 'mmod' Apple display make and model information.
# References
#
# [ICC 2001] ICC Specification ICC.1:2001-04 (Profile version 2.4.0)
# [ICC 2004] ICC Specification ICC.1:2004-10 (Profile version 4.2.0.0)
import argparse
import struct
import warnings
import zlib
import png
class FormatError(Exception):
pass
class Profile:
"""An International Color Consortium Profile (ICC Profile)."""
def __init__(self):
self.rawtagtable = None
self.rawtagdict = {}
self.d = dict()
def fromFile(self, inp, name="<unknown>"):
# See [ICC 2004]
profile = inp.read(128)
if len(profile) < 128:
raise FormatError("ICC Profile is too short.")
(size,) = struct.unpack_from(">L", profile)
        profile += inp.read(size - len(profile))
return self.fromString(profile, name)
def fromString(self, profile, name="<unknown>"):
self.d = dict()
d = self.d
if len(profile) < 128:
raise FormatError("ICC Profile is too short.")
d.update(
zip(
[
"size",
"preferredCMM",
"version",
"profileclass",
"colourspace",
"pcs",
],
struct.unpack_from(">L4sL4s4s4s", profile),
)
)
if len(profile) < d["size"]:
warnings.warn(
"Profile size declared to be %d, but only got %d bytes"
% (d["size"], len(profile))
)
d["version"] = "%08x" % d["version"]
d["created"] = readICCdatetime(profile[24:36])
d.update(
zip(
["acsp", "platform", "flag", "manufacturer", "model"],
struct.unpack_from(">4s4s3L", profile, 36),
)
)
if d["acsp"] != "acsp":
warnings.warn("acsp field not present (not an ICC Profile?).")
d["deviceattributes"] = profile[56:64]
(d["intent"],) = struct.unpack_from(">L", profile, 64)
d["pcsilluminant"] = readICCXYZNumber(profile[68:80])
d["creator"] = profile[80:84]
d["id"] = profile[84:100]
(ntags,) = struct.unpack_from(">L", profile, 128)
d["ntags"] = ntags
fmt = "4s2L" * ntags
# tag table
tt = struct.unpack_from(">" + fmt, profile, 132)
tt = group(tt, 3)
# Could (should) detect 2 or more tags having the same sig. But
# we don't. Two or more tags with the same sig is illegal per
# the ICC spec.
# Convert (sig,offset,size) triples into (sig,value) pairs.
        rawtag = [(x[0], profile[x[1] : x[1] + x[2]]) for x in tt]
self.rawtagtable = rawtag
self.rawtagdict = dict(rawtag)
tag = dict()
# Interpret the tags whose types we know about
for sig, v in rawtag:
if sig in tag:
warnings.warn("Duplicate tag %r found. Ignoring." % sig)
continue
v = ICCdecode(v)
if v is not None:
tag[sig] = v
self.tag = tag
return self
def greyInput(self):
"""Adjust ``self.d`` dictionary for greyscale input device.
``profileclass`` is 'scnr', ``colourspace`` is 'GRAY', ``pcs``
is 'XYZ '.
"""
self.d.update(dict(profileclass="scnr", colourspace="GRAY", pcs="XYZ "))
return self
def maybeAddDefaults(self):
if self.rawtagdict:
return
self._addTags(
cprt="Copyright unknown.",
desc="created by $URL$ $Rev$",
wtpt=D50(),
)
def addTags(self, **k):
self.maybeAddDefaults()
self._addTags(**k)
def _addTags(self, **k):
"""Helper for :meth:`addTags`."""
for tag, thing in k.items():
if not isinstance(thing, (tuple, list)):
thing = (thing,)
typetag = defaulttagtype[tag]
self.rawtagdict[tag] = encode(typetag, *thing)
return self
def write(self, out):
"""Write ICC Profile to the file."""
if not self.rawtagtable:
self.rawtagtable = self.rawtagdict.items()
tags = tagblock(self.rawtagtable)
self.writeHeader(out, 128 + len(tags))
out.write(tags)
out.flush()
return self
def writeHeader(self, out, size=999):
"""Add default values to the instance's `d` dictionary, then
write a header out onto the file stream. The size of the
profile must be specified using the `size` argument.
"""
def defaultkey(d, key, value):
"""Add ``[key]==value`` to the dictionary `d`, but only if
it does not have that key already.
"""
if key in d:
return
d[key] = value
z = "\x00" * 4
defaults = dict(
preferredCMM=z,
version="02000000",
profileclass=z,
colourspace=z,
pcs="XYZ ",
created=writeICCdatetime(),
acsp="acsp",
platform=z,
flag=0,
manufacturer=z,
model=0,
deviceattributes=0,
intent=0,
pcsilluminant=encodefuns()["XYZ"](*D50()),
creator=z,
)
for k, v in defaults.items():
defaultkey(self.d, k, v)
        hl = list(map(
self.d.__getitem__,
[
"preferredCMM",
"version",
"profileclass",
"colourspace",
"pcs",
"created",
"acsp",
"platform",
"flag",
"manufacturer",
"model",
"deviceattributes",
"intent",
"pcsilluminant",
"creator",
],
        ))
# Convert to struct.pack input
hl[1] = int(hl[1], 16)
out.write(struct.pack(">L4sL4s4s4s12s4s4sL4sLQL12s4s", size, *hl))
out.write("\x00" * 44)
return self
def encodefuns():
"""Returns a dictionary mapping ICC type signature sig to encoding
function. Each function returns a string comprising the content of
the encoded value. To form the full value, the type sig and the 4
zero bytes should be prefixed (8 bytes).
"""
def desc(ascii):
"""Return textDescription type [ICC 2001] 6.5.17. The ASCII part is
filled in with the string `ascii`, the Unicode and ScriptCode parts
are empty."""
ascii += "\x00"
n = len(ascii)
return struct.pack(">L%ds2LHB67s" % n, n, ascii, 0, 0, 0, 0, "")
def text(ascii):
"""Return textType [ICC 2001] 6.5.18."""
return ascii + "\x00"
def curv(f=None, n=256):
"""Return a curveType, [ICC 2001] 6.5.3. If no arguments are
supplied then a TRC for a linear response is generated (no entries).
If an argument is supplied and it is a number (for *f* to be a
number it means that ``float(f)==f``) then a TRC for that
gamma value is generated.
Otherwise `f` is assumed to be a function that maps [0.0, 1.0] to
[0.0, 1.0]; an `n` element table is generated for it.
"""
if f is None:
return struct.pack(">L", 0)
try:
if float(f) == f:
return struct.pack(">LH", 1, int(round(f * 2 ** 8)))
except (TypeError, ValueError):
pass
assert n >= 2
table = []
M = float(n - 1)
for i in range(n):
x = i / M
table.append(int(round(f(x) * 65535)))
return struct.pack(">L%dH" % n, n, *table)
def XYZ(x, y, z):
"""
Encode an (X,Y,Z) colour.
"""
return struct.pack(">3l", *map(fs15f16, [x, y, z]))
return locals()
# Tag type defaults.
# Most tags can only have one or a few tag types.
# When encoding, we associate a default tag type with each tag so that
# the encoding is implicit.
defaulttagtype = dict(
A2B0="mft1",
A2B1="mft1",
A2B2="mft1",
bXYZ="XYZ",
bTRC="curv",
B2A0="mft1",
B2A1="mft1",
B2A2="mft1",
calt="dtim",
targ="text",
chad="sf32",
chrm="chrm",
cprt="desc",
crdi="crdi",
dmnd="desc",
dmdd="desc",
devs="",
gamt="mft1",
kTRC="curv",
gXYZ="XYZ",
gTRC="curv",
lumi="XYZ",
meas="",
bkpt="XYZ",
wtpt="XYZ",
ncol="",
ncl2="",
resp="",
pre0="mft1",
pre1="mft1",
pre2="mft1",
desc="desc",
pseq="",
psd0="data",
psd1="data",
psd2="data",
psd3="data",
ps2s="data",
ps2i="data",
rXYZ="XYZ",
rTRC="curv",
scrd="desc",
scrn="",
tech="sig",
bfd="",
vued="desc",
view="view",
)
def encode(tsig, *args):
"""Encode a Python value as an ICC type. `tsig` is the type
signature to (the first 4 bytes of the encoded value, see [ICC 2004]
section 10.
"""
fun = encodefuns()
if tsig not in fun:
raise "No encoder for type %r." % tsig
v = fun[tsig](*args)
    # Pad tsig out with spaces.
tsig = (tsig + " ")[:4]
return tsig + ("\x00" * 4) + v
def tagblock(tag):
"""`tag` should be a list of (*signature*, *element*) pairs, where
*signature* (the key) is a length 4 string, and *element* is the
content of the tag element (another string).
The entire tag block (consisting of first a table and then the
element data) is constructed and returned as a string.
"""
n = len(tag)
tablelen = 12 * n
# Build the tag table in two parts. A list of 12-byte tags, and a
# string of element data. Offset is the offset from the start of
# the profile to the start of the element data (so the offset for
# the next element is this offset plus the length of the element
# string so far).
offset = 128 + tablelen + 4
# The table. As a string.
table = ""
# The element data
element = ""
for k, v in tag:
table += struct.pack(">4s2L", k, offset + len(element), len(v))
element += v
return struct.pack(">L", n) + table + element
def iccp(out, inp):
profile = Profile().fromString(*profileFromPNG(inp))
print(profile.d, file=out)
print([x[0] for x in profile.rawtagtable], file=out)
print(profile.tag, file=out)
def profileFromPNG(inp):
"""
Extract profile from PNG file. Return (*profile*, *name*)
pair.
"""
r = png.Reader(file=inp)
_, chunk = r.chunk("iCCP")
i = chunk.index(b"\x00")
name = chunk[:i]
compression = chunk[i + 1]
assert compression == 0
profile = zlib.decompress(chunk[i + 2 :])
return profile, name
def iccpout(out, inp):
"""Extract ICC Profile from PNG file `inp` and write it to
the file `out`."""
out.write(profileFromPNG(inp)[0])
def fs15f16(x):
"""Convert float to ICC s15Fixed16Number (as a Python ``int``)."""
return int(round(x * 2 ** 16))
def D50():
"""Return D50 illuminant as an (X,Y,Z) triple."""
# See [ICC 2001] A.1
return (0.9642, 1.0000, 0.8249)
def writeICCdatetime(t=None):
"""`t` should be a gmtime tuple (as returned from
``time.gmtime()``). If not supplied, the current time will be used.
Return an ICC dateTimeNumber in a 12 byte string.
"""
import time
if t is None:
t = time.gmtime()
return struct.pack(">6H", *t[:6])
def readICCdatetime(s):
"""Convert from 12 byte ICC representation of dateTimeNumber to
ISO8601 string. See [ICC 2004] 5.1.1"""
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % struct.unpack(">6H", s)
def readICCXYZNumber(s):
"""Convert from 12 byte ICC representation of XYZNumber to (x,y,z)
triple of floats. See [ICC 2004] 5.1.11"""
return s15f16l(s)
def s15f16l(s):
"""Convert sequence of ICC s15Fixed16 to list of float."""
# Note: As long as float has at least 32 bits of mantissa, all
# values are preserved.
n = len(s) // 4
t = struct.unpack(">%dl" % n, s)
    return [v * 2 ** -16 for v in t]
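# Editor's note (hedged example): fs15f16 and s15f16l are inverse fixed-point
# conversions, e.g. fs15f16(1.0) == 65536 and
# list(s15f16l(struct.pack(">l", 65536))) == [1.0].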
# Several types and their byte encodings are defined by [ICC 2004]
# section 10. When encoded, a value begins with a 4 byte type
# signature. We use the same 4 byte type signature in the names of the
# Python functions that decode the type into a Pythonic representation.
def ICCdecode(s):
"""Take an ICC encoded tag, and dispatch on its type signature
(first 4 bytes) to decode it into a Python value. Pair (*sig*,
*value*) is returned, where *sig* is a 4 byte string, and *value* is
some Python value determined by the content and type.
"""
sig = s[0:4].strip()
f = dict(
text=RDtext,
XYZ=RDXYZ,
curv=RDcurv,
vcgt=RDvcgt,
sf32=RDsf32,
)
if sig not in f:
return None
return (sig, f[sig](s))
def RDXYZ(s):
"""Convert ICC XYZType to rank 1 array of trimulus values."""
# See [ICC 2001] 6.5.26
assert s[0:4] == "XYZ "
return readICCXYZNumber(s[8:])
def RDsf32(s):
"""Convert ICC s15Fixed16ArrayType to list of float."""
# See [ICC 2004] 10.18
assert s[0:4] == "sf32"
return s15f16l(s[8:])
def RDmluc(s):
"""Convert ICC multiLocalizedUnicodeType. This types encodes
several strings together with a language/country code for each
string. A list of (*lc*, *string*) pairs is returned where *lc* is
the 4 byte language/country code, and *string* is the string
corresponding to that code. It seems unlikely that the same
language/country code will appear more than once with different
strings, but the ICC standard does not prohibit it."""
# See [ICC 2004] 10.13
assert s[0:4] == "mluc"
n, sz = struct.unpack_from(">2L", s, 8)
assert sz == 12
record = []
for i in range(n):
        lc, l, o = struct.unpack_from(">4s2L", s, 16 + 12 * i)
        record.append((lc, s[o : o + l]))
# How are strings encoded?
return record
def RDtext(s):
"""Convert ICC textType to Python string."""
# Note: type not specified or used in [ICC 2004], only in older
# [ICC 2001].
# See [ICC 2001] 6.5.18
assert s[0:4] == "text"
return s[8:-1]
def RDcurv(s):
"""Convert ICC curveType."""
# See [ICC 2001] 6.5.3
assert s[0:4] == "curv"
(count,) = struct.unpack_from(">L", s, 8)
if count == 0:
return dict(gamma=1)
table = struct.unpack_from(">%dH" % count, s, 12)
if count == 1:
return dict(gamma=table[0] * 2 ** -8)
return table
def RDvcgt(s):
"""Convert Apple CMVideoCardGammaType."""
# See
# http://developer.apple.com/documentation/GraphicsImaging/Reference/ColorSync_Manager/Reference/reference.html#//apple_ref/c/tdef/CMVideoCardGammaType
assert s[0:4] == "vcgt"
(tagtype,) = struct.unpack_from(">L", s, 8)
if tagtype != 0:
return s[8:]
if tagtype == 0:
# Table.
channels, count, size = struct.unpack_from(">3H", s, 12)
if size == 1:
fmt = "B"
elif size == 2:
fmt = "H"
else:
return s[8:]
n = len(s[18:]) // size
t = struct.unpack_from(">%d%s" % (n, fmt), s, 18)
t = group(t, count)
return size, t
return s[8:]
def group(s, n):
return zip(*[iter(s)] * n)
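# Editor's note (hedged example): group chunks a flat sequence into n-tuples,
# e.g. list(group([1, 2, 3, 4, 5, 6], 3)) == [(1, 2, 3), (4, 5, 6)].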
def main(argv=None):
import sys
if argv is None:
argv = sys.argv
argv = argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("-o")
parser.add_argument("input", nargs="?", default="-", type=png.cli_open)
args = parser.parse_args(argv)
if args.o:
return iccpout(open(args.o, "wb"), args.input)
return iccp(sys.stdout, args.input)
if __name__ == "__main__":
main()
|
nose2/tests/functional/test_util.py
|
deeplow/nose2
| 637 |
134699
|
# -*- coding: utf-8 -*-
import six
from nose2 import util
from nose2.tests._common import TestCase, support_file
class UtilTests(TestCase):
def test_name_from_path(self):
test_module = support_file("scenario/tests_in_package/pkg1/test/test_things.py")
test_package_path = support_file("scenario/tests_in_package")
self.assertEqual(
util.name_from_path(test_module),
("pkg1.test.test_things", test_package_path),
)
def test_non_ascii_output(self):
class D:
def __init__(self):
self.out = []
def write(self, arg):
self.out.append(arg)
stream = D()
decorated = util._WritelnDecorator(stream)
string = six.u("\u00dcnic\u00f6de")
decorated.write(string)
str("".join(stream.out))
|
rlpyt/spaces/gym_wrapper_schema.py
|
traffic-lights/rlpyt
| 2,122 |
134724
|
import numpy as np
from gym.spaces.dict import Dict as GymDict
from rlpyt.utils.collections import NamedTupleSchema, NamedTuple
from rlpyt.spaces.composite import Composite
class GymSpaceWrapper:
"""Wraps a gym space to match the rlpyt interface; most of
the functionality is for automatically converting a GymDict (dictionary)
space into an rlpyt Composite space (and converting between the two). Use
inside the initialization of the environment wrapper for a gym environment.
"""
def __init__(self, space, null_value=0, name="obs", force_float32=True,
schemas=None):
"""Input ``space`` is a gym space instance.
Input ``name`` governs naming of internal NamedTupleSchemas used to
store Gym info.
"""
self._gym_space = space
self._base_name = name
self._null_value = null_value
if schemas is None:
schemas = {}
self._schemas = schemas
if isinstance(space, GymDict):
nt = self._schemas.get(name)
if nt is None:
nt = NamedTupleSchema(name, [k for k in space.spaces.keys()])
schemas[name] = nt # Put at module level for pickle.
elif not (isinstance(nt, NamedTupleSchema) and
sorted(nt._fields) ==
sorted([k for k in space.spaces.keys()])):
raise ValueError(f"Name clash in schemas: {name}.")
spaces = [GymSpaceWrapper(
space=v,
null_value=null_value,
name="_".join([name, k]),
force_float32=force_float32,
schemas=schemas)
for k, v in space.spaces.items()]
self.space = Composite(spaces, nt)
self._dtype = None
else:
self.space = space
self._dtype = np.float32 if (space.dtype == np.float64 and
force_float32) else None
def sample(self):
"""Returns a single sample in a namedtuple (for composite) or numpy
array using the the ``sample()`` method of the underlying gym
space(s)."""
sample = self.space.sample()
if self.space is self._gym_space: # Not Composite.
# Force numpy array, might force float64->float32.
sample = np.asarray(sample, dtype=self._dtype)
return sample
def null_value(self):
"""Similar to ``sample()`` but returning a null value."""
if self.space is self._gym_space:
null = np.asarray(self.space.sample(), dtype=self._dtype)
if self._null_value is not None:
try:
null[:] = self._null_value
except IndexError: # e.g. scalar.
null.fill(self._null_value)
else:
null.fill(0)
else: # Is composite.
null = self.space.null_value()
return null
def convert(self, value):
"""For dictionary space, use to convert wrapped env's dict to rlpyt
namedtuple, i.e. inside the environment wrapper's ``step()``, for
observation output to the rlpyt sampler (see helper function in
file)"""
return dict_to_nt(value, name=self._base_name, schemas=self._schemas)
def revert(self, value):
"""For dictionary space, use to revert namedtuple action into wrapped
env's dict, i.e. inside the environment wrappers ``step()``, for input
to the underlying gym environment (see helper function in file)."""
return nt_to_dict(value)
@property
def dtype(self):
return self._dtype or self.space.dtype
@property
def shape(self):
return self.space.shape
def contains(self, x):
return self.space.contains(x)
def __repr__(self):
return self.space.__repr__()
def __eq__(self, other):
return self.space.__eq__(other)
@property
def low(self):
return self.space.low
@property
def high(self):
return self.space.high
@property
def n(self):
return self.space.n
def seed(self, seed=None):
if type(self.space) is Composite:
return [space.seed(seed=seed) for space in self.space.spaces]
else:
return self.space.seed(seed=seed)
def dict_to_nt(value, name, schemas):
if isinstance(value, dict):
values = {k: dict_to_nt(v, "_".join([name, k]))
for k, v in value.items()}
return schemas[name](**values)
if isinstance(value, np.ndarray) and value.dtype == np.float64:
return np.asarray(value, dtype=np.float32)
return value
def nt_to_dict(value):
if isinstance(value, NamedTuple):
return {k: nt_to_dict(v) for k, v in zip(value._fields, value)}
return value
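# Editor's note: a hedged usage sketch (not part of the original module). It
# assumes `gym` is installed; a gym Dict space becomes an rlpyt Composite whose
# samples are namedtuples built from the schema machinery above.
#
#   import gym.spaces
#   space = gym.spaces.Dict({"pos": gym.spaces.Box(-1.0, 1.0, (2,)),
#                            "vel": gym.spaces.Box(-1.0, 1.0, (2,))})
#   wrapped = GymSpaceWrapper(space, name="obs")
#   sample = wrapped.sample()         # namedtuple with fields 'pos' and 'vel'
#   as_dict = wrapped.revert(sample)  # plain dict for the underlying gym env
#   again = wrapped.convert(as_dict)  # and back to the namedtuple form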
|
venv/Lib/site-packages/toolz/tests/test_curried.py
|
ajayiagbebaku/NFL-Model
| 3,749 |
134732
|
import toolz
import toolz.curried
from toolz.curried import (take, first, second, sorted, merge_with, reduce,
merge, operator as cop)
from collections import defaultdict
from importlib import import_module
from operator import add
def test_take():
assert list(take(2)([1, 2, 3])) == [1, 2]
def test_first():
assert first is toolz.itertoolz.first
def test_merge():
assert merge(factory=lambda: defaultdict(int))({1: 1}) == {1: 1}
assert merge({1: 1}) == {1: 1}
assert merge({1: 1}, factory=lambda: defaultdict(int)) == {1: 1}
def test_merge_with():
assert merge_with(sum)({1: 1}, {1: 2}) == {1: 3}
def test_merge_with_list():
assert merge_with(sum, [{'a': 1}, {'a': 2}]) == {'a': 3}
def test_sorted():
assert sorted(key=second)([(1, 2), (2, 1)]) == [(2, 1), (1, 2)]
def test_reduce():
assert reduce(add)((1, 2, 3)) == 6
def test_module_name():
assert toolz.curried.__name__ == 'toolz.curried'
def test_curried_operator():
for k, v in vars(cop).items():
if not callable(v):
continue
if not isinstance(v, toolz.curry):
try:
# Make sure it is unary
v(1)
except TypeError:
try:
v('x')
except TypeError:
pass
else:
continue
raise AssertionError(
'toolz.curried.operator.%s is not curried!' % k,
)
# Make sure this isn't totally empty.
assert len(set(vars(cop)) & {'add', 'sub', 'mul'}) == 3
def test_curried_namespace():
exceptions = import_module('toolz.curried.exceptions')
namespace = {}
def should_curry(func):
if not callable(func) or isinstance(func, toolz.curry):
return False
nargs = toolz.functoolz.num_required_args(func)
if nargs is None or nargs > 1:
return True
return nargs == 1 and toolz.functoolz.has_keywords(func)
def curry_namespace(ns):
return {
name: toolz.curry(f) if should_curry(f) else f
for name, f in ns.items() if '__' not in name
}
from_toolz = curry_namespace(vars(toolz))
from_exceptions = curry_namespace(vars(exceptions))
namespace.update(toolz.merge(from_toolz, from_exceptions))
namespace = toolz.valfilter(callable, namespace)
curried_namespace = toolz.valfilter(callable, toolz.curried.__dict__)
if namespace != curried_namespace:
missing = set(namespace) - set(curried_namespace)
if missing:
raise AssertionError('There are missing functions in toolz.curried:\n %s'
% ' \n'.join(sorted(missing)))
extra = set(curried_namespace) - set(namespace)
if extra:
raise AssertionError('There are extra functions in toolz.curried:\n %s'
% ' \n'.join(sorted(extra)))
unequal = toolz.merge_with(list, namespace, curried_namespace)
unequal = toolz.valfilter(lambda x: x[0] != x[1], unequal)
messages = []
for name, (orig_func, auto_func) in sorted(unequal.items()):
if name in from_exceptions:
messages.append('%s should come from toolz.curried.exceptions' % name)
elif should_curry(getattr(toolz, name)):
messages.append('%s should be curried from toolz' % name)
else:
messages.append('%s should come from toolz and NOT be curried' % name)
raise AssertionError('\n'.join(messages))
|
finvizfinance/__init__.py
|
lit26/finvizfinance
| 167 |
134744
|
<reponame>lit26/finvizfinance
"""
.. module:: __init__
:synopsis: finvizfinance package general information
.. moduleauthor:: <NAME> <<EMAIL>>
"""
__version__ = "0.10"
__author__ = "<NAME>"
|
bin/gftools-builder.py
|
yanone/gftools
| 150 |
134777
|
#!/usr/bin/env python3
# Copyright 2020 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from gftools.builder import GFBuilder
from gftools.builder import __doc__ as GFBuilder_doc
parser = argparse.ArgumentParser(
description=("Build a font family"),
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="#"*79 + "\n" + GFBuilder_doc,
)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Show extra debugging information",
)
parser.add_argument("--family-name", help="Font family name")
parser.add_argument(
"--no-autohint",
action="store_true",
default=False,
help="Don't run ttfautohint on static TTFs",
)
parser.add_argument("--stylespace", help="Path to a statmake stylespace file")
parser.add_argument(
"--no-clean-up",
action="store_true",
default=False,
help="Do not remove temporary files (instance_ufos/)")
parser.add_argument("file", nargs="+", help="YAML build config file *or* source files")
parser.add_argument("--dump-config", type=str, help="Config file to generate")
args = parser.parse_args()
if len(args.file) == 1 and (
args.file[0].endswith(".yaml") or args.file[0].endswith(".yml")
):
builder = GFBuilder(configfile=args.file[0])
else:
config={"sources": args.file}
if args.stylespace:
config["stylespaceFile"] = args.stylespace
if args.family_name:
config["familyName"] = args.family_name
builder = GFBuilder(config=config)
if args.no_autohint:
builder.config["autohintTTF"] = False
if args.no_clean_up:
builder.config["cleanUp"] = False
if args.debug:
builder.config["logLevel"] = "DEBUG"
if args.dump_config:
import sys
import yaml
with open(args.dump_config, "w") as fp:
config= {k: v for (k, v) in builder.config.items() if v is not None}
fp.write(yaml.dump(config, Dumper=yaml.SafeDumper))
sys.exit()
builder.build()
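# Editor's note (hedged usage sketch, not part of the original script): typical
# invocations, per the argument parser above, are
#   python bin/gftools-builder.py config.yaml
#   python bin/gftools-builder.py --family-name "My Family" sources/MyFamily.glyphs
# The first form reads a YAML build config; the second builds straight from the
# listed sources and can dump the derived config with --dump-config out.yaml.
# (File names here are illustrative only.)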
|
tests/test_watcher.py
|
hiyongz/uiautomator2
| 4,493 |
134778
|
# coding: utf-8
#
import uiautomator2 as u2
def test_watch_context(sess: u2.Device):
with sess.watch_context(builtin=True) as ctx:
ctx.when("App").click()
sess(text='Menu').click()
assert sess(text='Inflate from XML').wait()
def teardown_function(d: u2.Device):
print("Teardown", d)
|
sarpy/io/general/nitf_elements/tres/unclass/ACCVTB.py
|
bombaci-vsc/sarpy
| 119 |
134797
|
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
class PT(TREElement):
def __init__(self, value):
super(PT, self).__init__()
self.add_field('LON', 's', 15, value)
self.add_field('LAT', 's', 15, value)
class ACVT(TREElement):
def __init__(self, value):
super(ACVT, self).__init__()
self.add_field('UNIAAV', 's', 3, value)
if self.UNIAAV != '':
self.add_field('AAV', 's', 5, value)
self.add_field('UNIAPV', 's', 3, value)
if self.UNIAPV != '':
self.add_field('APV', 's', 5, value)
self.add_field('NUMPTS', 'd', 3, value)
self.add_loop('PTs', self.NUMPTS, PT, value)
class ACCVTBType(TREElement):
def __init__(self, value):
super(ACCVTBType, self).__init__()
self.add_field('NUMACVT', 'd', 2, value)
self.add_loop('ACVTs', self.NUMACVT, ACVT, value)
class ACCVTB(TREExtension):
_tag_value = 'ACCVTB'
_data_type = ACCVTBType
|
ais/stream/checksum.py
|
andyvan-trabus/libais
| 161 |
134812
|
<filename>ais/stream/checksum.py
"""Utilities for working with NMEA strings."""
import re
import sys
import time
nmeaChecksumRegExStr = r"""\,[0-9]\*[0-9A-F][0-9A-F]"""
nmeaChecksumRE = re.compile(nmeaChecksumRegExStr)
def checksumStr(data):
"""Take a NMEA 0183 string and compute the checksum.
    @param data: NMEA message. Leading ?/! and trailing checksum are optional
@type data: str
@return: hexadecimal value
@rtype: str
Checksum is calculated by xor'ing everything between ? or ! and the *
>>> checksumStr("!AIVDM,1,1,,B,35MsUdPOh8JwI:0HUwquiIFH21>i,0*09")
'09'
>>> checksumStr("AIVDM,1,1,,B,35MsUdPOh8JwI:0HUwquiIFH21>i,0")
'09'
"""
# FIX: strip off new line at the end too
if data[0]=='!' or data[0]=='?': data = data[1:]
if data[-1]=='*': data = data[:-1]
if data[-3]=='*': data = data[:-3]
    # FIX: rename sum so it does not shadow the builtin function
checksum = 0
for c in data:
checksum = checksum ^ ord(c)
sum_hex = "%x" % checksum
if len(sum_hex) == 1:
sum_hex = '0' + sum_hex
return sum_hex.upper()
def isChecksumValid(nmeaStr, allowTailData=True):
"""Return True if the string checks out with the checksum.
    @param allowTailData: Permit handling of the Coast Guard format with data after the checksum
@param data: NMEA message. Leading ?/! are optional
@type data: str
@return: True if the checksum matches
@rtype: bool
>>> isChecksumValid("!AIVDM,1,1,,B,35MsUdPOh8JwI:0HUwquiIFH21>i,0*09")
True
Corrupted:
>>> isChecksumValid("!AIVDM,11,1,,B,35MsUdPOh8JwI:0HUwquiIFH21>i,0*09")
False
"""
if allowTailData:
match = nmeaChecksumRE.search(nmeaStr)
if not match:
return False
nmeaStr = nmeaStr[:match.end()]
if nmeaStr[-3]!='*':
return False # Bad string without proper checksum.
checksum=nmeaStr[-2:]
if checksum.upper() == checksumStr(nmeaStr).upper():
return True
return False
|
experiments/segmentation/utils.py
|
ElementAI/baal
| 575 |
134830
|
from typing import List
import numpy as np
import segmentation_models_pytorch as smp
from segmentation_models_pytorch.base.modules import Activation
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets
from torchvision.transforms import transforms
from baal import ActiveLearningDataset
pascal_voc_ids = np.array([
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
])
def active_pascal(
path="/tmp",
*args,
transform=transforms.ToTensor(),
test_transform=transforms.ToTensor(),
**kwargs,
):
"""Get active Pascal-VOC 2102 datasets.
Arguments:
path : str
The root folder for the Pascal dataset
Returns:
ActiveLearningDataset
the active learning dataset, training data
Dataset
the evaluation dataset
"""
return (
ActiveLearningDataset(datasets.VOCSegmentation(
path, image_set='train', transform=transform, download=False, *args, **kwargs
)),
datasets.VOCSegmentation(path, image_set='val', transform=test_transform, download=False,
*args, **kwargs),
)
class SegmentationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, activation=None, upsampling=1):
dropout = nn.Dropout2d(0.5)
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
padding=kernel_size // 2)
upsampling = nn.UpsamplingBilinear2d(
scale_factor=upsampling) if upsampling > 1 else nn.Identity()
activation = Activation(activation)
super().__init__(dropout, conv2d, upsampling, activation)
def add_dropout(model: smp.Unet, decoder_channels: List[int] = (256, 128, 64, 32, 16),
classes=1, activation=None):
seg_head = SegmentationHead(
in_channels=decoder_channels[-1],
out_channels=classes,
activation=activation,
kernel_size=3,
)
model.add_module('segmentation_head', seg_head)
model.initialize()
class FocalLoss(nn.Module):
"""
References:
Author: clcarwin
Site https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
"""
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = logpt.data.exp()
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
select = (target != 0).type(torch.LongTensor).to(self.alpha.device)
at = self.alpha.gather(0, select.data.view(-1))
logpt = logpt * at
loss = -1 * (1 - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
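# Editor's note: a minimal, hedged smoke test (not part of the original module;
# it assumes the module's imports, including torch, are available). With gamma=0
# and no alpha, FocalLoss reduces to standard cross entropy over all pixels.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(4, 3, 8, 8)         # N, C, H, W
    target = torch.randint(0, 3, (4, 8, 8))  # N, H, W class indices
    focal = FocalLoss(gamma=0)(logits, target)
    ce = F.cross_entropy(logits, target)
    assert torch.allclose(focal, ce, atol=1e-5)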
|
pyfakefs/tests/patched_packages_test.py
|
yarikoptic/pyfakefs
| 422 |
134832
|
<reponame>yarikoptic/pyfakefs<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides patches for some commonly used modules that enable them to work
with pyfakefs.
"""
import os
from pyfakefs import fake_filesystem_unittest
try:
import pandas as pd
except ImportError:
pd = None
try:
import xlrd
except ImportError:
xlrd = None
try:
import openpyxl
except ImportError:
openpyxl = None
class TestPatchedPackages(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
if pd is not None:
def test_read_csv(self):
path = '/foo/bar.csv'
self.fs.create_file(path, contents='1,2,3,4')
df = pd.read_csv(path)
assert (df.columns == ['1', '2', '3', '4']).all()
def test_read_table(self):
path = '/foo/bar.csv'
self.fs.create_file(path, contents='1|2|3|4')
df = pd.read_table(path, delimiter='|')
assert (df.columns == ['1', '2', '3', '4']).all()
if pd is not None and xlrd is not None:
def test_read_excel(self):
path = '/foo/bar.xlsx'
src_path = os.path.dirname(os.path.abspath(__file__))
src_path = os.path.join(src_path, 'fixtures', 'excel_test.xlsx')
# map the file into another location to be sure that
# the real fs is not used
self.fs.add_real_file(src_path, target_path=path)
df = pd.read_excel(path)
assert (df.columns == [1, 2, 3, 4]).all()
if pd is not None and openpyxl is not None:
def test_write_excel(self):
self.fs.create_dir('/foo')
path = '/foo/bar.xlsx'
df = pd.DataFrame([[0, 1, 2, 3]])
with pd.ExcelWriter(path) as writer:
df.to_excel(writer)
df = pd.read_excel(path)
assert (df.columns == ['Unnamed: 0', 0, 1, 2, 3]).all()
|
ngym_shaping/utils/tasktools.py
|
manuelmolano/ngym_shaping
| 112 |
134838
|
<filename>ngym_shaping/utils/tasktools.py<gh_stars>100-1000
from __future__ import division
from collections import OrderedDict
import numpy as np
def to_map(*args):
"produces ordered dict from given inputs"
if isinstance(args[0], list):
var_list = args[0]
else:
var_list = args
od = OrderedDict()
for i, v in enumerate(var_list):
od[v] = i
return od
def get_idx(t, start_end):
"""
auxiliary function for defining task periods
"""
start, end = start_end
return list(np.where((start <= t) & (t < end))[0])
def get_periods_idx(dt, periods):
"""
function for defining task periods
"""
t = np.linspace(0, periods['tmax'], int(periods['tmax']/dt)+1)
return t, {k: get_idx(t, v) for k, v in periods.items() if k != 'tmax'}
def minmax_number(dist, args):
"""Given input to the random_number_fn function, return min and max."""
if dist == 'uniform':
return args[0], args[1]
elif dist == 'choice':
return np.min(args), np.max(args)
elif dist == 'truncated_exponential':
return args[1], args[2]
elif dist == 'constant':
return args, args
else:
raise ValueError('Unknown dist:', str(dist))
def circular_dist(original_dist):
'''Get the distance in periodic boundary conditions.'''
return np.minimum(abs(original_dist), 2 * np.pi - abs(original_dist))
def divide(x, y):
try:
z = x/y
if np.isnan(z):
raise ZeroDivisionError
return z
except ZeroDivisionError:
return 0
def correct_2AFC(perf):
"""
computes performance
"""
p_decision = perf.n_decision/perf.n_trials
p_correct = divide(perf.n_correct, perf.n_decision)
return p_decision, p_correct
def compute_perf(perf, reward, num_tr_perf, tr_perf):
if tr_perf:
num_tr_perf += 1
perf += (reward - perf)/num_tr_perf
return perf, num_tr_perf
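# Editor's note: a few hedged self-checks of the helpers above (not part of the
# original module).
if __name__ == "__main__":
    assert to_map('fixation', 'stimulus', 'decision') == OrderedDict(
        [('fixation', 0), ('stimulus', 1), ('decision', 2)])
    assert np.isclose(circular_dist(1.5 * np.pi), 0.5 * np.pi)
    assert divide(1, 0) == 0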
|
packages/jet_bridge_base/jet_bridge_base/fields/integer.py
|
bokal2/jet-bridge
| 1,247 |
134888
|
<reponame>bokal2/jet-bridge<filename>packages/jet_bridge_base/jet_bridge_base/fields/integer.py
import six
from jet_bridge_base.fields.field import Field
class IntegerField(Field):
field_error_messages = {
'invalid': 'not a valid integer'
}
def to_internal_value_item(self, value):
if value is None:
return
value = six.text_type(value).strip()
try:
return int(value)
except (ValueError, TypeError):
self.error('invalid')
def to_representation_item(self, value):
if value is None:
return
return six.text_type(value)
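# Editor's note (hedged usage sketch): assuming the base Field class can be
# instantiated without arguments and exposes these item-level hooks directly,
# the conversions behave roughly like:
#   IntegerField().to_internal_value_item(' 42 ')  -> 42
#   IntegerField().to_internal_value_item('abc')   -> calls self.error('invalid')
#   IntegerField().to_representation_item(42)      -> '42'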
|
plugins/module_utils/oci_identity_custom_helpers.py
|
slmjy/oci-ansible-collection
| 108 |
134895
|
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
"""This module contains all the customisations for identity modules."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
try:
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = oci_common_utils.get_logger("oci_identity_custom_helpers")
def _debug(s):
get_logger().debug(s)
def get_logger():
return logger
class ApiKeyHelperCustom:
# For idempotence comparison
# We pass as key and get key_value in response
def get_existing_resource_dict_for_idempotence_check(self, resource):
existing_dict = super(
ApiKeyHelperCustom, self
).get_existing_resource_dict_for_idempotence_check(resource)
if "key_value" in existing_dict:
existing_dict["key"] = existing_dict["key_value"]
return existing_dict
def get_exclude_attributes(self):
exclude_attributes = super(ApiKeyHelperCustom, self).get_exclude_attributes()
        remove_excluded_attributes = ["key"]
        exclude_attributes = [
            x for x in exclude_attributes if x not in remove_excluded_attributes
        ]
return exclude_attributes
class CompartmentHelperCustom:
def __init__(self, module, resource_type, service_client_class, namespace):
if module.params.get("compartment_id") and module.params.get(
"parent_compartment_id"
):
module.fail_json(
msg="Parameters are mutually exclusive: compartment_id, parent_compartment_id."
)
super(CompartmentHelperCustom, self).__init__(
module, resource_type, service_client_class, namespace
)
    # default implementation ends up using "compartment_id" from the response
# as the unique identifier but we really want to use "id"
# otherwise we will end up trying to update the parent compartment of the
# one we are aiming for
def set_required_ids_in_module_when_name_is_identifier(self, resource):
id_param = self.get_module_resource_id_param()
if resource.get("id"):
self.module.params[id_param] = resource["id"]
# the module uses 'parent_compartment_id' so as not to conflict with the compartment_id
# parameter on UPDATE, but the SDK/API operation expects 'compartment_id'
def get_create_model(self):
params_copy = self.module.params.copy()
params_copy["compartment_id"] = params_copy["parent_compartment_id"]
return oci_common_utils.convert_input_data_to_model_class(
params_copy, self.get_create_model_class()
)
def get_required_kwargs_for_list(self):
# the module 'compartment_id' parameter represents the unique identifier for a compartment
# to be used by update and delete
# any time we are listing compartments to do an idempotency check or to find a resource by name
# we want to use 'parent_compartment_id'
return {"compartment_id": self.module.params.get("parent_compartment_id")}
class CompartmentFactsHelperCustom:
def __init__(self, module, resource_type, service_client_class, namespace):
if module.params.get("compartment_id") and module.params.get(
"parent_compartment_id"
):
module.fail_json(
msg="Parameters are mutually exclusive: compartment_id, parent_compartment_id."
)
super(CompartmentFactsHelperCustom, self).__init__(
module, resource_type, service_client_class, namespace
)
def list_subcompartments(self, compartment_id, optional_kwargs):
subcompartments = []
immediate_subcompartments = oci_common_utils.list_all_resources(
self.client.list_compartments,
compartment_id=compartment_id,
**optional_kwargs
)
if immediate_subcompartments:
subcompartments.extend(immediate_subcompartments)
for comp in immediate_subcompartments:
subcompartments.extend(self.list_subcompartments(comp.id, optional_kwargs))
return subcompartments
def list_resources(self):
optional_list_method_params = [
"access_level",
"compartment_id_in_subtree",
"name",
"sort_by",
"sort_order",
"lifecycle_state",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
parent_compartment_id = self.module.params.get("parent_compartment_id")
# service doesn't support `compartment_id_in_subtree` for non root compartments
# but we do as a matter of convenience
if not self.is_compartment_root(
parent_compartment_id
) and self.module.params.get("compartment_id_in_subtree"):
# compartment_id_in_subtree is not allowed on list calls for non root compartment
del optional_kwargs["compartment_id_in_subtree"]
# name filtering is done AFTER because we don't want to prune the search
# based on name without searching subcompartments
if "name" in optional_kwargs:
del optional_kwargs["name"]
subcompartments = self.list_subcompartments(
parent_compartment_id, optional_kwargs
)
if self.module.params.get("name"):
subcompartments = [
compartment
for compartment in subcompartments
if compartment.name == self.module.params.get("name")
]
return subcompartments
else:
return oci_common_utils.list_all_resources(
self.client.list_compartments,
compartment_id=self.module.params.get("parent_compartment_id"),
**optional_kwargs
)
def is_compartment_root(self, compartment_id):
# Returns True when compartment_id is OCID of the tenancy.
# can't do GET compartment because user may not have access
# so use GetTenancy which will return the tenancy if
# compartment_id == tenancy_ocid and will give 404 otherwise
try:
oci_common_utils.call_with_backoff(
self.client.get_tenancy, tenancy_id=compartment_id
).data
except ServiceError as se:
if se.status == 404:
return False
else:
raise
return True
class MfaTotpDeviceActionsHelperCustom:
def is_action_necessary(self, action, resource):
if action.upper() == "ACTIVATE":
if resource.is_activated:
return False
return True
elif action.upper() == "GENERATE_TOTP_SEED":
return True
class MfaTotpDeviceHelperCustom:
def get_matching_resource(self):
# mfa_totp_device has no create model, there are no params
# and a user can only have one mfa_totp_device
# therefore, if any mfa totp device exists for this user,
# it is a match
for resource in self.list_resources():
if not self._is_resource_active(resource):
continue
return resource
return None
class PolicyHelperCustom:
# user must pass in version date in format YYYY-MM-DD but service
# returns it as 2020-01-17T00:00:00+00:00 so we need to normalize
# for comparison purposes
def update_version_date(self, model_dict):
if model_dict["version_date"]:
model_dict["version_date"] = "{version_date}T00:00:00+00:00".format(
version_date=model_dict["version_date"]
)
return model_dict
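    # e.g. a module value of "2020-01-17" is normalized by update_version_date above to
    # "2020-01-17T00:00:00+00:00" before being compared with the service response.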
def get_create_model_dict_for_idempotence_check(self, create_model):
model_dict = super(
PolicyHelperCustom, self
).get_create_model_dict_for_idempotence_check(create_model)
self.update_version_date(model_dict)
return model_dict
def get_update_model_dict_for_idempotence_check(self, update_model):
model_dict = super(
PolicyHelperCustom, self
).get_update_model_dict_for_idempotence_check(update_model)
self.update_version_date(model_dict)
return model_dict
class UiPasswordHelperCustom:
def is_create(self):
return True
class TagHelperCustom:
def get_update_model_dict_for_idempotence_check(self, update_model):
update_model_dict = super(
TagHelperCustom, self
).get_update_model_dict_for_idempotence_check(update_model)
resource = self.get_resource().data
if (
update_model_dict["validator"]
and update_model_dict["validator"]["validator_type"] == "DEFAULT"
and resource.validator is None
):
update_model_dict["validator"] = None
return update_model_dict
class UserCapabilitiesHelperCustom:
# As per API documentation operation `UpdateUserCapabilities` returns `User` resource in response body.
# This override is required as generated module doesn't have get_resource method to return `User` resource.
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_user, user_id=self.module.params.get("user_id"),
)
# for idempotency check we compare `UpdateUserCapabilitiesDetails` and `UserCapabilities`.
# There is no API call to fetch just `UserCapabilities` resource for a user. This resource is a part of
# `User` resource.
def is_update_necessary(self, existing_resource_dict):
update_model = self.get_update_model()
update_model_dict = self.get_update_model_dict_for_idempotence_check(
update_model
)
update_is_necessary = not oci_common_utils.compare_dicts(
update_model_dict, existing_resource_dict["capabilities"]
)
_debug(
"is update necessary for {resource_type}: {update_is_necessary}".format(
resource_type=self.get_response_field_name(),
update_is_necessary=update_is_necessary,
)
)
return update_is_necessary
class UserStateHelperCustom:
# As per API documentation operation `UpdateUserState` returns `User` resource in response body.
# This override is required as generated module doesn't have get_resource method to return `User` resource.
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_user, user_id=self.module.params.get("user_id"),
)
# operation `UpdateUserState` updates state to unblocked. Only "false" is supported
# (for changing the state to unblocked). If set to "true" API throws an error: Changing
# user state to 'Blocked' is not supported.
def is_update_necessary(self, existing_resource_dict):
if self.module.params.get("blocked") is not None and not self.module.params.get(
"blocked"
):
if existing_resource_dict.get("inactive_status", None) == 4:
return True
else:
return False
return super(UserStateHelperCustom, self).is_update_necessary(
existing_resource_dict
)
class TagActionsHelperCustom:
# overriding the perform_action method as bulk_delete tags operation does not support
# get_resource method which is an integral part of the main perform_action method
def perform_action(self, action):
action_fn = self.get_action_fn(action)
if not action_fn:
self.module.fail_json(msg="{0} not supported by the module.".format(action))
if self.check_mode:
return self.prepare_result(
changed=True,
resource_type=self.get_response_field_name(action),
resource=None,
)
# if sent list is empty or None, return back without performing the action with
# status of resource as not changed
if action == "bulk_delete":
tag_ids = self.module.params.get("tag_definition_ids")
if not tag_ids:
return self.prepare_result(
changed=False,
resource_type=self.get_response_field_name(action),
resource=None,
)
try:
action_fn()
except MaximumWaitTimeExceeded as mwtex:
self.module.fail_json(msg=str(mwtex))
except ServiceError as se:
self.module.fail_json(
msg="Performing action failed with exception: {0}".format(se.message)
)
else:
return self.prepare_result(
changed=True,
resource_type=self.get_response_field_name(action),
resource=None,
)
class CompartmentActionsHelperCustom:
# overriding the perform_action method as bulk_move and bulk_delete actions do not support
# get_resource method which is an integral part of the main perform_action method
def perform_action(self, action):
if action in ["move", "recover"]:
return super(CompartmentActionsHelperCustom, self).perform_action(action)
action_fn = self.get_action_fn(action)
if not action_fn:
self.module.fail_json(msg="{0} not supported by the module.".format(action))
if self.check_mode:
return self.prepare_result(
changed=True,
resource_type=self.get_response_field_name(action),
resource=None,
)
# if resource list is empty or None, return back without performing the action with
# status of resource as not changed
resources_list = self.module.params.get("resources")
if not resources_list:
return self.prepare_result(
changed=False,
resource_type=self.get_response_field_name(action),
resource=None,
)
try:
action_fn()
except MaximumWaitTimeExceeded as mwtex:
self.module.fail_json(msg=str(mwtex))
except ServiceError as se:
self.module.fail_json(
msg="Performing action failed with exception: {0}".format(se.message)
)
else:
return self.prepare_result(
changed=True,
resource_type=self.get_response_field_name(action),
resource=None,
)
# this method is overridden to ensure idempotency for the move and recover actions
def is_action_necessary(self, action, resource_data):
if action == "move":
return resource_data.compartment_id != self.module.params.get(
"target_compartment_id"
)
if action == "recover":
return resource_data.lifecycle_state == "DELETED"
return super(CompartmentActionsHelperCustom, self).is_action_necessary(
action, resource_data
)
class TagDefaultHelperCustom:
def get_optional_kwargs_for_list(self):
if self.module.params.get("tag_definition_id"):
return dict(tag_definition_id=self.module.params.get("tag_definition_id"))
elif self.module.params.get("compartment_id"):
            return dict(compartment_id=self.module.params.get("compartment_id"))
return dict()
|
python/ql/test/2/library-tests/PointsTo/imports/test.py
|
vadi2/codeql
| 4,036 |
134900
|
from package import foo as myfoo
|
models/stylegan_tf_official/run_metrics.py
|
CV-IP/interfacegan
| 855 |
134901
|
# Copyright (c) 2019, <NAME>. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main entry point for training StyleGAN and ProGAN networks."""
import dnnlib
from dnnlib import EasyDict
import dnnlib.tflib as tflib
import config
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
def run_pickle(submit_config, metric_args, network_pkl, dataset_args, mirror_augment):
ctx = dnnlib.RunContext(submit_config)
tflib.init_tf()
print('Evaluating %s metric on network_pkl "%s"...' % (metric_args.name, network_pkl))
metric = dnnlib.util.call_func_by_name(**metric_args)
print()
metric.run(network_pkl, dataset_args=dataset_args, mirror_augment=mirror_augment, num_gpus=submit_config.num_gpus)
print()
ctx.close()
#----------------------------------------------------------------------------
def run_snapshot(submit_config, metric_args, run_id, snapshot):
ctx = dnnlib.RunContext(submit_config)
tflib.init_tf()
print('Evaluating %s metric on run_id %s, snapshot %s...' % (metric_args.name, run_id, snapshot))
run_dir = misc.locate_run_dir(run_id)
network_pkl = misc.locate_network_pkl(run_dir, snapshot)
metric = dnnlib.util.call_func_by_name(**metric_args)
print()
metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus)
print()
ctx.close()
#----------------------------------------------------------------------------
def run_all_snapshots(submit_config, metric_args, run_id):
ctx = dnnlib.RunContext(submit_config)
tflib.init_tf()
print('Evaluating %s metric on all snapshots of run_id %s...' % (metric_args.name, run_id))
run_dir = misc.locate_run_dir(run_id)
network_pkls = misc.list_network_pkls(run_dir)
metric = dnnlib.util.call_func_by_name(**metric_args)
print()
for idx, network_pkl in enumerate(network_pkls):
ctx.update('', idx, len(network_pkls))
metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus)
print()
ctx.close()
#----------------------------------------------------------------------------
def main():
submit_config = dnnlib.SubmitConfig()
# Which metrics to evaluate?
metrics = []
metrics += [metric_base.fid50k]
#metrics += [metric_base.ppl_zfull]
#metrics += [metric_base.ppl_wfull]
#metrics += [metric_base.ppl_zend]
#metrics += [metric_base.ppl_wend]
#metrics += [metric_base.ls]
#metrics += [metric_base.dummy]
# Which networks to evaluate them on?
tasks = []
tasks += [EasyDict(run_func_name='run_metrics.run_pickle', network_pkl='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', dataset_args=EasyDict(tfrecord_dir='ffhq', shuffle_mb=0), mirror_augment=True)] # karras2019stylegan-ffhq-1024x1024.pkl
#tasks += [EasyDict(run_func_name='run_metrics.run_snapshot', run_id=100, snapshot=25000)]
#tasks += [EasyDict(run_func_name='run_metrics.run_all_snapshots', run_id=100)]
# How many GPUs to use?
submit_config.num_gpus = 1
#submit_config.num_gpus = 2
#submit_config.num_gpus = 4
#submit_config.num_gpus = 8
# Execute.
submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
submit_config.run_dir_ignore += config.run_dir_ignore
for task in tasks:
for metric in metrics:
submit_config.run_desc = '%s-%s' % (task.run_func_name, metric.name)
if task.run_func_name.endswith('run_snapshot'):
submit_config.run_desc += '-%s-%s' % (task.run_id, task.snapshot)
if task.run_func_name.endswith('run_all_snapshots'):
submit_config.run_desc += '-%s' % task.run_id
submit_config.run_desc += '-%dgpu' % submit_config.num_gpus
dnnlib.submit_run(submit_config, metric_args=metric, **task)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
|
tools/nntool/importer/onnx/handlers/backend/split.py
|
mfkiwl/gap_sdk
| 118 |
134911
|
<gh_stars>100-1000
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from graph.types.base import NNEdge
from graph.types.input_output import ConstantInputParameters
from graph.types.others import SplitParameters
from importer.common.constant_mixin import ConstantMixin
from importer.common.provisional_dim import ProvisionalDim
from importer.onnx.common import logger
from ..backend_handler import BackendHandler
from ..handler import onnx_op
@onnx_op("Split")
class Split(ConstantMixin, BackendHandler):
@classmethod
def _common(cls, node, **kwargs):
all_nodes = kwargs['all_nodes']
G = kwargs['G']
valid_name = kwargs['valid_name']
inputs = [all_nodes[inp] for inp in node.input]
x = inputs[0]
x_shape = x[2].shape
axis = node.attrs.get('axis', 0)
if axis < 0:
axis += len(x_shape)
assert axis < len(x_shape) and axis >= 0,\
"axis %s is out of bounds - input dims %s in node %s" % (axis, x_shape, valid_name)
split_dim = x_shape[axis]
assert split_dim is not None, "split dimension must be defined"
split = None
if cls.SINCE_VERSION >= 13:
if len(inputs) > 1:
split = cls.get_constant(inputs[1])
else:
split = node.attrs.get('split')
if split:
split = np.array(split)
assert sum(split) == split_dim, "split sizes should add up to total size %s" % valid_name
assert np.all(split > 0), "split sizes should be greater than zero %s" % valid_name
else:
num_outputs = len(node.output)
assert split_dim % num_outputs == 0,\
"no split attribute or value and dimension is not divisible by number of outputs %s" % valid_name
split = np.array([split_dim // num_outputs] * num_outputs)
split = split.tolist()
act_slices = []
out_shapes = []
out_pshapes = []
cur = 0
for idx, split_dim in enumerate(split):
act_slices.append(
tuple((cur, cur + split_dim, 1) if didx == axis else (0, dim, 1)
for didx, dim in enumerate(x_shape) if dim is not None)
)
out_pshape = tuple(split_dim if didx == axis else dim for didx,
dim in enumerate(x_shape))
out_shapes.append(
tuple(dim for dim in out_pshape if dim is not None)
)
out_pshapes.append(
ProvisionalDim(out_pshape)
)
cur += split_dim
        axis -= sum(1 if dim is None else 0 for dim in x_shape[:axis])
params = SplitParameters(valid_name, act_slices=act_slices,
out_shapes=out_shapes, axis=axis)
if cls.is_constant(x):
logger.info("reducing %s to %s constant(s)", valid_name, len(out_shapes))
values = params.numpy_split(cls.get_constant(x))
for idx, out_pshape in enumerate(out_pshapes):
cparams = ConstantInputParameters(valid_name, value=values[idx], constant_store=G.constant_store)
all_nodes[node.output[idx]] = (cparams, 0, out_pshape)
return None
G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
for idx, out_pshape in enumerate(out_pshapes):
all_nodes[node.output[idx]] = (params, idx, out_pshape)
return params
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_2(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_11(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
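# Worked example of the slice bookkeeping in _common, assuming an input with no unknown
# (None) dimensions -- values are illustrative only:
#   x_shape=(1, 6, 4), axis=1, split=[2, 4] yields
#   act_slices = [((0, 1, 1), (0, 2, 1), (0, 4, 1)), ((0, 1, 1), (2, 6, 1), (0, 4, 1))]
#   out_shapes = [(1, 2, 4), (1, 4, 4)]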
|
body tracking/python/cv_viewer/tracking_viewer.py
|
khanfarhan10/zed-examples
| 472 |
134925
|
import cv2
import numpy as np
from cv_viewer.utils import *
import pyzed.sl as sl
#----------------------------------------------------------------------
# 2D VIEW
#----------------------------------------------------------------------
def cvt(pt, scale):
'''
Function that scales point coordinates
'''
out = [pt[0]*scale[0], pt[1]*scale[1]]
return out
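# Illustrative scaling: cvt([640, 360], [0.5, 0.5]) -> [320.0, 180.0]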
def render_2D(left_display, img_scale, objects, is_tracking_on):
'''
Parameters
left_display (np.array): numpy array containing image data
img_scale (list[float])
objects (list[sl.ObjectData])
'''
overlay = left_display.copy()
# Render skeleton joints and bones
for obj in objects:
if render_object(obj, is_tracking_on):
if len(obj.keypoint_2d) > 0:
color = generate_color_id_u(obj.id)
# Draw skeleton bones
for part in SKELETON_BONES:
kp_a = cvt(obj.keypoint_2d[part[0].value], img_scale)
kp_b = cvt(obj.keypoint_2d[part[1].value], img_scale)
# Check that the keypoints are inside the image
if(kp_a[0] < left_display.shape[1] and kp_a[1] < left_display.shape[0]
and kp_b[0] < left_display.shape[1] and kp_b[1] < left_display.shape[0]
and kp_a[0] > 0 and kp_a[1] > 0 and kp_b[0] > 0 and kp_b[1] > 0 ):
cv2.line(left_display, (int(kp_a[0]), int(kp_a[1])), (int(kp_b[0]), int(kp_b[1])), color, 1, cv2.LINE_AA)
# Get spine base coordinates to create backbone
left_hip = obj.keypoint_2d[sl.BODY_PARTS.LEFT_HIP.value]
right_hip = obj.keypoint_2d[sl.BODY_PARTS.RIGHT_HIP.value]
spine = (left_hip + right_hip) / 2
kp_spine = cvt(spine, img_scale)
kp_neck = cvt(obj.keypoint_2d[sl.BODY_PARTS.NECK.value], img_scale)
# Check that the keypoints are inside the image
if(kp_spine[0] < left_display.shape[1] and kp_spine[1] < left_display.shape[0]
and kp_neck[0] < left_display.shape[1] and kp_neck[1] < left_display.shape[0]
and kp_spine[0] > 0 and kp_spine[1] > 0 and kp_neck[0] > 0 and kp_neck[1] > 0
and left_hip[0] > 0 and left_hip[1] > 0 and right_hip[0] > 0 and right_hip[1] > 0 ):
cv2.line(left_display, (int(kp_spine[0]), int(kp_spine[1])), (int(kp_neck[0]), int(kp_neck[1])), color, 1, cv2.LINE_AA)
# Skeleton joints
for kp in obj.keypoint_2d:
cv_kp = cvt(kp, img_scale)
if(cv_kp[0] < left_display.shape[1] and cv_kp[1] < left_display.shape[0]):
cv2.circle(left_display, (int(cv_kp[0]), int(cv_kp[1])), 3, color, -1)
if(kp_spine[0] < left_display.shape[1] and kp_spine[1] < left_display.shape[0]
and left_hip[0] > 0 and left_hip[1] > 0 and right_hip[0] > 0 and right_hip[1] > 0 ):
cv2.circle(left_display, (int(kp_spine[0]), int(kp_spine[1])), 3, color, -1)
cv2.addWeighted(left_display, 0.9, overlay, 0.1, 0.0, left_display)
|
h2o-py/tests/testdir_algos/word2vec/pyunit_word2vec_to_frame.py
|
vishalbelsare/h2o-3
| 6,098 |
134942
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.word2vec import H2OWord2vecEstimator
def word2vec_to_frame():
print("Test converting a word2vec model to a Frame")
words = h2o.create_frame(rows=1000,cols=1,string_fraction=1.0,missing_fraction=0.0)
embeddings = h2o.create_frame(rows=1000,cols=100,real_fraction=1.0,missing_fraction=0.0)
word_embeddings = words.cbind(embeddings)
w2v_model = H2OWord2vecEstimator(pre_trained=word_embeddings)
w2v_model.train()
w2v_frame = w2v_model.to_frame()
word_embeddings.names = w2v_frame.names
    assert word_embeddings.as_data_frame().equals(w2v_frame.as_data_frame()), "Source and generated embeddings should match"
if __name__ == "__main__":
pyunit_utils.standalone_test(word2vec_to_frame)
else:
word2vec_to_frame()
|
packages/python/chart-studio/chart_studio/api/v2/dash_apps.py
|
sgn/plotly.py
| 11,750 |
134976
|
"""
Beta interface to Plotly's /v2/dash-apps endpoints.
"""
from __future__ import absolute_import
from chart_studio.api.v2.utils import build_url, request
RESOURCE = "dash-apps"
def create(body):
"""Create a dash app item."""
url = build_url(RESOURCE)
return request("post", url, json=body)
def retrieve(fid):
"""Retrieve a dash app from Plotly."""
url = build_url(RESOURCE, id=fid)
return request("get", url)
def update(fid, content):
"""Completely update the writable."""
url = build_url(RESOURCE, id=fid)
return request("put", url, json=content)
|
tools/odrive/tests/integration_test.py
|
deafloo/ODrive
| 1,068 |
134986
|
<filename>tools/odrive/tests/integration_test.py
# this test runs the motor using CAN
# TODO - run a motor using all common use cases (uart, step/dir, pwm)
import test_runner
import struct
import can
import asyncio
import time
import math
from fibre.utils import Logger
from odrive.enums import *
from test_runner import *
# Each argument is described as tuple (name, format, scale).
# Struct format codes: https://docs.python.org/2/library/struct.html
command_set = {
'heartbeat': (0x001, [('error', 'I', 1), ('current_state', 'I', 1)]), # tested
'estop': (0x002, []), # tested
'get_motor_error': (0x003, [('motor_error', 'I', 1)]), # untested
'get_encoder_error': (0x004, [('encoder_error', 'I', 1)]), # untested
'get_sensorless_error': (0x005, [('sensorless_error', 'I', 1)]), # untested
'set_node_id': (0x006, [('node_id', 'I', 1)]), # tested
'set_requested_state': (0x007, [('requested_state', 'I', 1)]), # tested
# 0x008 not yet implemented
'get_encoder_estimates': (0x009, [('encoder_pos_estimate', 'f', 1), ('encoder_vel_estimate', 'f', 1)]), # partially tested
'get_encoder_count': (0x00a, [('encoder_shadow_count', 'i', 1), ('encoder_count', 'i', 1)]), # partially tested
'set_controller_modes': (0x00b, [('control_mode', 'i', 1), ('input_mode', 'i', 1)]), # tested
'set_input_pos': (0x00c, [('input_pos', 'f', 1), ('vel_ff', 'h', 0.001), ('torque_ff', 'h', 0.001)]), # tested
'set_input_vel': (0x00d, [('input_vel', 'f', 1), ('torque_ff', 'f', 1)]), # tested
'set_input_torque': (0x00e, [('input_torque', 'f', 1)]), # tested
'set_velocity_limit': (0x00f, [('velocity_limit', 'f', 1)]), # tested
'start_anticogging': (0x010, []), # untested
'set_traj_vel_limit': (0x011, [('traj_vel_limit', 'f', 1)]), # tested
'set_traj_accel_limits': (0x012, [('traj_accel_limit', 'f', 1), ('traj_decel_limit', 'f', 1)]), # tested
'set_traj_inertia': (0x013, [('inertia', 'f', 1)]), # tested
'get_iq': (0x014, [('iq_setpoint', 'f', 1), ('iq_measured', 'f', 1)]), # untested
'get_sensorless_estimates': (0x015, [('sensorless_pos_estimate', 'f', 1), ('sensorless_vel_estimate', 'f', 1)]), # untested
'reboot': (0x016, []), # tested
'get_vbus_voltage': (0x017, [('vbus_voltage', 'f', 1)]), # tested
'clear_errors': (0x018, []), # partially tested
}
def command(bus, node_id_, extended_id, cmd_name, **kwargs):
cmd_spec = command_set[cmd_name]
cmd_id = cmd_spec[0]
fmt = '<' + ''.join([f for (n, f, s) in cmd_spec[1]]) # all little endian
if (sorted([n for (n, f, s) in cmd_spec[1]]) != sorted(kwargs.keys())):
raise Exception("expected arguments: " + str([n for (n, f, s) in cmd_spec[1]]))
fields = [((kwargs[n] / s) if f == 'f' else int(kwargs[n] / s)) for (n, f, s) in cmd_spec[1]]
data = struct.pack(fmt, *fields)
msg = can.Message(arbitration_id=((node_id_ << 5) | cmd_id), extended_id=extended_id, data=data)
bus.send(msg)
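# Worked example of the packing above (illustrative values): command(bus, 3, False,
# 'set_input_vel', input_vel=10.0, torque_ff=0.0) builds fmt '<ff', packs two
# little-endian floats (8 bytes) and sends arbitration_id (3 << 5) | 0x00d == 0x06D.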
async def record_messages(bus, node_id, extended_id, cmd_name, timeout = 5.0):
"""
Returns an async generator that yields a dictionary for each CAN message that
is received, provided that the CAN ID matches the expected value.
"""
cmd_spec = command_set[cmd_name]
cmd_id = cmd_spec[0]
fmt = '<' + ''.join([f for (n, f, s) in cmd_spec[1]]) # all little endian
reader = can.AsyncBufferedReader()
notifier = can.Notifier(bus, [reader], timeout = timeout, loop = asyncio.get_event_loop())
try:
# The timeout in can.Notifier only triggers if no new messages are received at all,
# so we need a second monitoring method.
start = time.monotonic()
while True:
msg = await reader.get_message()
if ((msg.arbitration_id == ((node_id << 5) | cmd_id)) and (msg.is_extended_id == extended_id) and not msg.is_remote_frame):
fields = struct.unpack(fmt, msg.data[:(struct.calcsize(fmt))])
res = {n: (fields[i] * s) for (i, (n, f, s)) in enumerate(cmd_spec[1])}
res['t'] = time.monotonic()
yield res
if (time.monotonic() - start) > timeout:
break
finally:
notifier.stop()
async def request(bus, node_id, extended_id, cmd_name, timeout = 1.0):
cmd_spec = command_set[cmd_name]
cmd_id = cmd_spec[0]
msg_generator = record_messages(bus, node_id, extended_id, cmd_name, timeout)
msg = can.Message(arbitration_id=((node_id << 5) | cmd_id), extended_id=extended_id, data=[], is_remote_frame=True)
bus.send(msg)
async for msg in msg_generator:
return msg
raise TimeoutError()
async def get_all(async_iterator):
return [x async for x in async_iterator]
class TestSimpleCANClosedLoop():
def prepare(self, odrive: ODriveComponent, canbus: CanInterfaceComponent, axis_ctx: ODriveAxisComponent, motor_ctx: MotorComponent, enc_ctx: EncoderComponent, node_id: int, extended_id: bool, logger: Logger):
# Make sure there are no funny configurations active
logger.debug('Setting up clean configuration...')
axis_ctx.parent.erase_config_and_reboot()
axis_ctx.parent.handle.config.enable_brake_resistor = True
axis_ctx.parent.save_config_and_reboot()
# run calibration
axis_ctx.handle.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
while axis_ctx.handle.requested_state != AXIS_STATE_UNDEFINED or axis_ctx.handle.current_state != AXIS_STATE_IDLE:
time.sleep(1)
test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
test_assert_no_error(axis_ctx)
# Return a context that can be used in a with-statement.
class safe_terminator():
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
logger.debug('clearing config...')
axis_ctx.handle.requested_state = AXIS_STATE_IDLE
time.sleep(0.005)
axis_ctx.parent.erase_config_and_reboot()
return safe_terminator()
def get_test_cases(self, testrig: TestRig):
for axis, motor, encoder, tf1 in testrig.get_closed_loop_combos(init=False):
yield AnyTestCase(*[
(axis.parent, canbus, axis, motor, encoder, 0, False, TestFixture.all_of(tf1, tf2))
for canbus, tf2 in testrig.get_connected_components(axis.parent.can, CanInterfaceComponent)
])
def run_test(self, odrive: ODriveComponent, canbus: CanInterfaceComponent, axis_ctx: ODriveAxisComponent, motor_ctx: MotorComponent, enc_ctx: EncoderComponent, node_id: int, extended_id: bool, logger: Logger):
# this test is a sanity check to make sure that closed loop operation works
        # actual closed loop functionality should be tested using closed_loop_test.py
with self.prepare(odrive, canbus, axis_ctx, motor_ctx, enc_ctx, node_id, extended_id, logger):
def my_cmd(cmd_name, **kwargs): command(canbus.handle, node_id, extended_id, cmd_name, **kwargs)
def my_req(cmd_name, **kwargs): return asyncio.run(request(canbus.handle, node_id, extended_id, cmd_name, **kwargs))
def fence(): my_req('get_vbus_voltage') # fence to ensure the CAN command was sent
def flush_rx():
while not canbus.handle.recv(timeout = 0) is None: pass
axis_ctx.handle.config.enable_watchdog = False
odrive.handle.clear_errors()
axis_ctx.handle.config.can.node_id = node_id
axis_ctx.handle.config.can.is_extended = extended_id
time.sleep(0.1)
my_cmd('set_node_id', node_id=node_id+20)
flush_rx()
asyncio.run(request(canbus.handle, node_id+20, extended_id, 'get_vbus_voltage'))
test_assert_eq(axis_ctx.handle.config.can.node_id, node_id+20)
# Reset node ID to default value
command(canbus.handle, node_id+20, extended_id, 'set_node_id', node_id=node_id)
fence()
test_assert_eq(axis_ctx.handle.config.can.node_id, node_id)
vel_limit = 15.0
nominal_vel = 10.0
axis_ctx.handle.controller.config.vel_limit = vel_limit
axis_ctx.handle.motor.config.current_lim = 20.0
my_cmd('set_requested_state', requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL)
fence()
test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_CLOSED_LOOP_CONTROL)
test_assert_no_error(axis_ctx)
start_pos = axis_ctx.handle.encoder.pos_estimate
# position test
logger.debug('Position control test')
my_cmd('set_controller_modes', control_mode=CONTROL_MODE_POSITION_CONTROL, input_mode=INPUT_MODE_PASSTHROUGH) # position control, passthrough
fence()
my_cmd('set_input_pos', input_pos=1.0, vel_ff=0, torque_ff=0)
fence()
test_assert_eq(axis_ctx.handle.controller.input_pos, 1.0, range=0.1)
time.sleep(2)
test_assert_eq(axis_ctx.handle.encoder.pos_estimate, start_pos + 1.0, range=0.1)
my_cmd('set_input_pos', input_pos=0, vel_ff=0, torque_ff=0)
fence()
time.sleep(2)
test_assert_no_error(axis_ctx)
# velocity test
logger.debug('Velocity control test')
my_cmd('set_controller_modes', control_mode=CONTROL_MODE_VELOCITY_CONTROL, input_mode=INPUT_MODE_PASSTHROUGH) # velocity control, passthrough
fence()
my_cmd('set_input_vel', input_vel = nominal_vel, torque_ff=0)
fence()
time.sleep(5)
test_assert_eq(axis_ctx.handle.encoder.vel_estimate, nominal_vel, range=nominal_vel * 0.05) # big range here due to cogging and other issues
my_cmd('set_input_vel', input_vel = 0, torque_ff=0)
fence()
time.sleep(2)
test_assert_no_error(axis_ctx)
# torque test
logger.debug('Torque control test')
my_cmd('set_controller_modes', control_mode=CONTROL_MODE_TORQUE_CONTROL, input_mode=INPUT_MODE_PASSTHROUGH) # torque control, passthrough
fence()
my_cmd('set_input_torque', input_torque=0.5)
fence()
time.sleep(5)
test_assert_eq(axis_ctx.handle.controller.input_torque, 0.5, range=0.1)
my_cmd('set_input_torque', input_torque = 0)
fence()
time.sleep(2)
test_assert_no_error(axis_ctx)
# go back to idle
my_cmd('set_requested_state', requested_state = AXIS_STATE_IDLE)
fence()
test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
tests = [TestSimpleCANClosedLoop()]
if __name__ == '__main__':
test_runner.run(tests)
|
auth0/v3/authentication/revoke_token.py
|
akmjenkins/auth0-python
| 340 |
135023
|
from .base import AuthenticationBase
class RevokeToken(AuthenticationBase):
"""Revoke Refresh Token endpoint
Args:
domain (str): Your auth0 domain (e.g: username.auth0.com)
"""
def revoke_refresh_token(self, client_id, token, client_secret=None):
"""Revokes a Refresh Token if it has been compromised
Each revocation request invalidates not only the specific token, but all other tokens
based on the same authorization grant. This means that all Refresh Tokens that have
been issued for the same user, application, and audience will be revoked.
Args:
client_id (str): The Client ID for your Application
token (str): The Refresh Token you want to revoke
client_secret (str, optional): The Client Secret for your Application.
Required for confidential applications.
See: https://auth0.com/docs/applications/application-types#confidential-applications
See: https://auth0.com/docs/api/authentication#refresh-token
"""
body = {
'client_id': client_id,
'token': token,
}
if client_secret:
body.update({'client_secret': client_secret})
return self.post('{}://{}/oauth/revoke'.format(self.protocol, self.domain), data=body)
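# Minimal usage sketch (placeholder values; assumes the default AuthenticationBase settings):
#   RevokeToken('my-tenant.auth0.com').revoke_refresh_token(
#       client_id='YOUR_CLIENT_ID',
#       token='THE_REFRESH_TOKEN',
#       client_secret='YOUR_CLIENT_SECRET',  # only needed for confidential applications
#   )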
|
cctbx/dmtbx/boost_python/tst_dmtbx.py
|
dperl-sol/cctbx_project
| 155 |
135036
|
<filename>cctbx/dmtbx/boost_python/tst_dmtbx.py<gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from cctbx import dmtbx
from cctbx import sgtbx
from cctbx.array_family import flex
from libtbx.test_utils import approx_equal
def exercise_triplet_generator():
sg = sgtbx.space_group_info("P 41").group()
i = flex.miller_index(((1,2,3),(2,3,4)))
a = flex.double((1,2))
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, False)
assert t.t_den() == sg.t_den()
assert t.max_relations_per_reflection() == 0
assert t.sigma_2_only() == False
assert t.discard_weights() == False
t = dmtbx.ext.triplet_generator(sg, i, a, 3, True, False)
assert t.max_relations_per_reflection() == 3
assert t.sigma_2_only() == True
assert t.discard_weights() == False
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, True)
assert t.sigma_2_only() == False
assert t.discard_weights() == True
assert tuple(t.n_relations()) == (0,0)
assert t.relations_for(0) == ()
assert approx_equal(tuple(t.sums_of_amplitude_products(a)), (0,0))
s = flex.bool()
r = t.raw_apply_tangent_formula(a, a, s, False, False, 1.e-15)
assert approx_equal(tuple(r), (1,2))
i = flex.miller_index(((4,6,0),(5,2,5),(6,1,5)))
a = flex.double((1,2,3))
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, False)
assert tuple(t.n_relations()) == (4,2,2)
assert [r.format(i, 0) for r in t.relations_for(0)] \
== ["(4,6,0) (5,2,5) %s (6,1,5) %s 3 2" % (True, False),
"(4,6,0) (5,2,5) %s (6,1,5) %s 9 2" % (False, True)]
assert [r.format(i, 1) for r in t.relations_for(1)] \
== ["(5,2,5) (4,6,0) %s (6,1,5) %s 3 2" % (False, False)]
assert [r.format(i, 2) for r in t.relations_for(2)] \
== ["(6,1,5) (4,6,0) %s (5,2,5) %s 9 2" % (False, False)]
assert approx_equal(tuple(t.sums_of_amplitude_products(a)), (24,6,4))
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, True)
assert tuple(t.n_relations()) == (1,1,1)
assert [r.format(i, 0) for r in t.relations_for(0)] \
== ["(4,6,0) (5,2,5) %s (6,1,5) %s 3 1" % (True, False)]
assert approx_equal(tuple(t.sums_of_amplitude_products(a)), (6,3,2))
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, False)
r0 = t.relations_for(0)
r1 = t.relations_for(1)
assert r0[0].is_sigma_2(0)
assert r0[0].is_similar_to(r0[1])
assert not r0[0].is_similar_to(r1[0])
i = flex.miller_index(((4,6,0),(5,1,2)))
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, False)
assert [r.format(i, 0) for r in t.relations_for(0)] \
== ["(4,6,0) (5,1,2) %s (5,1,2) %s 6 4" % (False, True)]
assert [r.format(i, 1) for r in t.relations_for(1)] \
== ["(5,1,2) (4,6,0) %s (5,1,2) %s 6 4" % (False, False)]
assert not t.relations_for(0)[0].is_sigma_2(0)
t = dmtbx.ext.triplet_generator(sg, i, None, 0, False, True)
assert [r.format(i, 0) for r in t.relations_for(0)] \
== ["(4,6,0) (5,1,2) %s (5,1,2) %s 6 1" % (False, True)]
assert [r.format(i, 1) for r in t.relations_for(1)] \
== ["(5,1,2) (4,6,0) %s (5,1,2) %s 6 1" % (False, False)]
t = dmtbx.ext.triplet_generator(sg, i, None, 0, True, False)
assert tuple(t.n_relations()) == (0,0)
def run():
exercise_triplet_generator()
print("OK")
if (__name__ == "__main__"):
run()
|
packages/pyright-internal/src/tests/samples/assertType1.py
|
Jasha10/pyright
| 3,934 |
135045
|
# This sample tests the assert_type call.
from typing import Any, Literal
from typing_extensions import assert_type
def func1():
# This should generate an error.
assert_type()
# This should generate an error.
assert_type(1)
# This should generate an error.
assert_type(1, 2, 3)
# This should generate an error.
assert_type(*[])
def func2(x: int, y: int | str):
assert_type(x, int)
# This should generate an error.
assert_type(x, str)
# This should generate an error.
assert_type(x, Any)
x = 3
assert_type(x, Literal[3])
# This should generate an error.
assert_type(x, int)
assert_type(y, int | str)
assert_type(y, str | int)
# This should generate an error.
assert_type(y, str)
# This should generate an error.
assert_type(y, None)
# This should generate two errors.
assert_type(y, 3)
|
python/dgl/ops/spmm.py
|
ketyi/dgl
| 9,516 |
135086
|
<filename>python/dgl/ops/spmm.py
"""dgl spmm operator module."""
import sys
from ..backend import gspmm as gspmm_internal
from ..backend import gspmm_hetero as gspmm_internal_hetero
from .. import backend as F
__all__ = ['gspmm']
def reshape_lhs_rhs(lhs_data, rhs_data):
r""" Expand dims so that there will be no broadcasting issues with different
number of dimensions. For example, given two shapes (N, 3, 1), (E, 5, 3, 4)
that are valid broadcastable shapes, change them to (N, 1, 3, 1) and
(E, 5, 3, 4)
Parameters
----------
lhs_data : tensor or None
The left operand, could be None if it's not required by op.
rhs_data : tensor or None
The right operand, could be None if it's not required by op.
"""
lhs_shape = F.shape(lhs_data)
rhs_shape = F.shape(rhs_data)
if len(lhs_shape) != len(rhs_shape):
max_ndims = max(len(lhs_shape), len(rhs_shape))
lhs_pad_ndims = max_ndims - len(lhs_shape)
rhs_pad_ndims = max_ndims - len(rhs_shape)
new_lhs_shape = (lhs_shape[0],) + (1,) * lhs_pad_ndims + lhs_shape[1:]
new_rhs_shape = (rhs_shape[0],) + (1,) * rhs_pad_ndims + rhs_shape[1:]
lhs_data = F.reshape(lhs_data, new_lhs_shape)
rhs_data = F.reshape(rhs_data, new_rhs_shape)
return lhs_data, rhs_data
def gspmm(g, op, reduce_op, lhs_data, rhs_data):
r""" Generalized Sparse Matrix Multiplication interface.
It fuses two steps into one kernel.
1. Computes messages by :attr:`op` source node and edge features.
2. Aggregate the messages by :attr:`reduce_op` as the features on destination nodes.
.. math::
x_v = \psi_{(u, v, e)\in \mathcal{G}}(\rho(x_u, x_e))
where :math:`x_v` is the returned feature on destination nodes, and :math:`x_u`,
:math:`x_e` refers to :attr:`u`, :attr:`e` respectively. :math:`\rho` means binary
operator :attr:`op` and :math:`\psi` means reduce operator :attr:`reduce_op`,
:math:`\mathcal{G}` is the graph we apply gspmm on: :attr:`g`.
Note that this function does not handle gradients.
Parameters
----------
g : DGLGraph
The input graph.
op : str
The binary op's name, could be ``add``, ``sub``, ``mul``, ``div``,
``copy_lhs``, ``copy_rhs``.
reduce_op : str
Reduce operator, could be ``sum``, ``max``, ``min``, ``mean``.
lhs_data : tensor or None
The left operand, could be None if it's not required by the op.
rhs_data : tensor or None
The right operand, could be None if it's not required by the op.
Returns
-------
tensor
The result tensor.
"""
if g._graph.number_of_etypes() == 1:
if op not in ['copy_lhs', 'copy_rhs']:
lhs_data, rhs_data = reshape_lhs_rhs(lhs_data, rhs_data)
# With max and min reducers infinity will be returned for zero degree nodes
ret = gspmm_internal(g._graph, op,
'sum' if reduce_op == 'mean' else reduce_op,
lhs_data, rhs_data)
else:
# lhs_data or rhs_data is None only in unary functions like ``copy-u`` or ``copy_e``
lhs_data = [None] * g._graph.number_of_ntypes() if lhs_data is None else lhs_data
rhs_data = [None] * g._graph.number_of_etypes() if rhs_data is None else rhs_data
# TODO (Israt): Call reshape func
lhs_and_rhs_tuple = tuple(list(lhs_data) + list(rhs_data))
ret = gspmm_internal_hetero(g._graph, op,
'sum' if reduce_op == 'mean' else reduce_op,
len(lhs_data), *lhs_and_rhs_tuple)
# TODO (Israt): Add support for 'mean' in heterograph
# divide in degrees for mean reducer.
if reduce_op == 'mean':
ret_shape = F.shape(ret)
deg = g.in_degrees()
deg = F.astype(F.clamp(deg, 1, max(g.number_of_edges(), 1)), F.dtype(ret))
deg_shape = (ret_shape[0],) + (1,) * (len(ret_shape) - 1)
return ret / F.reshape(deg, deg_shape)
else:
return ret
def _attach_zerodeg_note(docstring, reducer):
note1 = """
The {} function will return zero for nodes with no incoming messages.""".format(reducer)
note2 = """
This is implemented by replacing all {} values to zero.
""".format("infinity" if reducer == "min" else "negative infinity")
docstring = docstring + note1
if reducer in ('min', 'max'):
docstring = docstring + note2
return docstring
def _gen_spmm_func(binary_op, reduce_op):
name = "u_{}_e_{}".format(binary_op, reduce_op)
docstring = """Generalized SpMM function.
It fuses two steps into one kernel.
1. Computes messages by {} source node and edge features.
2. Aggregate the messages by {} as the features on destination nodes.
Parameters
----------
g : DGLHeteroGraph
The input graph
x : tensor
The source node features.
y : tensor
The edge features.
Returns
-------
tensor
The result tensor.
Notes
-----
    This function supports autograd (computing input gradients given the output gradient). If the
    feature shapes of the two input operands do not match, we first broadcast the features to a unified
    shape (note that the memory usage will not increase accordingly) and then perform the operation.
Broadcasting follows NumPy semantics. Please see
https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
for more details about the NumPy broadcasting semantics.
""".format(binary_op, reduce_op)
docstring = _attach_zerodeg_note(docstring, reduce_op)
def func(g, x, y):
return gspmm(g, binary_op, reduce_op, x, y)
func.__name__ = name
func.__doc__ = docstring
return func
def _gen_copy_reduce_func(binary_op, reduce_op):
name = "{}_{}".format(binary_op, reduce_op)
binary_str = {
"copy_u": "It copies node feature to edge as the message.",
'copy_e': "It regards edge feature as message."
}
x_str = {
"copy_u": "source node",
"copy_e": "edge"
}
docstring = lambda binary_op: _attach_zerodeg_note("""Generalized SpMM function. {}
Then aggregates the message by {} on destination nodes.
Parameters
----------
g : DGLHeteroGraph
The input graph
x : tensor
The {} features.
Returns
-------
tensor
The result tensor.
Notes
-----
This function supports autograd (computing input gradients given the output gradient).
""".format(
binary_str[binary_op],
reduce_op,
x_str[binary_op]), reduce_op)
def func(g, x):
if binary_op == 'copy_u':
return gspmm(g, 'copy_lhs', reduce_op, x, None)
else:
return gspmm(g, 'copy_rhs', reduce_op, None, x)
func.__name__ = name
func.__doc__ = docstring(binary_op)
return func
def _register_spmm_func():
"""Register spmm functions
- Binary operation plus reduction between u and e: u_[]_e_[]
- Copy u plus reduction: copy_u_[]
- Copy e plus reduction: copy_e_[]
"""
for binary_op in ["add", "sub", "mul", "div", "copy_u", "copy_e"]:
for reduce_op in ["sum", "max", "min", "mean"]:
if binary_op.startswith("copy"):
func = _gen_copy_reduce_func(binary_op, reduce_op)
else:
func = _gen_spmm_func(binary_op, reduce_op)
setattr(sys.modules[__name__], func.__name__, func)
__all__.append(func.__name__)
_register_spmm_func()
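# After registration, generated functions such as u_mul_e_sum or copy_u_max are available on this
# module, e.g. (illustrative): out = u_mul_e_sum(g, x, y) multiplies source-node features by edge
# features and sum-reduces the messages onto destination nodes.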
|
Object_Classification/DFASAFN/DFASAFN_Home/utils/data_load.py
|
lindagaw/Emotion-Detection
| 121 |
135094
|
<gh_stars>100-1000
import torch
import torch.utils.data as data
from PIL import Image
import os
import numpy as np
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def default_loader(path):
return Image.open(path).convert('RGB')
def make_dataset(root, label):
images = []
labeltxt = open(label)
for line in labeltxt:
data = line.strip().split(' ')
if is_image_file(data[0]):
path = os.path.join(root, data[0])
gt = int(data[1])
item = (path, gt)
images.append(item)
return images
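# Expected label-file layout for make_dataset above: one "<relative_image_path> <int_label>"
# per line, e.g. (illustrative):
#   aeroplane/img_001.jpg 0
#   bike/img_042.jpg 1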
class CLEFImage(data.Dataset):
def __init__(self, root, label, transform=None, loader=default_loader):
imgs = make_dataset(root, label)
self.root = root
self.label = label
self.imgs = imgs
self.transform = transform
self.loader = loader
def __getitem__(self, index):
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.imgs)
|
lexicon/providers/googleclouddns.py
|
nextgens/lexicon
| 1,184 |
135095
|
<reponame>nextgens/lexicon
"""
Implements the Google Cloud DNS provider.
This API is quite complicated to use, as it relies on some unique
concepts compared to other providers.
First of all, it uses a full-fledged OAuth2 authentication,
involving signing a JWT and retrieving a Bearer token.
This hard work is done in the authenticate() process,
using the strong and well known "cryptography" package.
Second, the Google Cloud DNS API has some really particular patterns:
- all records of the same type and name are stacked together in a RecordSet representation,
which contains in the rrdatas array all current values for this type/name pair, including
explicitly monovalued entries like A or CNAME.
- modifications can only be done through a create/delete pattern: there is no way to update a record
- more importantly, this approach extends to all values of a given type/name pair: it means
that adding/removing a value to/from a TXT entry requires deleting all values of this entry,
then recreating it with all desired values (the old ones plus the new one when adding, the old
ones minus the removed one when removing)
So all the hard work in this provider, apart from the authentication process, is to convert the
Lexicon monovalued entries representation to/from the Google multivalued and stacked representation
through create/update/list/delete processes.
"""
import binascii
import json
import logging
import time
from base64 import b64decode, urlsafe_b64encode
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["googledomains.com"]
def provider_parser(subparser):
"""Generate a subparser for Google Cloud DNS"""
subparser.description = """
The Google Cloud DNS provider requires the JSON file which contains the service account info to connect to the API.
This service account must own the project role DNS > DNS administrator for the project associated to the DNS zone.
You can create a new service account, associate a private key, and download its info through this url:
https://console.cloud.google.com/iam-admin/serviceaccounts?authuser=2"""
subparser.add_argument(
"--auth-service-account-info",
help="""
specify the service account info in the Google JSON format:
can be either the path of a file prefixed by 'file::' (eg. file::/tmp/service_account_info.json)
or the base64 encoded content of this file prefixed by 'base64::'
(eg. base64::eyJhbGciOyJ...)""",
)
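# Illustrative CLI invocation using the flag declared above (other values are placeholders and
# the exact argument order may vary with the lexicon version):
#   lexicon googleclouddns --auth-service-account-info file::/path/service_account.json \
#       create example.com TXT --name _acme-challenge --content some-token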
class Provider(BaseProvider):
"""
Provider class for Google Cloud DNS
    We need several parameters, which are available in the JSON file provided
by Google when associating a private key to the relevant service account.
So this JSON file is the natural input to configure the provider.
It can be provided as a path to the JSON file, or as its content encoded
in base64, which is a suitable portable way in particular for Docker containers.
    In both cases the content is loaded as bytes, then loaded into a private instance variable.
"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self._token = None
if self._get_provider_option("auth_service_account_info").startswith("file::"):
with open(
self._get_provider_option("auth_service_account_info").replace(
"file::", ""
),
"rb",
) as file:
service_account_info_bytes = file.read()
elif self._get_provider_option("auth_service_account_info").startswith(
"base64::"
):
service_account_info_bytes = b64decode(
self._get_provider_option("auth_service_account_info").replace(
"base64::", ""
)
)
else:
raise Exception(
"Invalid value for --auth-service-account-info, should be a path "
"prefixed with 'file::' or a base64 value prefixed by 'base64::'."
)
self._service_account_info = json.loads(
service_account_info_bytes.decode("utf-8")
)
if (
not self._service_account_info["client_email"]
or not self._service_account_info["private_key"]
or not self._service_account_info["project_id"]
):
raise Exception(
"Invalid service account info (missing either client_email/private_"
"key/project_id key)."
)
# the complete list of zones may be paginated. So we recursively call
# the list managedZones api until no page_token is given, keeping a list
# of matched zone ids as we go.
def _get_managed_zone_ids(self, zone_ids, page_token=None):
results = self._get("/managedZones", {"pageToken": page_token})
zone_ids += [
managedZone["id"]
for managedZone in results["managedZones"]
if managedZone["dnsName"] == f"{self.domain}."
]
if "nextPageToken" in results:
return self._get_managed_zone_ids(zone_ids, results["nextPageToken"])
return zone_ids
# We have a real authentication here, that uses the OAuth protocol:
# - a JWT token is forged with the Google Cloud DNS access claims,
# using the service account info loaded by the constructor,
# - this JWT token is signed by a PKCS1v15 signature using
# the RSA private key associated to the service account,
# - this JWT token is then submitted to the Google API, which returns an access token
# - this access token will be used for every future HTTP request
# to the Google Cloud DNS API to authenticate the user.
# - finally we make a first authenticated request to retrieve
# the managed zone id, which will also be used on future requests.
# This access token has a default lifetime of 10 minutes,
# but is used only for the current Lexicon operation, so it should be sufficient.
def _authenticate(self):
jwt_header_bytes = urlsafe_b64encode(
json.dumps({"alg": "RS256", "typ": "JWT"}).encode("utf-8")
)
epoch_time = int(time.time())
jwt_claims_bytes = urlsafe_b64encode(
json.dumps(
{
"iss": self._service_account_info["client_email"],
"scope": "https://www.googleapis.com/auth/ndev.clouddns.readwrite",
"aud": "https://www.googleapis.com/oauth2/v4/token",
"exp": epoch_time + 60 * 10,
"iat": epoch_time,
}
).encode("utf-8")
)
private_key = serialization.load_pem_private_key(
self._service_account_info["private_key"].encode("utf-8"),
password=None,
backend=default_backend(),
)
jwt_sign_bytes = urlsafe_b64encode(
private_key.sign(
b".".join([jwt_header_bytes, jwt_claims_bytes]),
padding.PKCS1v15(),
hashes.SHA256(),
)
)
jwt_bytes = b".".join([jwt_header_bytes, jwt_claims_bytes, jwt_sign_bytes])
auth_request = requests.request(
"POST",
"https://www.googleapis.com/oauth2/v4/token",
data={
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": jwt_bytes,
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
auth_request.raise_for_status()
post_result = auth_request.json()
if not post_result["access_token"]:
raise AuthenticationError(
"Error, could not grant RW access on the "
f"Google Cloud DNS API for user: {self._get_provider_option('auth_email')}"
)
self._token = post_result["access_token"]
targeted_managed_zone_ids = self._get_managed_zone_ids([])
if not targeted_managed_zone_ids:
raise AuthenticationError(
f"Error, domain {self.domain} is not registered for this project"
)
self.domain_id = targeted_managed_zone_ids[0]
# List all records for the given type/name/content.
    # It is quite straightforward to request data; the biggest operation is to convert
# the stacked multivalued RecordSets into Lexicon monovalued entries.
# Please note that we could provide type and name to the API to make the filtering,
# but providing the type makes the name mandatory with the Google Cloud DNS API, and
# name is not always available (we can ask for every TXT record for example). So to stick to
    # the most general case, it's preferable to always get all records and be free to filter
# the way we want afterwards.
def _list_records(self, rtype=None, name=None, content=None):
results = self._get(f"/managedZones/{self.domain_id}/rrsets")
records = []
for rrset in results["rrsets"]:
for rrdata in rrset["rrdatas"]:
record = {
"type": rrset["type"],
"name": self._full_name(rrset["name"]),
"ttl": rrset["ttl"],
"content": rrdata,
}
self._clean_TXT_record(record)
record["id"] = Provider._identifier(record)
records.append(record)
if rtype:
records = [record for record in records if record["type"] == rtype]
if name:
records = [
record for record in records if record["name"] == self._full_name(name)
]
if content:
records = [record for record in records if record["content"] == content]
LOGGER.debug("list_records: %s", records)
return records
# Create the record with provided type, name and content.
# Because of the way this API is constructed, it is quite complex in fact.
# Indeed we need to know if there is already a RecordSet for the type/name pair, and update
# or create accordingly the RecordSet. Furthermore, we need first to delete the old RecordSet
# if it exists, to replace it with the RecordSet containing the new content we want.
def _create_record(self, rtype, name, content):
if not rtype or not name or not content:
raise Exception(
"Error, rtype, name and content are mandatory to create a record."
)
identifier = Provider._identifier(
{"type": rtype, "name": self._full_name(name), "content": content}
)
query_params = {"type": rtype, "name": self._fqdn_name(name)}
results = self._get(
f"/managedZones/{self.domain_id}/rrsets", query_params=query_params
)
rrdatas = []
changes = {}
if results["rrsets"]:
rrset = results["rrsets"][0]
for rrdata in rrset["rrdatas"]:
if rrdata == Provider._normalize_content(rrset["type"], content):
LOGGER.debug("create_record (ignored, duplicate): %s", identifier)
return True
changes["deletions"] = [
{
"name": rrset["name"],
"type": rrset["type"],
"ttl": rrset["ttl"],
"rrdatas": rrset["rrdatas"][:],
}
]
rrdatas = rrset["rrdatas"][:]
rrdatas.append(Provider._normalize_content(rtype, content))
changes["additions"] = [
{
"name": self._fqdn_name(name),
"type": rtype,
"ttl": self._get_lexicon_option("ttl"),
"rrdatas": rrdatas,
}
]
self._post(f"/managedZones/{self.domain_id}/changes", data=changes)
LOGGER.debug("create_record: %s", identifier)
return True
# Update a record for the given identifier or type/name pair
# with the given content if provided.
# Again because of the API specification, updating is even more complex than creating,
# as we need to take into account every RecordSet that should be destroyed then recreated.
# As all the hard work has been done on list_record, create_record and delete_record, we use a
# combination of these three methods to obtain the state we want.
# Even if this makes the operation very costly in terms of the number of requests, it allows
# the implementation to be much more readable (without that, it would grow to roughly the size of
# the three quoted methods).
def _update_record(self, identifier, rtype=None, name=None, content=None):
if not identifier and (not rtype or not name):
raise Exception("Error, identifier or rtype+name parameters are required.")
if identifier:
records = self._list_records()
records_to_update = [
record for record in records if record["id"] == identifier
]
else:
records_to_update = self._list_records(rtype=rtype, name=name)
if not records_to_update:
raise Exception(
f"Error, could not find a record for given identifier: {identifier}"
)
if len(records_to_update) > 1:
LOGGER.warning(
"Warning, multiple records found for given parameters, "
"only first one will be updated: %s",
records_to_update,
)
record_identifier = records_to_update[0]["id"]
original_level = LOGGER.getEffectiveLevel()
LOGGER.setLevel(logging.WARNING)
self._delete_record(record_identifier)
new_record = {
"type": rtype if rtype else records_to_update[0]["type"],
"name": name if name else records_to_update[0]["name"],
"content": content if content else records_to_update[0]["content"],
}
self._create_record(
new_record["type"], new_record["name"], new_record["content"]
)
LOGGER.setLevel(original_level)
LOGGER.debug(
"update_record: %s => %s",
record_identifier,
Provider._identifier(new_record),
)
return True
# Delete a record for the given identifier or the given type/name/content.
# This is quite complex to do, because a lot of RecordSets can be updated (so destroyed and recreated)
# depending on the given condition (e.g. with content alone, every record could be inspected).
# There are mainly two cases:
# - either a combination of one or more of type, name and content is given,
# - or an identifier is given, and we extract the type + name + content
# to process as in the first case.
# Anyway, we will need to parse every RecordSet available, and for each of them
# that matches the conditions:
# - mark as deletion the existing RecordSet
# - remove the targeted content from the RecordSet
# - mark as addition the updated RecordSet with the remaining subset of rrdatas if it is not empty
# - do not mark as additions RecordSets whose rrdatas subset becomes empty:
# for this type/name pair, all RecordSets need to go away.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
results = self._get(f"/managedZones/{self.domain_id}/rrsets")
if identifier:
changes = self._process_records_to_delete_by_identifier(results, identifier)
else:
changes = self._process_records_to_delete_by_parameters(
results, rtype, name, content
)
if not changes:
raise Exception(
"Could not find existing record matching the given parameters."
)
self._post(f"/managedZones/{self.domain_id}/changes", data=changes)
LOGGER.debug("delete_records: %s %s %s %s", identifier, rtype, name, content)
return True
# Calculate the changes to do based on the record to remove identified by its identifier.
# This implementation finds the corresponding record, and uses its type + name + content to
# delegate the processing to _process_records_to_delete_by_parameters.
def _process_records_to_delete_by_identifier(self, results, identifier):
for rrset in results["rrsets"]:
for rrdata in rrset["rrdatas"]:
record = {
"type": rrset["type"],
"name": self._full_name(rrset["name"]),
"content": rrdata,
}
self._clean_TXT_record(record)
record_identifier = Provider._identifier(record)
if identifier == record_identifier:
return self._process_records_to_delete_by_parameters(
results, record["type"], record["name"], record["content"]
)
return None
# Calculate the changes to do based on the records to remove identified by type/name/content.
# Additions and deletions are registered accordingly in the changes, and RecordSets whose
# rrdatas become empty after subsetting are not marked in additions, so they are completely
# removed from the DNS zone.
def _process_records_to_delete_by_parameters(
self, results, rtype=None, name=None, content=None
):
rrsets_to_modify = results["rrsets"]
if rtype:
rrsets_to_modify = [
rrset for rrset in rrsets_to_modify if rrset["type"] == rtype
]
if name:
rrsets_to_modify = [
rrset
for rrset in rrsets_to_modify
if rrset["name"] == self._fqdn_name(name)
]
if content:
rrsets_to_modify = [
rrset
for rrset in rrsets_to_modify
if (f'"{content}"' if rrset["type"] == "TXT" else content)
in rrset["rrdatas"]
]
changes = {"additions": [], "deletions": []}
for rrset_to_modify in rrsets_to_modify:
changes["deletions"].append(
{
"name": rrset_to_modify["name"],
"type": rrset_to_modify["type"],
"ttl": rrset_to_modify["ttl"],
"rrdatas": rrset_to_modify["rrdatas"][:],
}
)
if content:
new_rrdatas = rrset_to_modify["rrdatas"][:]
new_rrdatas.remove(
f'"{content}"' if rrset_to_modify["type"] == "TXT" else content
)
if new_rrdatas:
changes["additions"].append(
{
"name": rrset_to_modify["name"],
"type": rrset_to_modify["type"],
"ttl": rrset_to_modify["ttl"],
"rrdatas": new_rrdatas,
}
)
if not changes["additions"] and not changes["deletions"]:
return None
return changes
# With Google Cloud DNS API, content of CNAME entries must be FQDN (with a trailing dot),
# and content of TXT entries must be quoted. This static method ensures that.
@staticmethod
def _normalize_content(rtype, content):
if rtype == "TXT":
return f'"{content}"'
if rtype == "CNAME":
return f"{content}." if not content.endswith(".") else content
return content
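# Illustrative only (not part of the original provider; the sample values below are
# hypothetical): given the rules above, _normalize_content behaves as follows:
#   Provider._normalize_content("TXT", "challenge-token") == '"challenge-token"'
#   Provider._normalize_content("CNAME", "target.example.com") == "target.example.com."
#   Provider._normalize_content("A", "203.0.113.10") == "203.0.113.10"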
# The Google Cloud DNS API does not provide identifiers for RecordSets.
# So we need to calculate our own identifier at runtime.
# It is based on a SHA256 hash with the most relevant
# parameters of a record: type, name and content.
# Note that the identifier is calculated on a Lexicon monovalued entry, not a Google stacked
# multivalued RecordSet, to make it usable during Lexicon calls to updates and deletions.
@staticmethod
def _identifier(record):
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
digest.update(("name=" + record.get("name", "") + ",").encode("utf-8"))
digest.update(("content=" + record.get("content", "") + ",").encode("utf-8"))
return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]
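# Illustrative only (not part of the original provider; the sample record is hypothetical):
# the identifier is a deterministic function of type + name + content, so the same Lexicon
# monovalued entry always hashes to the same 7-character hex prefix:
#   rec = {"type": "TXT", "name": "example.com", "content": "token"}
#   Provider._identifier(rec) == Provider._identifier(dict(rec))  # always True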
# The request, when authenticated, is really standard:
# - the request body is encoded as application/json for POST
# (so the use of 'json' config instead of 'data' in request),
# - the body response is also encoded as application/json for GET and POST,
# - and the request headers must contain the access token in the 'Authorization' field.
def _request(self, action="GET", url="/", data=None, query_params=None):
request = requests.request(
action,
f"https://content.googleapis.com/dns/v1/projects/{self._service_account_info['project_id']}{url}",
params=None if not query_params else query_params,
json=None if not data else data,
headers={"Authorization": f"Bearer {self._token}"},
)
request.raise_for_status()
return request.json()
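# Illustrative only (not part of the original provider; the sample values are hypothetical):
# the methods above assume the standard Cloud DNS v1 ResourceRecordSets list shape, roughly:
#   {
#     "rrsets": [
#       {"name": "www.example.com.", "type": "A", "ttl": 300,
#        "rrdatas": ["203.0.113.10"]},
#     ]
#   }
# and a changes POST body of the form {"additions": [...], "deletions": [...]},
# where each entry is a full RecordSet dict as built in _create_record and
# _process_records_to_delete_by_parameters.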
|
libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py
|
andreikop/botbuilder-python
| 388 |
135104
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC
from asyncio import sleep
from copy import Error
from http import HTTPStatus
from typing import Awaitable, Callable, List, Union
from botbuilder.core.invoke_response import InvokeResponse
from botbuilder.schema import (
Activity,
ActivityTypes,
ConversationReference,
DeliveryModes,
ExpectedReplies,
ResourceResponse,
)
from botframework.connector import Channels, ConnectorClient
from botframework.connector.auth import (
AuthenticationConstants,
BotFrameworkAuthentication,
ClaimsIdentity,
)
from botframework.connector.auth.authenticate_request_result import (
AuthenticateRequestResult,
)
from botframework.connector.auth.connector_factory import ConnectorFactory
from botframework.connector.auth.user_token_client import UserTokenClient
from .bot_adapter import BotAdapter
from .conversation_reference_extension import get_continuation_activity
from .turn_context import TurnContext
class CloudAdapterBase(BotAdapter, ABC):
CONNECTOR_FACTORY_KEY = "ConnectorFactory"
USER_TOKEN_CLIENT_KEY = "UserTokenClient"
def __init__(
self, bot_framework_authentication: BotFrameworkAuthentication
) -> None:
super().__init__()
if not bot_framework_authentication:
raise TypeError("Expected BotFrameworkAuthentication but got None instead")
self.bot_framework_authentication = bot_framework_authentication
async def send_activities(
self, context: TurnContext, activities: List[Activity]
) -> List[ResourceResponse]:
if not context:
raise TypeError("Expected TurnContext but got None instead")
if activities is None:
raise TypeError("Expected Activities list but got None instead")
if len(activities) == 0:
raise TypeError("Expecting one or more activities, but the list was empty.")
responses = []
for activity in activities:
activity.id = None
response = ResourceResponse()
if activity.type == "delay":
delay_time = int((activity.value or 1000) / 1000)
await sleep(delay_time)
elif activity.type == ActivityTypes.invoke_response:
context.turn_state[self._INVOKE_RESPONSE_KEY] = activity
elif (
activity.type == ActivityTypes.trace
and activity.channel_id != Channels.emulator
):
# no-op
pass
else:
connector_client: ConnectorClient = context.turn_state.get(
self.BOT_CONNECTOR_CLIENT_KEY
)
if not connector_client:
raise Error("Unable to extract ConnectorClient from turn context.")
if activity.reply_to_id:
response = await connector_client.conversations.reply_to_activity(
activity.conversation.id, activity.reply_to_id, activity
)
else:
response = await connector_client.conversations.send_to_conversation(
activity.conversation.id, activity
)
response = response or ResourceResponse(activity.id or "")
responses.append(response)
return responses
async def update_activity(self, context: TurnContext, activity: Activity):
if not context:
raise TypeError("Expected TurnContext but got None instead")
if activity is None:
raise TypeError("Expected Activity but got None instead")
connector_client: ConnectorClient = context.turn_state.get(
self.BOT_CONNECTOR_CLIENT_KEY
)
if not connector_client:
raise Error("Unable to extract ConnectorClient from turn context.")
response = await connector_client.conversations.update_activity(
activity.conversation.id, activity.reply_to_id, activity
)
response_id = response.id if response and response.id else None
return ResourceResponse(id=response_id) if response_id else None
async def delete_activity(
self, context: TurnContext, reference: ConversationReference
):
if not context:
raise TypeError("Expected TurnContext but got None instead")
if not reference:
raise TypeError("Expected ConversationReference but got None instead")
connector_client: ConnectorClient = context.turn_state.get(
self.BOT_CONNECTOR_CLIENT_KEY
)
if not connector_client:
raise Error("Unable to extract ConnectorClient from turn context.")
await connector_client.conversations.delete_activity(
reference.conversation.id, reference.activity_id
)
async def continue_conversation( # pylint: disable=arguments-differ
self, reference: ConversationReference, callback: Callable,
):
"""
Sends a proactive message to a conversation.
Call this method to proactively send a message to a conversation.
Most channels require a user to initiate a conversation with a bot before the bot can send activities
to the user.
:param reference: A reference to the conversation to continue.
:type reference: :class:`botbuilder.schema.ConversationReference`
:param callback: The method to call for the resulting bot turn.
:type callback: :class:`typing.Callable`
"""
return await self.process_proactive(
self.create_claims_identity(),
get_continuation_activity(reference),
None,
callback,
)
async def continue_conversation_with_claims(
self,
claims_identity: ClaimsIdentity,
reference: ConversationReference,
audience: str,
logic: Callable[[TurnContext], Awaitable],
):
return await self.process_proactive(
claims_identity, get_continuation_activity(reference), audience, logic
)
async def process_proactive(
self,
claims_identity: ClaimsIdentity,
continuation_activity: Activity,
audience: str,
logic: Callable[[TurnContext], Awaitable],
):
# Create the connector factory and the inbound request, extracting parameters and then create a
# connector for outbound requests.
connector_factory = self.bot_framework_authentication.create_connector_factory(
claims_identity
)
# Create the connector client to use for outbound requests.
connector_client = await connector_factory.create(
continuation_activity.service_url, audience
)
# Create a UserTokenClient instance for the application to use. (For example, in the OAuthPrompt.)
user_token_client = await self.bot_framework_authentication.create_user_token_client(
claims_identity
)
# Create a turn context and run the pipeline.
context = self._create_turn_context(
continuation_activity,
claims_identity,
audience,
connector_client,
user_token_client,
logic,
connector_factory,
)
# Run the pipeline
await self.run_pipeline(context, logic)
async def process_activity(
self,
auth_header_or_authenticate_request_result: Union[
str, AuthenticateRequestResult
],
activity: Activity,
logic: Callable[[TurnContext], Awaitable],
):
"""
Creates a turn context and runs the middleware pipeline for an incoming activity.
:param auth_header_or_authenticate_request_result: The HTTP authentication header of the request,
or a previously computed AuthenticateRequestResult
:type auth_header_or_authenticate_request_result: :class:`typing.Union[str, AuthenticateRequestResult]`
:param activity: The incoming activity
:type activity: :class:`Activity`
:param logic: The logic to execute at the end of the adapter's middleware pipeline.
:type logic: :class:`typing.Callable`
:return: A task that represents the work queued to execute.
.. remarks::
This method processes an activity received by the bot's web server. This includes any messages
sent from a user and is the method that drives what's often referred to as the
bot's *reactive messaging* flow.
Call this method to reactively send a message to a conversation.
If the task completes successfully, then an :class:`InvokeResponse` is returned;
otherwise, `None` is returned.
"""
# Authenticate the inbound request, extracting parameters and create a ConnectorFactory for creating a
# Connector for outbound requests.
authenticate_request_result = (
await self.bot_framework_authentication.authenticate_request(
activity, auth_header_or_authenticate_request_result
)
if isinstance(auth_header_or_authenticate_request_result, str)
else auth_header_or_authenticate_request_result
)
# Set the caller_id on the activity
activity.caller_id = authenticate_request_result.caller_id
# Create the connector client to use for outbound requests.
connector_client = (
await authenticate_request_result.connector_factory.create(
activity.service_url, authenticate_request_result.audience
)
if authenticate_request_result.connector_factory
else None
)
if not connector_client:
raise Error("Unable to extract ConnectorClient from turn context.")
# Create a UserTokenClient instance for the application to use.
# (For example, it would be used in a sign-in prompt.)
user_token_client = await self.bot_framework_authentication.create_user_token_client(
authenticate_request_result.claims_identity
)
# Create a turn context and run the pipeline.
context = self._create_turn_context(
activity,
authenticate_request_result.claims_identity,
authenticate_request_result.audience,
connector_client,
user_token_client,
logic,
authenticate_request_result.connector_factory,
)
# Run the pipeline
await self.run_pipeline(context, logic)
# If there are any results they will have been left on the TurnContext.
return self._process_turn_results(context)
def create_claims_identity(self, bot_app_id: str = "") -> ClaimsIdentity:
return ClaimsIdentity(
{
AuthenticationConstants.AUDIENCE_CLAIM: bot_app_id,
AuthenticationConstants.APP_ID_CLAIM: bot_app_id,
},
True,
)
def _create_turn_context(
self,
activity: Activity,
claims_identity: ClaimsIdentity,
oauth_scope: str,
connector_client: ConnectorClient,
user_token_client: UserTokenClient,
logic: Callable[[TurnContext], Awaitable],
connector_factory: ConnectorFactory,
) -> TurnContext:
context = TurnContext(self, activity)
context.turn_state[self.BOT_IDENTITY_KEY] = claims_identity
context.turn_state[self.BOT_CONNECTOR_CLIENT_KEY] = connector_client
context.turn_state[self.USER_TOKEN_CLIENT_KEY] = user_token_client
context.turn_state[self.BOT_CALLBACK_HANDLER_KEY] = logic
context.turn_state[self.CONNECTOR_FACTORY_KEY] = connector_factory
context.turn_state[self.BOT_OAUTH_SCOPE_KEY] = oauth_scope
return context
def _process_turn_results(self, context: TurnContext) -> InvokeResponse:
# Handle ExpectedReplies scenarios where all activities have been
# buffered and sent back at once in an invoke response.
if context.activity.delivery_mode == DeliveryModes.expect_replies:
return InvokeResponse(
status=HTTPStatus.OK,
body=ExpectedReplies(activities=context.buffered_reply_activities),
)
# Handle Invoke scenarios where the bot will return a specific body and return code.
if context.activity.type == ActivityTypes.invoke:
activity_invoke_response: Activity = context.turn_state.get(
self._INVOKE_RESPONSE_KEY
)
if not activity_invoke_response:
return InvokeResponse(status=HTTPStatus.NOT_IMPLEMENTED)
return activity_invoke_response.value
# No body to return
return None
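# Illustrative only (not part of the original module; `adapter` and `bot` are hypothetical
# names): a hosting web layer would typically drive a concrete subclass of CloudAdapterBase
# roughly like this, where `adapter` wraps a BotFrameworkAuthentication instance and
# `bot.on_turn` is the application's Callable[[TurnContext], Awaitable]:
#
#   invoke_response = await adapter.process_activity(
#       auth_header,   # raw Authorization header from the incoming HTTP request
#       activity,      # botbuilder.schema.Activity parsed from the request body
#       bot.on_turn,
#   )
#   # invoke_response is None except for invoke / expect_replies activities,
#   # in which case it carries the status and body to return over HTTP.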
|
gazpacho/utils.py
|
maxhumber/gazpacho
| 659 |
135113
|
<filename>gazpacho/utils.py
import re
from xml.dom.minidom import parseString as string_to_dom
from xml.parsers.expat import ExpatError
# Void (self-closing) HTML elements. Assumed here because this snippet references
# VOID_TAGS without defining it; the list mirrors the standard HTML5 void element names.
VOID_TAGS = [
    "area", "base", "br", "col", "embed", "hr", "img", "input",
    "link", "meta", "param", "source", "track", "wbr",
]
def format(html, fail=False):
"""\
Indent and format html
Arguments:
- html: string to format
- fail: allowed to fail as a boolean
Example:
```
html = "<ul><li>Item</li><li>Item</li></ul>"
print(format(html))
# <ul>
# <li>Item</li>
# <li>Item</li>
# </ul>
```
"""
try:
html_closed_voids = re.sub(
fr'(<({"|".join(VOID_TAGS)})[^/>]*)(>)', fr"\1/\3", html
)
dom = string_to_dom(html_closed_voids)
ugly = dom.toprettyxml(indent=" ")
split = list(filter(lambda x: len(x.strip()), ugly.split("\n")))[1:]
html_joined = "\n".join(split)
html = re.sub(fr'(<)({"|".join(VOID_TAGS)})(.*)(\/>)', fr"\1\2\3>", html_joined)
except ExpatError as error:
if fail:
raise error
return html
|
h1st/model/ensemble/randomforest_classifier_stack_ensemble.py
|
Adatao/H1st
| 796 |
135115
|
<reponame>Adatao/H1st
from typing import List
from sklearn.ensemble import RandomForestClassifier
from h1st.model.ensemble.classifier_stack_ensemble import ClassifierStackEnsemble
from h1st.model.model import Model
class RandomForestClassifierStackEnsemble(ClassifierStackEnsemble):
"""
A ready-to-use StackEnsemble for classifiers whose ensembler is a sklearn RandomForestClassifier.
Each sub-model must be a subclass of h1.Model; its .predict() method receives input data as a dictionary that has an 'X' key with numeric values
and returns a dictionary with a 'predictions' key holding its prediction values
.. code-block:: python
:caption: Sub model for a StackEnsemble Example
class Model1(h1.Model):
def predict(self, data):
X = data['X']
...
return {'predictions': }
.. code-block:: python
:caption: RandomForestClassifierStackEnsemble usage Example
class Model2(h1.Model):
def predict(self, data):
X = data['X']
...
return {'predictions': }
class RandomForestClassifierStackEnsemble(ClassifierStackEnsemble):
def __init__(self):
super().__init__([
Model1().load('version_of_model_1'),
Model2().load('version_of_model_2')
])
def load_data(self,):
...
return loaded_data
def prep(self, loaded_data):
...
return prepared_data
m1 = Model1()
m1.load_prep_train_eval()
## Equivalent to
# loaded_data = m1.load_data()
# prepared_data = m1.prep(loaded_data)
# m1.train(prepared_data)
# m1.evaluate(prepared_data)
print(m1.metrics)
m1.persist('version_of_model_1')
m2 = Model2()
m2.load_prep_train_eval()
print(m2.metrics)
m2.persist('version_of_model_2')
ensemble = RandomForestClassifierStackEnsemble(
[Model1().load('version_of_model_1'),
Model2().load('version_of_model_2')])
ensemble.load_prep_train_eval()
print(ensemble.metrics)
ensemble.persist('version_of_model_ensemble')
ensemble.predict(...)
"""
def __init__(self, sub_models: List[Model], **kwargs):
super().__init__(
# MultiOutputClassifier(RandomForestClassifier(n_jobs=-1, max_depth=4, random_state=42)),
RandomForestClassifier(n_jobs=-1, max_depth=4, random_state=42),
sub_models,
**kwargs
)
|
airflow/utils/code_utils.py
|
ChaseKnowlden/airflow
| 15,947 |
135118
|
<filename>airflow/utils/code_utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import inspect
import os
from typing import Any, Optional
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
def get_python_source(x: Any) -> Optional[str]:
"""Helper function to get Python source (or not), preventing exceptions"""
if isinstance(x, str):
return x
if x is None:
return None
source_code = None
if isinstance(x, functools.partial):
source_code = inspect.getsource(x.func)
if source_code is None:
try:
source_code = inspect.getsource(x)
except TypeError:
pass
if source_code is None:
try:
source_code = inspect.getsource(x.__call__)
except (TypeError, AttributeError):
pass
if source_code is None:
source_code = f'No source code available for {type(x)}'
return source_code
def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str:
"""
Prepare code snippet with line numbers and a specific line marked.
:param file_path: File name
:param line_no: Line number
:param context_lines_count: The number of lines to show before and after the marked line.
:return: str
"""
with open(file_path) as text_file:
# Highlight code
code = text_file.read()
code_lines = code.split("\n")
# Prepend line number
code_lines = [
f">{lno:3} | {line}" if line_no == lno else f"{lno:4} | {line}"
for lno, line in enumerate(code_lines, 1)
]
# Cut out the snippet
start_line_no = max(0, line_no - context_lines_count - 1)
end_line_no = line_no + context_lines_count
code_lines = code_lines[start_line_no:end_line_no]
# Join lines
code = "\n".join(code_lines)
return code
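# Illustrative only (not part of the original module; the file content is hypothetical):
# given a file whose third line raises an error, prepare_code_snippet(path, 3, context_lines_count=1)
# returns a string shaped roughly like:
#      2 | def fail():
#   >  3 |     raise ValueError("boom")
#      4 |     return None
# i.e. each line is prefixed with its number and the requested line is marked with '>'.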
def get_terminal_formatter(**opts):
"""Returns the best formatter available in the current terminal."""
if '256' in os.environ.get('TERM', ''):
formatter = Terminal256Formatter(**opts)
else:
formatter = TerminalFormatter(**opts)
return formatter
|
RecoParticleFlow/Configuration/test/analyzePFChargedHadrons_cfg.py
|
ckamtsikis/cmssw
| 852 |
135123
|
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
process = cms.Process("ANALYSIS")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source (
"PoolSource",
fileNames = cms.untracked.vstring(
'rfio:/castor/cern.ch/user/p/pjanot/CMSSW390pre3/display_Matt_3.root'
),
secondaryFileNames = cms.untracked.vstring(),
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck')
)
process.pfChargedHadronAnalyzer = cms.EDAnalyzer(
"PFChargedHadronAnalyzer",
PFCandidates = cms.InputTag("particleFlow"),
ptMin = cms.double(1.), # Minimum pt
pMin = cms.double(3.), # Minimum p
nPixMin = cms.int32(2), # Nb of pixel hits
nHitMin = cms.vint32(14,17,20,17), # Nb of track hits
nEtaMin = cms.vdouble(1.4, 1.6, 2.0, 2.4), # in these eta ranges
hcalMin = cms.double(1.), # Minimum hcal energy
ecalMax = cms.double(0.2), # Maximum ecal energy
verbose = cms.untracked.bool(True), # not used.
)
process.load("FastSimulation.Configuration.EventContent_cff")
process.aod = cms.OutputModule("PoolOutputModule",
process.AODSIMEventContent,
fileName = cms.untracked.string('aod.root')
)
#process.outpath = cms.EndPath(process.aod )
process.p = cms.Path(process.pfChargedHadronAnalyzer)
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/EGL/EXT/create_context_robustness.py
|
ShujaKhalid/deep-rl
| 210 |
135152
|
<reponame>ShujaKhalid/deep-rl
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'EGL_EXT_create_context_robustness'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_EXT_create_context_robustness',error_checker=_errors._error_checker)
EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT=_C('EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT',0x3138)
EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT=_C('EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT',0x30BF)
EGL_LOSE_CONTEXT_ON_RESET_EXT=_C('EGL_LOSE_CONTEXT_ON_RESET_EXT',0x31BF)
EGL_NO_RESET_NOTIFICATION_EXT=_C('EGL_NO_RESET_NOTIFICATION_EXT',0x31BE)
|