max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars) |
---|---|---|---|---|
experiments/specialization/main.py | mit-han-lab/inter-operator-scheduler | 140 | 58633 |
<filename>experiments/specialization/main.py
import json
import argparse
import numpy as np
import os
from ios.ir import Graph
from ios.visualizer import draw, draw_block
from ios.cost_model import IOSCostModel
argparser = argparse.ArgumentParser()
argparser.add_argument('--edir', type=str, required=True)
argparser.add_argument('--ename', type=str, required=True)
argparser.add_argument('--device', type=str, required=True, choices=['k80', 'v100'])
argparser.add_argument('--graph', type=str, required=True)
argparser.add_argument('--bs', type=int, required=True)
argparser.add_argument('--warmup', type=int, required=False, default=2)
argparser.add_argument('--number', type=int, required=False, default=6)
argparser.add_argument('--repeat', type=int, required=False, default=6)
args = argparser.parse_args()
expr_dir = f'./outputs/{args.edir}/{args.ename}-{args.device}-g{args.graph}-bs{args.bs}-{args.warmup}-{args.number}-{args.repeat}'
#os.makedirs("./outputs", exist_ok=True)
#os.makedirs(f"./outputs/{args.ename}", exist_ok=True)
os.makedirs(expr_dir, exist_ok=True)
def summary_str(latency):
if args.edir == 'batchsize':
g, e = args.ename.split('_')
g = g[3:]
e = e[3:]
return f'Optimized for BS {g:<3} Execute with BS {e:<3} Latency: {latency:.2f} ms'
elif args.edir == 'device':
g, e = args.ename.split('_on_')
return f'Optimized for {g:<4} Execute with {e:<4} Latency: {latency:.2f} ms'
else:
raise ValueError
def main():
logs = {}
with open(f'schedules/{args.graph}.json', 'r') as f:
graph = Graph.from_config(json.load(f))
cost_model = IOSCostModel()
name = graph.name
graph_latency = cost_model.get_graph_latency(graph, args.bs, warmup=args.warmup, number=args.number, repeat=args.repeat)
block_latency = [np.mean(cost_model.get_block_latency(block, args.bs, args.warmup, args.number, args.repeat)) for block in graph.blocks]
logs[name] = {}
logs[name]['latency'] = graph_latency
logs[name]['mean'] = float(np.mean(graph_latency))
logs[name]['std'] = float(np.std(graph_latency))
logs[name]['block_latency'] = block_latency
summary = summary_str(np.mean(graph_latency))
print(summary)
for bindex, block in enumerate(graph.blocks):
block_dir = f'{expr_dir}/{name}_blocks'
os.makedirs(block_dir, exist_ok=True)
draw_block(block, f'{block_dir}/{bindex}.png', f'{name} block {bindex}, latency {block_latency[bindex]:.3f}')
draw(graph, f"{expr_dir}/{name}.png", label=f'{name}, latency {float(np.mean(graph_latency)):.3f}')
with open(f"{expr_dir}/{name}.json", "w") as f:
json.dump(graph.export_config(), f, indent=2)
with open(f'{expr_dir}/latency.json', 'w') as f:
json.dump(logs, f, indent=2)
with open(f'{expr_dir}/summary.txt', 'w') as f:
f.write(summary + "\n")
with open(f'{expr_dir}/arguments.txt', 'w') as f:
json.dump(args.__dict__, f, indent=2)
main()
|
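A minimal invocation sketch for the experiment script above; the experiment directory, experiment name, graph schedule, and batch size are placeholders inferred from the argument parser and `summary_str()`, not values taken from the repository.

```python
# Hypothetical invocation of experiments/specialization/main.py (all values are placeholders).
# Assumes the `ios` package is installed and schedules/<graph>.json exists.
import subprocess

subprocess.run(
    [
        "python", "experiments/specialization/main.py",
        "--edir", "batchsize",      # selects the batch-size branch of summary_str()
        "--ename", "opt16_run1",    # split on '_' into the "optimized for" / "executed with" labels
        "--device", "v100",
        "--graph", "inception_v3",  # reads schedules/inception_v3.json
        "--bs", "1",
    ],
    check=True,
)
```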
data_managers/data_manager_diamond_database_builder/data_manager/data_manager_diamond_database_builder.py | supernord/tools-iuc | 142 | 58670 |
#!/usr/bin/env python
import bz2
import gzip
import json
import optparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from ftplib import FTP
CHUNK_SIZE = 2**20 # 1mb
def cleanup_before_exit(tmp_dir):
if tmp_dir and os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def _get_files_in_ftp_path(ftp, path):
path_contents = []
ftp.retrlines('MLSD %s' % (path), path_contents.append)
return [line.split(';')[-1].lstrip() for line in path_contents]
def _get_stream_readers_for_tar(file_obj, tmp_dir):
fasta_tar = tarfile.open(fileobj=file_obj, mode='r:*')
return [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()]
def _get_stream_readers_for_zip(file_obj, tmp_dir):
fasta_zip = zipfile.ZipFile(file_obj, 'r')
rval = []
for member in fasta_zip.namelist():
fasta_zip.extract(member, tmp_dir)
rval.append(open(os.path.join(tmp_dir, member), 'rb'))
return rval
def _get_stream_readers_for_gzip(file_obj, tmp_dir):
return [gzip.GzipFile(fileobj=file_obj, mode='rb')]
def _get_stream_readers_for_bz2(file_obj, tmp_dir):
return [bz2.BZ2File(file_obj.name, 'rb')]
def download_from_ncbi(data_manager_dict, params, target_directory,
database_id, database_name):
NCBI_FTP_SERVER = 'ftp.ncbi.nlm.nih.gov'
NCBI_DOWNLOAD_PATH = '/blast/db/FASTA/'
COMPRESSED_EXTENSIONS = [('.tar.gz', _get_stream_readers_for_tar),
('.tar.bz2', _get_stream_readers_for_tar),
('.zip', _get_stream_readers_for_zip),
('.gz', _get_stream_readers_for_gzip),
('.bz2', _get_stream_readers_for_bz2)]
ncbi_identifier = params['reference_source']['requested_identifier']
ftp = FTP(NCBI_FTP_SERVER)
ftp.login()
path_contents = _get_files_in_ftp_path(ftp, NCBI_DOWNLOAD_PATH)
ncbi_file_name = None
get_stream_reader = None
ext = None
for ext, get_stream_reader in COMPRESSED_EXTENSIONS:
if "%s%s" % (ncbi_identifier, ext) in path_contents:
ncbi_file_name = "%s%s%s" % (NCBI_DOWNLOAD_PATH, ncbi_identifier, ext)
break
if not ncbi_file_name:
raise Exception('Unable to determine filename for NCBI database for %s: %s' % (ncbi_identifier, path_contents))
tmp_dir = tempfile.mkdtemp(prefix='tmp-data-manager-ncbi-')
ncbi_fasta_filename = os.path.join(tmp_dir, "%s%s" % (ncbi_identifier, ext))
# fasta_base_filename = "%s.fa" % database_id
# fasta_filename = os.path.join(target_directory, fasta_base_filename)
# fasta_writer = open(fasta_filename, 'wb+')
tmp_extract_dir = os.path.join(tmp_dir, 'extracted_fasta')
os.mkdir(tmp_extract_dir)
tmp_fasta = open(ncbi_fasta_filename, 'wb+')
ftp.retrbinary('RETR %s' % ncbi_file_name, tmp_fasta.write)
tmp_fasta.flush()
tmp_fasta.seek(0)
fasta_readers = get_stream_reader(tmp_fasta, tmp_extract_dir)
data_table_entry = _stream_fasta_to_file(fasta_readers, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
for fasta_reader in fasta_readers:
fasta_reader.close()
tmp_fasta.close()
cleanup_before_exit(tmp_dir)
def download_from_url(data_manager_dict, params, target_directory, database_id, database_name):
# TODO: we should automatically do decompression here
urls = list(filter(bool, [x.strip() for x in params['reference_source']['user_url'].split('\n')]))
fasta_reader = [urllib.request.urlopen(url) for url in urls]
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def download_from_history(data_manager_dict, params, target_directory, database_id, database_name):
# TODO: allow multiple FASTA input files
input_filename = params['reference_source']['input_fasta']
if isinstance(input_filename, list):
fasta_reader = [open(filename, 'rb') for filename in input_filename]
else:
fasta_reader = open(input_filename, 'rb')
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def copy_from_directory(data_manager_dict, params, target_directory, database_id, database_name):
input_filename = params['reference_source']['fasta_filename']
create_symlink = params['reference_source']['create_symlink'] == 'create_symlink'
if create_symlink:
data_table_entry = _create_symlink(input_filename, target_directory, database_id, database_name)
else:
if isinstance(input_filename, list):
fasta_reader = [open(filename, 'rb') for filename in input_filename]
else:
fasta_reader = open(input_filename, 'rb')
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def _add_data_table_entry(data_manager_dict, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables']['diamond_database'] = data_manager_dict['data_tables'].get('diamond_database', [])
data_manager_dict['data_tables']['diamond_database'].append(data_table_entry)
return data_manager_dict
def _stream_fasta_to_file(fasta_stream, target_directory, database_id,
database_name, params, close_stream=True):
fasta_base_filename = "%s.fa" % database_id
fasta_filename = os.path.join(target_directory, fasta_base_filename)
temp_fasta = tempfile.NamedTemporaryFile(delete=False, suffix=".fasta")
temp_fasta.close()
fasta_writer = open(temp_fasta.name, 'wb+')
if not isinstance(fasta_stream, list):
fasta_stream = [fasta_stream]
last_char = None
for fh in fasta_stream:
if last_char not in [None, b'\n', b'\r']:
fasta_writer.write(b'\n')
while True:
data = fh.read(CHUNK_SIZE)
if data:
fasta_writer.write(data)
last_char = data[-1:]  # keep a bytes slice so the newline check above compares like types
else:
break
if close_stream:
fh.close()
fasta_writer.close()
args = ['diamond', 'makedb',
'--in', temp_fasta.name,
'--db', fasta_filename]
if params['tax_cond']['tax_select'] == "history":
for i in ["taxonmap", "taxonnodes", "taxonnames"]:
args.extend(['--' + i, params['tax_cond'][i]])
elif params['tax_cond']['tax_select'] == "ncbi":
if os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL.gz')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL.gz')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.gz')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.gz')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid')])
else:
raise Exception('Unable to find prot.accession2taxid file in %s' % (params['tax_cond']['ncbi_tax']))
args.extend(['--taxonnodes',
os.path.join(params['tax_cond']['ncbi_tax'], 'nodes.dmp')])
args.extend(['--taxonnames',
os.path.join(params['tax_cond']['ncbi_tax'], 'names.dmp')])
tmp_stderr = tempfile.NamedTemporaryFile(prefix="tmp-data-manager-diamond-database-builder-stderr")
proc = subprocess.Popen(args=args, shell=False, cwd=target_directory,
stderr=tmp_stderr.fileno())
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
print("Error building diamond database:", file=sys.stderr)
while True:
chunk = tmp_stderr.read(CHUNK_SIZE)
if not chunk:
break
sys.stderr.write(chunk.decode('utf-8'))
sys.exit(return_code)
tmp_stderr.close()
os.remove(temp_fasta.name)
return dict(value=database_id, name=database_name,
db_path="%s.dmnd" % fasta_base_filename)
def _create_symlink(input_filename, target_directory, database_id, database_name):
fasta_base_filename = "%s.fa" % database_id
fasta_filename = os.path.join(target_directory, fasta_base_filename)
os.symlink(input_filename, fasta_filename)
return dict(value=database_id, name=database_name, db_path=fasta_base_filename)
REFERENCE_SOURCE_TO_DOWNLOAD = dict(ncbi=download_from_ncbi,
url=download_from_url,
history=download_from_history,
directory=copy_from_directory)
def main():
# Parse Command Line
parser = optparse.OptionParser()
parser.add_option('-d', '--dbkey_description', dest='dbkey_description',
action='store', type="string", default=None,
help='dbkey_description')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fp:
params = json.load(fp)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
param_dict = params['param_dict']
database_id = param_dict['database_id']
database_name = param_dict['database_name']
if param_dict['tax_cond']['tax_select'] == "ncbi":
param_dict['tax_cond']['ncbi_tax'] = args[1]
# Fetch the FASTA
REFERENCE_SOURCE_TO_DOWNLOAD[param_dict['reference_source']['reference_source_selector']](data_manager_dict, param_dict, target_directory, database_id, database_name)
# save info to json file
open(filename, 'w').write(json.dumps(data_manager_dict, sort_keys=True))
if __name__ == "__main__":
main()
|
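For reference, a parameter structure that `main()` above would accept, reconstructed from the keys it reads; every value below is a placeholder.

```python
# Hypothetical Galaxy parameter JSON for this data manager, inferred from main() and
# download_from_url(); paths, identifiers and the URL are placeholders.
example_params = {
    "output_data": [{"extra_files_path": "/tmp/diamond_db_out"}],
    "param_dict": {
        "database_id": "example_db",
        "database_name": "Example protein database",
        "tax_cond": {"tax_select": "no"},  # anything other than "history"/"ncbi" skips taxonomy mapping
        "reference_source": {
            "reference_source_selector": "url",  # dispatches to download_from_url()
            "user_url": "https://example.org/proteins.fasta",
        },
    },
}
```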
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/VERSION/GL_4_4.py | ShujaKhalid/deep-rl | 210 | 58707 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_4_4'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_4_4',error_checker=_errors._error_checker)
GL_BUFFER_IMMUTABLE_STORAGE=_C('GL_BUFFER_IMMUTABLE_STORAGE',0x821F)
GL_BUFFER_STORAGE_FLAGS=_C('GL_BUFFER_STORAGE_FLAGS',0x8220)
GL_CLEAR_TEXTURE=_C('GL_CLEAR_TEXTURE',0x9365)
GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT=_C('GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT',0x00004000)
GL_CLIENT_STORAGE_BIT=_C('GL_CLIENT_STORAGE_BIT',0x0200)
GL_DYNAMIC_STORAGE_BIT=_C('GL_DYNAMIC_STORAGE_BIT',0x0100)
GL_LOCATION_COMPONENT=_C('GL_LOCATION_COMPONENT',0x934A)
GL_MAP_COHERENT_BIT=_C('GL_MAP_COHERENT_BIT',0x0080)
GL_MAP_PERSISTENT_BIT=_C('GL_MAP_PERSISTENT_BIT',0x0040)
GL_MAP_READ_BIT=_C('GL_MAP_READ_BIT',0x0001)
GL_MAP_WRITE_BIT=_C('GL_MAP_WRITE_BIT',0x0002)
GL_MAX_VERTEX_ATTRIB_STRIDE=_C('GL_MAX_VERTEX_ATTRIB_STRIDE',0x82E5)
GL_MIRROR_CLAMP_TO_EDGE=_C('GL_MIRROR_CLAMP_TO_EDGE',0x8743)
GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED=_C('GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED',0x8221)
GL_QUERY_BUFFER=_C('GL_QUERY_BUFFER',0x9192)
GL_QUERY_BUFFER_BARRIER_BIT=_C('GL_QUERY_BUFFER_BARRIER_BIT',0x00008000)
GL_QUERY_BUFFER_BINDING=_C('GL_QUERY_BUFFER_BINDING',0x9193)
GL_QUERY_RESULT_NO_WAIT=_C('GL_QUERY_RESULT_NO_WAIT',0x9194)
GL_STENCIL_INDEX=_C('GL_STENCIL_INDEX',0x1901)
GL_STENCIL_INDEX8=_C('GL_STENCIL_INDEX8',0x8D48)
GL_TEXTURE_BUFFER_BINDING=_C('GL_TEXTURE_BUFFER_BINDING',0x8C2A)
GL_TRANSFORM_FEEDBACK_BUFFER=_C('GL_TRANSFORM_FEEDBACK_BUFFER',0x8C8E)
GL_TRANSFORM_FEEDBACK_BUFFER_INDEX=_C('GL_TRANSFORM_FEEDBACK_BUFFER_INDEX',0x934B)
GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE=_C('GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE',0x934C)
GL_UNSIGNED_INT_10F_11F_11F_REV=_C('GL_UNSIGNED_INT_10F_11F_11F_REV',0x8C3B)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray)
def glBindBuffersBase(target,first,count,buffers):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,ctypes.POINTER(_cs.GLintptr),ctypes.POINTER(_cs.GLsizeiptr))
def glBindBuffersRange(target,first,count,buffers,offsets,sizes):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray)
def glBindImageTextures(first,count,textures):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray)
def glBindSamplers(first,count,samplers):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray)
def glBindTextures(first,count,textures):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,ctypes.POINTER(_cs.GLintptr),arrays.GLsizeiArray)
def glBindVertexBuffers(first,count,buffers,offsets,strides):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizeiptr,ctypes.c_void_p,_cs.GLbitfield)
def glBufferStorage(target,size,data,flags):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glClearTexImage(texture,level,format,type,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glClearTexSubImage(texture,level,xoffset,yoffset,zoffset,width,height,depth,format,type,data):pass
|
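A sketch of how the GL 4.4 buffer-storage entry point wrapped above might be used through PyOpenGL's aggregate `OpenGL.GL` namespace; it assumes an OpenGL 4.4 context is already current (for example one created with GLFW or GLUT), and the buffer contents are placeholders.

```python
# Assumes a current OpenGL 4.4 context; without one these calls will fail.
import numpy as np
from OpenGL.GL import (
    GL_ARRAY_BUFFER, GL_DYNAMIC_STORAGE_BIT, GL_MAP_COHERENT_BIT,
    GL_MAP_PERSISTENT_BIT, GL_MAP_WRITE_BIT,
    glBindBuffer, glBufferStorage, glGenBuffers,
)

data = np.zeros(1024, dtype=np.float32)
buf = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, buf)
# Immutable storage, using the GL_*_BIT flags defined in this module.
flags = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT | GL_DYNAMIC_STORAGE_BIT
glBufferStorage(GL_ARRAY_BUFFER, data.nbytes, data, flags)
```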
tools/benchmark/asaga_spase_url.py | sumau/tick | 411 | 58715 |
<filename>tools/benchmark/asaga_spase_url.py
import os
from tick.array.serialize import tick_double_sparse2d_from_file, \
tick_double_array_from_file
from tick.linear_model.model_logreg import ModelLogReg
from tick.prox.prox_elasticnet import ProxElasticNet
from tick.solver.saga import SAGA
# Create this dataset with benchmark_util
dirpath = os.path.dirname(__file__)
features_path = os.path.join(dirpath, "data", "url.3.features.cereal")
labels_path = os.path.join(dirpath, "data", "url.3.labels.cereal")
N_ITER = 200
n_samples = 196000
ALPHA = 1. / n_samples
BETA = 1e-10
STRENGTH = ALPHA + BETA
RATIO = BETA / STRENGTH
THREADS = 8
features = tick_double_sparse2d_from_file(features_path)
labels = tick_double_array_from_file(labels_path)
model = ModelLogReg().fit(features, labels)
prox = ProxElasticNet(STRENGTH, RATIO)
saga = SAGA(
max_iter=N_ITER,
tol=0,
rand_type="unif",
step=0.00257480411965,
n_threads=THREADS,
verbose=False,
record_every=20,
)
saga.history.print_order += ['time']
saga.set_model(model).set_prox(prox)
saga.solve()
saga.print_history()
|
Python/HelloWorld_Spanish.py | saurabhcommand/Hello-world | 1,428 | 58718 |
print("Hola, el mundo!")
|
exercises/de/test_04_11_01.py | Jette16/spacy-course | 2,085 | 58760 |
<gh_stars>1000+
def test():
assert (
len(TRAINING_DATA) == 3
), "Irgendetwas scheint mit deinen Daten nicht zu stimmen. Erwartet werden 3 Beispiele."
assert all(
len(entry) == 2 and isinstance(entry[1], dict) for entry in TRAINING_DATA
), "Die Trainingsdaten haben nicht das richtige Format. Erwartet wird eine Liste von Tuples, bestehend aus Text und einem Dictionary als zweites Element."
ents = [entry[1].get("entities", []) for entry in TRAINING_DATA]
assert len(ents[0]) == 2, "Das erste Beispiel sollte zwei Entitäten enthalten."
ent_0_0 = (0, 6, "WEBSITE")
ent_0_1 = (11, 18, "WEBSITE")
assert (
ents[0][0] == ent_0_0
), "Überprüfe nochmal die erste Entität im ersten Beispiel."
assert (
ents[0][1] == ent_0_1
), "Überprüfe nochmal die zweite Entität im ersten Beispiel."
assert len(ents[1]) == 1, "Das zweite Beispiel sollte eine Entität enthalten."
assert ents[1] == [
(28, 35, "WEBSITE",)
], "Überprüfe nochmal die Entität im zweiten Beispiel."
assert len(ents[2]) == 1, "Das dritte Beispiel sollte eine Entität enthalten."
assert ents[2] == [
(15, 21, "WEBSITE",)
], "Überprüfe nochmal die Entität im dritten Beispiel."
__msg__.good("Sehr schön!")
|
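One `TRAINING_DATA` shape that would satisfy the assertions in `test()` above; the texts are placeholders, and only the entity offsets and labels are taken from the test itself.

```python
# Placeholder texts: the test only checks the list length, the (text, dict) tuple shape,
# and the entity offsets/labels below.
TRAINING_DATA = [
    ("x" * 20, {"entities": [(0, 6, "WEBSITE"), (11, 18, "WEBSITE")]}),
    ("x" * 40, {"entities": [(28, 35, "WEBSITE")]}),
    ("x" * 25, {"entities": [(15, 21, "WEBSITE")]}),
]
```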
PWGJE/EMCALJetTasks/Tracks/analysis/plots/TriggerEfficiencyPlotMC.py | maroozm/AliPhysics | 114 | 58801 |
<filename>PWGJE/EMCALJetTasks/Tracks/analysis/plots/TriggerEfficiencyPlotMC.py
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Comparison plot of trigger efficiencies in MC in different pt-hat bins including underlying data structure
@author: <NAME>
"""
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Frame, Style
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.ComparisonData import ComparisonData, ComparisonObject, ComparisonPlot
from ROOT import TFile,kBlack
class TriggerEfficiencyClassPtHat(ComparisonObject):
def __init__(self, pthatbin, triggerdata, style):
ComparisonObject.__init__(self, triggerdata, style)
self.__pthatbin = pthatbin
def GetLegendTitle(self):
return "p_{t}-hat bin %d" %(self.__pthatbin)
def GetObjectName(self):
return "pthat%d" %(self.__pthatbin)
class TriggerEfficiencyClassTriggerType(ComparisonObject):
def __init__(self, triggername, triggerdata, style):
ComparisonObject.__init__(self, triggerdata, style)
self.__triggername = triggername
def GetLegendTitle(self):
return self.__triggername
def GetObjectName(self):
return self.__triggername
class TriggerEfficiencyContainer(ComparisonData):
"""
Underlying data structure for the comparison plot
"""
def __init__(self):
"""
Initialise container
"""
ComparisonData.__init__(self)
def AddEfficiency(self, trclasstype, key, efficiencyCurve, style):
"""
Add new trigger info
"""
triggerdata = None
if trclasstype == "pthat":
triggerdata = TriggerEfficiencyClassPtHat(key, efficiencyCurve, style)
elif trclasstype == "triggertype":
triggerdata = TriggerEfficiencyClassTriggerType(key, efficiencyCurve, style)
self.AddEntry(triggerdata)
class TriggerEfficiencyFrame(Frame):
"""
Frame class for trigger efficiency plots
"""
def __init__(self, name):
"""
Constructor
"""
Frame.__init__(self, name, 0., 100., 0., 1.)
self.SetXtitle("p_{t} (GeV/c)")
self.SetYtitle("Trigger efficiency")
class TriggerEfficiencyPlotMC(ComparisonPlot):
"""
Comparison plot of trigger efficiencies in different pt-hat bins
"""
def __init__(self):
"""
Constructor
"""
ComparisonPlot.__init__(self)
self._comparisonContainer = TriggerEfficiencyContainer()
self.SetFrame(TriggerEfficiencyFrame("tframe"))
self.SetLegendAttributes(0.65, 0.15, 0.89, 0.5)
self.__triggername = ""
def SetTriggerName(self, trname):
"""
Set triggername for the label
"""
self.__triggername = trname
def AddEfficiency(self, pthatbin, efficiency, style):
"""
Add new efficiency container to the data structure
"""
self._comparisonContainer.AddEfficiency("pthat", pthatbin, efficiency, style)
def Create(self):
"""
Create the plot
"""
self._Create("triggerEfficiencyMC", "MC trigger efficiency plot")
if len(self.__triggername):
pad = self._GetFramedPad()
pad.DrawLabel(0.15, 0.8, 0.5, 0.85, self.__triggername)
class TriggerEfficiencyPlotClasses(ComparisonPlot):
"""
Plot comparing the trigger efficiency of different trigger types
"""
def __init__(self):
"""
Constructor
"""
ComparisonPlot.__init__(self)
self._comparisonContainer = TriggerEfficiencyContainer()
self.SetFrame(TriggerEfficiencyFrame("tframe"))
self.SetLegendAttributes(0.65, 0.15, 0.89, 0.5)
def AddTriggerEfficiency(self, triggername, efficiency, style):
"""
Add trigger class to the comparison data
"""
self._comparisonContainer.AddEfficiency("triggertype", triggername, efficiency, style)
def Create(self):
self._Create("triggerclasses", "Trigger efficiencies")
class TriggerEfficiencySumPlot(SinglePanelPlot):
"""
Plot the summed trigger efficiency from different pt-hard bins
"""
def __init__(self, triggername, triggerefficiency):
"""
Constructor
"""
SinglePanelPlot.__init__(self)
self.__triggername = triggername
self.__triggereff = triggerefficiency
def Create(self):
"""
Create the plot
"""
self._OpenCanvas("trgEffSumm", "Summed trigger efficiency")
pad = self._GetFramedPad()
pad.DrawFrame(TriggerEfficiencyFrame("tframe"))
pad.DrawGraphicsObject(GraphicsObject(self.__triggereff.GetEfficiencyCurve(), Style(kBlack, 20)), False, "Trigger Eff")
pad.DrawLabel(0.5, 0.2, 0.89, 0.25, "Trigger: %s" %(self.__triggername))
|
tests/physicalvalidation/physical_validation/data/gromacs_parser.py | hejamu/gromacs | 384 | 58812 |
<reponame>hejamu/gromacs
###########################################################################
# #
# physical_validation, #
# a python package to test the physical validity of MD results #
# #
# Written by <NAME> <<EMAIL>> #
# <NAME> <<EMAIL>> #
# #
# Copyright (C) 2012 University of Virginia #
# (C) 2017 University of Colorado Boulder #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public #
# License as published by the Free Software Foundation; either #
# version 2.1 of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the #
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
# #
###########################################################################
r"""
gromacs_parser.py
"""
import warnings
import numpy as np
from . import parser
# py2.7 compatibility
from .simulation_data import SimulationData
from .unit_data import UnitData
from .ensemble_data import EnsembleData
from .system_data import SystemData
from .observable_data import ObservableData
from .trajectory_data import TrajectoryData
# replace lines above by this when py2.7 support is dropped:
# from . import SimulationData, UnitData, EnsembleData, SystemData, ObservableData, TrajectoryData
from ..util.gromacs_interface import GromacsInterface
from ..util import error as pv_error
class GromacsParser(parser.Parser):
"""
GromacsParser
"""
@staticmethod
def units():
# Gromacs uses kJ/mol
return UnitData(
kb=8.314462435405199e-3,
energy_str='kJ/mol',
energy_conversion=1.0,
length_str='nm',
length_conversion=1.0,
volume_str='nm^3',
volume_conversion=1.0,
temperature_str='K',
temperature_conversion=1.0,
pressure_str='bar',
pressure_conversion=1.0,
time_str='ps',
time_conversion=1.0)
def __init__(self, exe=None, includepath=None):
r"""
Create a GromacsParser object
Parameters
----------
exe: str, optional
Path to a gmx executable (or simply the executable name, if it is in the path)
Default: Looks for `gmx`, then for `gmx_d` in the path. If neither is found, `exe` is
set to None, and any parsing including simulation trajectories (`edr`, `trr`
and `gro` arguments in `get_simulation_data()`) will fail.
includepath: str or List[str], optional
Path or list of paths to location(s) of topology file. Is used for the lookup of
`#include` statements in topologies.
Default: None - no additional topology location. Lookup will be restricted to current
directory and location of the `top` file given to `get_simulation_data()`,
plus any include locations added to the `mdp` file.
"""
super(GromacsParser, self).__init__()
self.__interface = GromacsInterface(exe=exe, includepath=includepath)
# gmx energy codes
self.__gmx_energy_names = {'kinetic_energy': 'Kinetic-En.',
'potential_energy': 'Potential',
'total_energy': 'Total-Energy',
'volume': 'Volume',
'pressure': 'Pressure',
'temperature': 'Temperature',
'constant_of_motion': 'Conserved-En.'}
def get_simulation_data(self,
mdp=None, top=None, edr=None,
trr=None, gro=None):
r"""
Parameters
----------
mdp: str, optional
A string pointing to a .mdp file
top: str, optional
A string pointing to a .top file
edr: str, optional
A string pointing to a .edr file
trr: str, optional
A string pointing to a .trr file
gro: str, optional
A string pointing to a .gro file (Note: if also trr is given, gro is ignored)
Returns
-------
result: SimulationData
A SimulationData filled with the results of the simulation as described by
the provided GROMACS files.
"""
result = SimulationData()
result.units = self.units()
# trajectories (might be used later for the box...)
trajectory_dict = None
if trr is not None:
if gro is not None:
warnings.warn('`trr` and `gro` given. Ignoring `gro`.')
trajectory_dict = self.__interface.read_trr(trr)
result.trajectory = TrajectoryData(
trajectory_dict['position'],
trajectory_dict['velocity'])
elif gro is not None:
trajectory_dict = self.__interface.read_gro(gro)
result.trajectory = TrajectoryData(
trajectory_dict['position'],
trajectory_dict['velocity'])
# simulation parameters & system
if mdp is not None and top is not None:
mdp_options = self.__interface.read_mdp(mdp)
define = None
include = None
if 'define' in mdp_options:
define = mdp_options['define']
if 'include' in mdp_options:
include = mdp_options['include']
molecules = self.__interface.read_system_from_top(top, define=define, include=include)
if 'dt' in mdp_options:
result.dt = float(mdp_options['dt'])
natoms = 0
mass = []
constraints_per_molec = []
angles = ('constraints' in mdp_options and
mdp_options['constraints'] == 'all-angles')
angles_h = (angles or
'constraints' in mdp_options and
mdp_options['constraints'] == 'h-angles')
bonds = (angles_h or
'constraints' in mdp_options and
mdp_options['constraints'] == 'all-bonds')
bonds_h = (bonds or
'constraints' in mdp_options and
mdp_options['constraints'] == 'h-bonds')
molecule_idx = []
next_molec = 0
molec_bonds = []
molec_bonds_constrained = []
for molecule in molecules:
natoms += molecule['nmolecs'] * molecule['natoms']
for n in range(0, molecule['nmolecs']):
molecule_idx.append(next_molec)
next_molec += molecule['natoms']
mass.extend(molecule['mass'] * molecule['nmolecs'])
constraints = 0
constrained_bonds = []
all_bonds = molecule['bonds'] + molecule['bondsh']
if molecule['settles']:
constraints = 3
constrained_bonds = all_bonds
else:
if bonds:
constraints += molecule['nbonds'][0]
constrained_bonds.extend(molecule['bonds'])
if bonds_h:
constraints += molecule['nbonds'][1]
constrained_bonds.extend(molecule['bondsh'])
if angles:
constraints += molecule['nangles'][0]
if angles_h:
constraints += molecule['nangles'][1]
constraints_per_molec.extend([constraints] * molecule['nmolecs'])
molec_bonds.extend([all_bonds] * molecule['nmolecs'])
molec_bonds_constrained.extend([constrained_bonds] * molecule['nmolecs'])
system = SystemData()
system.natoms = natoms
system.mass = mass
system.molecule_idx = molecule_idx
system.nconstraints = np.sum(constraints_per_molec)
system.nconstraints_per_molecule = constraints_per_molec
system.ndof_reduction_tra = 3
system.ndof_reduction_rot = 0
if 'comm-mode' in mdp_options:
if mdp_options['comm-mode'] == 'linear':
system.ndof_reduction_tra = 3
elif mdp_options['comm-mode'] == 'angular':
system.ndof_reduction_tra = 3
system.ndof_reduction_rot = 3
if mdp_options['comm-mode'] == 'none':
system.ndof_reduction_tra = 0
system.bonds = molec_bonds
system.constrained_bonds = molec_bonds_constrained
result.system = system
thermostat = ('tcoupl' in mdp_options and
mdp_options['tcoupl'] and
mdp_options['tcoupl'] != 'no')
stochastic_dyn = ('integrator' in mdp_options and
mdp_options['integrator'] in ['sd', 'sd2', 'bd'])
constant_temp = thermostat or stochastic_dyn
temperature = None
if constant_temp:
ref_t = [float(t) for t in mdp_options['ref-t'].split()]
if len(ref_t) == 1 or np.allclose(ref_t, [ref_t[0]]*len(ref_t)):
temperature = ref_t[0]
else:
raise pv_error.InputError('mdp',
'Ensemble definition ambiguous: Different t-ref values found.')
constant_press = ('pcoupl' in mdp_options and
mdp_options['pcoupl'] and
mdp_options['pcoupl'] != 'no')
volume = None
pressure = None
if constant_press:
ref_p = [float(p) for p in mdp_options['ref-p'].split()]
if len(ref_p) == 1 or np.allclose(ref_p, [ref_p[0]]*len(ref_p)):
pressure = ref_p[0]
else:
raise pv_error.InputError('mdp',
'Ensemble definition ambiguous: Different p-ref values found.')
else:
if trajectory_dict is not None:
box = trajectory_dict['box'][0]
# Different box shapes?
volume = box[0]*box[1]*box[2]
else:
warnings.warn('Constant volume simulation with undefined volume.')
if constant_temp and constant_press:
ens = 'NPT'
elif constant_temp:
ens = 'NVT'
else:
ens = 'NVE'
if ens == 'NVE':
self.__gmx_energy_names['constant_of_motion'] = 'Total-Energy'
else:
self.__gmx_energy_names['constant_of_motion'] = 'Conserved-En.'
result.ensemble = EnsembleData(
ens,
natoms=natoms,
volume=volume, pressure=pressure,
temperature=temperature
)
if edr is not None:
observable_dict = self.__interface.get_quantities(edr,
self.__gmx_energy_names.values(),
args=['-dp'])
# constant volume simulations don't write out the volume in .edr file
if (observable_dict['Volume'] is None and
result.ensemble is not None and
result.ensemble.volume is not None):
nframes = observable_dict['Pressure'].size
observable_dict['Volume'] = np.ones(nframes) * result.ensemble.volume
result.observables = ObservableData()
for key, gmxkey in self.__gmx_energy_names.items():
result.observables[key] = observable_dict[gmxkey]
return result
|
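A hedged usage sketch for the parser above; the file names are placeholders and a working `gmx` executable on the PATH is assumed.

```python
# Hypothetical usage; md.mdp, topol.top, ener.edr and traj.trr are placeholder file names.
parser = GromacsParser(exe="gmx")
result = parser.get_simulation_data(
    mdp="md.mdp", top="topol.top", edr="ener.edr", trr="traj.trr"
)
print(result.system.natoms)  # SystemData filled from the topology, as assembled above
```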
wardroom/aws.py | jpweber/wardroom | 171 | 58893 |
<reponame>jpweber/wardroom
import logging
import sys
import time
import boto3
import click
logger = logging.getLogger(name=__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stderr)
logger.addHandler(handler)
yaml_template ='''
{}:
'64': {}
'''.strip('\r\n')
def copy_to_region(image, src_region, dest_region):
session = boto3.session.Session(region_name=dest_region)
local_client = session.client('ec2')
logger.info("creating image in region {}".format(dest_region))
resp = local_client.copy_image(
Name=image.name,
SourceImageId=image.image_id,
SourceRegion=src_region,
)
local_ec2 = session.resource('ec2')
new_image = local_ec2.Image(resp['ImageId'])
return (new_image, dest_region)
def make_public_and_tag(image, region, desc):
while True:
image.load()
if image.state == 'available':
image.modify_attribute(
LaunchPermission={
'Add': [{'Group': 'all'}]
}
)
# Can only modify one attribute at a time
image.modify_attribute(Description={'Value': desc})
logger.info("region {} ami {} is available".format(region, image.id))
break
time.sleep(5)
def encode_desc(dict_):
return " ".join("{0}={1}".format(*item) for item in dict_.items())
@click.group()
def aws():
pass
@aws.command(name='copy-ami')
@click.option('-r', '--src-region', default='us-east-1', help='AWS Region')
@click.option('-q', '--quiet', is_flag=True)
@click.argument('src_ami')
def copy_ami(src_region, src_ami, quiet):
if quiet:
logger.setLevel(logging.WARN)
session = boto3.session.Session(region_name=src_region)
client = session.client('ec2')
dest_regions = [region['RegionName'] for region in client.describe_regions()['Regions']
if region['RegionName'] != src_region
]
dest_regions.sort()
logger.info("detected {} regions".format(len(dest_regions)))
image = session.resource('ec2').Image(src_ami)
description = encode_desc({i['Key']: i['Value'] for i in image.tags or []})
# copy to all regions
images = [copy_to_region(image, src_region, region) for region in dest_regions]
# Add the original
images.append((image, src_region))
# print out the YAML
for (image, region) in images:
print(yaml_template.format(region, image.id))
logger.info("waiting for all images to be available. In the mean time,"
"that YAML can be pasted into the quickstart template.")
# wait for all images to be available
for (image, region) in images:
make_public_and_tag(image, region, description)
|
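A sketch of driving the `copy-ami` command programmatically with click's test runner; the AMI id is a placeholder and valid AWS credentials are assumed.

```python
# Hypothetical invocation; requires boto3 credentials and a real source AMI id.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(aws, ["copy-ami", "--src-region", "us-east-1", "ami-0123456789abcdef0"])
print(result.output)
```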
zeus/models/repository_api_token.py | conrad-kronos/zeus | 221 | 58930 |
from zeus.config import db
from zeus.db.mixins import ApiTokenMixin, RepositoryMixin, StandardAttributes
from zeus.db.utils import model_repr
class RepositoryApiToken(StandardAttributes, RepositoryMixin, ApiTokenMixin, db.Model):
"""
An API token associated to a repository.
"""
__tablename__ = "repository_api_token"
__repr__ = model_repr("repository_id", "key")
def get_token_key(self):
return "r"
|
Installation/auxiliary/gdb/test-gdb.py | ffteja/cgal | 3,227 | 58935 |
#!/usr/bin/python
# Copyright (c) 2011 GeometryFactory Sarl (France)
#
# $URL$
# $Id$
# SPDX-License-Identifier: LGPL-3.0-or-later OR LicenseRef-Commercial
#
# Author(s) : <NAME>
import sys
import os
import gdb
sys.path.insert(0, os.getcwd() + '/python')
import CGAL.printers
|
bin/command/generate_dockerfile_command.py | oobeya-space/Dockerfile | 1,405 | 58936 |
<reponame>oobeya-space/Dockerfile
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2016 WebDevOps.io
#
# This file is part of Dockerfile Repository.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from cleo import Output
from jinja2 import Environment, FileSystemLoader
from webdevops import DockerfileUtility
from webdevops.command import BaseCommand
class GenerateDockerfileCommand(BaseCommand):
"""
Build Dockerfile containers
generate:dockerfile
{docker images?* : Docker images (whitelist)}
{--whitelist=?* : image/tag whitelist }
{--blacklist=?* : image/tag blacklist }
"""
template = ''
template_header = '{% extends "Dockerfile/layout.jinja2" %}\n{% block content %}'
template_footer = '{% endblock %}'
def run_task(self, configuration):
template_path = configuration.get('templatePath')
dockerfile_path = configuration.get('dockerPath')
whitelist = self.get_whitelist()
blacklist = self.get_blacklist()
if Output.VERBOSITY_VERBOSE <= self.output.get_verbosity():
self.line('<info>-> </info><comment>docker path</comment> : %s' % dockerfile_path)
self.line('<info>-> </info><comment>template path </comment> : %s' % template_path)
if whitelist:
self.line('<info>-> </info><comment>whitelist </comment> :')
for crit in whitelist:
self.line("\t * %s" % crit)
if blacklist:
self.line('<info>-> </info><comment>blacklist </comment> :')
for crit in blacklist:
self.line("\t * %s" % crit)
self.template = Environment(
autoescape=False,
loader=FileSystemLoader([template_path]),
trim_blocks=False
)
for file in DockerfileUtility.find_file_in_path(dockerfile_path=dockerfile_path, filename="Dockerfile.jinja2", whitelist=whitelist, blacklist=blacklist):
self.process_dockerfile(file)
def process_dockerfile(self, input_file):
"""
:param input_file: Input File
:type input_file: str
"""
output_file = os.path.splitext(input_file)
output_file = os.path.join(os.path.dirname(output_file[0]), os.path.basename(output_file[0]))
docker_image = os.path.basename(os.path.dirname(os.path.dirname(output_file)))
docker_tag = os.path.basename(os.path.dirname(output_file))
context = {
'Dockerfile': {
'image': docker_image,
'tag': docker_tag
}
}
if Output.VERBOSITY_NORMAL <= self.output.get_verbosity():
self.line("<info>* </info><comment>Processing Dockerfile for </comment>%s:%s" % (docker_image,docker_tag))
with open(input_file, 'r') as fileInput:
template_content = fileInput.read()
template_content = self.template_header + template_content + self.template_footer
rendered_content = self.template.from_string(template_content).render(context)
rendered_content = rendered_content.lstrip()
with open(output_file, 'w') as file_output:
file_output.write(rendered_content)
|
tests/python_ann_assign/ann_assign.py | hixio-mh/plugin-python | 362 | 58972 |
<reponame>hixio-mh/plugin-python<gh_stars>100-1000
a: str
a: bool = True
my_long_var_aaaaaaaaaaaaaaaaaaaaaaaaaa: MyLongTypeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
my_long_var_aaaaaaaaaaaaaaaaaaaaaaaaaa: MyLongTypeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA = 1
|
simplesqlite/core.py | thombashi/SimpleSQLite | 126 | 59083 |
<reponame>thombashi/SimpleSQLite
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import logging
import os
import re
import sqlite3
from collections import OrderedDict, defaultdict
from sqlite3 import Connection, Cursor
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, cast
import pathvalidate
import typepy
from dataproperty.typing import TypeHint
from mbstrdecoder import MultiByteStrDecoder
from sqliteschema import SQLITE_SYSTEM_TABLES, SQLiteSchemaExtractor
from tabledata import TableData
from typepy import extract_typepy_from_dtype
from ._common import extract_table_metadata
from ._func import copy_table, validate_table_name
from ._logger import logger
from ._sanitizer import SQLiteTableDataSanitizer
from .converter import RecordConvertor
from .error import (
AttributeNotFoundError,
DatabaseError,
NameValidationError,
NullDatabaseConnectionError,
OperationalError,
TableNotFoundError,
)
from .query import (
Attr,
AttrList,
Insert,
QueryItem,
Select,
Table,
Value,
WhereQuery,
make_index_name,
)
from .sqlquery import SqlQuery
MEMORY_DB_NAME = ":memory:"
class SimpleSQLite:
"""
Wrapper class for |sqlite3| module.
:param str database_src:
SQLite database source. Acceptable types are:
(1) File path to a database to be connected.
(2) sqlite3.Connection instance.
(3) SimpleSQLite instance
:param str mode: Open mode.
:param bool delayed_connection:
Delay connecting to the database until it is first accessed, if the value is |True|.
:param int max_workers:
Maximum number of workers to generate a table.
In default, the same as the total number of CPUs.
:param bool profile:
Recording SQL query execution time profile, if the value is |True|.
.. seealso::
:py:meth:`.connect`
:py:meth:`.get_profile`
"""
dup_col_handler = "error"
global_debug_query = False
@property
def database_path(self) -> Optional[str]:
"""
:return: File path of the connected database.
:rtype: str
:Examples:
>>> from simplesqlite import SimpleSQLite
>>> con = SimpleSQLite("sample.sqlite", "w")
>>> con.database_path
'/tmp/sample.sqlite'
>>> con.close()
>>> print(con.database_path)
None
"""
if self.__delayed_connection_path:
return self.__delayed_connection_path
return self.__database_path
@property
def connection(self) -> Optional[Connection]:
"""
:return: |Connection| instance of the connected database.
:rtype: sqlite3.Connection
"""
self.__delayed_connect()
return self.__connection
@property
def schema_extractor(self) -> SQLiteSchemaExtractor:
return SQLiteSchemaExtractor(self, max_workers=self.__max_workers)
@property
def total_changes(self) -> int:
"""
.. seealso::
:py:attr:`sqlite3.Connection.total_changes`
"""
self.check_connection()
return self.connection.total_changes # type: ignore
@property
def mode(self) -> Optional[str]:
"""
:return: Connection mode: ``"r"``/``"w"``/``"a"``.
:rtype: str
.. seealso:: :py:meth:`.connect`
"""
return self.__mode
def __initialize_connection(self) -> None:
self.__database_path: Optional[str] = None
self.__connection: Optional[Connection] = None
self.__mode: Optional[str] = None
self.__delayed_connection_path: Optional[str] = None
self.__dict_query_count: Dict[str, int] = defaultdict(int)
self.__dict_query_totalexectime: Dict[str, float] = defaultdict(float)
def __init__(
self,
database_src: Union[Connection, "SimpleSQLite", str],
mode: str = "a",
delayed_connection: bool = True,
max_workers: Optional[int] = None,
profile: bool = False,
) -> None:
self.debug_query = False
self.__initialize_connection()
self.__mode = mode
self.__max_workers = max_workers
self.__is_profile = profile
if database_src is None:
raise TypeError("database_src must be not None")
if isinstance(database_src, SimpleSQLite):
self.__connection = database_src.connection
self.__database_path = database_src.database_path
self.debug_query = database_src.debug_query
return
if isinstance(database_src, sqlite3.Connection):
self.__connection = database_src
return
if delayed_connection:
self.__delayed_connection_path = database_src
return
self.connect(database_src, mode)
def __del__(self) -> None:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def is_connected(self) -> bool:
"""
:return: |True| if the connection to a database is valid.
:rtype: bool
:Examples:
>>> from simplesqlite import SimpleSQLite
>>> con = SimpleSQLite("sample.sqlite", "w")
>>> con.is_connected()
True
>>> con.close()
>>> con.is_connected()
False
"""
try:
self.check_connection()
except NullDatabaseConnectionError:
return False
return True
def check_connection(self) -> None:
"""
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:Sample Code:
.. code:: python
import simplesqlite
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
print("---- connected to a database ----")
con.check_connection()
print("---- disconnected from a database ----")
con.close()
try:
con.check_connection()
except simplesqlite.NullDatabaseConnectionError as e:
print(e)
:Output:
.. code-block:: none
---- connected to a database ----
---- disconnected from a database ----
null database connection
"""
if self.connection is None:
if not self.__delayed_connect():
raise NullDatabaseConnectionError("null database connection")
def connect(self, database_path: str, mode: str = "a") -> None:
"""
Connect to a SQLite database.
:param str database_path:
Path to the SQLite database file to be connected.
:param str mode:
``"r"``: Open for read only.
``"w"``: Open for read/write.
Delete existing tables when connecting.
``"a"``: Open for read/write. Append to the existing tables.
:raises ValueError:
If ``database_path`` is invalid or |attr_mode| is invalid.
:raises simplesqlite.DatabaseError:
If the file is encrypted or is not a database.
:raises simplesqlite.OperationalError:
If unable to open the database file.
"""
self.close()
logger.debug(f"connect to a SQLite database: path='{database_path}', mode={mode}")
if mode == "r":
self.__verify_db_file_existence(database_path)
elif mode in ["w", "a"]:
self.__validate_db_path(database_path)
else:
raise ValueError("unknown connection mode: " + mode)
if database_path == MEMORY_DB_NAME:
self.__database_path = database_path
else:
self.__database_path = os.path.realpath(database_path)
try:
self.__connection = sqlite3.connect(database_path)
except sqlite3.OperationalError as e:
raise OperationalError(e)
self.__mode = mode
try:
# validate connection after connect
self.fetch_table_names()
except sqlite3.DatabaseError as e:
raise DatabaseError(e)
if mode != "w":
return
for table in self.fetch_table_names():
self.drop_table(table)
def execute_query(
self, query: Union[str, QueryItem], caller: Optional[Tuple] = None
) -> Optional[Cursor]:
"""
Send arbitrary SQLite query to the database.
:param query: Query to be executed.
:param tuple caller:
Caller information.
Expects the return value of :py:meth:`logging.Logger.findCaller`.
:return: The result of the query execution.
:rtype: sqlite3.Cursor
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
.. warning::
This method can execute an arbitrary query.
i.e. No access permissions check by |attr_mode|.
"""
import time
self.check_connection()
if typepy.is_null_string(query):
return None
if self.debug_query or self.global_debug_query:
logger.debug(query)
if self.__is_profile:
exec_start_time = time.time()
assert self.connection # to avoid type check error
try:
result = self.connection.execute(str(query))
except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
if caller is None:
caller = logging.getLogger().findCaller()
file_path, line_no, func_name = caller[:3]
raise OperationalError(
message="\n".join(
[
"failed to execute query at {:s}({:d}) {:s}".format(
file_path, line_no, func_name
),
f" - query: {MultiByteStrDecoder(query).unicode_str}",
f" - msg: {e}",
f" - db: {self.database_path}",
]
)
)
if self.__is_profile:
self.__dict_query_count[str(query)] += 1
elapse_time = time.time() - exec_start_time
self.__dict_query_totalexectime[str(query)] += elapse_time
return result
def set_row_factory(self, row_factory: Optional[Callable]) -> None:
"""
Set row_factory to the database connection.
"""
self.check_connection()
self.__connection.row_factory = row_factory # type: ignore
def select(
self,
select: Union[str, AttrList],
table_name: str,
where: Optional[WhereQuery] = None,
extra: Optional[str] = None,
) -> Optional[Cursor]:
"""
Send a SELECT query to the database.
:param select: Attribute for the ``SELECT`` query.
:param str table_name: |arg_select_table_name|
:param where: |arg_select_where|
:type where: |arg_where_type|
:param str extra: |arg_select_extra|
:return: Result of the query execution.
:rtype: sqlite3.Cursor
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
"""
self.verify_table_existence(table_name)
return self.execute_query(
str(Select(select, table_name, where, extra)),
logging.getLogger().findCaller(),
)
def select_as_dataframe(
self,
table_name: str,
columns: Optional[Sequence[str]] = None,
where: Optional[WhereQuery] = None,
extra: Optional[str] = None,
):
"""
Get data in the database and return fetched data as a
:py:class:`pandas.Dataframe` instance.
:param str table_name: |arg_select_table_name|
:param columns: |arg_select_as_xx_columns|
:param where: |arg_select_where|
:param extra: |arg_select_extra|
:return: Table data as a :py:class:`pandas.Dataframe` instance.
:rtype: pandas.DataFrame
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-select-as-dataframe`
.. note::
``pandas`` package required to execute this method.
"""
import pandas
if columns is None:
columns = self.fetch_attr_names(table_name)
result = self.select(
select=AttrList(columns), table_name=table_name, where=where, extra=extra
)
if result is None:
return pandas.DataFrame()
return pandas.DataFrame(result.fetchall(), columns=columns)
def select_as_tabledata(
self,
table_name: str,
columns: Optional[Sequence[str]] = None,
where: Optional[WhereQuery] = None,
extra: Optional[str] = None,
type_hints: Optional[Dict[str, TypeHint]] = None,
) -> TableData:
"""
Get data in the database and return fetched data as a
:py:class:`tabledata.TableData` instance.
:param str table_name: |arg_select_table_name|
:param columns: |arg_select_as_xx_columns|
:param where: |arg_select_where|
:type where: |arg_where_type|
:param str extra: |arg_select_extra|
:return: Table data as a :py:class:`tabledata.TableData` instance.
:rtype: tabledata.TableData
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
.. note::
``pandas`` package required to execute this method.
"""
if columns is None:
columns = self.fetch_attr_names(table_name)
result = self.select(
select=AttrList(columns), table_name=table_name, where=where, extra=extra
)
if result is None:
return TableData(None, [], [])
if type_hints is None:
type_hints = self.fetch_data_types(table_name)
return TableData(
table_name,
columns,
result.fetchall(),
type_hints=[type_hints.get(col) for col in columns],
max_workers=self.__max_workers,
)
def select_as_dict(
self,
table_name: str,
columns: Optional[Sequence[str]] = None,
where: Optional[WhereQuery] = None,
extra: Optional[str] = None,
) -> "Optional[List[OrderedDict[str, Any]]]":
"""
Get data in the database and return fetched data as a
|OrderedDict| list.
:param str table_name: |arg_select_table_name|
:param list columns: |arg_select_as_xx_columns|
:param where: |arg_select_where|
:type where: |arg_where_type|
:param str extra: |arg_select_extra|
:return: Table data as |OrderedDict| instances.
:rtype: |list| of |OrderedDict|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-select-as-dict`
"""
return self.select_as_tabledata(table_name, columns, where, extra).as_dict().get(table_name)
def select_as_memdb(
self,
table_name: str,
columns: Optional[Sequence[str]] = None,
where: Optional[WhereQuery] = None,
extra: Optional[str] = None,
):
"""
Get data in the database and return fetched data as a
in-memory |SimpleSQLite| instance.
:param str table_name: |arg_select_table_name|
:param columns: |arg_select_as_xx_columns|
:param where: |arg_select_where|
:type where: |arg_where_type|
:param str extra: |arg_select_extra|
:return:
Table data as a |SimpleSQLite| instance that connected to in
memory database.
:rtype: |SimpleSQLite|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
"""
table_schema = self.schema_extractor.fetch_table_schema(table_name)
memdb = connect_memdb(max_workers=self.__max_workers)
memdb.create_table_from_tabledata(
self.select_as_tabledata(table_name, columns, where, extra),
primary_key=table_schema.primary_key,
index_attrs=table_schema.index_list,
)
return memdb
def insert(
self, table_name: str, record: Any, attr_names: Optional[Sequence[str]] = None
) -> None:
"""
Send an INSERT query to the database.
:param str table_name: Table name of executing the query.
:param record: Record to be inserted.
:type record: |dict|/|namedtuple|/|list|/|tuple|
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-insert-records`
"""
self.insert_many(table_name, records=[record], attr_names=attr_names)
def insert_many(
self,
table_name: str,
records: Sequence[Union[Dict, Sequence]],
attr_names: Optional[Sequence[str]] = None,
) -> int:
"""
Send an INSERT query with multiple records to the database.
:param str table_name: Table name of executing the query.
:param records: Records to be inserted.
:type records: list of |dict|/|namedtuple|/|list|/|tuple|
:return: Number of inserted records.
:rtype: int
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-insert-records`
"""
self.validate_access_permission(["w", "a"])
self.verify_table_existence(table_name, allow_view=False)
if attr_names:
logger.debug(
"insert {number} records into {table}({attrs})".format(
number=len(records) if records else 0, table=table_name, attrs=attr_names
)
)
else:
logger.debug(
"insert {number} records into {table}".format(
number=len(records) if records else 0, table=table_name
)
)
if typepy.is_empty_sequence(records):
return 0
if attr_names is None:
attr_names = self.fetch_attr_names(table_name)
records = RecordConvertor.to_records(attr_names, records)
query = Insert(table_name, AttrList(attr_names)).to_query()
if self.debug_query or self.global_debug_query:
logging_count = 8
num_records = len(records)
logs = [query] + [
f" record {i:4d}: {record}" for i, record in enumerate(records[:logging_count])
]
if num_records - logging_count > 0:
logs.append(f" and other {num_records - logging_count} records will be inserted")
logger.debug("\n".join(logs))
assert self.connection # to avoid type check error
try:
self.connection.executemany(query, records)
except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
caller = logging.getLogger().findCaller()
file_path, line_no, func_name = caller[:3]
raise OperationalError(
f"{file_path:s}({line_no:d}) {func_name:s}: failed to execute query:\n"
+ f" query={query}\n"
+ f" msg='{e}'\n"
+ f" db={self.database_path}\n"
+ f" records={records[:2]}\n"
)
return len(records)
def update(
self, table_name: str, set_query: Optional[str], where: Optional[WhereQuery] = None
) -> Optional[Cursor]:
"""Execute an UPDATE query.
Args:
table_name (|str|):
Table name of executing the query.
set_query (|str|):
``SET`` clause for the update query.
where (|arg_where_type| , optional):
``WHERE`` clause for the update query.
Defaults to |None|.
Raises:
IOError:
|raises_write_permission|
simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
simplesqlite.OperationalError:
|raises_operational_error|
"""
self.validate_access_permission(["w", "a"])
self.verify_table_existence(table_name, allow_view=False)
query = SqlQuery.make_update(table_name, set_query, where)
return self.execute_query(query, logging.getLogger().findCaller())
def delete(self, table_name: str, where: Optional[WhereQuery] = None) -> Optional[Cursor]:
"""
Send a DELETE query to the database.
:param str table_name: Table name to execute the query on.
:param where: |arg_select_where|
:type where: |arg_where_type|
"""
self.validate_access_permission(["w", "a"])
self.verify_table_existence(table_name, allow_view=False)
query = f"DELETE FROM {table_name:s}"
if where:
query += f" WHERE {where:s}"
return self.execute_query(query, logging.getLogger().findCaller())
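    # Hedged usage sketch (comments only): UPDATE/DELETE via the helpers
    # above, using plain-string SET/WHERE clauses; the table and attribute
    # names are illustrative assumptions:
    #
    #   con.update("hoge", set_query="attr_a = 100", where="attr_b = 'a'")
    #   con.delete("hoge", where="attr_a = 100")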
def fetch_value(
self,
select: str,
table_name: str,
where: Optional[WhereQuery] = None,
extra: Optional[str] = None,
) -> Optional[int]:
"""
Fetch a value from the table. Return |None| if no value matches
the conditions, or the table is not found in the database.
:param str select: Attribute for the ``SELECT`` query.
:param str table_name: Table name to execute the query on.
:param where: |arg_select_where|
:type where: |arg_where_type|
:return: Result of execution of the query.
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
"""
try:
self.verify_table_existence(table_name)
except DatabaseError as e:
logger.debug(e)
return None
result = self.execute_query(
Select(select, table_name, where, extra), logging.getLogger().findCaller()
)
if result is None:
return None
fetch = result.fetchone()
if fetch is None:
return None
return fetch[0]
def fetch_values(self, select, table_name, where=None, extra=None) -> List:
result = self.select(select=select, table_name=table_name, where=where, extra=extra)
if result is None:
return []
return [record[0] for record in result.fetchall()]
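    # Hedged usage sketch (comments only): single-value and single-column
    # fetches; "hoge" and its attributes are illustrative assumptions:
    #
    #   max_a = con.fetch_value(select="MAX(attr_a)", table_name="hoge")
    #   all_b = con.fetch_values(select="attr_b", table_name="hoge")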
def fetch_table_names(
self, include_system_table: bool = False, include_view: bool = True
) -> List[str]:
"""
:return: List of table names in the database.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_table_names())
:Output:
.. code-block:: python
['hoge']
"""
self.check_connection()
return self.schema_extractor.fetch_table_names(
include_system_table=include_system_table, include_view=include_view
)
def fetch_view_names(self) -> List[str]:
"""
:return: List of view names in the database.
:rtype: list
"""
self.check_connection()
return self.schema_extractor.fetch_view_names()
def fetch_attr_names(self, table_name: str) -> List[str]:
"""
:return: List of attribute names in the table.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_attr_names(table_name))
try:
print(con.fetch_attr_names("not_existing"))
except simplesqlite.TableNotFoundError as e:
print(e)
:Output:
.. parsed-literal::
['attr_a', 'attr_b']
'not_existing' table not found in /tmp/sample.sqlite
"""
self.verify_table_existence(table_name)
return self.schema_extractor.fetch_table_schema(table_name).get_attr_names()
def fetch_attr_type(self, table_name: str) -> Dict[str, str]:
"""
:return:
Dictionary of attribute names and attribute types in the table.
:rtype: dict
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
"""
self.verify_table_existence(table_name, allow_view=False)
result = self.execute_query(
"SELECT sql FROM sqlite_master WHERE type='table' and name={:s}".format(
Value(table_name)
)
)
assert result # to avoid type check error
query = result.fetchone()[0]
match = re.search("[(].*[)]", query)
assert match # to avoid type check error
def get_entry(items):
key = " ".join(items[:-1])
value = items[-1]
return [key, value]
return dict([get_entry(item.split(" ")) for item in match.group().strip("()").split(", ")])
def fetch_num_records(
self, table_name: str, where: Optional[WhereQuery] = None
) -> Optional[int]:
"""
Fetch the number of records in a table.
:param str table_name: Table name to get number of records.
:param where: |arg_select_where|
:type where: |arg_where_type|
:return:
Number of records in the table.
|None| if no value matches the conditions,
or the table is not found in the database.
:rtype: int
"""
return self.fetch_value(select="COUNT(*)", table_name=table_name, where=where)
def fetch_data_types(self, table_name: str) -> Dict[str, TypeHint]:
_, _, type_hints = extract_table_metadata(self, table_name)
return type_hints
def get_profile(self, profile_count: int = 50) -> List[Any]:
"""
Get profile of query execution time.
:param int profile_count:
Number of profiles to retrieve,
counted from the top query in descending order by
the cumulative execution time.
:return: Profile information for each query.
:rtype: list of |namedtuple|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-get-profile`
"""
from collections import namedtuple
profile_table_name = "sql_profile"
value_matrix = [
[query, execute_time, self.__dict_query_count.get(query, 0)]
for query, execute_time in self.__dict_query_totalexectime.items()
]
attr_names = ("sql_query", "cumulative_time", "count")
con_tmp = connect_memdb(max_workers=self.__max_workers)
try:
con_tmp.create_table_from_data_matrix(
profile_table_name, attr_names, data_matrix=value_matrix
)
except ValueError:
return []
try:
result = con_tmp.select(
select="{:s},SUM({:s}),SUM({:s})".format(*attr_names),
table_name=profile_table_name,
extra="GROUP BY {:s} ORDER BY {:s} DESC LIMIT {:d}".format(
attr_names[0], attr_names[1], profile_count
),
)
except sqlite3.OperationalError:
return []
if result is None:
return []
SqliteProfile = namedtuple("SqliteProfile", " ".join(attr_names)) # type: ignore
return [SqliteProfile(*profile) for profile in result.fetchall()]
def fetch_sqlite_master(self) -> List[Dict]:
"""
Get sqlite_master table information as a list of dictionaries.
:return: sqlite_master table information.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:Sample Code:
.. code:: python
import json
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
data_matrix = [
[1, 1.1, "aaa", 1, 1],
[2, 2.2, "bbb", 2.2, 2.2],
[3, 3.3, "ccc", 3, "ccc"],
]
con.create_table_from_data_matrix(
"sample_table",
["a", "b", "c", "d", "e"],
data_matrix,
index_attrs=["a"])
print(json.dumps(con.fetch_sqlite_master(), indent=4))
:Output:
.. code-block:: json
[
{
"tbl_name": "sample_table",
"sql": "CREATE TABLE 'sample_table' ('a' INTEGER, 'b' REAL, 'c' TEXT, 'd' REAL, 'e' TEXT)",
"type": "table",
"name": "sample_table",
"rootpage": 2
},
{
"tbl_name": "sample_table",
"sql": "CREATE INDEX sample_table_a_index ON sample_table('a')",
"type": "index",
"name": "sample_table_a_index",
"rootpage": 3
}
]
"""
self.check_connection()
return self.schema_extractor.fetch_sqlite_master()
def has_table(self, table_name: str, include_view: bool = True) -> bool:
"""
:param str table_name: Table name to be tested.
:return: |True| if the database has the table.
:rtype: bool
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.has_table("hoge"))
print(con.has_table("not_existing"))
:Output:
.. code-block:: python
True
False
"""
try:
validate_table_name(table_name)
except NameValidationError:
return False
return table_name in self.fetch_table_names(include_view=include_view)
def has_view(self, view_name: str) -> bool:
"""
:param str view_name: View name to be tested.
:return: |True| if the database has the view.
:rtype: bool
"""
try:
validate_table_name(view_name)
except NameValidationError:
return False
return view_name in self.fetch_view_names()
def has_attr(self, table_name: str, attr_name: Optional[str]) -> bool:
"""
:param str table_name: Table name where the attribute exists.
:param str attr_name: Attribute name to be tested.
:return: |True| if the table has the attribute.
:rtype: bool
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:Sample Code:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.has_attr(table_name, "attr_a"))
print(con.has_attr(table_name, "not_existing"))
try:
print(con.has_attr("not_existing", "attr_a"))
except simplesqlite.DatabaseError as e:
print(e)
:Output:
.. parsed-literal::
True
False
'not_existing' table not found in /tmp/sample.sqlite
"""
self.verify_table_existence(table_name, allow_view=False)
if typepy.is_null_string(attr_name):
return False
return attr_name in self.fetch_attr_names(table_name)
def has_attrs(self, table_name: str, attr_names: Sequence[str]) -> bool:
"""
:param str table_name: Table name where the attributes exist.
:param attr_names: Attribute names to be tested.
:return: |True| if the table has all of the attributes.
:rtype: bool
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:Sample Code:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.has_attrs(table_name, ["attr_a"]))
print(con.has_attrs(table_name, ["attr_a", "attr_b"]))
print(con.has_attrs(table_name, ["attr_a", "attr_b", "not_existing"]))
try:
print(con.has_attr("not_existing", ["attr_a"]))
except simplesqlite.DatabaseError as e:
print(e)
:Output:
.. parsed-literal::
True
True
False
'not_existing' table not found in /tmp/sample.sqlite
"""
if typepy.is_empty_sequence(attr_names):
return False
not_exist_fields = [
attr_name for attr_name in attr_names if not self.has_attr(table_name, attr_name)
]
if not_exist_fields:
return False
return True
def verify_table_existence(self, table_name: str, allow_view: bool = True) -> None:
"""
:param str table_name: Table name to be tested.
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
:Sample Code:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
con.verify_table_existence(table_name)
try:
con.verify_table_existence("not_existing")
except simplesqlite.DatabaseError as e:
print(e)
:Output:
.. parsed-literal::
'not_existing' table not found in /tmp/sample.sqlite
"""
validate_table_name(table_name)
if self.has_table(table_name, include_view=allow_view):
return
raise TableNotFoundError(f"'{table_name}' not found in '{self.database_path}' database")
def verify_attr_existence(self, table_name: str, attr_name: str) -> None:
"""
:param str table_name: Table name where the attribute exists.
:param str attr_name: Attribute name to be tested.
:raises simplesqlite.AttributeNotFoundError:
If attribute not found in the table
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:Sample Code:
.. code:: python
from simplesqlite import (
SimpleSQLite,
DatabaseError,
AttributeNotFoundError
)
table_name = "sample_table"
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
con.verify_attr_existence(table_name, "attr_a")
try:
con.verify_attr_existence(table_name, "not_existing")
except AttributeNotFoundError as e:
print(e)
try:
con.verify_attr_existence("not_existing", "attr_a")
except DatabaseError as e:
print(e)
:Output:
.. parsed-literal::
'not_existing' attribute not found in 'sample_table' table
'not_existing' table not found in /tmp/sample.sqlite
"""
self.verify_table_existence(table_name, allow_view=False)
if self.has_attr(table_name, attr_name):
return
raise AttributeNotFoundError(f"'{attr_name}' attribute not found in '{table_name}' table")
def validate_access_permission(self, valid_permissions: Sequence[str]) -> None:
"""
:param valid_permissions:
List of permissions for which access is allowed.
:type valid_permissions: |list|/|tuple|
:raises ValueError: If the |attr_mode| is invalid.
:raises IOError:
If the |attr_mode| is not in the ``valid_permissions``.
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
"""
self.check_connection()
if typepy.is_null_string(self.mode):
raise ValueError("mode is not set")
if self.mode not in valid_permissions:
raise OSError(
"invalid access: expected-mode='{}', current-mode='{}'".format(
"' or '".join(valid_permissions), self.mode
)
)
def drop_table(self, table_name: str) -> None:
"""
:param str table_name: Table name to drop.
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises IOError: |raises_write_permission|
"""
self.validate_access_permission(["w", "a"])
if table_name in SQLITE_SYSTEM_TABLES:
            # do not drop SQLite system tables (a warning could be logged here)
return
if self.has_table(table_name, include_view=False):
query = f"DROP TABLE IF EXISTS '{table_name:s}'"
self.execute_query(query, logging.getLogger().findCaller())
elif self.has_view(table_name):
self.execute_query(f"DROP VIEW IF EXISTS {table_name}")
self.commit()
def create_table(self, table_name: str, attr_descriptions: Sequence[str]) -> bool:
"""
:param str table_name: Table name to create.
:param list attr_descriptions: List of attribute descriptions (column definitions).
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises IOError: |raises_write_permission|
"""
self.validate_access_permission(["w", "a"])
table_name = table_name.strip()
if self.has_table(table_name):
return True
query = "CREATE TABLE IF NOT EXISTS '{:s}' ({:s})".format(
table_name, ", ".join(attr_descriptions)
)
logger.debug(query)
if self.execute_query(query, logging.getLogger().findCaller()) is None:
return False
return True
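    # Hedged usage sketch (comments only): attr_descriptions are raw SQLite
    # column definitions; the table and column names are illustrative:
    #
    #   con.create_table(
    #       "hoge",
    #       ["'attr_a' INTEGER PRIMARY KEY", "'attr_b' TEXT NOT NULL"],
    #   )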
def create_index(self, table_name: str, attr_name: str) -> None:
"""
:param str table_name:
Table name that contains the attribute to be indexed.
:param str attr_name: Attribute name to create index.
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
"""
self.verify_table_existence(table_name, allow_view=False)
self.validate_access_permission(["w", "a"])
query_format = "CREATE INDEX IF NOT EXISTS {index:s} ON {table}({attr})"
query = query_format.format(
index=make_index_name(table_name, attr_name),
table=Table(table_name),
attr=Attr(attr_name),
)
logger.debug(query)
self.execute_query(query, logging.getLogger().findCaller())
def create_index_list(self, table_name: str, attr_names: Sequence[str]) -> None:
"""
:param str table_name: Table name that contains the attributes.
:param list attr_names:
List of attribute names to create indices for.
Attributes that do not exist in the table are ignored.
.. seealso:: :py:meth:`.create_index`
"""
self.validate_access_permission(["w", "a"])
if typepy.is_empty_sequence(attr_names):
return
table_attr_set = set(self.fetch_attr_names(table_name))
index_attr_set = set(AttrList.sanitize(attr_names)) # type: ignore
for attribute in list(table_attr_set.intersection(index_attr_set)):
self.create_index(table_name, attribute)
def create_table_from_data_matrix(
self,
table_name: str,
attr_names: Sequence[str],
data_matrix: Any,
primary_key: Optional[str] = None,
add_primary_key_column: bool = False,
index_attrs: Optional[Sequence[str]] = None,
type_hints: Optional[Sequence[TypeHint]] = None,
) -> None:
"""
Create a table if it does not exist, and insert data into the
created table.
:param str table_name: Table name to create.
:param list attr_names: Attribute names of the table.
:param data_matrix: Data to be inserted into the table.
:type data_matrix: List of |dict|/|namedtuple|/|list|/|tuple|
:param str primary_key: |primary_key|
:param tuple index_attrs: |index_attrs|
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
:raises simplesqlite.NameValidationError:
|raises_validate_attr_name|
:raises ValueError: If the ``data_matrix`` is empty.
:Example:
:ref:`example-create-table-from-data-matrix`
.. seealso::
:py:meth:`.create_table`
:py:meth:`.insert_many`
:py:meth:`.create_index_list`
"""
self.__create_table_from_tabledata(
TableData(
table_name,
headers=attr_names,
rows=data_matrix,
type_hints=type_hints,
max_workers=self.__max_workers,
),
primary_key,
add_primary_key_column,
index_attrs,
)
def create_table_from_tabledata(
self,
table_data: TableData,
primary_key: Optional[str] = None,
add_primary_key_column: bool = False,
index_attrs: Optional[Sequence[str]] = None,
) -> None:
"""
Create a table from :py:class:`tabledata.TableData`.
:param tabledata.TableData table_data: Table data to create.
:param str primary_key: |primary_key|
:param tuple index_attrs: |index_attrs|
.. seealso::
:py:meth:`.create_table_from_data_matrix`
"""
self.__create_table_from_tabledata(
table_data, primary_key, add_primary_key_column, index_attrs
)
def create_table_from_csv(
self,
csv_source: str,
table_name: str = "",
attr_names: Sequence[str] = (),
delimiter: str = ",",
quotechar: str = '"',
encoding: str = "utf-8",
primary_key: Optional[str] = None,
add_primary_key_column: bool = False,
index_attrs: Optional[Sequence[str]] = None,
) -> None:
"""
Create a table from a CSV file/text.
:param str csv_source: Path to the CSV file or CSV text.
:param str table_name:
Table name to create.
The CSV file basename is used as the table name if the value is empty.
:param list attr_names:
Attribute names of the table.
Use the first line of the CSV file as attributes if ``attr_names`` is empty.
:param str delimiter:
A one-character string used to separate fields.
:param str quotechar:
A one-character string used to quote fields containing special
characters, such as the ``delimiter`` or ``quotechar``,
or which contain new-line characters.
:param str encoding: CSV file encoding.
:param str primary_key: |primary_key|
:param tuple index_attrs: |index_attrs|
:raises ValueError: If the CSV data is invalid.
:Dependency Packages:
- `pytablereader <https://github.com/thombashi/pytablereader>`__
:Example:
:ref:`example-create-table-from-csv`
.. seealso::
:py:meth:`.create_table_from_data_matrix`
:py:func:`csv.reader`
:py:meth:`.pytablereader.CsvTableFileLoader.load`
:py:meth:`.pytablereader.CsvTableTextLoader.load`
"""
import pytablereader as ptr
loader = ptr.CsvTableFileLoader(csv_source)
if typepy.is_not_null_string(table_name):
loader.table_name = table_name
loader.headers = attr_names
loader.delimiter = delimiter
loader.quotechar = quotechar
loader.encoding = encoding
try:
for table_data in loader.load():
self.__create_table_from_tabledata(
table_data, primary_key, add_primary_key_column, index_attrs
)
return
except (ptr.InvalidFilePathError, OSError):
pass
loader = ptr.CsvTableTextLoader(csv_source)
if typepy.is_not_null_string(table_name):
loader.table_name = table_name
loader.headers = attr_names
loader.delimiter = delimiter
loader.quotechar = quotechar
loader.encoding = encoding
for table_data in loader.load():
self.__create_table_from_tabledata(
table_data, primary_key, add_primary_key_column, index_attrs
)
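    # Hedged usage sketch (comments only): csv_source may be a file path or
    # raw CSV text; the path below is an illustrative assumption:
    #
    #   con.create_table_from_csv("sample_data.csv", table_name="sample")
    #   con.create_table_from_csv("a,b\n1,x\n2,y", table_name="inline_csv")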
def create_table_from_json(
self,
json_source: str,
table_name: str = "",
primary_key: Optional[str] = None,
add_primary_key_column: bool = False,
index_attrs: Optional[Sequence[str]] = None,
) -> None:
"""
Create a table from a JSON file/text.
:param str json_source: Path to the JSON file or JSON text.
:param str table_name: Table name to create.
:param str primary_key: |primary_key|
:param tuple index_attrs: |index_attrs|
:Dependency Packages:
- `pytablereader <https://github.com/thombashi/pytablereader>`__
:Examples:
:ref:`example-create-table-from-json`
.. seealso::
:py:meth:`.pytablereader.JsonTableFileLoader.load`
:py:meth:`.pytablereader.JsonTableTextLoader.load`
"""
import pytablereader as ptr
loader = ptr.JsonTableFileLoader(json_source)
if typepy.is_not_null_string(table_name):
loader.table_name = table_name
try:
for table_data in loader.load():
self.__create_table_from_tabledata(
table_data, primary_key, add_primary_key_column, index_attrs
)
return
except (ptr.InvalidFilePathError, OSError):
pass
loader = ptr.JsonTableTextLoader(json_source)
if typepy.is_not_null_string(table_name):
loader.table_name = table_name
for table_data in loader.load():
self.__create_table_from_tabledata(
table_data, primary_key, add_primary_key_column, index_attrs
)
def create_table_from_dataframe(
self,
dataframe,
table_name: str = "",
primary_key: Optional[str] = None,
add_primary_key_column: bool = False,
index_attrs: Optional[Sequence[str]] = None,
) -> None:
"""
Create a table from a pandas.DataFrame instance.
:param pandas.DataFrame dataframe: DataFrame instance to convert.
:param str table_name: Table name to create.
:param str primary_key: |primary_key|
:param tuple index_attrs: |index_attrs|
:Examples:
:ref:`example-create-table-from-df`
"""
self.__create_table_from_tabledata(
TableData.from_dataframe(
dataframe=dataframe,
table_name=table_name,
type_hints=[extract_typepy_from_dtype(dtype) for dtype in dataframe.dtypes],
),
primary_key,
add_primary_key_column,
index_attrs,
)
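    # Hedged usage sketch (comments only), assuming pandas is available;
    # the DataFrame contents are illustrative:
    #
    #   import pandas as pd
    #   df = pd.DataFrame({"attr_a": [1, 2], "attr_b": ["a", "b"]})
    #   con.create_table_from_dataframe(df, table_name="from_df")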
def dump(self, db_path: str, mode: str = "a") -> None:
with SimpleSQLite(db_path, mode=mode, max_workers=self.__max_workers) as dst_con:
for table_name in self.fetch_table_names(include_view=False):
copy_table(self, dst_con, src_table_name=table_name, dst_table_name=table_name)
def rollback(self) -> None:
"""
.. seealso:: :py:meth:`sqlite3.Connection.rollback`
"""
try:
self.check_connection()
except NullDatabaseConnectionError:
return
logger.debug(f"rollback: path='{self.database_path}'")
assert self.connection # to avoid type check error
self.connection.rollback()
def commit(self) -> None:
"""
.. seealso:: :py:meth:`sqlite3.Connection.commit`
"""
try:
self.check_connection()
except NullDatabaseConnectionError:
return
logger.debug(f"commit: path='{self.database_path}'")
assert self.connection # to avoid type check error
try:
self.connection.commit()
except sqlite3.ProgrammingError:
pass
def close(self) -> None:
"""
Commit and close the connection.
.. seealso:: :py:meth:`sqlite3.Connection.close`
"""
if self.__delayed_connection_path and self.__connection is None:
self.__initialize_connection()
return
try:
self.check_connection()
except (SystemError, NullDatabaseConnectionError):
return
logger.debug(f"close connection to a SQLite database: path='{self.database_path}'")
self.commit()
assert self.connection # to avoid type check error
self.connection.close()
self.__initialize_connection()
@staticmethod
def __validate_db_path(database_path: str) -> None:
if typepy.is_null_string(database_path):
raise ValueError("null path")
if database_path == MEMORY_DB_NAME:
return
try:
pathvalidate.validate_filename(os.path.basename(database_path))
except AttributeError:
raise TypeError(f"database path must be a string: actual={type(database_path)}")
def __verify_db_file_existence(self, database_path: str) -> None:
"""
:raises OSError: If the database file is not found.
:raises simplesqlite.OperationalError: If unable to open the database file.
"""
self.__validate_db_path(database_path)
if not os.path.isfile(os.path.realpath(database_path)):
raise OSError("file not found: " + database_path)
try:
connection = sqlite3.connect(database_path)
except sqlite3.OperationalError as e:
raise OperationalError(e)
connection.close()
def __delayed_connect(self) -> bool:
if self.__delayed_connection_path is None:
return False
# save and clear delayed_connection_path to avoid infinite recursion before
# calling the connect method
connection_path = self.__delayed_connection_path
self.__delayed_connection_path = None
self.connect(connection_path, cast(str, self.__mode))
return True
def __extract_attr_descs_from_tabledata(self, table_data, primary_key, add_primary_key_column):
if primary_key and not add_primary_key_column and primary_key not in table_data.headers:
raise ValueError("primary key must be one of the values of attributes")
attr_description_list = []
if add_primary_key_column:
if not primary_key:
primary_key = "id"
if primary_key in table_data.headers:
raise ValueError(
"a primary key field that will be added should not conflict "
"with existing fields."
)
attr_description_list.append(f"{primary_key} INTEGER PRIMARY KEY AUTOINCREMENT")
for col, value_type in sorted(self.__extract_col_type_from_tabledata(table_data).items()):
attr_name = table_data.headers[col]
attr_description = f"{Attr(attr_name)} {value_type:s}"
if attr_name == primary_key:
attr_description += " PRIMARY KEY"
attr_description_list.append(attr_description)
return attr_description_list
@staticmethod
def __extract_col_type_from_tabledata(table_data: TableData) -> Dict:
"""
Extract the SQLite data type name for each column.
:param tabledata.TableData table_data:
:return: { column_number : column_data_type }
:rtype: dictionary
"""
typename_table = {
typepy.Typecode.INTEGER: "INTEGER",
typepy.Typecode.REAL_NUMBER: "REAL",
typepy.Typecode.STRING: "TEXT",
}
return {
col_idx: typename_table.get(col_dp.typecode, "TEXT")
for col_idx, col_dp in enumerate(table_data.column_dp_list)
}
def __create_table_from_tabledata(
self,
table_data: TableData,
primary_key: Optional[str],
add_primary_key_column: bool,
index_attrs: Optional[Sequence[str]],
):
self.validate_access_permission(["w", "a"])
debug_msg_list = ["__create_table_from_tabledata:", f" tbldata={table_data}"]
if primary_key:
debug_msg_list.append(f" primary_key={primary_key}")
if add_primary_key_column:
debug_msg_list.append(f" add_primary_key_column={add_primary_key_column}")
if index_attrs:
debug_msg_list.append(f" index_attrs={index_attrs}")
logger.debug("\n".join(debug_msg_list))
if table_data.is_empty():
raise ValueError(f"input table_data is empty: {table_data}")
table_data = SQLiteTableDataSanitizer(
table_data, dup_col_handler=self.dup_col_handler, max_workers=self.__max_workers
).normalize()
table_name = table_data.table_name
assert table_name
self.create_table(
table_name,
self.__extract_attr_descs_from_tabledata(
table_data, primary_key, add_primary_key_column
),
)
if add_primary_key_column:
self.insert_many(table_name, [[None] + row for row in table_data.value_matrix])
else:
self.insert_many(table_name, table_data.value_matrix)
if typepy.is_not_empty_sequence(index_attrs):
self.create_index_list(table_name, AttrList.sanitize(index_attrs)) # type: ignore
self.commit()
def connect_memdb(max_workers: Optional[int] = None) -> SimpleSQLite:
"""
:return: Instance of an in-memory database.
:rtype: SimpleSQLite
:Example:
:ref:`example-connect-sqlite-db-mem`
"""
return SimpleSQLite(MEMORY_DB_NAME, "w", max_workers=max_workers)
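if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, not part of the library):
    # exercises a few of the public methods defined above against an
    # in-memory database; table and attribute names are assumptions.
    con = connect_memdb()
    con.create_table_from_data_matrix(
        "sample_table", ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]
    )
    con.insert("sample_table", record={"attr_a": 3, "attr_b": "c"})
    print(con.fetch_table_names())                # e.g. ['sample_table']
    print(con.fetch_attr_names("sample_table"))   # e.g. ['attr_a', 'attr_b']
    print(con.fetch_num_records("sample_table"))  # e.g. 3
    con.close()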
|
cloudtunes-server/cloudtunes/services/dropbox/models.py
|
skymemoryGit/cloudtunes
| 529 |
59085
|
<reponame>skymemoryGit/cloudtunes<gh_stars>100-1000
from mongoengine import StringField
from cloudtunes.base.models import EmbeddedDocument
from cloudtunes.services.models import ServiceAccount
class DropboxAccount(ServiceAccount):
country = StringField(max_length=2)
display_name = StringField()
oauth_token_key = StringField()
oauth_token_secret = StringField()
delta_cursor = StringField()
service_name = 'Dropbox'
def get_username(self):
return self.display_name
def get_picture(self):
return None
def get_url(self):
return None
class DropboxTrack(EmbeddedDocument):
path = StringField(required=True)
|
fhir/resources/tests/test_bodystructure.py
|
cstoltze/fhir.resources
| 144 |
59104
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/BodyStructure
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import bodystructure
def impl_bodystructure_1(inst):
assert inst.description == "EDD 1/1/2017 confirmation by LMP"
assert inst.id == "fetus"
assert (
inst.identifier[0].system == "http://goodhealth.org/bodystructure/identifiers"
)
assert inst.identifier[0].value == "12345"
assert inst.location.coding[0].code == "83418008"
assert inst.location.coding[0].display == "Entire fetus (body structure)"
assert inst.location.coding[0].system == "http://snomed.info/sct"
assert inst.location.text == "Fetus"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/example"
assert inst.text.status == "generated"
def test_bodystructure_1(base_settings):
"""No. 1 tests collection for BodyStructure.
Test File: bodystructure-example-fetus.json
"""
filename = base_settings["unittest_data_dir"] / "bodystructure-example-fetus.json"
inst = bodystructure.BodyStructure.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "BodyStructure" == inst.resource_type
impl_bodystructure_1(inst)
    # round-trip test: generate data from the instance and create it again.
data = inst.dict()
assert "BodyStructure" == data["resourceType"]
inst2 = bodystructure.BodyStructure(**data)
impl_bodystructure_1(inst2)
def impl_bodystructure_2(inst):
assert inst.description == "7 cm maximum diameter"
assert inst.id == "tumor"
assert (
inst.identifier[0].system == "http://goodhealth.org/bodystructure/identifiers"
)
assert inst.identifier[0].value == "12345"
assert inst.image[0].contentType == "application/dicom"
assert inst.image[0].url == (
"http://imaging.acme.com/wado/server?requestType=WADO&wad" "o_details"
)
assert inst.location.coding[0].code == "78961009"
assert inst.location.coding[0].display == "Splenic structure (body structure)"
assert inst.location.coding[0].system == "http://snomed.info/sct"
assert inst.location.text == "Spleen"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.morphology.coding[0].code == "4147007"
assert inst.morphology.coding[0].display == "Mass (morphologic abnormality)"
assert inst.morphology.coding[0].system == "http://snomed.info/sct"
assert inst.morphology.text == "Splenic mass"
assert inst.patient.reference == "Patient/example"
assert inst.text.status == "generated"
def test_bodystructure_2(base_settings):
"""No. 2 tests collection for BodyStructure.
Test File: bodystructure-example-tumor.json
"""
filename = base_settings["unittest_data_dir"] / "bodystructure-example-tumor.json"
inst = bodystructure.BodyStructure.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "BodyStructure" == inst.resource_type
impl_bodystructure_2(inst)
    # round-trip test: generate data from the instance and create it again.
data = inst.dict()
assert "BodyStructure" == data["resourceType"]
inst2 = bodystructure.BodyStructure(**data)
impl_bodystructure_2(inst2)
def impl_bodystructure_3(inst):
assert inst.active is False
assert inst.description == "inner surface (volar) of the left forearm"
assert inst.id == "skin-patch"
assert (
inst.identifier[0].system == "http://goodhealth.org/bodystructure/identifiers"
)
assert inst.identifier[0].value == "12345"
assert inst.location.coding[0].code == "14975008"
assert inst.location.coding[0].display == "Forearm"
assert inst.location.coding[0].system == "http://snomed.info/sct"
assert inst.location.text == "Forearm"
assert inst.locationQualifier[0].coding[0].code == "419161000"
assert inst.locationQualifier[0].coding[0].display == "Unilateral left"
assert inst.locationQualifier[0].coding[0].system == "http://snomed.info/sct"
assert inst.locationQualifier[0].text == "Left"
assert inst.locationQualifier[1].coding[0].code == "263929005"
assert inst.locationQualifier[1].coding[0].display == "Volar"
assert inst.locationQualifier[1].coding[0].system == "http://snomed.info/sct"
assert inst.locationQualifier[1].text == "Volar"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.morphology.text == "Skin patch"
assert inst.patient.reference == "Patient/example"
assert inst.text.status == "generated"
def test_bodystructure_3(base_settings):
"""No. 3 tests collection for BodyStructure.
Test File: bodystructure-example-skin-patch.json
"""
filename = (
base_settings["unittest_data_dir"] / "bodystructure-example-skin-patch.json"
)
inst = bodystructure.BodyStructure.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "BodyStructure" == inst.resource_type
impl_bodystructure_3(inst)
    # round-trip test: generate data from the instance and create it again.
data = inst.dict()
assert "BodyStructure" == data["resourceType"]
inst2 = bodystructure.BodyStructure(**data)
impl_bodystructure_3(inst2)
|
spec/fixtures/semgrep/trivial.py
|
hstocks/salus
| 625 |
59130
|
<filename>spec/fixtures/semgrep/trivial.py
# https://semgrep.live/DdG
if 3 == 3:
print('3!')
|
l10n_br_sale/models/delivery_carrier.py
|
kaoecoito/odoo-brasil
| 181 |
59148
|
<reponame>kaoecoito/odoo-brasil<filename>l10n_br_sale/models/delivery_carrier.py
from odoo import fields, models
class DeliveryCarrier(models.Model):
_inherit = 'delivery.carrier'
partner_id = fields.Many2one('res.partner', string='Transportadora')
|
crosshair/libimpl/randomlib_test.py
|
samuelchassot/CrossHair
| 785 |
59155
|
<reponame>samuelchassot/CrossHair<gh_stars>100-1000
import copy
import random
from crosshair.core_and_libs import proxy_for_type, standalone_statespace
from crosshair.libimpl.randomlib import ExplicitRandom
def test_ExplicitRandom():
rng = ExplicitRandom([1, 2])
assert rng.randrange(0, 10) == 1
assert rng.choice(["a", "b", "c"]) == "c"
assert rng.choice(["a", "b", "c"]) == "a"
assert rng.random() == 0.0
assert repr(rng) == "crosshair.libimpl.randomlib.ExplicitRandom([1, 2, 0, 0.0])"
def test_ExplicitRandom_copy():
rng = ExplicitRandom([1, 2])
(rng2,) = copy.deepcopy([rng])
assert rng.randint(0, 5) == 1
assert rng2.randint(0, 5) == 1
assert rng.randint(0, 5) == 2
assert rng2.randint(0, 5) == 2
def test_proxy_random():
with standalone_statespace as space:
rng = proxy_for_type(random.Random, "rng")
i = rng.randrange(5, 10)
assert space.is_possible(i.var == 5)
assert space.is_possible(i.var == 9)
assert not space.is_possible(i.var == 4)
|
openbook_posts/migrations/0031_postcomment_parent_comment.py
|
TamaraAbells/okuna-api
| 164 |
59160
|
<gh_stars>100-1000
# Generated by Django 2.2 on 2019-05-02 17:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('openbook_posts', '0030_post_is_closed'),
]
operations = [
migrations.AddField(
model_name='postcomment',
name='parent_comment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='replies', to='openbook_posts.PostComment'),
),
]
|
tests/test_verilog/test_2d_array.py
|
leonardt/magma
| 167 |
59180
|
import magma as m
import magma.testing
def test_2d_array_from_verilog():
main = m.define_from_verilog(f"""
module transpose_buffer (
input logic clk,
output logic [2:0] index_inner,
output logic [2:0] index_outer,
input logic [3:0] input_data [63:0],
input logic [2:0] range_inner,
input logic [2:0] range_outer,
input logic rst_n,
input logic [2:0] stride
);
always_ff @(posedge clk, negedge rst_n) begin
if (~rst_n) begin
index_outer <= 3'h0;
index_inner <= 3'h0;
end
else begin
if (index_outer == (range_outer - 3'h1)) begin
index_outer <= 3'h0;
end
else index_outer <= index_outer + 3'h1;
if (index_inner == (range_inner - 3'h1)) begin
index_inner <= 3'h0;
end
else index_inner <= index_inner + 3'h1;
end
end
endmodule // transpose_buffer
""")[0]
m.compile("build/2d_array_from_verilog", main, output="verilog")
assert m.testing.check_files_equal(__file__,
f"build/2d_array_from_verilog.v",
f"gold/2d_array_from_verilog.v")
|
14Django/day01/BookManager/instruction01.py
|
HaoZhang95/PythonAndMachineLearning
| 937 |
59187
|
# MVT design pattern: Model-View-Template
# The Model is responsible for talking to the database and fetching model data
# The View corresponds to the Controller in MVC and handles the network request / HTTP response
# The Template corresponds to the View in MVC and wraps HTML, CSS, JS, etc. via the built-in template engine
# Flow: the client sends a page request --> the View receives the request --> asks the Model to look up data in the database
# --> the fetched model data is returned to the View --> the View can return the raw, unstyled model data directly to the client
# --> or it asks the Template to dress up the data (add CSS, HTML, etc.) and dynamically generate an HTML file returned to the View
# --> the View returns the dynamically generated HTML to the client; the View in MVT acts as the middleman linking M and T
# When Django is installed it goes into a shared path by default, so different projects may need different Django versions;
# because the install location is shared, versions overwrite each other and other projects may hit incompatibility errors,
# which is why a virtual environment is set up when installing Django: one project, one environment
"""
django的配置,直接使用pycharm专业版,在设置中解释器中使用pip安装django
1- 安装成功后,整体的使用类似于angular的使用方法,关键字django-admin
2- cd到对应的目录下,django-admin startproject 项目名称
_init_.py --> 项目初始化文件,表示该项目可以被当作一个package引入
settings.py --> 项目的整体配置文件,比如在这里关联Book这个app
urls.py --> 项目的url配置文件
wsgi.py --> 项目和WSGI兼容的Web服务器入口
manage.py --> 项目运行的入口,指定配置文件路径,里面包含main函数
3- cd到项目名称下面才可以: python manage.py startapp 应用名称 (创建应用,类似于angular中的模块?)
init.py --> 应用初始化文件,表示该项目可以被当作一个package引入
admin.py --> 后台的站点管理注册文件
apps.py --> 当前app的基本信息
models.py --> 数据模型,里面存放各种bean
tests.py --> 单元测试
views.py --> 处理业务逻辑,MVT中的中间人
migrations --> 模型model迁移的,将model类制作成数据库中的表
4- 配置刚刚创建的app,在项目的settings.py中的installed_apps中添加当前app,进行组装
"""
"""
站点管理: 1- settings.py中设置语言和时区
2- python manage.py createsuperuser 创建管理员
3- 启动服务,到 http://127.0.0.1:8000/admin进行登陆
4- 在admin.py中注册自己的数据models用来在后台显示
"""
"""
ORM: object-relation-mapping 对象关系映射
优点:面向对象编程,不再是面向数据库写代码
实现了数据模型和数据库的解耦,不在关注用的是oracle,mysql还是其他数据库
缺点: object需要花费一点时间转换为sql语句,有性能损失(不过可忽略不计)
"""
|
unittest/scripts/py_devapi/validation/mysqlx_constants.py
|
mueller/mysql-shell
| 119 |
59191
|
<reponame>mueller/mysql-shell
#@ mysqlx type constants
|Type.BIT: <Type.BIT>|
|Type.TINYINT: <Type.TINYINT>|
|Type.SMALLINT: <Type.SMALLINT>|
|Type.MEDIUMINT: <Type.MEDIUMINT>|
|Type.INT: <Type.INT>|
|Type.BIGINT: <Type.BIGINT>|
|Type.FLOAT: <Type.FLOAT>|
|Type.DECIMAL: <Type.DECIMAL>|
|Type.DOUBLE: <Type.DOUBLE>|
|Type.JSON: <Type.JSON>|
|Type.STRING: <Type.STRING>|
|Type.BYTES: <Type.BYTES>|
|Type.TIME: <Type.TIME>|
|Type.DATE: <Type.DATE>|
|Type.DATETIME: <Type.DATETIME>|
|Type.SET: <Type.SET>|
|Type.ENUM: <Type.ENUM>|
|Type.GEOMETRY: <Type.GEOMETRY>|
|Type.SET: <Type.SET>|
|
ch26-Hough圆环变换/HoughCircles_camera.py
|
makelove/OpenCV-Python-Tutorial
| 2,875 |
59240
|
# -*- coding: utf-8 -*-
# @Time : 2017/7/27 13:47
# @Author : play4fun
# @File : HoughCircles_camera.py
# @Software: PyCharm
"""
HoughCircles_camera.py:
Tested with Go (weiqi) stones
"""
import cv2
import numpy as np
from skimage.measure import compare_mse as mse
import string, random
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
cap = cv2.VideoCapture(0)
# ret = cap.set(3, 640)
# ret = cap.set(4, 480)
# margin = 60
margin = 30
def draw_line_rectangle(frame, margin):
rows, cols, ch = frame.shape # (720, 1280, 3)
half = int(cols / 2)
    # center line
cv2.line(frame, (half, 0), (half, rows), (0, 0, 255), 2)
# margin = 40
    # left rectangle
    up_left1 = (margin, margin)  # top-left point
    down_right1 = (cols - margin, rows - margin)  # bottom-right point
# print(up_left, down_right)
cv2.rectangle(frame, up_left1, down_right1, (0, 255, 0), 3)
ret, temp = cap.read()
tm = 0
while cap.isOpened():
key = cv2.waitKey(1)
if key == ord("q"):
break
if key == ord('s'):
cv2.imwrite(id_generator() + '.jpg', frame2)
# Capture frame-by-frame
ret, frame = cap.read()
m = mse(cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY), cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
print('mse', m, '----\n')
    if abs(m - tm) < 2:  # static frame; no need to recompute
continue
else:
temp = frame.copy()
tm = m
#
# print(margin,frame.shape[0] - margin, margin,frame.shape[1] - margin)#40 680 40 1240
frame2 = frame[margin:frame.shape[0] - margin, margin:frame.shape[1] - margin] # .copy()
# cv2.imshow('frame2', frame2)
gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
# edges = cv2.Canny(gray, 50, 150, apertureSize=3)
# HoughCircles(image, method, dp, minDist, circles=None, param1=None, param2=None, minRadius=None, maxRadius=None)
# circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=100, param2=30, minRadius=10, maxRadius=40)
    # circles = circles1[0, :, :]  # extract as a 2D array
# circles = np.uint16(np.around(circles1))
print(circles)
cimg = frame2
if circles is not None:
for i in circles[0, :]:
# for i in circles[:]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
# draw the center of the circle
cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
# cv2.imshow('detected circles', cimg)
draw_line_rectangle(frame, margin)
cv2.imshow("houghlines", frame)
# cv2.imwrite('frame3.jpg', frame[margin:frame.shape[0] - margin, margin:frame.shape[1] - margin])
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
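# Note (added, hedged): in the cv2.HoughCircles call above, param1 is the upper
# Canny edge threshold and param2 is the accumulator threshold for circle
# centers; lowering param2 yields more (possibly false) circles, while
# minRadius/maxRadius bound the expected stone size in pixels for this setup.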
|
piqa/lpips.py
|
charltongroves/piqa
| 205 |
59256
|
<filename>piqa/lpips.py
r"""Learned Perceptual Image Patch Similarity (LPIPS)
This module implements the LPIPS in PyTorch.
Credits:
Inspired by [lpips-pytorch](https://github.com/S-aiueo32/lpips-pytorch)
References:
[1] The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
(Zhang et al., 2018)
https://arxiv.org/abs/1801.03924
"""
import inspect
import os
import torch
import torch.nn as nn
import torchvision.models as models
import torch.hub as hub
from piqa.utils import _jit, _assert_type, _reduce
from typing import Dict, List
_SHIFT = torch.tensor([0.485, 0.456, 0.406])
_SCALE = torch.tensor([0.229, 0.224, 0.225])
_WEIGHTS_URL = (
'https://github.com/richzhang/PerceptualSimilarity'
'/raw/master/lpips/weights/{}/{}.pth'
)
def get_weights(
network: str = 'alex',
version: str = 'v0.1',
) -> Dict[str, torch.Tensor]:
r"""Returns the official LPIPS weights for `network`.
Args:
network: Specifies the perception network that is used:
`'alex'` | `'squeeze'` | `'vgg'`.
version: Specifies the official version release:
`'v0.0'` | `'v0.1'`.
Example:
>>> w = get_weights(network='alex')
>>> w.keys()
dict_keys(['0.1.weight', '1.1.weight', '2.1.weight', '3.1.weight', '4.1.weight'])
"""
# Load from URL
weights = hub.load_state_dict_from_url(
_WEIGHTS_URL.format(version, network),
map_location='cpu',
)
# Format keys
weights = {
k.replace('lin', '').replace('.model', ''): v
for (k, v) in weights.items()
}
return weights
class Intermediary(nn.Module):
r"""Module that catches and returns the outputs of indermediate
target layers of a sequential module during its forward pass.
Args:
layers: A sequential module.
targets: A list of target layer indexes.
"""
def __init__(self, layers: nn.Sequential, targets: List[int]):
r""""""
super().__init__()
self.layers = nn.ModuleList()
j = 0
seq: List[nn.Module] = []
for i, layer in enumerate(layers):
seq.append(layer)
if i == targets[j]:
self.layers.append(nn.Sequential(*seq))
seq.clear()
j += 1
if j == len(targets):
break
def forward(self, input: torch.Tensor) -> List[torch.Tensor]:
r"""Defines the computation performed at every call.
"""
output = []
for layer in self.layers:
input = layer(input)
output.append(input)
return output
class LPIPS(nn.Module):
r"""Creates a criterion that measures the LPIPS
between an input \(x\) and a target \(y\).
$$ \text{LPIPS}(x, y) = \sum_{l \, \in \, \mathcal{F}}
w_l \cdot \text{MSE}(\hat{\phi}_l(x), \hat{\phi}_l(y)) $$
where \(\hat{\phi}_l\) represents the normalized output of an
intermediate layer \(l\) in a perception network \(\mathcal{F}\).
Args:
network: Specifies the perception network \(\mathcal{F}\) to use:
`'alex'` | `'squeeze'` | `'vgg'`.
scaling: Whether the input and target need to
be scaled w.r.t. ImageNet.
dropout: Whether dropout is used or not.
pretrained: Whether the official weights \(w_l\) are used or not.
eval: Whether to initialize the object in evaluation mode or not.
reduction: Specifies the reduction to apply to the output:
`'none'` | `'mean'` | `'sum'`.
Shapes:
* Input: \((N, 3, H, W)\)
* Target: \((N, 3, H, W)\)
* Output: \((N,)\) or \(()\) depending on `reduction`
Note:
`LPIPS` is a *trainable* metric.
Example:
>>> criterion = LPIPS().cuda()
>>> x = torch.rand(5, 3, 256, 256, requires_grad=True).cuda()
>>> y = torch.rand(5, 3, 256, 256).cuda()
>>> l = criterion(x, y)
>>> l.size()
torch.Size([])
>>> l.backward()
"""
def __init__(
self,
network: str = 'alex',
scaling: bool = True,
dropout: bool = False,
pretrained: bool = True,
eval: bool = True,
reduction: str = 'mean',
):
r""""""
super().__init__()
# ImageNet scaling
self.scaling = scaling
self.register_buffer('shift', _SHIFT.view(1, -1, 1, 1))
self.register_buffer('scale', _SCALE.view(1, -1, 1, 1))
# Perception layers
if network == 'alex': # AlexNet
layers = models.alexnet(pretrained=True).features
targets = [1, 4, 7, 9, 11]
channels = [64, 192, 384, 256, 256]
elif network == 'squeeze': # SqueezeNet
layers = models.squeezenet1_1(pretrained=True).features
targets = [1, 4, 7, 9, 10, 11, 12]
channels = [64, 128, 256, 384, 384, 512, 512]
elif network == 'vgg': # VGG16
layers = models.vgg16(pretrained=True).features
targets = [3, 8, 15, 22, 29]
channels = [64, 128, 256, 512, 512]
else:
raise ValueError(f'Unknown network architecture {network}')
self.net = Intermediary(layers, targets)
for p in self.net.parameters():
p.requires_grad = False
# Linear comparators
self.lins = nn.ModuleList([
nn.Sequential(
nn.Dropout(inplace=True) if dropout else nn.Identity(),
nn.Conv2d(c, 1, kernel_size=1, bias=False),
) for c in channels
])
if pretrained:
self.lins.load_state_dict(get_weights(network=network))
if eval:
self.eval()
self.reduction = reduction
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> torch.Tensor:
r"""Defines the computation performed at every call.
"""
_assert_type(
[input, target],
device=self.shift.device,
dim_range=(4, 4),
n_channels=3,
value_range=(0., 1.) if self.scaling else (0., -1.),
)
# ImageNet scaling
if self.scaling:
input = (input - self.shift) / self.scale
target = (target - self.shift) / self.scale
# LPIPS
residuals = []
for lin, fx, fy in zip(self.lins, self.net(input), self.net(target)):
fx = fx / torch.linalg.norm(fx, dim=1, keepdim=True)
fy = fy / torch.linalg.norm(fy, dim=1, keepdim=True)
mse = ((fx - fy) ** 2).mean(dim=(-1, -2), keepdim=True)
residuals.append(lin(mse).flatten())
l = torch.stack(residuals, dim=-1).sum(dim=-1)
return _reduce(l, self.reduction)
|
crabageprediction/venv/Lib/site-packages/numpy/typing/tests/data/pass/arrayterator.py
|
13rianlucero/CrabAgePrediction
| 20,453 |
59317
|
from __future__ import annotations
from typing import Any
import numpy as np
AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10)
ar_iter = np.lib.Arrayterator(AR_i8)
ar_iter.var
ar_iter.buf_size
ar_iter.start
ar_iter.stop
ar_iter.step
ar_iter.shape
ar_iter.flat
ar_iter.__array__()
for i in ar_iter:
pass
ar_iter[0]
ar_iter[...]
ar_iter[:]
ar_iter[0, 0, 0]
ar_iter[..., 0, :]
|
tests/test_compatibility.py
|
AnandJyrm/allure-pytest
| 112 |
59318
|
<filename>tests/test_compatibility.py
"""
This module holds tests for compatibility with other py.test plugins.
Created on Apr 15, 2014
@author: pupssman
"""
from hamcrest import assert_that, contains, has_property
def test_maxfail(report_for):
"""
Check that maxfail generates proper report
"""
report = report_for("""
def test_a():
assert False
def test_b():
assert True
""", extra_run_args=['-x'])
assert_that(report.findall('.//test-case'), contains(has_property('name', 'test_a')))
|
qiskit/algorithms/optimizers/tnc.py
|
Roshan-Thomas/qiskit-terra
| 1,599 |
59339
|
<gh_stars>1000+
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Truncated Newton (TNC) optimizer. """
from typing import Optional
from .scipy_optimizer import SciPyOptimizer
class TNC(SciPyOptimizer):
"""
Truncated Newton (TNC) optimizer.
TNC uses a truncated Newton algorithm to minimize a function with variables subject to bounds.
This algorithm uses gradient information; it is also called Newton Conjugate-Gradient.
It differs from the :class:`CG` method as it wraps a C implementation and allows each variable
to be given upper and lower bounds.
Uses scipy.optimize.minimize TNC
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""
_OPTIONS = ["maxiter", "disp", "accuracy", "ftol", "xtol", "gtol", "eps"]
# pylint: disable=unused-argument
def __init__(
self,
maxiter: int = 100,
disp: bool = False,
accuracy: float = 0,
ftol: float = -1,
xtol: float = -1,
gtol: float = -1,
tol: Optional[float] = None,
eps: float = 1e-08,
options: Optional[dict] = None,
max_evals_grouped: int = 1,
**kwargs,
) -> None:
"""
Args:
maxiter: Maximum number of function evaluations.
disp: Set to True to print convergence messages.
accuracy: Relative precision for finite difference calculations.
If <= machine_precision, set to sqrt(machine_precision). Defaults to 0.
ftol: Precision goal for the value of f in the stopping criterion.
If ftol < 0.0, ftol is set to 0.0 defaults to -1.
xtol: Precision goal for the value of x in the stopping criterion
(after applying x scaling factors).
If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1.
gtol: Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
tol: Tolerance for termination.
eps: Step size used for numerical approximation of the Jacobian.
options: A dictionary of solver options.
max_evals_grouped: Max number of default gradient evaluations performed simultaneously.
kwargs: additional kwargs for scipy.optimize.minimize.
"""
if options is None:
options = {}
for k, v in list(locals().items()):
if k in self._OPTIONS:
options[k] = v
super().__init__(
"TNC",
options=options,
tol=tol,
max_evals_grouped=max_evals_grouped,
**kwargs,
)
|
scripts/serial-stress-test.py
|
twopoint718/Amethyst
| 260 |
59397
|
#!/usr/bin/env python
import serial
import time
BAUD = 57600
with serial.Serial('/dev/cu.usbserial-DN05KLWU', BAUD) as port:
time.sleep(5.0)
n = 0
while True:
port.write(bytearray([n & 0xFF]))
n += 1
|
cflearn/models/cv/ssl/dino.py
|
carefree0910/carefree-learn
| 400 |
59493
|
import torch
import numpy as np
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
from torch import Tensor
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Optional
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from torch.nn.parallel import DistributedDataParallel as DDP
from ..encoder import Encoder1DBase
from ....data import CVLoader
from ....types import tensor_dict_type
from ....protocol import StepOutputs
from ....protocol import TrainerState
from ....protocol import MetricsOutputs
from ....protocol import ModelWithCustomSteps
from ....constants import LOSS_KEY
from ....constants import INPUT_KEY
from ....constants import LATENT_KEY
from ....misc.toolkit import to_device
from ....misc.toolkit import l2_normalize
from ....misc.toolkit import get_world_size
from ....misc.toolkit import has_batch_norms
def _get_dino_defaults(name: str) -> Dict[str, Any]:
if name == "vit":
return {"patch_size": 16, "drop_path_rate": 0.1}
return {}
class Scheduler:
def __init__(self, values: np.ndarray):
self.values = values
self.max_idx = len(values) - 1
def __getitem__(self, index: int) -> Any:
return self.values[min(index, self.max_idx)]
def cosine_scheduler(
base_value: float,
final_value: float,
epochs: int,
num_step_per_epoch: int,
warmup_epochs: int = 0,
start_warmup_value: int = 0,
) -> Scheduler:
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * num_step_per_epoch
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * num_step_per_epoch - warmup_iters)
diff = base_value - final_value
schedule = final_value + 0.5 * diff * (1.0 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * num_step_per_epoch
return Scheduler(schedule)
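# Hedged usage sketch (comments only): building a warmup + cosine schedule and
# indexing it by global step; all numbers are illustrative assumptions:
#
#   lr_schedule = cosine_scheduler(
#       base_value=5.0e-4, final_value=1.0e-6,
#       epochs=100, num_step_per_epoch=500, warmup_epochs=10,
#   )
#   lr = lr_schedule[global_step]  # indices past the end clamp to final_value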
class MultiCropWrapper(nn.Module):
def __init__(self, backbone: nn.Module, head: nn.Module):
super().__init__()
backbone.fc, backbone.head = nn.Identity(), nn.Identity()
self.backbone = backbone
self.head = head
def forward(
self,
batch_idx: int,
batch: tensor_dict_type,
state: Optional[TrainerState] = None,
*,
img_end_idx: Optional[int] = None,
**kwargs: Any,
) -> Tensor:
img_crops = batch[INPUT_KEY]
if not isinstance(img_crops, list):
img_crops = batch[INPUT_KEY] = [img_crops]
if img_end_idx is not None:
img_crops = img_crops[:img_end_idx]
idx_crops = torch.cumsum(
torch.unique_consecutive(
torch.tensor([img_crop.shape[-1] for img_crop in img_crops]),
return_counts=True,
)[1],
0,
)
outputs = []
start_idx = 0
for end_idx in idx_crops:
local_batch = shallow_copy_dict(batch)
local_batch[INPUT_KEY] = torch.cat(img_crops[start_idx:end_idx])
idx_rs = self.backbone(batch_idx, local_batch, state, **kwargs)
idx_out = idx_rs[LATENT_KEY]
if isinstance(idx_out, tuple):
idx_out = idx_out[0]
outputs.append(idx_out)
start_idx = end_idx
return self.head(torch.cat(outputs))
class DINOHead(nn.Module):
def __init__(
self,
in_dim: int,
out_dim: int,
batch_norm: bool = False,
norm_last_layer: bool = True,
*,
num_layers: int = 3,
latent_dim: int = 2048,
bottleneck_dim: int = 256,
):
super().__init__()
num_layers = max(num_layers, 1)
if num_layers == 1:
self.mapping = nn.Linear(in_dim, bottleneck_dim)
else:
blocks = [nn.Linear(in_dim, latent_dim)]
if batch_norm:
blocks.append(nn.BatchNorm1d(latent_dim))
blocks.append(nn.GELU())
for _ in range(num_layers - 2):
blocks.append(nn.Linear(latent_dim, latent_dim))
if batch_norm:
blocks.append(nn.BatchNorm1d(latent_dim))
blocks.append(nn.GELU())
blocks.append(nn.Linear(latent_dim, bottleneck_dim))
self.mapping = nn.Sequential(*blocks)
self.apply(self._init_weights)
last = nn.Linear(bottleneck_dim, out_dim, bias=False)
self.last_layer = nn.utils.weight_norm(last)
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, net: Tensor) -> Tensor:
net = self.mapping(net)
net = nn.functional.normalize(net, dim=-1, p=2)
net = self.last_layer(net)
return net
class DINOLoss(nn.Module):
center: torch.Tensor
def __init__(
self,
out_dim: int,
teacher_temp: float,
warmup_teacher_temp: float,
warmup_teacher_temp_epochs: int,
teacher_temp_epochs: int,
*,
student_temp: float = 0.1,
center_momentum: float = 0.9,
):
super().__init__()
self.student_temp = student_temp
self.center_momentum = center_momentum
self.register_buffer("center", torch.zeros(1, out_dim))
teacher_temp_constant_epochs = teacher_temp_epochs - warmup_teacher_temp_epochs
self.teacher_temp_schedule = Scheduler(
np.concatenate(
(
np.linspace(
warmup_teacher_temp,
teacher_temp,
warmup_teacher_temp_epochs,
),
np.ones(teacher_temp_constant_epochs) * teacher_temp,
)
)
)
self.num_epochs = teacher_temp_epochs
def forward(
self,
epoch: int,
num_crops: int,
student_output: Tensor,
teacher_output: Tensor,
) -> Tensor:
student_logits = student_output / self.student_temp
student_logits_list = student_logits.chunk(num_crops)
temp = self.teacher_temp_schedule[epoch]
teacher_logits = F.softmax((teacher_output - self.center) / temp, dim=-1)
teacher_logits_list = teacher_logits.detach().chunk(2)
total_loss = 0.0
num_loss_terms = 0
for it, t_logit in enumerate(teacher_logits_list):
for iv, v_logit in enumerate(student_logits_list):
if iv == it:
continue
loss = torch.sum(-t_logit * F.log_softmax(v_logit, dim=-1), dim=-1)
total_loss += loss.mean()
num_loss_terms += 1
total_loss /= num_loss_terms
self.update_center(teacher_output)
return total_loss
@torch.no_grad()
def update_center(self, teacher_output: Tensor) -> None:
batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
if dist.is_initialized():
dist.all_reduce(batch_center)
batch_center = batch_center / (len(teacher_output) * get_world_size())
m = self.center_momentum
self.center = self.center * m + batch_center * (1.0 - m)
class DINOEvaluateLoss:
def __init__(self, train_loss: DINOLoss):
self.train_loss = train_loss
def __call__(
self,
epoch: int,
student_output: Tensor,
teacher_output: Tensor,
) -> float:
s_logits = student_output / self.train_loss.student_temp
temp = self.train_loss.teacher_temp_schedule[epoch]
centered = teacher_output - self.train_loss.center
t_logits = F.softmax(centered / temp, dim=-1)
loss = torch.sum(-t_logits * F.log_softmax(s_logits, dim=-1), dim=-1).mean()
return loss.item()
@ModelWithCustomSteps.register("dino")
class DINO(ModelWithCustomSteps):
custom_params_groups = True
custom_ddp_initialization = True
lr_schedule: Optional[Scheduler]
wd_schedule: Optional[Scheduler]
momentum_schedule: Optional[Scheduler]
def __init__(
self,
encoder1d: str = "vit",
encoder1d_config: Optional[Dict[str, Any]] = None,
student_specific: Optional[Dict[str, Any]] = None,
teacher_specific: Optional[Dict[str, Any]] = None,
*,
out_dim: int = 65536,
use_bn_in_head: bool = False,
norm_last_layer: bool = True,
teacher_temp: float = 0.07,
momentum_teacher: float = 0.996,
warmup_teacher_temp: float = 0.04,
warmup_teacher_temp_epochs: int = 30,
teacher_temp_epochs: int,
freeze_last_layer: int = 1,
weight_decay: float = 0.04,
weight_decay_end: float = 0.4,
warmup_epochs: int = 10,
):
super().__init__()
base = update_dict(encoder1d_config or {}, _get_dino_defaults(encoder1d))
student_cfg = update_dict(student_specific or {}, shallow_copy_dict(base))
teacher_cfg = update_dict(teacher_specific or {}, shallow_copy_dict(base))
student = Encoder1DBase.make(encoder1d, student_cfg)
teacher = Encoder1DBase.make(encoder1d, teacher_cfg)
self.ddp_student = self.ddp_teacher = None
self.student = MultiCropWrapper(
student,
DINOHead(
student.latent_dim,
out_dim,
use_bn_in_head,
norm_last_layer,
),
)
self.teacher = MultiCropWrapper(
teacher,
DINOHead(teacher.latent_dim, out_dim, use_bn_in_head),
)
self.freeze_last_layer = freeze_last_layer
self.teacher.load_state_dict(self.student.state_dict())
self.loss = DINOLoss(
out_dim,
teacher_temp,
warmup_teacher_temp,
warmup_teacher_temp_epochs,
teacher_temp_epochs,
)
self.evaluate_loss = DINOEvaluateLoss(self.loss)
self.momentum_teacher = momentum_teacher
self.teacher_temp_epochs = teacher_temp_epochs
self.weight_decay = weight_decay
self.weight_decay_end = weight_decay_end
self.warmup_epochs = warmup_epochs
self.lr_schedule = None
self.wd_schedule = None
self.momentum_schedule = None
@property
def student_for_training(self) -> MultiCropWrapper:
return self.ddp_student or self.student
@property
def teacher_for_training(self) -> MultiCropWrapper:
return self.ddp_teacher or self.teacher
def forward(
self,
batch_idx: int,
batch: tensor_dict_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> tensor_dict_type:
net = self.student.backbone(batch_idx, batch, state, **kwargs)[LATENT_KEY]
net = l2_normalize(net)
return {LATENT_KEY: net}
def onnx_forward(self, batch: tensor_dict_type) -> Any:
inp = batch[INPUT_KEY]
net = self.get_latent(inp, determinate=True)
return net.view(inp.shape[0], self.student.backbone.latent_dim)
def get_latent(self, net: Tensor, **kwargs: Any) -> Tensor:
return self.forward(0, {INPUT_KEY: net}, **kwargs)[LATENT_KEY]
def get_logits(self, net: Tensor) -> Tensor:
return self.student(0, {INPUT_KEY: net})
def state_dict(
self,
destination: Any = None,
prefix: str = "",
keep_vars: bool = False,
) -> Any:
states = super().state_dict(destination, prefix, keep_vars)
for k in list(states.keys()):
if k.startswith("ddp"):
states.pop(k)
return states
def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None:
self.student(batch_idx, to_device(batch, self.device))
def _get_outputs(
self,
batch_idx: int,
batch: tensor_dict_type,
trainer: Any,
forward_kwargs: Dict[str, Any],
) -> tensor_dict_type:
teacher_output = self.teacher_for_training(
batch_idx,
batch,
trainer.state,
img_end_idx=2,
**forward_kwargs,
)
student_output = self.student_for_training(
batch_idx,
batch,
trainer.state,
**forward_kwargs,
)
return {"student": student_output, "teacher": teacher_output}
def _get_loss(
self,
batch_idx: int,
batch: tensor_dict_type,
trainer: Any,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, Tensor]:
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
outputs = self._get_outputs(batch_idx, batch, trainer, forward_kwargs)
epoch = trainer.state.epoch
num_crops = len(batch[INPUT_KEY])
student_output = outputs["student"]
teacher_output = outputs["teacher"]
loss = self.loss(epoch, num_crops, student_output, teacher_output)
return outputs, loss
def train_step(
self,
batch_idx: int,
batch: tensor_dict_type,
trainer: Any,
forward_kwargs: Dict[str, Any],
loss_kwargs: Dict[str, Any],
) -> StepOutputs:
state = trainer.state
if self.lr_schedule is None:
self.lr_schedule = cosine_scheduler(
self.lr * (len(batch[INPUT_KEY][0]) * get_world_size()) / 256.0, # type: ignore
self.min_lr,
self.teacher_temp_epochs,
state.num_step_per_epoch,
warmup_epochs=self.warmup_epochs,
)
if self.wd_schedule is None:
self.wd_schedule = cosine_scheduler(
self.weight_decay,
self.weight_decay_end,
self.teacher_temp_epochs,
state.num_step_per_epoch,
)
# manual scheduling
optimizer = trainer.optimizers["all"]
for i, param_group in enumerate(optimizer.param_groups):
param_group["lr"] = self.lr_schedule[state.step]
if i == 0:
param_group["weight_decay"] = self.wd_schedule[state.step]
# forward pass
rs, loss = self._get_loss(batch_idx, batch, trainer, forward_kwargs)
# backward pass
optimizer.zero_grad()
trainer.grad_scaler.scale(loss).backward()
# clip norm
if trainer.clip_norm > 0.0:
trainer.grad_scaler.unscale_(optimizer)
nn.utils.clip_grad_norm_(
self.student_for_training.parameters(),
max_norm=trainer.clip_norm,
)
# freeze last layer
if state.epoch <= self.freeze_last_layer:
for n, p in self.student.named_parameters():
if "last_layer" in n:
p.grad = None
# update parameters
trainer.grad_scaler.step(optimizer)
trainer.grad_scaler.update()
# update momentum teacher
if self.momentum_schedule is None:
self.momentum_schedule = cosine_scheduler(
self.momentum_teacher,
1.0,
self.teacher_temp_epochs,
state.num_step_per_epoch,
)
with torch.no_grad():
m = self.momentum_schedule[state.step]
for param_q, param_k in zip(
self.student.parameters(),
self.teacher.parameters(),
):
param_k.data.mul_(m).add_((1.0 - m) * param_q.detach().data)
# return
return StepOutputs(rs, {LOSS_KEY: loss.item()})
def evaluate_step( # type: ignore
self,
loader: CVLoader,
portion: float,
trainer: Any,
) -> MetricsOutputs:
losses = []
for i, batch in enumerate(loader):
if i / len(loader) >= portion:
break
batch = to_device(batch, self.device)
outputs = self._get_outputs(i, batch, trainer, {})
losses.append(
self.evaluate_loss(
trainer.state.epoch,
outputs["student"],
outputs["teacher"],
)
)
# gather
mean_loss = sum(losses) / len(losses)
return MetricsOutputs(
-mean_loss,
{
"loss": mean_loss,
"lr": self.lr_schedule[trainer.state.step], # type: ignore
"wd": self.wd_schedule[trainer.state.step], # type: ignore
},
)
@staticmethod
def params_groups(m: nn.Module) -> Any:
regularized = []
bias_and_norm = []
for name, param in m.named_parameters():
if not param.requires_grad:
continue
if name.endswith(".bias") or len(param.shape) == 1:
bias_and_norm.append(param)
else:
regularized.append(param)
return [{"params": regularized}, {"params": bias_and_norm, "weight_decay": 0.0}]
def _init_with_trainer(self, trainer: Any) -> None:
self.teacher_for_training.requires_grad_(False)
def init_ddp(self, trainer: Any) -> None:
if has_batch_norms(self.student):
self.student = nn.SyncBatchNorm.convert_sync_batchnorm(self.student)
self.teacher = nn.SyncBatchNorm.convert_sync_batchnorm(self.teacher)
self.ddp_student = DDP(self.student, device_ids=[trainer.rank])
self.ddp_teacher = DDP(self.teacher, device_ids=[trainer.rank])
self.ddp_teacher.requires_grad_(False) # type: ignore
def permute_trainer_config(self, trainer_config: Dict[str, Any]) -> None:
# TODO : make `permute_trainer_config` more general
if trainer_config["clip_norm"] == 0.0:
trainer_config["clip_norm"] = 3.0
if trainer_config["lr"] is None:
trainer_config["lr"] = 0.0005
self.lr = trainer_config["lr"]
self.min_lr = trainer_config.pop("min_lr", 1.0e-6)
if trainer_config["optimizer_name"] is None:
trainer_config["optimizer_name"] = "adamw"
trainer_config["scheduler_name"] = "none"
__all__ = [
"DINO",
]
|
anuga/file_conversion/dem2array.py
|
samcom12/anuga_core
| 136 |
59494
|
from __future__ import absolute_import
# external modules
from past.builtins import basestring
import numpy as num
# ANUGA modules
import anuga.utilities.log as log
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
from .asc2dem import asc2dem
def dem2array(filename, variable_name='elevation',
easting_min=None, easting_max=None,
northing_min=None, northing_max=None,
use_cache=False, verbose=False,):
"""Read Digitial Elevation model from the following NetCDF format (.dem)
Example:
ncols 3121
nrows 1800
xllcorner 722000
yllcorner 5893000
cellsize 25
NODATA_value -9999
138.3698 137.4194 136.5062 135.5558 ..........
    filename should be the .dem file to be read.
"""
import os
from anuga.file.netcdf import NetCDFFile
msg = 'Filename must be a text string'
assert isinstance(filename, basestring), msg
msg = 'Extension should be .dem'
assert os.path.splitext(filename)[1] in ['.dem'], msg
msg = 'Variable name must be a text string'
assert isinstance(variable_name, basestring), msg
# Get NetCDF
infile = NetCDFFile(filename, netcdf_mode_r)
if verbose: log.critical('Reading DEM from %s' % (filename))
ncols = int(infile.ncols)
nrows = int(infile.nrows)
xllcorner = float(infile.xllcorner) # Easting of lower left corner
yllcorner = float(infile.yllcorner) # Northing of lower left corner
cellsize = float(infile.cellsize)
NODATA_value = float(infile.NODATA_value)
zone = int(infile.zone)
false_easting = float(infile.false_easting)
false_northing = float(infile.false_northing)
# Text strings
projection = infile.projection
datum = infile.datum
units = infile.units
Z = infile.variables[variable_name][:]
Z = Z.reshape(nrows,ncols)
Z = num.where(Z == NODATA_value , num.nan, Z)
#changed the orientation of Z array to make it consistent with grd2array result
Z = num.fliplr(Z.T)
#print ncols, nrows, xllcorner,yllcorner, cellsize, NODATA_value, zone
x = num.linspace(xllcorner, xllcorner+(ncols-1)*cellsize, ncols)
y = num.linspace(yllcorner, yllcorner+(nrows-1)*cellsize, nrows)
return x,y, Z
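# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of reading a DEM grid with dem2array. The file name
# 'topography.dem' is a made-up placeholder; any NetCDF .dem file produced
# by asc2dem should work in its place.
if __name__ == '__main__':
    x, y, Z = dem2array('topography.dem', verbose=True)
    print('grid: %d x %d cells' % (len(x), len(y)))
    print('cell size: %g' % (x[1] - x[0]))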
|
browser_fingerprinting/bro_log_reader.py
|
dodoandbling/data_hacking
| 475 |
59509
|
<reponame>dodoandbling/data_hacking<filename>browser_fingerprinting/bro_log_reader.py
''' This module handles the mechanics around easily pulling in Bro Log data
The read_log method is a generator (in the Python sense) for rows in a Bro log;
because of this, it is memory efficient and does not read the entire file into memory.
'''
import csv
import datetime
import optparse
import itertools
class BroLogReader():
''' This class implements a python based Bro Log Reader. '''
def __init__(self):
''' Init for BroLogReader. '''
self._delimiter = '\t'
def read_log(self, logfile, max_rows=None):
''' The read_log method is a generator for rows in a Bro log.
Usage: rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Because this method returns a generator, it's memory
efficient and does not read the entire file in at once.
'''
# First parse the header of the bro log
bro_fptr, field_names, field_types = self._parse_bro_header(logfile)
# Note: The parse_bro_header method has advanced us to the first
# real data row, so we can use the normal csv reader.
reader = csv.DictReader(bro_fptr, fieldnames=field_names,
delimiter=self._delimiter, restval='BRO_STOP')
for _row in itertools.islice(reader, 0, max_rows):
values = self._cast_dict(_row)
if (values):
yield values
def _parse_bro_header(self, logfile):
''' This method tries to parse the Bro log header section.
            Note: Documentation on this log format is hard to come by, so this
                  method makes a number of simplifying assumptions.
                  Assumption 1: The delimiter is a tab.
                  Assumption 2: Types are either time, string, int or float
                  Assumption 3: The header always ends with #fields and #types as
                                the last two lines.
Format example:
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path httpheader_recon
#fields ts origin useragent header_events_json
#types time string string string
'''
# Open the logfile
_file = open(logfile, 'rb')
# Skip until you find the #fields line
_line = next(_file)
while (not _line.startswith('#fields')):
_line = next(_file)
# Read in the field names
_field_names = _line.strip().split(self._delimiter)[1:]
# Read in the types
_line = next(_file)
_field_types = _line.strip().split(self._delimiter)[1:]
# Return the header info
return _file, _field_names, _field_types
def _cast_dict(self, data_dict):
''' Internal method that makes sure any dictionary elements
are properly cast into the correct types, instead of
just treating everything like a string from the csv file
'''
for key, value in data_dict.iteritems():
if (value == 'BRO_STOP'):
return None
data_dict[key] = self._cast_value(value)
return data_dict
def _cast_value(self, value):
''' Internal method that makes sure any dictionary elements
are properly cast into the correct types, instead of
just treating everything like a string from the csv file
'''
# First try time
try:
return datetime.datetime.fromtimestamp(float(value))
# Next try a set of primitive types
except ValueError:
tests = (int, float, str)
for test in tests:
try:
return test(value)
except ValueError:
continue
return value
if __name__ == '__main__':
# Handle command-line arguments
PARSER = optparse.OptionParser()
PARSER.add_option('--logfile', default=None, help='Logfile to read from. Default: %default')
(OPTIONS, ARGUMENTS) = PARSER.parse_args()
print OPTIONS, ARGUMENTS
# Create a BRO log file reader and pull from the logfile
BRO_LOG = BroLogReader()
RECORDS = BRO_LOG.read_log(OPTIONS.logfile, max_rows=10)
for row in RECORDS:
print row
|
chapter-12/fees/app.py
|
wallacei/microservices-in-action-copy
| 115 |
59550
|
import datetime
import json
from nameko.events import EventDispatcher, event_handler
from simplebank.chassis import init_logger, init_statsd
class FeesService:
name = "fees_service"
statsd = init_statsd('simplebank-demo.fees', 'statsd')
logger = init_logger()
@event_handler("market_service", "order_placed")
@statsd.timer('charge_fee')
def charge_fee(self, payload):
self.logger.debug(
"this is a debug message from fees service", extra={"uuid": payload})
self.logger.info("charging fees", extra={
"uuid": payload})
return payload
|
python/iceberg/api/transforms/identity.py
|
moulimukherjee/incubator-iceberg
| 2,161 |
59602
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .transform import Transform
from .transform_util import TransformUtil
from ..expressions import Expressions
from ..types import TypeID
class Identity(Transform):
@staticmethod
def get(type_var):
return Identity(type_var)
def __init__(self, type_var):
self.type_var = type_var
def apply(self, value):
return value
def can_transform(self, type_var):
return type_var.is_primitive_type()
def get_result_type(self, source_type):
return source_type
def project(self, name, predicate):
return self.project_strict(name, predicate)
def project_strict(self, name, predicate):
if predicate.lit is not None:
return Expressions.predicate(predicate.op, name, predicate.lit.value)
else:
return Expressions.predicate(predicate.op, name)
def to_human_string(self, value):
if value is None:
return "null"
if self.type_var.type_id == TypeID.DATE:
return TransformUtil.human_day(value)
elif self.type_var.type_id == TypeID.TIME:
return TransformUtil.human_time(value)
elif self.type_var.type_id == TypeID.TIMESTAMP:
if self.type_var.adjust_to_utc:
return TransformUtil.human_timestamp_with_timezone(value)
else:
return TransformUtil.human_timestamp_without_timezone(value)
elif self.type_var.type_id in (TypeID.BINARY, TypeID.FIXED):
raise NotImplementedError()
# if isinstance(value, bytearray):
# return base64.b64encode(value)
# elif isinstance(value, bytes):
# return base64.b64encode(bytes(value))
# else:
# raise RuntimeError("Unsupported binary type: %s" % value.__class__.__name__)
else:
return str(value)
def __str__(self):
return "identity"
def __eq__(self, other):
if id(self) == id(other):
return True
if other is None or not isinstance(other, Identity):
return False
return self.type_var == other.type_var
def __hash__(self):
return hash(self.__key())
def __key(self):
return Identity.__class__, self.type_var
|
plugins/comment_mem_access.py
|
bennofs/execution-trace-viewer
| 186 |
59640
|
"""This plugin finds every memory access and comments the row with address and value"""
from yapsy.IPlugin import IPlugin
from core.api import Api
class PluginCommentMemAccesses(IPlugin):
def execute(self, api: Api):
want_to_continue = api.ask_user(
"Warning", "This plugin may replace some of your comments, continue?"
)
if not want_to_continue:
return
trace_data = api.get_trace_data()
trace = api.get_visible_trace()
for i, t in enumerate(trace):
if 'mem' in t and t['mem']:
comment = ""
for mem in t['mem']:
addr = hex(mem['addr'])
value = mem['value']
if mem['access'] == "READ":
comment += f"[{ addr }] -> { hex(value) } "
elif mem['access'] == "WRITE":
comment += f"[{ addr }] <- { hex(value) } "
if 0x20 <= value <= 0x7e:
comment += f"'{ chr(value) }' "
# Add comment to full trace
row = t["id"]
trace_data.trace[row]['comment'] = comment
# Add comment to visible trace too because it could be filtered_trace
trace[i]['comment'] = comment
api.update_trace_table()
|
datasets/coco_eval_proposals.py
|
StephenStorm/DETReg
| 212 |
59734
|
# pip install pycocotools opencv-python opencv-contrib-python
# wget https://github.com/opencv/opencv_extra/raw/master/testdata/cv/ximgproc/model.yml.gz
import os
import copy
import time
import argparse
import contextlib
import multiprocessing
import numpy as np
import cv2
import cv2.ximgproc
import matplotlib.patches
import matplotlib.pyplot as plt
import torch
from torchvision.datasets import CocoDetection
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def imshow_with_boxes(img, boxes_xywh, savefig):
plt.figure()
plt.imshow(img)
plt.axis('off')
for x, y, w, h in boxes_xywh.tolist():
plt.gca().add_patch(matplotlib.patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none'))
plt.savefig(savefig)
plt.close()
return savefig
def selective_search(img, fast, topk):
algo = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
algo.setBaseImage(img)
if fast:
algo.switchToSelectiveSearchFast()
else:
algo.switchToSelectiveSearchQuality()
boxes_xywh = algo.process().astype(np.float32)
scores = np.ones( (len(boxes_xywh), ) )
return boxes_xywh[:topk], scores[:topk]
def edge_boxes(img, fast, topk, bgr2rgb = (2, 1, 0), algo_edgedet = cv2.ximgproc.createStructuredEdgeDetection('model.yml.gz') if os.path.exists('model.yml.gz') else None):
edges = algo_edgedet.detectEdges(img[..., bgr2rgb].astype(np.float32) / 255.0)
orimap = algo_edgedet.computeOrientation(edges)
edges = algo_edgedet.edgesNms(edges, orimap)
algo_edgeboxes = cv2.ximgproc.createEdgeBoxes()
algo_edgeboxes.setMaxBoxes(topk)
boxes_xywh, scores = algo_edgeboxes.getBoundingBoxes(edges, orimap)
if scores is None:
boxes_xywh, scores = np.array([[0, 0.0, img.shape[1], img.shape[0]]]), np.ones((1, ))
return boxes_xywh, scores.squeeze()
def process_image(image_id, img_extra, fast, resize, algo, rgb2bgr = (2, 1, 0), category_other = -1, topk = 1000):
img = np.asarray(img_extra[0])[..., rgb2bgr]
h, w = img.shape[:2]
img_det = img if resize == 1 else cv2.resize(img, (resize, resize))
boxes_xywh, scores = algo(img_det, fast, topk)
boxes_xywh = boxes_xywh.astype(np.float32) * (1 if resize == 1 else np.array([w, h, w, h]) / resize)
labels = np.full((len(boxes_xywh), ), category_other, dtype = int)
return image_id, dict(boxes = boxes_xywh, scores = scores, labels = labels)
def process_loaded(image_id, loaded, category_other = -1):
boxes_xyxy = loaded['pred_boxes_'].clamp(min = 0)
boxes_xywh = torch.stack([boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] - boxes_xyxy[:, 0], boxes_xyxy[:, 3] - boxes_xyxy[:, 1]], dim = -1)
labels = np.full((len(boxes_xywh), ), category_other, dtype = int)
num_classes = loaded['pred_logits'].shape[-1]
scores = loaded['pred_logits'][:, 1:: num_classes - 2][:, 0]
I = scores.argsort(descending = True)
scores = scores[I]
boxes_xywh = boxes_xywh[I]
labels = labels[I]
return image_id, dict(boxes = boxes_xywh, scores = scores, labels = labels)
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_type = 'bbox', useCats = 0, maxDets = 100):
self.coco_gt = copy.deepcopy(coco_gt)
self.coco_eval = COCOeval(coco_gt, iouType = iou_type)
if maxDets != [100]:
self.coco_eval.params.maxDets = maxDets
if not useCats:
self.coco_eval.params.useCats = useCats
self.coco_eval.params.catIds = [-1]
coco_gt.loadAnns = lambda imgIds, loadAnns = coco_gt.loadAnns: [gt.update(dict(category_id = -1)) or gt for gt in loadAnns(imgIds)]
self.accumulate, self.summarize = self.coco_eval.accumulate, self.coco_eval.summarize
@staticmethod
def call_without_stdout(func, *args):
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
return func(*args)
def update(self, predictions):
tolist = lambda a: [a.tolist()] if a.ndim == 0 else a.tolist()
detection_results = [dict(image_id = image_id, bbox = bbox, score = score, category_id = category_id) for image_id, pred in predictions.items() if pred for bbox, score, category_id in zip(pred['boxes'].tolist(), tolist(pred['scores']), pred['labels'].tolist())]
self.coco_eval.cocoDt = self.call_without_stdout(COCO.loadRes, self.coco_gt, detection_results) if detection_results else COCO()
self.coco_eval.params.imgIds = list(predictions)
self.call_without_stdout(self.coco_eval.evaluate)
def main(args):
coco_mode = 'instances'
PATHS = dict(
train = (os.path.join(args.dataset_root, f'train{args.dataset_year}'), os.path.join(args.dataset_root, 'annotations', f'{coco_mode}_train{args.dataset_year}.json')),
val = (os.path.join(args.dataset_root, f'val{args.dataset_year}'), os.path.join(args.dataset_root, 'annotations', f'{coco_mode}_val{args.dataset_year}.json')),
)
dataset = CocoDetection(*PATHS[args.dataset_split])
coco_evaluator = CocoEvaluator(dataset.coco, maxDets = args.max_dets)
tic = time.time()
if args.output_dir:
os.makedirs(args.output_dir, exist_ok = True)
if args.algo != 'process_loaded':
preds = dict(multiprocessing.Pool(processes = args.num_workers).starmap(process_image, zip(dataset.ids, dataset, [args.fast] * len(dataset), [args.resize] * len(dataset), [globals()[args.algo]] * len(dataset))))
else:
preds = []
for i, t in enumerate(zip(dataset.ids, dataset, [args.fast] * len(dataset), [args.resize] * len(dataset), [globals()[args.algo]] * len(dataset))):
loaded = torch.load(os.path.join(args.input_dir, str(t[0]) + '.pt'), map_location = 'cpu')
preds.append(process_loaded(t[0], loaded))
if args.output_dir:
imshow_with_boxes(t[1][0], preds[-1][1]['boxes'][:5], os.path.join(args.output_dir, str(t[0]) + '.jpg'))
print(i) if i % 50 == 0 else None
preds = dict(preds)
print('proposals', time.time() - tic); tic = time.time()
coco_evaluator.update(preds)
coco_evaluator.accumulate()
coco_evaluator.summarize()
print('evaluator', time.time() - tic)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input-dir', '-i')
parser.add_argument('--output-dir', '-o')
parser.add_argument('--dataset-root')
parser.add_argument('--dataset-split', default = 'val', choices = ['train', 'val'])
parser.add_argument('--dataset-year', type = int, default = 2017)
parser.add_argument('--num-workers', type = int, default = 16)
parser.add_argument('--algo', default = 'selective_search', choices = ['selective_search', 'edge_boxes', 'process_loaded'])
parser.add_argument('--fast', action = 'store_true')
parser.add_argument('--resize', type = int, default = 128)
parser.add_argument('--max-dets', type = int, nargs = '*', default = [100])
args = parser.parse_args()
print(args)
main(args)
|
names_dataset/__init__.py
|
nischal-sanil/name-dataset
| 478 |
59750
|
<filename>names_dataset/__init__.py
from names_dataset.nd_v1 import NameDatasetV1 # noqa
from names_dataset.nd_v2 import NameDataset # noqa
|
apps/interface/business/interfacemodule.py
|
rainydaygit/testtcloudserver
| 349 |
59776
|
from flask import g, current_app, jsonify
from sqlalchemy import asc, desc, func
from apps.interface.models.interfaceapimsg import InterfaceApiMsg
from apps.interface.models.interfacecase import InterfaceCase
from apps.interface.models.interfacemodule import InterfaceModule
from apps.interface.models.interfaceproject import InterfaceProject
from apps.interface.util.utils import *
from library.api.db import db
from library.api.transfer import transfer2json
class InterfaceModuleBusiness(object):
@classmethod
def project_permission(cls, pid=None, id=None):
if g.is_admin:
return 0
if pid:
return 0 if pid in g.projectid else 1
else:
ret = InterfaceModule.query.add_columns(InterfaceModule.project_id.label('projectid')).filter(
InterfaceModule.id == id).first()
return 0 if ret.projectid in g.projectid else 1
@classmethod
def _query(cls):
return InterfaceModule.query.add_columns(
InterfaceModule.id.label('id'),
InterfaceModule.name.label('name'),
InterfaceModule.project_id.label('projectid'),
InterfaceModule.num.label('num'),
InterfaceModule.weight.label('weight'),
InterfaceModule.status.label('status'),
)
@classmethod
@transfer2json('?id|!name|!projectid|!num|!weight|!status')
def query_all_json(cls, limit, offset):
ret = cls._query().filter(InterfaceModule.status == InterfaceModule.ACTIVE) \
.order_by(desc(InterfaceModule.id)) \
.limit(limit).offset(offset).all()
return ret
@classmethod
def module_create(cls, name, project_id, num):
try:
m = InterfaceModule(
name=name,
project_id=project_id,
num=num,
)
db.session.add(m)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def module_delete(cls, id):
try:
m = InterfaceModule.query.get(id)
m.status = InterfaceModule.DISABLE
db.session.add(m)
db.session.commit()
return 0
except Exception as e:
current_app.logger.error(str(e))
return 105, str(e)
@classmethod
def module_modify(cls, id, name, project_id):
try:
m = InterfaceModule.query.get(id)
m.name = name
m.project_id = project_id
db.session.add(m)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
@transfer2json('?id|!name|!projectid|!num|!weight|!status')
def query_json_by_id(cls, id):
ret = cls._query().filter(InterfaceModule.status == InterfaceModule.ACTIVE,
InterfaceModule.id == id).all()
return ret
@classmethod
def _query_total(cls):
return InterfaceModule.query.outerjoin(
InterfaceCase, InterfaceCase.module_id == InterfaceModule.id).add_columns(
InterfaceModule.id.label('id'),
InterfaceModule.name.label('name'),
InterfaceModule.project_id.label('projectid'),
InterfaceModule.num.label('num'),
InterfaceModule.weight.label('weight'),
InterfaceModule.status.label('status'),
func.count('*').label('total'),
)
@classmethod
@transfer2json('?id|!name|!projectid|!num|!weight|!status|!total')
def query_by_project_id_total(cls, pid):
# TODO : here need case import
# ret = cls._query_total().filter(InterfaceModule.status == InterfaceModule.ACTIVE,
# InterfaceModule.project_id == pid, Case.status != Case.DISABLE).order_by(
# desc(InterfaceModule.id)).group_by(Case.module_id).all()
ret = []
return ret
@classmethod
@transfer2json('?id|!name|!projectid|!num|!weight|!status')
def query_by_project_ids(cls, pid):
ret = cls._query().filter(InterfaceModule.status == InterfaceModule.ACTIVE,
InterfaceModule.project_id == pid).order_by(desc(InterfaceModule.id)).all()
return ret
@classmethod
def query_by_project_id(cls, pid):
tlist = []
total_ret = cls.query_by_project_id_total(pid)
for a in total_ret:
tlist.append(a['id'])
ret = cls.query_by_project_ids(pid)
for i in range(len(ret)):
if ret[i]['id'] not in tlist:
ret[i]['total'] = 0
total_ret.append(ret[i])
total_ret = sorted(total_ret, key=lambda x: x['id'], reverse=True)
return total_ret
@classmethod
def find_model(cls, page, per_page, project_name):
if not project_name:
return jsonify({'msg': '请先选择项目', 'status': 0})
        project_id = InterfaceProject.query.filter_by(name=project_name, status=InterfaceProject.ACTIVE).first().id
        all_module = InterfaceModule.query.filter_by(status=InterfaceModule.ACTIVE, project_id=project_id).order_by(
InterfaceModule.num.asc())
pagination = all_module.paginate(page, per_page=per_page, error_out=False)
my_module = pagination.items
total = pagination.total
my_module = [{'name': c.name, 'moduleId': c.id, 'num': c.num} for c in my_module]
        # Query all interface modules so that every module can be selected when a new API is entered
_all_module = [{'name': s.name, 'moduleId': s.id, 'num': s.num} for s in all_module.all()]
return jsonify({'data': my_module, 'total': total, 'status': 1, 'all_module': _all_module})
@classmethod
def add_model(cls, project_name, name, ids, number):
if not project_name:
return jsonify({'msg': '请先创建项目', 'status': 0})
if not name:
return jsonify({'msg': '模块名称不能为空', 'status': 0})
project_id = InterfaceProject.query.filter_by(name=project_name, status=InterfaceProject.ACTIVE).first().id
num = auto_num(number, InterfaceModule, project_id=project_id, status=InterfaceModule.ACTIVE)
if ids:
old_data = InterfaceModule.query.filter_by(id=ids, status=InterfaceModule.ACTIVE).first()
old_num = old_data.num
list_data = InterfaceModule.query.filter(InterfaceModule.status == InterfaceModule.ACTIVE,
InterfaceModule.project_id == project_id).order_by(
InterfaceModule.num.asc()).all()
if InterfaceModule.query.filter_by(name=name, project_id=project_id,
status=InterfaceModule.ACTIVE).first() and name != old_data.name:
return jsonify({'msg': '模块名字重复', 'status': 0})
num_sort(num, old_num, list_data, old_data)
InterfaceModuleBusiness.module_modify(ids, name, project_id)
return jsonify({'msg': '修改成功', 'status': 1})
else:
if InterfaceModule.query.filter_by(name=name, project_id=project_id, status=InterfaceModule.ACTIVE).first():
return jsonify({'msg': '模块名字重复', 'status': 0})
else:
InterfaceModuleBusiness.module_create(name, project_id, num)
return jsonify({'msg': '新建成功', 'status': 1})
@classmethod
def del_model(cls, ids):
# _edit = InterfaceModule.query.filter_by(id=ids).first()
# if current_user.id != Project.query.filter_by(id=_edit.project_id).first().user_id:
# return jsonify({'msg': '不能删除别人项目下的模块', 'status': 0})
if InterfaceApiMsg.query.filter(
InterfaceApiMsg.module_id == ids,
InterfaceApiMsg.status == InterfaceApiMsg.ACTIVE
).order_by(asc(InterfaceApiMsg.num)).all():
return jsonify({'msg': '请先删除模块下的接口用例', 'status': 0})
InterfaceModuleBusiness.module_delete(ids)
return jsonify({'msg': '删除成功', 'status': 1})
@classmethod
def stick_module(cls, module_id, project_name):
old_data = InterfaceModule.query.filter_by(id=module_id, status=InterfaceModule.ACTIVE).first()
old_num = old_data.num
list_data_id = InterfaceProject.query.filter_by(name=project_name, status=InterfaceProject.ACTIVE).first().id
list_data = InterfaceModule.query.filter_by(project_id=list_data_id, status=InterfaceModule.ACTIVE).order_by(
InterfaceModule.num.asc()).all()
num_sort(1, old_num, list_data, old_data)
db.session.commit()
return jsonify({'msg': '置顶完成', 'status': 1})
|
data_readers/kitti.py
|
xingruiy/RAFT-3D
| 133 |
59784
|
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import os
import cv2
import math
import random
import json
import csv
import pickle
import os.path as osp
from glob import glob
import raft3d.projective_ops as pops
from . import frame_utils
from .augmentation import RGBDAugmentor, SparseAugmentor
class KITTIEval(data.Dataset):
crop = 80
def __init__(self, image_size=None, root='datasets/KITTI', do_augment=True):
self.init_seed = None
mode = "testing"
self.image1_list = sorted(glob(osp.join(root, mode, "image_2/*10.png")))
self.image2_list = sorted(glob(osp.join(root, mode, "image_2/*11.png")))
self.disp1_ga_list = sorted(glob(osp.join(root, mode, "disp_ganet_{}/*10.png".format(mode))))
self.disp2_ga_list = sorted(glob(osp.join(root, mode, "disp_ganet_{}/*11.png".format(mode))))
self.calib_list = sorted(glob(osp.join(root, mode, "calib_cam_to_cam/*.txt")))
self.intrinsics_list = []
for calib_file in self.calib_list:
with open(calib_file) as f:
reader = csv.reader(f, delimiter=' ')
for row in reader:
if row[0] == 'K_02:':
K = np.array(row[1:], dtype=np.float32).reshape(3,3)
kvec = np.array([K[0,0], K[1,1], K[0,2], K[1,2]])
self.intrinsics_list.append(kvec)
@staticmethod
def write_prediction(index, disp1, disp2, flow):
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
def writeDispKITTI(filename, disp):
disp = (256 * disp).astype(np.uint16)
cv2.imwrite(filename, disp)
disp1 = np.pad(disp1, ((KITTIEval.crop,0),(0,0)), mode='edge')
disp2 = np.pad(disp2, ((KITTIEval.crop, 0), (0,0)), mode='edge')
flow = np.pad(flow, ((KITTIEval.crop, 0), (0,0),(0,0)), mode='edge')
disp1_path = 'kitti_submission/disp_0/%06d_10.png' % index
disp2_path = 'kitti_submission/disp_1/%06d_10.png' % index
flow_path = 'kitti_submission/flow/%06d_10.png' % index
writeDispKITTI(disp1_path, disp1)
writeDispKITTI(disp2_path, disp2)
writeFlowKITTI(flow_path, flow)
def __len__(self):
return len(self.image1_list)
def __getitem__(self, index):
        intrinsics = self.intrinsics_list[index].copy()  # copy so the stored intrinsics are not mutated by the crop offset below
image1 = cv2.imread(self.image1_list[index])
image2 = cv2.imread(self.image2_list[index])
disp1 = cv2.imread(self.disp1_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0
disp2 = cv2.imread(self.disp2_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0
image1 = image1[self.crop:]
image2 = image2[self.crop:]
disp1 = disp1[self.crop:]
disp2 = disp2[self.crop:]
intrinsics[3] -= self.crop
image1 = torch.from_numpy(image1).float().permute(2,0,1)
image2 = torch.from_numpy(image2).float().permute(2,0,1)
disp1 = torch.from_numpy(disp1).float()
disp2 = torch.from_numpy(disp2).float()
intrinsics = torch.from_numpy(intrinsics).float()
return image1, image2, disp1, disp2, intrinsics
class KITTI(data.Dataset):
def __init__(self, image_size=None, root='datasets/KITTI', do_augment=True):
import csv
self.init_seed = None
self.crop = 80
if do_augment:
self.augmentor = SparseAugmentor(image_size)
else:
self.augmentor = None
self.image1_list = sorted(glob(osp.join(root, "training", "image_2/*10.png")))
self.image2_list = sorted(glob(osp.join(root, "training", "image_2/*11.png")))
self.disp1_list = sorted(glob(osp.join(root, "training", "disp_occ_0/*10.png")))
self.disp2_list = sorted(glob(osp.join(root, "training", "disp_occ_1/*10.png")))
self.disp1_ga_list = sorted(glob(osp.join(root, "training", "disp_ganet/*10.png")))
self.disp2_ga_list = sorted(glob(osp.join(root, "training", "disp_ganet/*11.png")))
self.flow_list = sorted(glob(osp.join(root, "training", "flow_occ/*10.png")))
self.calib_list = sorted(glob(osp.join(root, "training", "calib_cam_to_cam/*.txt")))
self.intrinsics_list = []
for calib_file in self.calib_list:
with open(calib_file) as f:
reader = csv.reader(f, delimiter=' ')
for row in reader:
if row[0] == 'K_02:':
K = np.array(row[1:], dtype=np.float32).reshape(3,3)
kvec = np.array([K[0,0], K[1,1], K[0,2], K[1,2]])
self.intrinsics_list.append(kvec)
def __len__(self):
return len(self.image1_list)
def __getitem__(self, index):
if not self.init_seed:
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
torch.manual_seed(worker_info.id)
np.random.seed(worker_info.id)
random.seed(worker_info.id)
self.init_seed = True
image1 = cv2.imread(self.image1_list[index])
image2 = cv2.imread(self.image2_list[index])
disp1 = cv2.imread(self.disp1_list[index], cv2.IMREAD_ANYDEPTH) / 256.0
disp2 = cv2.imread(self.disp2_list[index], cv2.IMREAD_ANYDEPTH) / 256.0
disp1_dense = cv2.imread(self.disp1_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0
disp2_dense = cv2.imread(self.disp2_ga_list[index], cv2.IMREAD_ANYDEPTH) / 256.0
flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
        intrinsics = self.intrinsics_list[index].copy()  # copy so the stored intrinsics are not mutated by the crop offset below
SCALE = np.random.uniform(0.08, 0.15)
# crop top 80 pixels, no ground truth information
image1 = image1[self.crop:]
image2 = image2[self.crop:]
disp1 = disp1[self.crop:]
disp2 = disp2[self.crop:]
flow = flow[self.crop:]
valid = valid[self.crop:]
disp1_dense = disp1_dense[self.crop:]
disp2_dense = disp2_dense[self.crop:]
intrinsics[3] -= self.crop
image1 = torch.from_numpy(image1).float().permute(2,0,1)
image2 = torch.from_numpy(image2).float().permute(2,0,1)
disp1 = torch.from_numpy(disp1 / intrinsics[0]) / SCALE
disp2 = torch.from_numpy(disp2 / intrinsics[0]) / SCALE
disp1_dense = torch.from_numpy(disp1_dense / intrinsics[0]) / SCALE
disp2_dense = torch.from_numpy(disp2_dense / intrinsics[0]) / SCALE
dz = (disp2 - disp1_dense).unsqueeze(dim=-1)
depth1 = 1.0 / disp1_dense.clamp(min=0.01).float()
depth2 = 1.0 / disp2_dense.clamp(min=0.01).float()
intrinsics = torch.from_numpy(intrinsics)
valid = torch.from_numpy(valid)
flow = torch.from_numpy(flow)
valid = valid * (disp2 > 0).float()
flow = torch.cat([flow, dz], -1)
if self.augmentor is not None:
image1, image2, depth1, depth2, flow, valid, intrinsics = \
self.augmentor(image1, image2, depth1, depth2, flow, valid, intrinsics)
return image1, image2, depth1, depth2, flow, valid, intrinsics
|
tests/test_content_api.py
|
tzengwei/fastapi-sqlmodel-typer
| 123 |
59810
|
<reponame>tzengwei/fastapi-sqlmodel-typer
def test_content_create(api_client_authenticated):
response = api_client_authenticated.post(
"/content/",
json={
"title": "hello test",
"text": "this is just a test",
"published": True,
"tags": ["test", "hello"],
},
)
assert response.status_code == 200
result = response.json()
assert result["slug"] == "hello-test"
def test_content_list(api_client_authenticated):
response = api_client_authenticated.get("/content/")
assert response.status_code == 200
result = response.json()
assert result[0]["slug"] == "hello-test"
|
nautobot/extras/migrations/0011_fileattachment_fileproxy.py
|
psmware-ltd/nautobot
| 384 |
59828
|
# Generated by Django 3.1.13 on 2021-07-16 21:44
from django.db import migrations, models
import nautobot.extras.models.models
import uuid
class Migration(migrations.Migration):
dependencies = [
("extras", "0010_change_cf_validation_max_min_field_to_bigint"),
]
operations = [
migrations.CreateModel(
name="FileAttachment",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("bytes", models.BinaryField()),
("filename", models.CharField(max_length=255)),
("mimetype", models.CharField(max_length=50)),
],
options={"ordering": ["filename"]},
),
migrations.CreateModel(
name="FileProxy",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("name", models.CharField(max_length=255)),
(
"file",
models.FileField(
storage=nautobot.extras.models.models.database_storage,
upload_to="extras.FileAttachment/bytes/filename/mimetype",
),
),
("uploaded_at", models.DateTimeField(auto_now_add=True)),
],
options={
"get_latest_by": "uploaded_at",
"ordering": ["name"],
"verbose_name_plural": "file proxies",
},
),
migrations.AlterModelOptions(
name="jobresult",
options={"get_latest_by": "created", "ordering": ["-created"]},
),
]
|
koku/masu/database/provider_auth_db_accessor.py
|
rubik-ai/koku
| 157 |
59831
|
<gh_stars>100-1000
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Accessor for Provider Authentication from koku database."""
from api.provider.models import ProviderAuthentication
from masu.database.koku_database_access import KokuDBAccess
class ProviderAuthDBAccessor(KokuDBAccess):
"""Class to interact with the koku database for Provider Authentication Data."""
def __init__(self, auth_id=None, credentials=None):
"""
Establish Provider Authentication database connection.
Args:
auth_id (string) the provider authentication unique database id
credentials (dict) the credentials dictionary
"""
super().__init__("public")
self._auth_id = auth_id
self._credentials = credentials
self._table = ProviderAuthentication
def _get_db_obj_query(self):
"""
        Return the Django ORM query for the provider auth object.
Args:
None
Returns:
(django.db.query.QuerySet): QuerySet of objects matching the given filters
"""
if self._auth_id and self._credentials:
query = self._table.objects.filter(id=self._auth_id, credentials=self._credentials)
elif self._auth_id:
query = self._table.objects.filter(id=self._auth_id)
elif self._credentials:
query = self._table.objects.filter(credentials=self._credentials)
else:
query = self._table.objects.none()
return query
def get_auth_id(self):
"""
Return the database id.
Args:
None
Returns:
(Integer): "1",
"""
auth_obj = self._get_db_obj_query().first()
return auth_obj.id if auth_obj else None
def get_uuid(self):
"""
Return the provider uuid.
Args:
None
Returns:
(String): "UUID v4",
example: "edf94475-235e-4b64-ba18-0b81f2de9c9e"
"""
obj = self._get_db_obj_query().first()
return obj.uuid
def get_credentials(self):
"""
        Return the provider credentials.
Args:
None
Returns:
(dtring): "Provider Resource Name. i.e. AWS: RoleARN",
example: {"role_arn": "arn:aws:iam::111111111111:role/CostManagement"}
"""
obj = self._get_db_obj_query().first()
return obj.credentials
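# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the accessor is typically queried. The auth id ``1`` is a
# made-up value, and the calls require a configured Django/koku environment,
# so this is only a sketch; it is defined but never executed here.
def _example_usage():
    accessor = ProviderAuthDBAccessor(auth_id=1)
    return accessor.get_uuid(), accessor.get_credentials()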
|
lite/tests/unittest_py/pass/test_elementwise_scale_fuse_pass.py
|
714627034/Paddle-Lite
| 808 |
59840
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
from test_elementwise_util import trim_trailing_singular_dims, check_input_shape_available
import hypothesis.strategies as st
class TestElementwiseScaleFuse(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
if len(program_config.inputs["input_data_x"].shape) > 4 or len(
program_config.inputs["input_data_y"].shape
) > 4 or program_config.ops[1].attrs["bias_after_scale"] == False:
return False
return True
def sample_program_configs(self, draw):
in_shape_x = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=2, max_size=5))
in_shape_y = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=2, max_size=5))
axis = draw(
st.integers(
min_value=-1, max_value=max(len(in_shape_x), len(in_shape_y))))
assume(
check_input_shape_available(
in_shape_x=in_shape_x, in_shape_y=in_shape_y, axis=axis) ==
True)
#scale param
scale = draw(st.floats(min_value=0.5, max_value=5))
bias = draw(st.floats(min_value=0, max_value=1))
bias_after_scale = draw(st.sampled_from([False, True]))
elementwise_op = OpConfig(
type='elementwise_mul',
inputs={"X": ["input_data_x"],
"Y": ["input_data_y"]},
outputs={"Out": ["elementwise_output_data"]},
attrs={"data_format": 'nchw',
"axis": axis})
scale_op = OpConfig(
type='scale',
inputs={"X": ["elementwise_output_data"]},
outputs={"Out": ["output_data"]},
attrs={
"scale": scale,
"bias": bias,
"bias_after_scale": bias_after_scale
})
ops = [elementwise_op, scale_op]
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data_x": TensorConfig(shape=in_shape_x),
"input_data_y": TensorConfig(shape=in_shape_y)
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
config = CxxConfig()
return self.get_predictor_configs(), ['elementwise_mul'], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(
quant=False,
max_examples=1000,
passes=["lite_elementwise_scale_fuse_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
|
Data_Structures/Tree/Binary_Search_Tree/Binary_Search_Tree.py
|
pranjalkumar153/algo_ds_101
| 182 |
59886
|
<reponame>pranjalkumar153/algo_ds_101
# Binary Search Tree (BST) Implementation
class BSTNode:
def __init__(selfNode, nodeData): # Node Structure
selfNode.nodeData = nodeData
selfNode.left = None
selfNode.right = None
selfNode.parent = None
# Insertion Operation
def insert(selfNode, node):
if selfNode.nodeData > node.nodeData:
if selfNode.left is None:
selfNode.left = node
node.parent = selfNode
else:
selfNode.left.insert(node)
elif selfNode.nodeData < node.nodeData:
if selfNode.right is None:
selfNode.right = node
node.parent = selfNode
else:
selfNode.right.insert(node)
# Removal Operation Functions
def replace_node_of_parent(selfNode, new_node):
if selfNode.parent is not None:
if new_node is not None:
new_node.parent = selfNode.parent
if selfNode.parent.left == selfNode:
selfNode.parent.left = new_node
elif selfNode.parent.right == selfNode:
selfNode.parent.right = new_node
else:
selfNode.nodeData = new_node.nodeData
selfNode.left = new_node.left
selfNode.right = new_node.right
if new_node.left is not None:
new_node.left.parent = selfNode
if new_node.right is not None:
new_node.right.parent = selfNode
def find_min(selfNode):
current = selfNode
while current.left is not None:
current = current.left
return current
def remove(selfNode):
if (selfNode.left is not None and selfNode.right is not None):
successor = selfNode.right.find_min()
selfNode.nodeData = successor.nodeData
successor.remove()
elif selfNode.left is not None:
selfNode.replace_node_of_parent(selfNode.left)
elif selfNode.right is not None:
selfNode.replace_node_of_parent(selfNode.right)
else:
selfNode.replace_node_of_parent(None)
# Search required data within BST
def search(selfNode, nodeData):
if selfNode.nodeData > nodeData:
if selfNode.left is not None:
return selfNode.left.search(nodeData)
else:
return None
elif selfNode.nodeData < nodeData:
if selfNode.right is not None:
return selfNode.right.search(nodeData)
else:
return None
return selfNode
# InOrder Traversal Operation
def inorder(selfNode):
if selfNode.left is not None:
selfNode.left.inorder()
print(selfNode.nodeData, end=' ')
if selfNode.right is not None:
selfNode.right.inorder()
# PostOrder Traversal Operation
def postorder(selfNode):
        if selfNode.left is not None:
            selfNode.left.postorder()
        if selfNode.right is not None:
            selfNode.right.postorder()
print(selfNode.nodeData, end=' ')
# PreOrder Traversal Operation
def preorder(selfNode):
print(selfNode.nodeData, end=' ')
        if selfNode.left is not None:
            selfNode.left.preorder()
        if selfNode.right is not None:
            selfNode.right.preorder()
class BSTree: # Structure of Binary Search Tree
def __init__(selfNode):
selfNode.root = None
def inorder(selfNode):
if selfNode.root is not None:
selfNode.root.inorder()
def preorder(selfNode):
if selfNode.root is not None:
selfNode.root.preorder()
def postorder(selfNode):
if selfNode.root is not None:
selfNode.root.postorder()
def add(selfNode, nodeData):
new_node = BSTNode(nodeData)
if selfNode.root is None:
selfNode.root = new_node
else:
selfNode.root.insert(new_node)
def remove(selfNode, nodeData):
to_remove = selfNode.search(nodeData)
if (selfNode.root == to_remove and selfNode.root.left is None
and selfNode.root.right is None):
selfNode.root = None
else:
to_remove.remove()
def search(selfNode, nodeData):
if selfNode.root is not None:
return selfNode.root.search(nodeData)
bstree = BSTree() # Object of class BSTree
# Menu of Operations on BST Tree
print('BST Tree Operation Menu')
print('Add <data>')
print('Remove <data>')
print('Inorder')
print('Preorder')
print('Postorder')
print('Quit')
while True:
do = input('Enter your action => ').split()
operation = do[0].strip().lower()
if operation == 'add':
nodeData = int(do[1])
bstree.add(nodeData)
elif operation == 'remove':
nodeData = int(do[1])
bstree.remove(nodeData)
elif operation == 'inorder':
print('Inorder Traversal: ', end='')
bstree.inorder()
print()
elif operation == 'postorder':
print('Postorder Traversal: ', end='')
bstree.postorder()
print()
elif operation == 'preorder':
print('Preorder Traversal: ', end='')
bstree.preorder()
print()
elif operation == 'quit':
print("BST Tree Implementation finished.")
break
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_training_status/signals.py
|
kaka-lin/azure-intelligent-edge-patterns
| 176 |
59899
|
"""App signals.
"""
import logging
from django.db.models.signals import post_save
from django.dispatch import receiver
from ..azure_projects.models import Project
from .models import TrainingStatus
logger = logging.getLogger(__name__)
@receiver(
signal=post_save,
sender=Project,
dispatch_uid="training_status_project_created_listener",
)
def training_status_project_created_listener(**kwargs):
"""Project create change."""
instance = kwargs["instance"]
created = kwargs["created"]
if not created:
logger.info("Project not created. Pass...")
return
logger.info("Azure Project created. Create TrainingStatus object.")
TrainingStatus.objects.update_or_create(
project_id=instance.id,
defaults={
"status": "ok",
"log": "Status : Has not configured",
"performance": "{}",
},
)
|
backend/breach/urls.py
|
Cancelll/rupture
| 184 |
59960
|
<gh_stars>100-1000
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^get_work/(?P<victim_id>\d+)$', views.get_work, name='get_work'),
url(r'^get_work$', views.get_work, name='get_work'),
url(r'^work_completed/(?P<victim_id>\d+)$', views.work_completed, name='work_completed'),
url(r'^work_completed$', views.work_completed, name='work_completed'),
url(r'^target$', views.TargetView.as_view(), name='TargetView'),
url(r'^victim$', views.VictimListView.as_view(), name='VictimListView'),
url(r'^attack$', views.AttackView.as_view(), name='AttackView'),
url(r'^victim/(?P<victim_id>\d+)/$', views.VictimDetailView.as_view(), name='VictimDetailView'),
url(r'^victim/notstarted/$', views.DiscoveredVictimsView.as_view(), name='DiscoveredVictimsView')
]
|
invoice/tests.py
|
Chris7/django-invoice
| 139 |
60020
|
<reponame>Chris7/django-invoice<filename>invoice/tests.py
import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from addressbook.models import Address, Country
from .models import Invoice
class InvoiceTestCase(TestCase):
def setUp(self):
usr = User.objects.create(username='test',
first_name='John',
last_name='Doe',
email='<EMAIL>')
country = Country.objects.create(name='TestCountry')
address = Address.objects.create(contact_name='<NAME>',
address_one='Street',
town='Town',
postcode='PostCode',
country=country)
self.inv = Invoice.objects.create(user=usr, address=address)
def testInvoiceId(self):
inv = self.inv
self.assertEquals(inv.invoice_id, u'TTH9R')
inv.invoice_id = False
inv.save()
self.assertEquals(inv.invoice_id, u'TTH9R')
def testGetDue(self):
inv = self.inv
inv.draft = True
inv.save()
self.assertEquals(len(Invoice.objects.get_due()), 0)
inv.draft = False
inv.save()
self.assertEquals(len(Invoice.objects.get_due()), 1)
inv.invoiced = True
inv.save()
self.assertEquals(len(Invoice.objects.get_due()), 0)
today = datetime.date.today()
yesterday = today - datetime.timedelta(1)
tomorrow = today + datetime.timedelta(1)
inv.invoiced = False
inv.invoice_date = yesterday
inv.save()
self.assertEquals(len(Invoice.objects.get_due()), 1)
inv.invoice_date = tomorrow
inv.save()
self.assertEquals(len(Invoice.objects.get_due()), 0)
|
Dynamic Programming/Egg Dropping Puzzle/eggDrop.py
|
muthusk07/cs-algorithms
| 239 |
60035
|
# A recursive Python program for the Egg Dropping Puzzle (a memoized dynamic-programming variant is sketched at the end of the file)
import sys
# Function to get minimum number of trials
# needed in worst case with n eggs and k floors
def eggDrop(n, k):
# If there are no floors, then no trials needed. If there is one floor, one trial needed.
if (k == 0 or k == 1):
return k
# We need k trials for one egg and k floors
if (n == 1):
return k
min = sys.maxsize
# Consider all droppings from 1st floor to kth floor and return the minimum of these values plus 1.
for x in range(1, k + 1):
res = max(eggDrop(n - 1, x - 1), eggDrop(n, k - x))
if (res < min):
min = res
return min + 1
if __name__ == "__main__":
n = 2
k = 36
#or
#n=int(input("Enter number of eggs : "))
#k=int(input("Enter number of floors : "))
print("Minimum number of trials in worst case with", n, "eggs and", k, "floors is", eggDrop(n, k))
|
testData/regex/otherFunc.py
|
alek-sun/pydantic-pycharm-plugin
| 238 |
60058
|
from pydantic import BaseModel, constr
def other_func(regex):
pass
class Model(BaseModel):
abc: str = other_func(regex='<caret>[^a-zA-Z]+')
|
testing/tests/001-main/005-unittests/002-api/008-branch.py
|
fekblom/critic
| 216 |
60073
|
<gh_stars>100-1000
# @dependency 001-main/002-createrepository.py
SHA1 = "66f25ae79dcc5e200b136388771b5924a1b5ae56"
with repository.workcopy() as work:
REMOTE_URL = instance.repository_url("alice")
work.run(["checkout", "-b", "008-branch", SHA1])
work.run(["rebase", "--force-rebase", "HEAD~5"])
work.run(["push", REMOTE_URL, "008-branch"])
sha1 = work.run(["rev-parse", "HEAD"]).strip()
try:
instance.unittest("api.branch", ["basic"],
args=["--sha1=" + sha1,
"--name=008-branch"])
finally:
work.run(["push", REMOTE_URL, ":008-branch"])
|
Scatter/scatter_multi_dimension.py
|
pyecharts/pyecharts_gallery
| 759 |
60098
|
from pyecharts import options as opts
from pyecharts.charts import Scatter
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
c = (
Scatter()
.add_xaxis(Faker.choose())
.add_yaxis(
"商家A",
[list(z) for z in zip(Faker.values(), Faker.choose())],
label_opts=opts.LabelOpts(
formatter=JsCode(
"function(params){return params.value[1] +' : '+ params.value[2];}"
)
),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="Scatter-多维度数据"),
tooltip_opts=opts.TooltipOpts(
formatter=JsCode(
"function (params) {return params.name + ' : ' + params.value[2];}"
)
),
visualmap_opts=opts.VisualMapOpts(
type_="color", max_=150, min_=20, dimension=1
),
)
.render("scatter_multi_dimension.html")
)
|
src/test/tests/databases/h5part.py
|
visit-dav/vis
| 226 |
60109
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: h5part.py
#
# Programmer: <NAME>
# Date: January, 2009
#
# Modifications:
# <NAME>, Wed Jan 21 09:36:13 PST 2009
# Took Gunther's original code and integrated it with test suite.
#
# ----------------------------------------------------------------------------
RequiredDatabasePlugin("H5Part")
TurnOffAllAnnotations()
OpenDatabase(data_path("h5part_test_data/sample.h5part"), 0)
AddPlot("Pseudocolor", "GaussianField", 1, 0)
DrawPlots()
Test("h5part_01")
ChangeActivePlotsVar("LinearField")
View3DAtts = GetView3D()
View3DAtts.viewNormal = (1.000000, 0.000000, 0.0000000)
View3DAtts.focus = (31.5, 31.5, 31.5)
View3DAtts.viewUp = (0.000000, 1.000000, 0.0000000)
View3DAtts.viewAngle = 30
View3DAtts.parallelScale = 54.5596
View3DAtts.nearPlane = -109.119
View3DAtts.farPlane = 109.119
View3DAtts.imagePan = (0, 0)
View3DAtts.imageZoom = 1
View3DAtts.perspective = 1
View3DAtts.eyeAngle = 2
View3DAtts.centerOfRotationSet = 0
View3DAtts.centerOfRotation = (31.5, 31.5, 31.5)
SetView3D(View3DAtts)
Test("h5part_02")
DeleteActivePlots()
AddPlot("Pseudocolor", "px", 1, 0)
PseudocolorAtts = PseudocolorAttributes()
PseudocolorAtts.pointType = PseudocolorAtts.Sphere
PseudocolorAtts.pointSize = 1.5
SetPlotOptions(PseudocolorAtts)
DrawPlots()
Test("h5part_03")
AddPlot("Pseudocolor", "LinearField", 1, 0)
AddOperator("Slice", 0)
SliceAtts = SliceAttributes()
SliceAtts.originType = SliceAtts.Intercept
SliceAtts.originIntercept = 30
SliceAtts.axisType = SliceAtts.XAxis
SliceAtts.project2d = 0
SliceAtts.meshName = "particles"
SetOperatorOptions(SliceAtts)
DrawPlots()
Test("h5part_04")
Exit()
|
models/spach/spach.py
|
ritwikraha/SPACH
| 104 |
60160
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from functools import partial
import torch
from torch import nn
from timm.models.layers import DropPath
from einops.layers.torch import Reduce
from .layers import DWConv, SPATIAL_FUNC, ChannelMLP, STEM_LAYER
from .misc import reshape2n
class MixingBlock(nn.Module):
def __init__(self, dim,
spatial_func=None, scaled=True, init_values=1e-4, shared_spatial_func=False,
norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., cpe=True,
num_heads=None, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., # attn
in_features=None, hidden_features=None, drop=0., # mlp
channel_ratio=2.0
):
super(MixingBlock, self).__init__()
spatial_kwargs = dict(act_layer=act_layer,
in_features=in_features, hidden_features=hidden_features, drop=drop, # mlp
dim=dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop # attn
)
self.valid_spatial_func = True
if spatial_func is not None:
if shared_spatial_func:
self.spatial_func = spatial_func
else:
self.spatial_func = spatial_func(**spatial_kwargs)
self.norm1 = norm_layer(dim)
if scaled:
self.gamma_1 = nn.Parameter(init_values * torch.ones(1, 1, dim), requires_grad=True)
else:
self.gamma_1 = 1.
else:
self.valid_spatial_func = False
self.channel_func = ChannelMLP(in_features=dim, hidden_features=int(dim*channel_ratio), act_layer=act_layer,
drop=drop)
self.norm2 = norm_layer(dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.cpe = cpe
if cpe:
self.cpe_net = DWConv(dim)
def forward(self, x):
in_x = x
if self.valid_spatial_func:
x = x + self.drop_path(self.gamma_1 * self.spatial_func(self.norm1(in_x)))
if self.cpe:
x = x + self.cpe_net(in_x)
x = x + self.drop_path(self.channel_func(self.norm2(x)))
return x
def flops(self, input_shape):
_, N, C = input_shape
flops = 0
if self.valid_spatial_func:
flops += self.spatial_func.flops(input_shape)
flops += N * C * 2 # norm + skip
if self.cpe:
flops += self.cpe_net.flops(input_shape)
flops += self.channel_func.flops(input_shape)
flops += N * C * 2
return flops
class Spach(nn.Module):
def __init__(self,
num_classes=1000,
img_size=224,
in_chans=3,
hidden_dim=384,
patch_size=16,
net_arch=None,
act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
stem_type='conv1',
scaled=True, init_values=1e-4, drop_path_rate=0., cpe=True, shared_spatial_func=False, # mixing block
num_heads=12, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., # attn
token_ratio=0.5, channel_ratio=2.0, drop_rate=0., # mlp
downstream=False,
**kwargs
):
super(Spach, self).__init__()
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.downstream = downstream
self.stem = STEM_LAYER[stem_type](
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=hidden_dim, downstream=downstream)
self.norm1 = norm_layer(hidden_dim)
block_kwargs = dict(dim=hidden_dim, scaled=scaled, init_values=init_values, cpe=cpe,
shared_spatial_func=shared_spatial_func, norm_layer=norm_layer, act_layer=act_layer,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop, # attn
in_features=self.stem.num_patches, hidden_features=int(self.stem.num_patches * token_ratio), channel_ratio=channel_ratio, drop=drop_rate) # mlp
self.blocks = self.make_blocks(net_arch, block_kwargs, drop_path_rate, shared_spatial_func)
self.norm2 = norm_layer(hidden_dim)
if not downstream:
self.pool = Reduce('b n c -> b c', reduction='mean')
self.head = nn.Linear(hidden_dim, self.num_classes)
self.init_weights()
def make_blocks(self, net_arch, block_kwargs, drop_path, shared_spatial_func):
if shared_spatial_func:
            assert len(net_arch) == 1, '`shared_spatial_func` only supports a single spatial function'
            assert net_arch[0][0] != 'pass', '`shared_spatial_func` does not support `pass`'
spatial_func = SPATIAL_FUNC[net_arch[0][0]](**block_kwargs)
else:
spatial_func = None
blocks = []
for func_type, depth in net_arch:
for i in range(depth):
blocks.append(MixingBlock(spatial_func=spatial_func or SPATIAL_FUNC[func_type], drop_path=drop_path,
**block_kwargs))
return nn.Sequential(*blocks)
def init_weights(self):
for n, m in self.named_modules():
_init_weights(m, n)
def forward_features(self, x):
x = self.stem(x)
x = reshape2n(x)
x = self.norm1(x)
x = self.blocks(x)
x = self.norm2(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.pool(x)
x = self.head(x)
return x
def flops(self):
flops = 0
shape = (1, self.stem.num_patches, self.hidden_dim)
# stem
flops += self.stem.flops()
flops += sum(shape)
# blocks
flops += sum([i.flops(shape) for i in self.blocks])
flops += sum(shape)
# head
flops += self.hidden_dim * self.num_classes
return flops
def _init_weights(m, n: str):
if isinstance(m, nn.Linear):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.zeros_(m.bias)
else:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
if 'mlp' in n:
nn.init.normal_(m.bias, std=1e-6)
else:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
|
tests/nn/data_parallel/test_fsdp_grad_scaler.py
|
zhaojuanmao/fairscale
| 1,662 |
60197
|
<reponame>zhaojuanmao/fairscale
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with grad scaler. """
import os
import random
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairscale.nn import FullyShardedDataParallel
from fairscale.optim.grad_scaler import ShardedGradScaler
from fairscale.utils.testing import skip_if_no_cuda
try:
from torch.cuda.amp import autocast
except ImportError:
# Older version doesn't support autocast. Skip this file.
pytestmark = pytest.mark.skip
# Mixed precision needs cuda.
@skip_if_no_cuda
def test_scaler_cpu_offload_breaks():
device = torch.device("cuda")
torch.cuda.set_device(0)
    # Use a random port so that quickly repeated test runs do not conflict on the same port.
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(random.randint(2000, 3000))
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
try:
scaler = ShardedGradScaler()
model = FullyShardedDataParallel(nn.Linear(5, 5), cpu_offload=True, mixed_precision=True)
optim = torch.optim.SGD(model.parameters(), lr=1e-3)
input = torch.rand((1, 5), dtype=torch.float).to(device)
optim.zero_grad()
with autocast():
output = model(input)
loss = F.mse_loss(input, output)
scaler.scale(loss).backward()
# TODO (Min): Need to fix. Details in issue #421.
with pytest.raises(RuntimeError):
scaler.step(optim)
scaler.update()
finally:
# Clean-up is important or the next test in this file may fail to init the PG.
torch.distributed.destroy_process_group()
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
|
veles/external/freetype/ft_enums/ft_encodings.py
|
AkshayJainG/veles
| 1,007 |
60208
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2012 <NAME>
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
"""
An enumeration used to specify character sets supported by charmaps. Used in
the FT_Select_Charmap API function.
FT_ENCODING_NONE
The encoding value 0 is reserved.
FT_ENCODING_UNICODE
Corresponds to the Unicode character set. This value covers all versions of
the Unicode repertoire, including ASCII and Latin-1. Most fonts include a
Unicode charmap, but not all of them.
For example, if you want to access Unicode value U+1F028 (and the font
contains it), use value 0x1F028 as the input value for FT_Get_Char_Index.
FT_ENCODING_MS_SYMBOL
Corresponds to the Microsoft Symbol encoding, used to encode mathematical
symbols in the 32..255 character code range. For more information, see
'http://www.ceviz.net/symbol.htm'.
FT_ENCODING_SJIS
Corresponds to Japanese SJIS encoding. More info at
'http://langsupport.japanreference.com/encoding.shtml'. See note on
multi-byte encodings below.
FT_ENCODING_GB2312
Corresponds to an encoding system for Simplified Chinese as used in
mainland China.
FT_ENCODING_BIG5
Corresponds to an encoding system for Traditional Chinese as used in Taiwan
and Hong Kong.
FT_ENCODING_WANSUNG
Corresponds to the Korean encoding system known as Wansung. For more
information see 'http://www.microsoft.com/typography/unicode/949.txt'.
FT_ENCODING_JOHAB
The Korean standard character set (KS C 5601-1992), which corresponds to MS
Windows code page 1361. This character set includes all possible Hangeul
character combinations.
FT_ENCODING_ADOBE_LATIN_1
Corresponds to a Latin-1 encoding as defined in a Type 1 PostScript font. It
is limited to 256 character codes.
FT_ENCODING_ADOBE_STANDARD
Corresponds to the Adobe Standard encoding, as found in Type 1, CFF, and
OpenType/CFF fonts. It is limited to 256 character codes.
FT_ENCODING_ADOBE_EXPERT
Corresponds to the Adobe Expert encoding, as found in Type 1, CFF, and
OpenType/CFF fonts. It is limited to 256 character codes.
FT_ENCODING_ADOBE_CUSTOM
Corresponds to a custom encoding, as found in Type 1, CFF, and OpenType/CFF
fonts. It is limited to 256 character codes.
FT_ENCODING_APPLE_ROMAN
Corresponds to the 8-bit Apple roman encoding. Many TrueType and OpenType
fonts contain a charmap for this encoding, since older versions of Mac OS are
able to use it.
FT_ENCODING_OLD_LATIN_2
This value is deprecated and was never used nor reported by FreeType. Don't
use or test for it.
"""
def _FT_ENC_TAG(a, b, c, d):
return (ord(a) << 24 | ord(b) << 16 | ord(c) << 8 | ord(d))
FT_ENCODINGS = {'FT_ENCODING_NONE': _FT_ENC_TAG('\0', '\0', '\0', '\0'),
'FT_ENCODING_MS_SYMBOL': _FT_ENC_TAG('s', 'y', 'm', 'b'),
'FT_ENCODING_UNICODE': _FT_ENC_TAG('u', 'n', 'i', 'c'),
'FT_ENCODING_SJIS': _FT_ENC_TAG('s', 'j', 'i', 's'),
'FT_ENCODING_GB2312': _FT_ENC_TAG('g', 'b', ' ', ' '),
'FT_ENCODING_BIG5': _FT_ENC_TAG('b', 'i', 'g', '5'),
'FT_ENCODING_WANSUNG': _FT_ENC_TAG('w', 'a', 'n', 's'),
'FT_ENCODING_JOHAB': _FT_ENC_TAG('j', 'o', 'h', 'a'),
'FT_ENCODING_ADOBE_STANDARD': _FT_ENC_TAG('A', 'D', 'O', 'B'),
'FT_ENCODING_ADOBE_EXPERT': _FT_ENC_TAG('A', 'D', 'B', 'E'),
'FT_ENCODING_ADOBE_CUSTOM': _FT_ENC_TAG('A', 'D', 'B', 'C'),
'FT_ENCODING_ADOBE_LATIN1': _FT_ENC_TAG('l', 'a', 't', '1'),
'FT_ENCODING_OLD_LATIN2': _FT_ENC_TAG('l', 'a', 't', '2'),
'FT_ENCODING_APPLE_ROMAN': _FT_ENC_TAG('a', 'r', 'm', 'n')}
globals().update(FT_ENCODINGS)
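# Illustrative sanity check (not part of the original module): each encoding value is
# a 32-bit tag packed from four characters, so it can be unpacked again with bit shifts.
def _decode_tag(tag):
    return ''.join(chr((tag >> shift) & 0xff) for shift in (24, 16, 8, 0))
if __name__ == '__main__':
    assert _decode_tag(FT_ENCODINGS['FT_ENCODING_UNICODE']) == 'unic'
    assert _decode_tag(FT_ENCODING_SJIS) == 'sjis'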
|
benchmarks/perf_tabletunnel.py
|
Emersonxuelinux/aliyun-odps-python-sdk
| 412 |
60229
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import time
import cProfile
from pstats import Stats
# switch on to run in pure Python mode
from odps import options
# options.force_py = True
from odps.compat import unittest, Decimal
from odps.tests.core import TestBase
from odps.models import Schema
from datetime import datetime
# remember to reset False before committing
ENABLE_PROFILE = False
DUMP_PROFILE = False
class Test(TestBase):
COMPRESS_DATA = True
BUFFER_SIZE = 1024*1024
DATA_AMOUNT = 100000
STRING_LITERAL = "Soft kitty, warm kitty, little ball of fur; happy kitty, sleepy kitty, purr, purr"
def setUp(self):
TestBase.setUp(self)
if ENABLE_PROFILE:
self.pr = cProfile.Profile()
self.pr.enable()
fields = ['a', 'b', 'c', 'd', 'e', 'f']
types = ['bigint', 'double', 'datetime', 'boolean', 'string', 'decimal']
self.SCHEMA = Schema.from_lists(fields, types)
def tearDown(self):
if ENABLE_PROFILE:
if DUMP_PROFILE:
self.pr.dump_stats('profile.out')
p = Stats(self.pr)
p.strip_dirs()
p.sort_stats('time')
p.print_stats(40)
p.print_callees('types.py:846\(validate_value', 20)
p.print_callees('types.py:828\(_validate_primitive_value', 20)
p.print_callees('tabletunnel.py:185\(write', 20)
TestBase.teardown(self)
def testWrite(self):
table_name = 'pyodps_test_tunnel_write_performance'
self.odps.create_table(table_name, schema=self.SCHEMA, if_not_exists=True)
ss = self.tunnel.create_upload_session(table_name)
r = ss.new_record()
start = time.time()
with ss.open_record_writer(0) as writer:
for i in range(self.DATA_AMOUNT):
r[0] = 2**63-1
r[1] = 0.0001
r[2] = datetime(2015, 11, 11)
r[3] = True
r[4] = self.STRING_LITERAL
r[5] = Decimal('3.15')
writer.write(r)
n_bytes = writer.n_bytes
print(n_bytes, 'bytes', float(n_bytes) / 1024 / 1024 / (time.time() - start), 'MiB/s')
ss.commit([0])
self.odps.delete_table(table_name, if_exists=True)
def testRead(self):
table_name = 'pyodps_test_tunnel_read_performance'
self.odps.delete_table(table_name, if_exists=True)
t = self.odps.create_table(table_name, schema=self.SCHEMA)
def gen_data():
for i in range(self.DATA_AMOUNT):
r = t.new_record()
r[0] = 2 ** 63 - 1
r[1] = 0.0001
r[2] = datetime(2015, 11, 11)
r[3] = True
r[4] = self.STRING_LITERAL
r[5] = Decimal('3.15')
yield r
self.odps.write_table(t, gen_data())
if ENABLE_PROFILE:
self.pr = cProfile.Profile()
self.pr.enable()
ds = self.tunnel.create_download_session(table_name)
start = time.time()
cnt = 0
with ds.open_record_reader(0, ds.count) as reader:
for _ in reader:
cnt += 1
n_bytes = reader.n_bytes
print(n_bytes, 'bytes', float(n_bytes) / 1024 / 1024 / (time.time() - start), 'MiB/s')
self.assertEqual(self.DATA_AMOUNT, cnt)
self.odps.delete_table(table_name, if_exists=True)
def testBufferedWrite(self):
table_name = 'test_tunnel_bufferred_write'
self.odps.create_table(table_name, schema=self.SCHEMA, if_not_exists=True)
ss = self.tunnel.create_upload_session(table_name)
r = ss.new_record()
start = time.time()
with ss.open_record_writer(buffer_size=self.BUFFER_SIZE, compress=self.COMPRESS_DATA) as writer:
for i in range(self.DATA_AMOUNT):
r[0] = 2**63-1
r[1] = 0.0001
r[2] = datetime(2015, 11, 11)
r[3] = True
r[4] = self.STRING_LITERAL
r[5] = Decimal('3.15')
writer.write(r)
n_bytes = writer.n_bytes
print(n_bytes, 'bytes', float(n_bytes) / 1024 / 1024 / (time.time() - start), 'MiB/s')
ss.commit(writer.get_blocks_written())
self.odps.delete_table(table_name, if_exists=True)
if __name__ == '__main__':
unittest.main()
|
WebMirror/OutputFilters/util/feedNameLut.py
|
awesome-archive/ReadableWebProxy
| 193 |
60257
|
import urllib.parse
import cachetools
from common import database as db
from sqlalchemy.orm import joinedload
FEED_LOOKUP_CACHE = cachetools.LRUCache(maxsize=200)
def patch_blogspot(innetloc):
    assert isinstance(innetloc, str), "Expected str, received %s" % type(innetloc)
# Blogspot domains are coerced to ".com" since they seem to localize their TLD,
# and somehow it all points to the same place in the end.
if ".blogspot." in innetloc and not innetloc.endswith(".blogspot.com"):
prefix = innetloc.split(".blogspot.")[0]
innetloc = prefix + ".blogspot.com"
return innetloc
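# Illustrative examples (hypothetical inputs, added for clarity):
#   patch_blogspot("somesite.blogspot.co.uk") -> "somesite.blogspot.com"
#   patch_blogspot("example.wordpress.com")   -> "example.wordpress.com"  (unchanged)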
def get_name_for_netloc_db(db_sess, netloc):
if netloc in FEED_LOOKUP_CACHE:
return FEED_LOOKUP_CACHE[netloc]
row = db_sess.query(db.RssFeedUrlMapper) \
.filter(db.RssFeedUrlMapper.feed_netloc == netloc) \
.options(joinedload('feed_entry')) \
.all()
if not row:
return False
if len(row) > 1:
print("ERROR: Multiple solutions for netloc %s?" % netloc)
feedname = row[0].feed_entry.feed_name
if feedname:
FEED_LOOKUP_CACHE[netloc] = feedname
return feedname
else:
return False
def getNiceName(session, srcurl, netloc=None, debug=False):
if netloc:
        assert isinstance(netloc, str), "Expected str, received %s" % type(netloc)
srcnetloc = netloc
elif srcurl:
        assert isinstance(srcurl, str), "Expected str, received %s" % type(srcurl)
srcnetloc = urllib.parse.urlparse(srcurl).netloc
else:
raise RuntimeError("You need to at least pass a srcurl or netloc!")
srcnetloc = patch_blogspot(srcnetloc)
val = get_name_for_netloc_db(session, srcnetloc)
return val
|
fluent.docs/fluent/docs/__init__.py
|
shlomyb-di/python-fluent
| 155 |
60262
|
from pathlib import Path
from .build import DocBuilder
def finalize_builddir(repo_name):
'Bookkeeping on the docs build directory'
root = Path('_build') / repo_name
with open(root / '.nojekyll', 'w') as fh:
fh.write('')
def build_root(repo_name):
'''Build the top-level documentation.
See :py:mod:`.build` on building sub-projects.
'''
with DocBuilder(repo_name, '.') as builder:
builder.build()
|
SimCalorimetry/EcalSimProducers/python/esEcalLiteDTUPedestalsProducer_cfi.py
|
ckamtsikis/cmssw
| 852 |
60265
|
<filename>SimCalorimetry/EcalSimProducers/python/esEcalLiteDTUPedestalsProducer_cfi.py
import FWCore.ParameterSet.Config as cms
EcalLiteDTUPedestalsRcd = cms.ESSource("EmptyESSource",
recordName = cms.string("EcalLiteDTUPedestalsRcd"),
firstValid = cms.vuint32(1),
iovIsRunNotTime = cms.bool(True)
)
EcalLiteDTUPedestals = cms.ESProducer(
"EcalLiteDTUPedestalsESProducer",
ComponentName = cms.string('EcalLiteDTUPedestalProducer'),
MeanPedestalsGain10 = cms.double(12),
RMSPedestalsGain10 = cms.double(2.5),
MeanPedestalsGain1 = cms.double(12.),
RMSPedestalsGain1 = cms.double(2.)
)
|
examples/torch-starter/extract-coco-features.py
|
dapatil211/Jacinle
| 114 |
60338
|
<reponame>dapatil211/Jacinle
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : extract-coco-features.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 11/27/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
"""
Extracting features from the MS-COCO dataset.
Examples:
jac-crun 0 extract-coco-features.py --caption /mnt/localssd1/coco/annotations/captions_train2014.json --image-root /mnt/localssd1/coco/raw/train2014 --output /mnt/localssd2/train.h5
"""
import os.path as osp
import queue
import threading
from PIL import Image
import torch
import torch.nn as nn
import torch.cuda as cuda
import torch.backends.cudnn as cudnn
from torch.utils.data.dataset import Dataset
import jacinle.io as io
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from jacinle.utils.container import GView
from jacinle.utils.tqdm import tqdm
from jactorch.cuda.copy import async_copy_to
logger = get_logger(__file__)
io.set_fs_verbose(True)
parser = JacArgumentParser()
parser.add_argument('--caption', required=True, type='checked_file', help='caption annotations (*.json)')
parser.add_argument('--image-root', required=True, type='checked_dir', help='image directory')
parser.add_argument('--output', required=True, help='output .h5 file')
parser.add_argument('--image-size', default=224, type=int, metavar='N', help='input image size')
parser.add_argument('--batch-size', default=64, type=int, metavar='N', help='batch size')
parser.add_argument('--data-workers', type=int, default=4, metavar='N', help='the num of workers that input training data')
parser.add_argument('--use-gpu', type='bool', default=True, metavar='B', help='use GPU or not')
parser.add_argument('--force-gpu', action='store_true', help='force the script to use GPUs, useful when there exists on-the-ground devices')
args = parser.parse_args()
args.output_images_json = osp.splitext(args.output)[0] + '.images.json'
if args.use_gpu:
nr_devs = cuda.device_count()
if args.force_gpu and nr_devs == 0:
nr_devs = 1
assert nr_devs > 0, 'No GPU device available'
args.gpus = [i for i in range(nr_devs)]
args.gpu_parallel = (nr_devs > 1)
class COCOImageDataset(Dataset):
def __init__(self, images, image_root, image_transform):
self.images = images
self.image_root = image_root
self.image_transform = image_transform
def __getitem__(self, index):
info = self.images[index]
feed_dict = GView()
feed_dict.image_filename = info['file_name']
if self.image_root is not None:
feed_dict.image = Image.open(osp.join(self.image_root, feed_dict.image_filename)).convert('RGB')
feed_dict.image = self.image_transform(feed_dict.image)
return feed_dict.raw()
def __len__(self):
return len(self.images)
def make_dataloader(self, batch_size, shuffle, drop_last, nr_workers):
from jactorch.data.dataloader import JacDataLoader
from jactorch.data.collate import VarLengthCollateV2
collate_guide = {
'image_filename': 'skip',
}
return JacDataLoader(
self, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last,
num_workers=nr_workers, pin_memory=True,
collate_fn=VarLengthCollateV2(collate_guide)
)
class FeatureExtractor(nn.Module):
def __init__(self):
super().__init__()
import jactorch.models.vision.resnet as resnet
self.resnet = resnet.resnet152(pretrained=True, incl_gap=False, num_classes=None)
def forward(self, feed_dict):
feed_dict = GView(feed_dict)
f = self.resnet(feed_dict.image)
output_dict = {'features': f}
return output_dict
class AsyncWriter(object):
def __init__(self, output_file, total_size):
self.output_file = output_file
self.total_size = total_size
self.queue = queue.Queue(maxsize=5)
self.output_dataset = None
self.thread = threading.Thread(target=self.target)
self.thread.start()
def feed(self, payload):
self.queue.put(payload)
def join(self):
self.queue.put(None)
self.thread.join()
def target(self):
cur_idx = 0
while True:
payload = self.queue.get()
if payload is None:
break
output_dict = payload
if self.output_dataset is None:
logger.info('Initializing the dataset.')
self.output_dataset = {
k: self.output_file.create_dataset(k, (self.total_size, ) + v.size()[1:], dtype='float32')
for k, v in output_dict.items()
}
for k, v in output_dict.items():
next_idx = cur_idx + v.size(0)
self.output_dataset[k][cur_idx:next_idx] = v.cpu().numpy()
cur_idx = next_idx
def main():
logger.critical('Loading the dataset.')
data = io.load(args.caption)
# Step 1: filter out images.
images = {c['image_id'] for c in data['annotations']}
# Step 2: build a reverse mapping for images.
id2image = {i['id']: i for i in data['images']}
images = [id2image[i] for i in images]
import torchvision.transforms as T
image_transform = T.Compose([
T.Resize((args.image_size, args.image_size)),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
dataset = COCOImageDataset(images, args.image_root, image_transform)
logger.critical('Building the model.')
model = FeatureExtractor()
if args.use_gpu:
model.cuda()
if args.gpu_parallel:
from jactorch.parallel import JacDataParallel
model = JacDataParallel(model, device_ids=args.gpus).cuda()
cudnn.benchmark = True
model.eval()
dataloader = dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)
output_file = io.open_h5(args.output, 'w')
writer = AsyncWriter(output_file, total_size=len(dataset))
for feed_dict in tqdm(dataloader, total=len(dataloader), desc='Extracting features'):
if args.use_gpu:
feed_dict = async_copy_to(feed_dict, 0)
with torch.no_grad():
output_dict = model(feed_dict)
writer.feed(output_dict)
writer.join()
output_file.close()
io.dump(args.output_images_json, images)
if __name__ == '__main__':
main()
|
test/tomography/test_gateset_tomography.py
|
paulineollitrault/qiskit-ignis
| 182 |
60366
|
# -*- coding: utf-8 -*-
#
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring,invalid-name
import unittest
import numpy as np
from qiskit import Aer
from qiskit.compiler import assemble
from qiskit.ignis.verification.tomography import GatesetTomographyFitter
from qiskit.ignis.verification.tomography import gateset_tomography_circuits
from qiskit.ignis.verification.tomography.basis import default_gateset_basis
from qiskit.providers.aer.noise import NoiseModel
from qiskit.extensions import HGate, SGate
from qiskit.quantum_info import PTM
class TestGatesetTomography(unittest.TestCase):
@staticmethod
def collect_tomography_data(shots=10000,
noise_model=None,
gateset_basis='Default'):
backend_qasm = Aer.get_backend('qasm_simulator')
circuits = gateset_tomography_circuits(gateset_basis=gateset_basis)
qobj = assemble(circuits, shots=shots)
result = backend_qasm.run(qobj, noise_model=noise_model).result()
fitter = GatesetTomographyFitter(result, circuits, gateset_basis)
return fitter
@staticmethod
def expected_linear_inversion_gates(Gs, Fs):
rho = Gs['rho']
E = Gs['E']
B = np.array([(F @ rho).T[0] for F in Fs]).T
BB = np.linalg.inv(B)
gates = {label: BB @ G @ B for (label, G) in Gs.items()
if label not in ['E', 'rho']}
gates['E'] = E @ B
gates['rho'] = BB @ rho
return gates
@staticmethod
def hs_distance(A, B):
return sum([np.abs(x) ** 2 for x in np.nditer(A-B)])
@staticmethod
def convert_from_ptm(vector):
Id = np.sqrt(0.5) * np.array([[1, 0], [0, 1]])
X = np.sqrt(0.5) * np.array([[0, 1], [1, 0]])
Y = np.sqrt(0.5) * np.array([[0, -1j], [1j, 0]])
Z = np.sqrt(0.5) * np.array([[1, 0], [0, -1]])
v = vector.reshape(4)
return v[0] * Id + v[1] * X + v[2] * Y + v[3] * Z
def compare_gates(self, expected_gates, result_gates, labels, delta=0.2):
for label in labels:
expected_gate = expected_gates[label]
result_gate = result_gates[label].data
msg = "Failure on gate {}: Expected gate = \n{}\n" \
"vs Actual gate = \n{}".format(label,
expected_gate,
result_gate)
distance = self.hs_distance(expected_gate, result_gate)
self.assertAlmostEqual(distance, 0, delta=delta, msg=msg)
def run_test_on_basis_and_noise(self,
gateset_basis='Default',
noise_model=None,
noise_ptm=None):
if gateset_basis == 'Default':
gateset_basis = default_gateset_basis()
labels = gateset_basis.gate_labels
gates = gateset_basis.gate_matrices
gates['rho'] = np.array([[np.sqrt(0.5)], [0], [0], [np.sqrt(0.5)]])
gates['E'] = np.array([[np.sqrt(0.5), 0, 0, np.sqrt(0.5)]])
# apply noise if given
for label in labels:
if label != "Id" and noise_ptm is not None:
gates[label] = noise_ptm @ gates[label]
Fs = [gateset_basis.spam_matrix(label)
for label in gateset_basis.spam_labels]
# prepare the fitter
fitter = self.collect_tomography_data(shots=10000,
noise_model=noise_model,
gateset_basis=gateset_basis)
# linear inversion test
result_gates = fitter.linear_inversion()
expected_gates = self.expected_linear_inversion_gates(gates, Fs)
self.compare_gates(expected_gates, result_gates, labels + ['E', 'rho'])
# fitter optimization test
result_gates = fitter.fit()
expected_gates = gates
expected_gates['E'] = self.convert_from_ptm(expected_gates['E'])
expected_gates['rho'] = self.convert_from_ptm(expected_gates['rho'])
self.compare_gates(expected_gates, result_gates, labels + ['E', 'rho'])
def test_noiseless_standard_basis(self):
self.run_test_on_basis_and_noise()
def test_noiseless_h_gate_standard_basis(self):
basis = default_gateset_basis()
basis.add_gate(HGate())
self.run_test_on_basis_and_noise(gateset_basis=basis)
def test_noiseless_s_gate_standard_basis(self):
basis = default_gateset_basis()
basis.add_gate(SGate())
self.run_test_on_basis_and_noise(gateset_basis=basis)
def test_amplitude_damping_standard_basis(self):
gamma = 0.05
noise_ptm = PTM(np.array([[1, 0, 0, 0],
[0, np.sqrt(1-gamma), 0, 0],
[0, 0, np.sqrt(1-gamma), 0],
[gamma, 0, 0, 1-gamma]]))
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(noise_ptm, ['u1', 'u2', 'u3'])
self.run_test_on_basis_and_noise(noise_model=noise_model,
noise_ptm=np.real(noise_ptm.data))
def test_depolarization_standard_basis(self):
p = 0.05
noise_ptm = PTM(np.array([[1, 0, 0, 0],
[0, 1-p, 0, 0],
[0, 0, 1-p, 0],
[0, 0, 0, 1-p]]))
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(noise_ptm, ['u1', 'u2', 'u3'])
self.run_test_on_basis_and_noise(noise_model=noise_model,
noise_ptm=np.real(noise_ptm.data))
if __name__ == '__main__':
unittest.main()
|
tests/unit/lookups/handlers/test_env.py
|
avosper-intellaegis/runway
| 134 |
60377
|
"""Tests for lookup handler for env."""
# pylint: disable=no-self-use
# pyright: basic
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from runway.lookups.handlers.env import EnvLookup
if TYPE_CHECKING:
from ...factories import MockRunwayContext
ENV_VARS = {"str_val": "test"}
class TestEnvLookup:
"""Tests for EnvLookup."""
def test_handle(self, runway_context: MockRunwayContext) -> None:
"""Validate handle base functionality."""
runway_context.env.vars = ENV_VARS.copy()
result = EnvLookup.handle("str_val", context=runway_context)
assert result == "test"
def test_handle_not_found(self, runway_context: MockRunwayContext) -> None:
"""Validate exception when lookup cannot be resolved."""
runway_context.env.vars = ENV_VARS.copy()
with pytest.raises(ValueError):
EnvLookup.handle("NOT_VALID", context=runway_context)
|
websploit/modules/wifi_fap_spam.py
|
albiahbi/websploit
| 803 |
60399
|
<reponame>albiahbi/websploit
from scapy.all import *
from websploit.core import base
from threading import Thread
from websploit.core.utils import get_fake_mac, get_fake_name
conf.verb = 0
class Main(base.Module):
"""Spamming Fake access points """
parameters = {
"iface": "wlan0mon",
"count": 10,
}
completions = list(parameters.keys())
def do_execute(self, line):
"""Execute current module"""
process_list = []
try:
for _ in range(int(self.parameters['count'])):
name = get_fake_name()
mac = get_fake_mac()
p = Thread(target=SpawnAP, args=(name, mac, self.parameters['iface']))
process_list.append(p)
p.start()
self.cp.success(text=f"Access point name : {name} - MAC {mac} started.")
self.cp.info("Press Ctrl+C for stop ...")
input("")
except KeyboardInterrupt:
self.cp.warning("\nKilling all access points, please wait ...")
# for p in process_list:
# p.terminate()
# p.join()
self.cp.success("Done.")
def complete_set(self, text, line, begidx, endidx):
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in self.completions if s.startswith(mline)]
class SpawnAP:
def __init__(self, ssid, mac, iface):
self.ssid = ssid
self.mac = mac
self.iface = iface
self.run()
def run(self):
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=self.mac, addr3=self.mac)
beacon = Dot11Beacon()
essid = Dot11Elt(ID="SSID", info=self.ssid, len=len(self.ssid))
rsn = Dot11Elt(ID='RSNinfo', info=(
'\x01\x00' # RSN Version 1
'\x00\x0f\xac\x02' # Group Cipher Suite : 00-0f-ac TKIP
'\x02\x00' # 2 Pairwise Cipher Suites (next two lines)
'\x00\x0f\xac\x04' # AES Cipher
'\x00\x0f\xac\x02' # TKIP Cipher
            '\x01\x00'  # 1 Authentication Key Management Suite (line below)
'\x00\x0f\xac\x02' # Pre-Shared Key
'\x00\x00')) # RSN Capabilities (no extra capabilities)
frame = RadioTap()/dot11/beacon/essid/rsn
sendp(frame, inter=0.1, iface=self.iface, loop=1)
|
latest version/web/login.py
|
AzusaYukina/Arcaea-server
| 162 |
60405
|
#import sqlite3
from flask import (Blueprint, flash, g, redirect,
render_template, request, session, url_for)
import functools
from setting import Config
import hashlib
bp = Blueprint('login', __name__, url_prefix='/web')
@bp.route('/login', methods=('GET', 'POST'))
def login():
    # Log in
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
error = None
if username != Config.USERNAME or password != Config.PASSWORD:
error = '错误的用户名或密码 Incorrect username or password.'
if error is None:
session.clear()
hash_session = username + \
hashlib.sha256(password.encode("utf8")).hexdigest()
hash_session = hashlib.sha256(
hash_session.encode("utf8")).hexdigest()
session['user_id'] = hash_session
return redirect(url_for('index.index'))
flash(error)
return render_template('web/login.html')
@bp.route('/logout')
def logout():
    # Log out
session.clear()
flash('成功登出 Successfully log out.')
return redirect(url_for('index.index'))
def login_required(view):
    # Login check, implemented as a decorator
@functools.wraps(view)
def wrapped_view(**kwargs):
x = session.get('user_id')
hash_session = Config.USERNAME + \
hashlib.sha256(Config.PASSWORD.encode("utf8")).hexdigest()
hash_session = hashlib.sha256(hash_session.encode("utf8")).hexdigest()
if x != hash_session:
return redirect(url_for('login.login'))
g.user = {'user_id': x, 'username': Config.USERNAME}
return view(**kwargs)
return wrapped_view
|
simple_rl/tasks/trench/TrenchOOMDPState.py
|
david-abel/mdps
| 230 |
60412
|
from simple_rl.mdp.oomdp.OOMDPStateClass import OOMDPState
class TrenchOOMDPState(OOMDPState):
''' Class for Trench World States '''
def __init__(self, objects):
OOMDPState.__init__(self, objects=objects)
def get_agent_x(self):
return self.objects["agent"][0]["x"]
def get_agent_y(self):
return self.objects["agent"][0]["y"]
def __hash__(self):
        state_hash = str(self.get_agent_x()) + str(self.get_agent_y()) + str(self.objects["agent"][0]["dx"] + 1)\
                     + str(self.objects["agent"][0]["dy"] + 1) + str(self.objects["agent"][0]["dest_x"])\
                     + str(self.objects["agent"][0]["dest_y"]) + \
                     str(self.objects["agent"][0]["has_block"]) + "00"
for b in self.objects["block"]:
state_hash += str(b["x"]) + str(b["y"])
state_hash += "00"
for l in self.objects["lava"]:
state_hash += str(l["x"]) + str(l["y"])
return int(state_hash)
def __eq__(self, other_trench_state):
return hash(self) == hash(other_trench_state)
|
tests/schema/test_plot.py
|
vishalbelsare/FinMind
| 1,106 |
60447
|
<gh_stars>1000+
import os
import pytest
from FinMind.data import DataLoader
from FinMind.schema.plot import Labels, Series, convert_labels_series_schema
@pytest.fixture(scope="module")
def df():
user_id = os.environ.get("FINMIND_USER", "")
password = os.environ.get("FINMIND_PASSWORD", "")
data_loader = DataLoader()
data_loader.login(user_id, password)
df = data_loader.taiwan_stock_month_revenue(
stock_id="2890", start_date="2018-1M", end_date="2021-7M"
)
df["labels"] = (
df[["revenue_year", "revenue_month"]]
.astype(str)
.apply(lambda date: f"{date[0]}-{date[1]}M", axis=1)
)
df["series"] = df["revenue"].map(lambda value: round(value * 1e-8, 2))
return df
def test_Labels(df):
labels = df.to_dict("list")["labels"]
assert Labels(labels=labels)
def test_Series(df):
series = df.to_dict("list")["series"]
assert Series(series=series)
def test_convert_labels_series_schema(df):
labels = df.to_dict("list")["labels"]
series = df.to_dict("list")["series"]
labels, series = convert_labels_series_schema(labels=labels, series=series)
assert isinstance(labels, Labels)
assert isinstance(series, Series)
|
quokka/utils/custom_vars.py
|
songshansitulv/quokka
| 1,141 |
60451
|
<filename>quokka/utils/custom_vars.py
# coding: utf-8
from dynaconf.utils.parse_conf import parse_conf_data
def parse_data(data):
"""Return converted data from @int, @float, @bool, @json markers"""
return parse_conf_data(data)
def custom_var_dict(cvarlist):
cvarlist = cvarlist or []
return {
cvar['key']: parse_data(cvar['value'])
for cvar in cvarlist
}
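# Illustrative example (assuming dynaconf's standard "@" markers):
#   custom_var_dict([{'key': 'limit', 'value': '@int 10'}]) -> {'limit': 10}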
|
test/nodes/test_accumulate.py
|
HerySon/timeflux
| 123 |
60461
|
<reponame>HerySon/timeflux
"""Tests for accumulate.py"""
import pandas as pd
import xarray as xr
from timeflux.helpers.testing import DummyData, DummyXArray
from timeflux.nodes.accumulate import AppendDataFrame, AppendDataArray
xarray_data = DummyXArray()
pandas_data = DummyData()
def test_append_dataframe():
""""Test node AppendDataFrame"""
node = AppendDataFrame()
pandas_data.reset()
node.clear()
# gate is not closed, data should be accumulated but not released
# first chunk
node.i.data = pandas_data.next(5)
node.update()
# assert no output
assert node.o.data == None
# assert the data has been buffered
pd.testing.assert_frame_equal(pandas_data._data.iloc[:5, :], node._data)
# second chunk
node.clear()
node.i.data = pandas_data.next(10)
node.update()
# assert no output
assert node.o.data == None
# assert the buffer is the concatenation of the 2 accumulated chunks
pd.testing.assert_frame_equal(pandas_data._data.iloc[:15, :], node._data)
# now a meta is received, assessing that the gate has just closed
node.i.data = pandas_data.next(5)
node.i.meta = {'gate_status': 'closed'}
node.update()
# assert output data is the concatenation of the 3 chunks
pd.testing.assert_frame_equal(pandas_data._data.iloc[:20, :], node.o.data)
def test_append_dataarray():
""""Test node AppendDataArray"""
node = AppendDataArray(dim='time')
xarray_data.reset()
node.clear()
# gate is not closed, data should be accumulated but not released
# first chunk
node.i.data = xarray_data.next(5)
node.update()
# assert no output
assert node.o.data == None
# assert the data has been buffered
xr.testing.assert_equal(xarray_data._data.isel({'time': slice(0, 5)}), node._data_list[0])
# second chunk
node.clear()
node.i.data = xarray_data.next(10)
node.update()
# assert no output
assert node.o.data == None
# assert the buffer is the concatenation of the 2 accumulated chunks
xr.testing.assert_equal(xarray_data._data.isel({'time': slice(5, 15)}), node._data_list[1])
# now a meta is received, assessing that the gate has just closed
node.i.data = xarray_data.next(5)
node.i.meta = {'gate_status': 'closed'}
node.update()
# assert output data is the concatenation of the 3 chunks
xr.testing.assert_equal(xarray_data._data.isel({'time': slice(0, 20)}), node.o.data)
|
__init__.py
|
nickm324/sensor.rpi_power
| 297 |
60519
|
"""Raspberry Pi Power Supply Checker"""
|
Misc/jython_checker.py
|
jeff5/jython-whinchat
| 577 |
60530
|
<gh_stars>100-1000
import sys
def usage():
print 'Usage: jython jython_checker.py <module name created by make_checker>'
sys.exit(1)
if not len(sys.argv) == 2:
usage()
checker_name = sys.argv[1].split('.')[0]  # pop off the .py if needed
try:
checker = __import__(checker_name)
except:
print 'No module "%s" found' % checker_name
usage()
import make_checker
ignored_types = ['frame',
'code',
'traceback']
checks = []
for check in checker.checks:
index, expected_type, expected_bases, expected_dict = check
if checker.names[index] in ignored_types:
print 'Skipping', checker.names[index]
continue
checks.append(check)
ignored_members = ['__getattribute__', '__doc__']
ok, missing, bad_type, different = make_checker.do_check(checker.names, checks)
def strip_ignored(differences, key, ignored):
    if key not in differences:
        return
    problems = differences[key]
    for member in ignored:
        if member in problems:
            problems.remove(member)
for t, name, differences in different:
strip_ignored(differences, 'missing', ignored_members)
strip_ignored(differences, 'extras', ignored_members)
make_checker.report(ok, missing, bad_type, different)
|
bcs-ui/backend/templatesets/legacy_apps/configuration/migrations/0015_auto_20171227_1502.py
|
laodiu/bk-bcs
| 599 |
60650
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.5 on 2017-12-27 07:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('configuration', '0014_auto_20171225_1112'),
]
operations = [
migrations.CreateModel(
name='ShowVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creator', models.CharField(max_length=32, verbose_name='创建者')),
('updator', models.CharField(max_length=32, verbose_name='更新者')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('deleted_time', models.DateTimeField(blank=True, null=True)),
('template_id', models.IntegerField(verbose_name='关联的模板 ID')),
('real_version_id', models.IntegerField(verbose_name='关联的VersionedEntity ID')),
('name', models.CharField(max_length=32, verbose_name='版本名称')),
],
),
migrations.AddField(
model_name='template',
name='draft',
field=models.TextField(default='', verbose_name='草稿'),
),
migrations.AddField(
model_name='template',
name='draft_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='草稿更新时间'),
),
migrations.AddField(
model_name='template',
name='draft_updator',
field=models.CharField(default='', max_length=32, verbose_name='草稿更新者'),
),
migrations.AlterField(
model_name='application',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterField(
model_name='configmap',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterField(
model_name='deplpyment',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterField(
model_name='secret',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterField(
model_name='service',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterField(
model_name='template',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterField(
model_name='versionedentity',
name='updator',
field=models.CharField(max_length=32, verbose_name='更新者'),
),
migrations.AlterUniqueTogether(
name='showversion',
unique_together=set([('template_id', 'name')]),
),
]
|
tests/test_element.py
|
cgarjun/Pyno
| 163 |
60658
|
"""Tests for pyno.element"""
pass
|
Chapter07/python/com/sparksamples/linearregression/LinearRegressionCrossValidationIterations.py
|
quguiliang/Machine-Learning-with-Spark-Second-Edition
| 112 |
60670
|
<reponame>quguiliang/Machine-Learning-with-Spark-Second-Edition
import os
import sys
import pylab as P
import matplotlib
import matplotlib.pyplot as plt
from com.sparksamples.util import evaluate
from com.sparksamples.linearregression.LinearRegressionUtil import get_train_test_data
try:
from pyspark import SparkContext
from pyspark import SparkConf
except ImportError as e:
print ("Error importing Spark Modules", e)
sys.exit(1)
from com.sparksamples.util import SPARK_HOME
os.environ['SPARK_HOME'] = SPARK_HOME
sys.path.append(SPARK_HOME + "/python")
def main():
execute()
def execute():
train_data, test_data = get_train_test_data()
params = [1, 5, 10, 20, 50, 100, 200]
metrics = [evaluate(train_data, test_data, param, 0.01, 0.0, 'l2', False) for param in params]
print params
print metrics
P.plot(params, metrics)
fig = matplotlib.pyplot.gcf()
plt.xscale('log')
plt.title("LinearRegressionWithSGD : Iterations")
plt.xlabel("Iterators")
plt.ylabel("RMSLE")
P.show()
if __name__ == "__main__":
main()
|
nlpcda/tools/Equivalent_char.py
|
blmoistawinde/nlpcda
| 959 |
60713
|
<reponame>blmoistawinde/nlpcda<filename>nlpcda/tools/Equivalent_char.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nlpcda.tools.Basetool import Basetool
from nlpcda.config import Equivalent_char_path
class EquivalentChar(Basetool):
'''
    Equivalent characters: randomly replace characters with character-level equivalents to augment text data
'''
def __init__(self, base_file=Equivalent_char_path, create_num=5, change_rate=0.05, seed=1):
super(EquivalentChar, self).__init__(base_file, create_num, change_rate, seed)
def load_paser_base_file(self):
self.base_file_mapobj = {}
for line in open(self.base_file, "r", encoding='utf-8'):
equivalent_list = line.strip().split("\t")
assert len(equivalent_list) > 1
self.add_equivalent_list(equivalent_list)
print('load :%s done' % (self.base_file))
return self.base_file_mapobj
def add_equivalent_list(self, equivalent_list):
'''
        Add a list of mutually equivalent characters
:param equivalent_list:
:return:
'''
num = len(equivalent_list)
for i in range(num - 1):
self.base_file_mapobj[equivalent_list[i]] = equivalent_list[:i] + equivalent_list[i + 1:]
self.base_file_mapobj[equivalent_list[-1]] = equivalent_list[:-1]
def replace(self, replace_str: str):
replace_str = replace_str.replace('\n', '').strip()
chars = list(replace_str)
sentences = [replace_str]
t = 0
while len(sentences) < self.create_num:
t += 1
a_sentence = ''
for chrr in chars:
if chrr in self.base_file_mapobj and self.random.random() < self.change_rate:
wi = self.random.randint(0, len(self.base_file_mapobj[chrr]) - 1)
place = self.base_file_mapobj[chrr][wi]
else:
place = chrr
a_sentence += place
if a_sentence not in sentences:
sentences.append(a_sentence)
if t > self.create_num * self.loop_t / self.change_rate:
break
return sentences
def test(test_str, create_num=3, change_rate=0.3):
hoe = EquivalentChar(create_num=create_num, change_rate=change_rate)
try:
return hoe.replace(test_str)
except:
        print('error in EquivalentChar.replace')
return [test_str]
if __name__ == '__main__':
ts = '''今天是7月5日21:32:21。'''
rs = test(ts)
for s in rs:
print(s)
|
forms.py
|
deeplook/slackipy
| 103 |
60746
|
<filename>forms.py<gh_stars>100-1000
from flask_wtf import Form
from wtforms import validators
from wtforms.fields.html5 import EmailField
class InviteForm(Form):
email = EmailField('Email Address',
[validators.DataRequired(), validators.Email()])
|
mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Upload/Rsync/Rsync.py
|
smthkissinger/docker-images
| 282 |
60777
|
import os
import logging
import re
from copy_reg import pickle
from multiprocessing import Pool
from subprocess import check_output
from types import MethodType
from RsyncUploadThread import RsyncUploadThread
from mongodb_consistent_backup.Common import config_to_string
from mongodb_consistent_backup.Errors import OperationError
from mongodb_consistent_backup.Pipeline import Task
# Allows pooled .apply_async()s to work on Class-methods:
def _reduce_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
pickle(MethodType, _reduce_method)
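# (Registering this reducer lets Python 2's pickle serialize bound methods, which the
# multiprocessing Pool needs when instance methods are submitted via apply_async.)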
class Rsync(Task):
def __init__(self, manager, config, timer, base_dir, backup_dir, **kwargs):
super(Rsync, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir, **kwargs)
self.backup_location = self.config.backup.location
self.backup_name = self.config.backup.name
self.remove_uploaded = self.config.upload.remove_uploaded
self.retries = self.config.upload.retries
self.rsync_path = self.config.upload.rsync.path
self.rsync_user = self.config.upload.rsync.user
self.rsync_host = self.config.upload.rsync.host
self.rsync_port = self.config.upload.rsync.port
self.rsync_ssh_key = self.config.upload.rsync.ssh_key
self.rsync_binary = "rsync"
self.rsync_flags = ["--archive", "--compress"]
self.rsync_version = None
self._rsync_info = None
self.threads(self.config.upload.threads)
self._pool = Pool(processes=self.threads())
def init(self):
if not self.host_has_rsync():
raise OperationError("Cannot find rsync binary on this host!")
if not os.path.isdir(self.backup_dir):
logging.error("The source directory: %s does not exist or is not a directory! Skipping Rsync upload!" % self.backup_dir)
raise OperationError("The source directory: %s does not exist or is not a directory! Skipping Rsync upload!" % self.backup_dir)
def rsync_info(self):
if not self._rsync_info:
output = check_output([self.rsync_binary, "--version"])
search = re.search(r"^rsync\s+version\s([0-9.-]+)\s+protocol\sversion\s(\d+)", output)
self.rsync_version = search.group(1)
self._rsync_info = {"version": self.rsync_version, "protocol_version": int(search.group(2))}
return self._rsync_info
def host_has_rsync(self):
if self.rsync_info():
return True
return False
def get_dest_path(self):
return os.path.join(self.rsync_path, self.base_dir)
def prepare_dest_dir(self):
# mkdir -p the rsync dest path via ssh
ssh_mkdir_cmd = ["ssh"]
if self.rsync_ssh_key:
ssh_mkdir_cmd.extend(["-i", self.rsync_ssh_key])
ssh_mkdir_cmd.extend([
"%s@%s" % (self.rsync_user, self.rsync_host),
"mkdir", "-p", self.get_dest_path()
])
# run the mkdir via ssh
try:
check_output(ssh_mkdir_cmd)
except Exception, e:
logging.error("Creating rsync dest path with ssh failed for %s: %s" % (
self.rsync_host,
e
))
raise e
return True
def done(self, data):
logging.info(data)
def run(self):
try:
self.init()
self.timer.start(self.timer_name)
logging.info("Preparing destination path on %s" % self.rsync_host)
self.prepare_dest_dir()
rsync_config = {
"dest": "%s@%s:%s" % (self.rsync_user, self.rsync_host, self.get_dest_path()),
"threads": self.threads(),
"retries": self.retries
}
rsync_config.update(self.rsync_info())
logging.info("Starting upload using rsync version %s (%s)" % (
self.rsync_info()['version'],
config_to_string(rsync_config)
))
for child in os.listdir(self.backup_dir):
self._pool.apply_async(RsyncUploadThread(
os.path.join(self.backup_dir, child),
self.base_dir,
self.rsync_flags,
self.rsync_path,
self.rsync_user,
self.rsync_host,
self.rsync_port,
self.rsync_ssh_key,
self.remove_uploaded,
self.retries
).run, callback=self.done)
self.wait()
except Exception, e:
logging.error("Rsync upload failed! Error: %s" % e)
raise OperationError(e)
finally:
self.timer.stop(self.timer_name)
self.completed = True
def wait(self):
if self._pool:
logging.info("Waiting for Rsync upload threads to stop")
self._pool.close()
self._pool.join()
def close(self):
if self._pool:
logging.error("Stopping Rsync upload threads")
self._pool.terminate()
self._pool.join()
|
src/pyinfraboxutils/db.py
|
agu3rra/InfraBox
| 265 |
60782
|
import time
import os
import psycopg2
import psycopg2.extras
from pyinfraboxutils import get_logger
logger = get_logger('infrabox')
def connect_db():
while True:
try:
conn = psycopg2.connect(dbname=os.environ['INFRABOX_DATABASE_DB'],
user=os.environ['INFRABOX_DATABASE_USER'],
password=os.environ['<PASSWORD>'],
host=os.environ['INFRABOX_DATABASE_HOST'],
port=os.environ['INFRABOX_DATABASE_PORT'])
return conn
except Exception as e:
logger.warn("Could not connect to db: %s", e)
time.sleep(3)
class DB(object):
def __init__(self, conn):
self.conn = conn
def execute_one(self, stmt, args=None):
r = self.execute_many(stmt, args)
if not r:
return r
return r[0]
def execute_many(self, stmt, args=None):
c = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute(stmt, args)
r = c.fetchall()
c.close()
return r
def execute_one_dict(self, stmt, args=None):
r = self.execute_many_dict(stmt, args)
if not r:
return r
return r[0]
def execute_many_dict(self, stmt, args=None):
c = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
c.execute(stmt, args)
r = c.fetchall()
c.close()
return r
def execute(self, stmt, args=None):
c = self.conn.cursor()
c.execute(stmt, args)
c.close()
def commit(self):
self.conn.commit()
def rollback(self):
self.conn.rollback()
def close(self):
self.conn.close()
|
pclib/rtl/onehot_test.py
|
belang/pymtl
| 206 |
60787
|
#==============================================================================
# ConfigManger_test.py
#==============================================================================
from pymtl import *
from pclib.test import TestVectorSimulator
from onehot import Mux, Demux
#------------------------------------------------------------------------------
# test_Mux
#------------------------------------------------------------------------------
def test_Mux( dump_vcd, test_verilog ):
nports = 2
data_nbits = 16
# Define test input and output functions
def tv_in( model, test_vector ):
model.sel .value = test_vector[0]
model.in_[0].value = test_vector[1]
model.in_[1].value = test_vector[2]
def tv_out( model, test_vector ):
assert model.out == test_vector[3]
# Select and elaborate the model under test
model = Mux( nports, dtype = data_nbits )
model.vcd_file = dump_vcd
if test_verilog:
model = TranslationTool( model )
model.elaborate()
# Define the test vectors
test_vectors = [
# sel in[0] in[1] out
[ 0b00, 0x1111, 0x2222, 0x0000 ],
[ 0b01, 0x1111, 0x2222, 0x1111 ],
[ 0b10, 0x1111, 0x2222, 0x2222 ],
[ 0b00, 0x1111, 0x2222, 0x0000 ],
]
# Create the simulator and configure it
sim = TestVectorSimulator( model, test_vectors, tv_in, tv_out )
# Run the simulator
sim.run_test()
#------------------------------------------------------------------------------
# test_Demux
#------------------------------------------------------------------------------
def test_Demux( dump_vcd, test_verilog ):
nports = 2
data_nbits = 16
# Define test input and output functions
def tv_in( model, test_vector ):
model.sel.value = test_vector[0]
model.in_.value = test_vector[1]
def tv_out( model, test_vector ):
assert model.out[0] == test_vector[2]
assert model.out[1] == test_vector[3]
# Select and elaborate the model under test
model = Demux( nports, dtype = data_nbits )
model.vcd_file = dump_vcd
if test_verilog:
model = TranslationTool( model )
model.elaborate()
# Define the test vectors
test_vectors = [
# sel in_ out[0] out[1]
[ 0b00, 0x3333, 0x0000, 0x0000 ],
[ 0b01, 0x1111, 0x1111, 0x0000 ],
[ 0b10, 0x2222, 0x0000, 0x2222 ],
[ 0b00, 0x1111, 0x0000, 0x0000 ],
]
# Create the simulator and configure it
sim = TestVectorSimulator( model, test_vectors, tv_in, tv_out )
# Run the simulator
sim.run_test()
|
static/paddlex_restful/restful/project/visualize.py
|
cheneyveron/PaddleX
| 3,655 |
60788
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
import xml.etree.ElementTree as ET
from PIL import Image
def resize_img(img):
""" 调整图片尺寸
Args:
img: 图片信息
"""
h, w = img.shape[:2]
min_size = 580
if w >= h and w > min_size:
new_w = min_size
new_h = new_w * h / w
elif h >= w and h > min_size:
new_h = min_size
new_w = new_h * w / h
else:
new_h = h
new_w = w
new_img = cv2.resize(
img, (int(new_w), int(new_h)), interpolation=cv2.INTER_CUBIC)
scale_value = new_w / w
return new_img, scale_value
def plot_det_label(image, anno, labels):
""" 目标检测类型生成标注图
Args:
image: 图片路径
anno: 图片标注
labels: 图片所属数据集的类别信息
"""
catid2color = {}
img = cv2.imread(image)
img, scale_value = resize_img(img)
tree = ET.parse(anno)
objs = tree.findall('object')
color_map = get_color_map_list(len(labels) + 1)
for i, obj in enumerate(objs):
cname = obj.find('name').text
        if cname not in labels:
            continue
        catid = labels.index(cname)
xmin = int(float(obj.find('bndbox').find('xmin').text) * scale_value)
ymin = int(float(obj.find('bndbox').find('ymin').text) * scale_value)
xmax = int(float(obj.find('bndbox').find('xmax').text) * scale_value)
ymax = int(float(obj.find('bndbox').find('ymax').text) * scale_value)
if catid not in catid2color:
catid2color[catid] = color_map[catid + 1]
color = tuple(catid2color[catid])
img = draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname,
color)
return img
def plot_seg_label(anno):
""" 语义分割类型生成标注图
Args:
anno: 图片标注
"""
label = pil_imread(anno)
pse_label = gray2pseudo(label)
return pse_label
def plot_insseg_label(image, anno, labels, alpha=0.7):
""" 实例分割类型生成标注图
Args:
image: 图片路径
anno: 图片标注
labels: 图片所属数据集的类别信息
"""
anno = np.load(anno, allow_pickle=True).tolist()
catid2color = dict()
img = cv2.imread(image)
img, scale_value = resize_img(img)
color_map = get_color_map_list(len(labels) + 1)
img_h = anno['h']
img_w = anno['w']
gt_class = anno['gt_class']
gt_bbox = anno['gt_bbox']
gt_poly = anno['gt_poly']
num_bbox = gt_bbox.shape[0]
num_mask = len(gt_poly)
    # Draw the mask information
img_array = np.array(img).astype('float32')
for i in range(num_mask):
cname = gt_class[i]
        if cname not in labels:
            continue
        catid = labels.index(cname)
if catid not in catid2color:
catid2color[catid] = color_map[catid + 1]
color = np.array(catid2color[catid]).astype('float32')
import pycocotools.mask as mask_util
for x in range(len(gt_poly[i])):
for y in range(len(gt_poly[i][x])):
gt_poly[i][x][y] = int(float(gt_poly[i][x][y]) * scale_value)
poly = gt_poly[i]
rles = mask_util.frPyObjects(poly,
int(float(img_h) * scale_value),
int(float(img_w) * scale_value))
rle = mask_util.merge(rles)
mask = mask_util.decode(rle) * 255
idx = np.nonzero(mask)
img_array[idx[0], idx[1], :] *= 1.0 - alpha
img_array[idx[0], idx[1], :] += alpha * color
img = img_array.astype('uint8')
for i in range(num_bbox):
cname = gt_class[i]
        if cname not in labels:
            continue
        catid = labels.index(cname)
if catid not in catid2color:
catid2color[catid] = color_map[catid]
color = tuple(catid2color[catid])
xmin, ymin, xmax, ymax = gt_bbox[i]
img = draw_rectangle_and_cname(img,
int(float(xmin) * scale_value),
int(float(ymin) * scale_value),
int(float(xmax) * scale_value),
int(float(ymax) * scale_value), cname,
color)
return img
def draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname, color):
""" 根据提供的标注信息,给图片描绘框体和类别显示
Args:
img: 图片路径
xmin: 检测框最小的x坐标
ymin: 检测框最小的y坐标
xmax: 检测框最大的x坐标
ymax: 检测框最大的y坐标
cname: 类别信息
color: 类别与颜色的对应信息
"""
    # Draw the detection box
line_width = math.ceil(2 * max(img.shape[0:2]) / 600)
cv2.rectangle(
img,
pt1=(xmin, ymin),
pt2=(xmax, ymax),
color=color,
thickness=line_width)
    # Compute the text size and draw the class label
text_thickness = math.ceil(2 * max(img.shape[0:2]) / 1200)
fontscale = math.ceil(0.5 * max(img.shape[0:2]) / 600)
tw, th = cv2.getTextSize(
cname, 0, fontScale=fontscale, thickness=text_thickness)[0]
cv2.rectangle(
img,
pt1=(xmin + 1, ymin - th),
pt2=(xmin + int(0.7 * tw) + 1, ymin),
color=color,
thickness=-1)
cv2.putText(
img,
cname, (int(xmin) + 3, int(ymin) - 5),
0,
0.6 * fontscale, (255, 255, 255),
lineType=cv2.LINE_AA,
thickness=text_thickness)
return img
def pil_imread(file_path):
""" 将图片读成np格式数据
Args:
file_path: 图片路径
"""
img = Image.open(file_path)
return np.asarray(img)
def get_color_map_list(num_classes):
""" 为类别信息生成对应的颜色列表
Args:
num_classes: 类别数量
"""
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
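# Illustrative sketch added for clarity (not part of the original PaddleX source):
# the loop above spreads the bits of each class id across the R/G/B channels
# (PASCAL VOC style), so consecutive ids get visually distinct colors.
def _color_map_example():
    cmap = get_color_map_list(4)
    # id 0 -> black, id 1 -> dark red, id 2 -> dark green, id 3 -> dark yellow
    assert cmap == [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0]]
    return cmap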
def gray2pseudo(gray_image):
""" 将分割的结果映射到图片
Args:
gray_image: 灰度图
"""
color_map = get_color_map_list(256)
color_map = np.array(color_map).astype("uint8")
    # Apply the color mapping with OpenCV
c1 = cv2.LUT(gray_image, color_map[:, 0])
c2 = cv2.LUT(gray_image, color_map[:, 1])
c3 = cv2.LUT(gray_image, color_map[:, 2])
pseudo_img = np.dstack((c1, c2, c3))
return pseudo_img
|
parsifal/apps/invites/migrations/0002_alter_invite_invitee_email.py
|
ShivamPytho/parsifal
| 342 |
60826
|
# Generated by Django 3.2.7 on 2021-09-08 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('invites', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='invite',
name='invitee_email',
field=models.EmailField(db_index=True, max_length=254, verbose_name='invitee email'),
),
]
|
test/test_parameters.py
|
DLPerf/tensorforce
| 1,132 |
60829
|
<filename>test/test_parameters.py<gh_stars>1000+
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from test.unittest_base import UnittestBase
class TestParameters(UnittestBase, unittest.TestCase):
def float_unittest(self, exploration):
agent, environment = self.prepare(exploration=exploration)
states = environment.reset()
actions = agent.act(states=states)
exploration1 = agent.model.exploration.value().numpy().item()
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
actions = agent.act(states=states)
exploration2 = agent.model.exploration.value().numpy().item()
if not isinstance(exploration, dict) or exploration['type'] == 'constant':
self.assertEqual(exploration2, exploration1)
else:
self.assertNotEqual(exploration2, exploration1)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
agent.close()
environment.close()
self.finished_test()
def int_unittest(self, horizon):
agent, environment = self.prepare(reward_estimation=dict(horizon=horizon))
states = environment.reset()
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
horizon1 = agent.model.reward_horizon.value().numpy().item()
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
horizon2 = agent.model.reward_horizon.value().numpy().item()
if not isinstance(horizon, dict) or horizon['type'] == 'constant':
self.assertEqual(horizon2, horizon1)
else:
self.assertNotEqual(horizon2, horizon1)
agent.close()
environment.close()
self.finished_test()
def test_constant(self):
self.start_tests(name='constant')
exploration = 0.1
self.float_unittest(exploration=exploration)
horizon = 4
self.int_unittest(horizon=horizon)
def test_decaying(self):
self.start_tests(name='decaying')
exploration = dict(
type='decaying', decay='exponential', unit='timesteps', num_steps=5, initial_value=0.1,
decay_rate=0.5
)
self.float_unittest(exploration=exploration)
horizon = dict(
type='polynomial', unit='timesteps', num_steps=1, initial_value=2, final_value=4,
power=2
)
self.int_unittest(horizon=horizon)
def test_exponential(self):
self.start_tests(name='exponential')
# SPECIFICATION.MD
exploration = dict(
type='exponential', unit='timesteps', num_steps=5, initial_value=0.1, decay_rate=0.5
)
self.float_unittest(exploration=exploration)
def test_linear(self):
self.start_tests(name='linear')
exploration = dict(
type='linear', unit='timesteps', num_steps=5, initial_value=0.1, final_value=0.5
)
self.float_unittest(exploration=exploration)
# SPECIFICATION.MD
horizon = dict(type='linear', unit='timesteps', num_steps=1, initial_value=2, final_value=4)
self.int_unittest(horizon=horizon)
def test_ornstein_uhlenbeck(self):
self.start_tests(name='ornstein-uhlenbeck')
exploration = dict(type='ornstein_uhlenbeck', absolute=True)
self.float_unittest(exploration=exploration)
def test_piecewise_constant(self):
self.start_tests(name='piecewise-constant')
exploration = dict(
type='piecewise_constant', unit='timesteps', boundaries=[1], values=[0.1, 0.0]
)
self.float_unittest(exploration=exploration)
horizon = dict(
type='piecewise_constant', dtype='int', unit='timesteps', boundaries=[1], values=[1, 2]
)
self.int_unittest(horizon=horizon)
def test_random(self):
self.start_tests(name='random')
exploration = dict(type='random', distribution='uniform')
self.float_unittest(exploration=exploration)
|
pydlm/predict/_dlmPredict.py
|
onnheimm/pydlm
| 423 |
60840
|
"""
===============================================================================
The code for all predicting methods
===============================================================================
"""
from pydlm.core._dlm import _dlm
class _dlmPredict(_dlm):
""" The main class containing all prediction methods.
Methods:
_oneDayAheadPredict: predict one day a head.
_continuePredict: continue predicting one day after _oneDayAheadPredict
"""
# Note the following functions will modify the status of the model, so they
    # shall not be directly called through the main model if this behavior is not
# desired.
# featureDict contains all the features for prediction.
# It is a dictionary with key equals to the name of the component and
# the value as the new feature (a list). The function
# will first use the features provided in this feature dict, if not
# found, it will fetch the default feature from the component. If
# it could not find feature for some component, it returns an error
# The intermediate result will be stored in result.predictStatus as
# (start_date, next_pred_date, [all_predicted_values]), which will be
# used by _continuePredict.
def _oneDayAheadPredict(self, date, featureDict=None):
""" One day ahead prediction based on the date and the featureDict.
The prediction could be on the last day and into the future or in
the middle of the time series and ignore the rest. For predicting into
the future, the new features must be supplied to featureDict. For
prediction in the middle, the user can still supply the features which
        will be used preferentially. The old features will be used if featureDict is
None.
Args:
date: the prediction starts (based on the observation before and
on this date)
featureDict: the new feature value for some dynamic components.
                     must be specified in the form {component_name: value}.
                     If the feature for some dynamic component is not
                     supplied, the algorithm will use the features from
                     the old data. (This means that if the prediction is out
                     of sample, then all dynamic components must be provided
                     with the new feature value.)
Returns:
A tuple of (predicted_mean, predicted_variance)
"""
if date > self.n - 1:
raise NameError('The date is beyond the data range.')
# get the correct status of the model
self._setModelStatus(date=date)
self._constructEvaluationForPrediction(
date=date + 1,
featureDict=featureDict,
padded_data=self.padded_data[:(date + 1)])
# initialize the prediction status
self.builder.model.prediction.step = 0
# start predicting
self.Filter.predict(self.builder.model)
predictedObs = self.builder.model.prediction.obs
predictedObsVar = self.builder.model.prediction.obsVar
self.result.predictStatus = [
date, # start_date
date + 1, # current_date
[predictedObs[0, 0]] # all historical predictions
]
return (predictedObs, predictedObsVar)
def _continuePredict(self, featureDict=None):
""" Continue predicting one day after _oneDayAheadPredict or
after _continuePredict. After using
_oneDayAheadPredict, the user can continue predicting by using
        _continuePredict. The featureDict acts the same as in
_oneDayAheadPredict.
Args:
featureDict: the new feature value for some dynamic components.
see @_oneDayAheadPredict
Returns:
A tuple of (predicted_mean, predicted_variance)
"""
if self.result.predictStatus is None:
            raise NameError('_continuePredict can only be used after ' +
'_oneDayAheadPredict')
startDate = self.result.predictStatus[0]
currentDate = self.result.predictStatus[1]
self._constructEvaluationForPrediction(
date=currentDate + 1,
featureDict=featureDict,
padded_data=self.padded_data[:(startDate + 1)] +
self.result.predictStatus[2])
self.Filter.predict(self.builder.model)
predictedObs = self.builder.model.prediction.obs
predictedObsVar = self.builder.model.prediction.obsVar
self.result.predictStatus[1] += 1
self.result.predictStatus[2].append(predictedObs[0, 0])
return (predictedObs, predictedObsVar)
# This function will modify the status of the object, use with caution.
def _constructEvaluationForPrediction(self,
date,
featureDict=None,
padded_data=None):
""" Construct the evaluation matrix based on date and featureDict.
Used for prediction. Features provided in the featureDict will be used
        preferably. If the feature is not found in featureDict, the algorithm
will seek it based on the old data and the date.
Args:
featureDict: a dictionary containing {dynamic_component_name: value}
                         for updating the feature of the corresponding component.
date: if a dynamic component name is not found in featureDict, the
                algorithm uses its old feature on the given date.
            padded_data: the mix of the raw data and the predicted data. It is
                         used by the auto regressor.
"""
        # New features are provided. Update dynamic components.
# We distribute the featureDict back to dynamicComponents. If the date is
# out of bound, we append the feature to the feature set. If the date is
# within range, we replace the old feature with the new feature.
if featureDict is not None:
for name in featureDict:
if name in self.builder.dynamicComponents:
comp = self.builder.dynamicComponents[name]
# the date is within range
if date < comp.n:
comp.features[date] = featureDict[name]
comp.n += 1
elif date < comp.n + 1:
comp.features.append(featureDict[name])
comp.n += 1
else:
raise NameError("Feature is missing between the last predicted " +
"day and the new day")
self.builder.updateEvaluation(date, padded_data)
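# Illustrative sketch added for clarity (not part of the original pydlm source):
# result.predictStatus is the triple [start_date, next_pred_date, predicted_values];
# _continuePredict only advances the date and appends the latest one-step-ahead
# prediction. The hypothetical helper below mirrors that bookkeeping.
def _predict_status_example(status, new_prediction):
    status[1] += 1
    status[2].append(new_prediction)
    return status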
|
blinkpy/helpers/__init__.py
|
magicalyak/blinkpy
| 272 |
60875
|
"""Init file for blinkpy helper functions."""
|
preprocess/__init__.py
|
federicozaiter/LogClass
| 159 |
60896
|
<reponame>federicozaiter/LogClass<gh_stars>100-1000
__all__ = [
"bgl_preprocessor",
"open_source_logs",
]
|
setup.py
|
jpic/django-nested-admin
| 580 |
60899
|
<gh_stars>100-1000
#!/usr/bin/env python
import re
import os.path
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Find the package version in __init__.py without importing it
# (which we cannot do because it has extensive dependencies).
init_file = os.path.join(os.path.dirname(__file__),
'nested_admin', '__init__.py')
with open(init_file, 'r') as f:
for line in f:
m = re.search(r'''^__version__ = (['"])(.+?)\1$''', line)
if m is not None:
version = m.group(2)
break
else:
raise LookupError('Unable to find __version__ in ' + init_file)
setup(
name='django-nested-admin',
version=version,
install_requires=[
'python-monkey-business>=1.0.0',
'six',
],
description="Django admin classes that allow for nested inlines",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/theatlantic/django-nested-admin',
packages=find_packages(),
license='BSD',
platforms='any',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
],
include_package_data=True,
zip_safe=False,
long_description=''.join(list(open('README.rst'))[3:]))
|
sfaira/unit_tests/data_for_tests/loaders/consts.py
|
theislab/sfaira
| 110 |
60922
|
<filename>sfaira/unit_tests/data_for_tests/loaders/consts.py<gh_stars>100-1000
ASSEMBLY_HUMAN = "Homo_sapiens.GRCh38.104"
ASSEMBLY_MOUSE = "Mus_musculus.GRCm39.104"
CELLTYPES = ["adventitial cell", "endothelial cell", "acinar cell", "pancreatic PP cell", "type B pancreatic cell"]
CL_VERSION = "v2021-08-10"
|
en_transformer/__init__.py
|
dumpmemory/En-transformer
| 108 |
60935
|
from en_transformer.en_transformer import EquivariantAttention, EnTransformer
|
tests/test_perf_counters.py
|
gglin001/poptorch
| 128 |
60959
|
<reponame>gglin001/poptorch
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import torch
import pytest
import poptorch
import helpers
class Model(torch.nn.Module):
def forward(self, x, y):
return torch.matmul(x, y)
def assert_perf_counter_size(perf, inputs, outputs, steps, outsteps=None):
def assert_size(perf, elems, steps):
assert len(perf) == elems
for elem in perf:
assert len(elem) == steps
outsteps = outsteps or steps
assert_size(perf['input'], inputs, steps)
assert_size(perf['input_complete'], inputs, steps)
assert_size(perf['output'], outputs, outsteps)
assert_size(perf['output_complete'], outputs, outsteps)
def assert_latency_values(model):
def check(latency):
(minimum, maximum, average) = latency
assert minimum <= average
assert average <= maximum
host2ipu = model.getHostIpuLatency()
compute = model.getComputeLatency()
ipu2host = model.getIpuHostLatency()
round_trip = model.getLatency()
check(host2ipu)
check(compute)
check(ipu2host)
check(round_trip)
def test_simple():
x = torch.randn(100, 100)
y = torch.randn(100, 100)
model = Model()
poptorch_model = poptorch.inferenceModel(model)
poptorch_model(x, y)
perf = poptorch_model.getPerfCounters()
assert_perf_counter_size(perf, 2, 1, 1)
assert_latency_values(poptorch_model)
def test_steps():
x = torch.randn(10, 100, 100)
y = torch.randn(10, 100, 100)
model = Model()
opts = poptorch.Options().deviceIterations(10)
poptorch_model = poptorch.inferenceModel(model, opts)
poptorch_model(x, y)
perf = poptorch_model.getPerfCounters()
assert_perf_counter_size(perf, 2, 1, 10)
assert_latency_values(poptorch_model)
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Hardware IPU needed")
def test_replicas():
x = torch.randn(4, 100, 100)
y = torch.randn(4, 100, 100)
model = Model()
opts = poptorch.Options().replicationFactor(4)
poptorch_model = poptorch.inferenceModel(model, opts)
poptorch_model(x, y)
perf = poptorch_model.getPerfCounters()
assert_perf_counter_size(perf, 2, 1, 4)
assert_latency_values(poptorch_model)
@pytest.mark.parametrize("mode_tuple", [(poptorch.AnchorMode.Final, 1),
(poptorch.AnchorMode.All, 1),
(poptorch.AnchorMode.Sum, 1),
(poptorch.AnchorMode.EveryN, 2)])
@pytest.mark.parametrize("steps", [2, 4])
@pytest.mark.parametrize("replicas", [1, 2])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Hardware IPU needed")
def test_inference(mode_tuple, steps, replicas):
model = Model()
opts = poptorch.Options()
opts.anchorMode(mode_tuple[0], mode_tuple[1])
opts.deviceIterations(steps)
opts.replicationFactor(replicas)
poptorch_model = poptorch.inferenceModel(model, opts)
torch.manual_seed(42)
x = torch.randn(16, 100, 100)
y = torch.randn(16, 100, 100)
poptorch_model(x, y)
perf = poptorch_model.getPerfCounters()
outsteps = steps * replicas
if mode_tuple[0] in [poptorch.AnchorMode.Final, poptorch.AnchorMode.Sum]:
outsteps = replicas
elif mode_tuple[0] is poptorch.AnchorMode.EveryN:
outsteps = steps // mode_tuple[1] * replicas
assert_perf_counter_size(perf, 2, 1, steps * replicas, outsteps)
assert_latency_values(poptorch_model)
@pytest.mark.parametrize("mode_tuple", [(poptorch.AnchorMode.Final, 1),
(poptorch.AnchorMode.All, 1),
(poptorch.AnchorMode.Sum, 1),
(poptorch.AnchorMode.EveryN, 2)])
@pytest.mark.parametrize("steps", [2, 4])
@pytest.mark.parametrize("accums", [1, 2])
@pytest.mark.parametrize("replicas", [1, 2])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Hardware IPU needed")
def test_training(mode_tuple, steps, accums, replicas):
torch.manual_seed(42)
inputs = torch.randn(16, 100)
targets = torch.randn(16, 100)
opts = poptorch.Options()
opts.anchorMode(mode_tuple[0], mode_tuple[1])
opts.deviceIterations(steps)
opts.Training.gradientAccumulation(accums)
opts.replicationFactor(replicas)
model = torch.nn.Linear(100, 100)
poptorch_model = helpers.trainingModelWithLoss(model,
loss=torch.nn.L1Loss(),
options=opts)
poptorch_model(inputs, targets)
perf = poptorch_model.getPerfCounters()
outsteps = steps * accums * replicas
if mode_tuple[0] in [poptorch.AnchorMode.Final, poptorch.AnchorMode.Sum]:
outsteps = replicas
elif mode_tuple[0] is poptorch.AnchorMode.EveryN:
outsteps = steps // mode_tuple[1] * accums * replicas
assert_perf_counter_size(perf, 2, 2, steps * accums * replicas, outsteps)
assert_latency_values(poptorch_model)
def test_synthetic_data():
model = Model()
opts = poptorch.Options()
opts.deviceIterations(16)
opts.enableSyntheticData(True)
poptorch_model = poptorch.inferenceModel(model, opts)
torch.manual_seed(42)
x = torch.randn(16, 100, 100)
y = torch.randn(16, 100, 100)
poptorch_model(x, y)
perf = poptorch_model.getPerfCounters()
assert_perf_counter_size(perf, 2, 1, 0, 0)
latency = poptorch_model.getLatency()
assert latency == (0., 0., 0.)
|
scripts/rct_to_text.py
|
tomhoper/scibert
| 1,143 |
60962
|
<reponame>tomhoper/scibert
"""
Script to convert Pubmed RCT dataset to textual format for sent classification
"""
import jsonlines
import click
import pathlib
@click.command()
@click.argument('inpath')
@click.argument('outpath')
def convert(inpath, outpath):
pathlib.Path(outpath).parent.mkdir(parents=True, exist_ok=True)
with open(inpath) as f_in:
with jsonlines.open(outpath, 'w') as f_out:
for line in f_in:
abstract_id = ''
line = line.strip()
if not line:
continue
if line.startswith('###'):
abstract_id = line
continue
label, sent = line.split('\t')
f_out.write({'label': label, 'text': sent, 'metadata':abstract_id})
if __name__ == '__main__':
    convert()
|
testcase/test_case_4_QuickdrawDataset4dict_random_attention_mask.py
|
Team-Squad-Up/multigraph_transformer
| 268 |
60992
|
<reponame>Team-Squad-Up/multigraph_transformer<filename>testcase/test_case_4_QuickdrawDataset4dict_random_attention_mask.py
import numpy as np
def produce_adjacent_matrix_random(stroke_length, conn=0.15):
attention_mask = np.random.choice(a=[0, 1], size=[7, 7], p=[conn, 1-conn])
attention_mask[stroke_length: , :] = 2
attention_mask[:, stroke_length : ] = 2
#####
attention_mask = np.triu(attention_mask)
attention_mask += attention_mask.T - np.diag(attention_mask.diagonal())
for i in range(stroke_length):
attention_mask[i, i] = 0
return attention_mask
att_msk = produce_adjacent_matrix_random(4, 0.5)
print(att_msk)
print("----------------------")
print((att_msk.T == att_msk).all())
|
bin/measure_interface.py
|
fchapoton/CrisisMappingToolkit
| 178 |
61011
|
# -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
from PyQt4 import QtGui, QtCore
import sys
from threading import Thread
from LLAMA import Ui_Lake_Level_UI
from plot_water_levelui import *
from lake_measure import *
class ProgressPopup(QtGui.QWidget):
update_signal = QtCore.pyqtSignal(int, int, str, str, int, int)
def __init__(self, cancel_function):
QtGui.QWidget.__init__(self)
self.update_signal.connect(self.apply_update, QtCore.Qt.QueuedConnection)
self.cancel_function = cancel_function
self.lake_totals = None
self.lake_counts = None
self.progressBar = QtGui.QProgressBar(self)
self.progressBar.setMinimumSize(500, 50)
self.progressBar.setMaximumSize(500, 50)
self.progressBar.setRange(0, 100)
self.progressBar.setValue(0)
self.status = QtGui.QLabel(self)
self.status.setText("")
self.cancelButton = QtGui.QPushButton('Cancel', self)
self.cancelButton.setMinimumSize(50, 30)
self.cancelButton.setMaximumSize(100, 50)
self.cancelButton.clicked[bool].connect(self._cancel)
vbox = QtGui.QVBoxLayout(self)
vbox.addWidget(self.progressBar)
vbox.addWidget(self.status)
vbox.addWidget(self.cancelButton)
vbox.addStretch(1)
self.setLayout(vbox)
def update_function(self, lakes_number, lakes_total, lake_name, lake_date, lake_image, lake_image_total):
self.update_signal.emit(lakes_number, lakes_total, lake_name, lake_date, lake_image, lake_image_total)
def apply_update(self, lakes_number, lakes_total, lake_name, lake_date, lake_image, lake_image_total):
        if self.lake_totals is None:
self.lake_totals = [10] * lakes_total
self.lake_counts = [0] * lakes_total
self.lake_totals[lakes_number] = lake_image_total
self.lake_counts[lakes_number] = lake_image
total = sum(self.lake_totals)
progress = sum(self.lake_counts)
self.status.setText('Completed processing %s on %s.' % (lake_name, lake_date))
self.progressBar.setValue(float(progress) / total * 100)
def closeEvent(self, event):
        if self.cancel_function is not None:
self.cancel_function()
event.accept()
def _cancel(self):
self.close()
class Lake_Level_App(QtGui.QMainWindow, Ui_Lake_Level_UI):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
self.start_date = '1984-04-25'
# Sets end date to current date.
self.end_date = str((QtCore.QDate.currentDate()).toString('yyyy-MM-dd'))
self.selected_lake = 'Lake Tahoe'
self.selectlakeDropMenu.activated[str].connect(self.selectLakeHandle)
self.okBtn.clicked.connect(self.okHandle)
# Sets end date as current date. Couldn't set this option in QT Designer
self.endDate.setDate(QtCore.QDate.currentDate())
self.endDate.dateChanged[QtCore.QDate].connect(self.endHandle)
self.startDate.dateChanged[QtCore.QDate].connect(self.startHandle)
self.faiState = False
self.ndtiState = False
self.completedSignal.connect(self.completeLakeThread, QtCore.Qt.QueuedConnection)
def selectLakeHandle(self, text):
self.selected_lake = str(text)
def startHandle(self, date):
self.start_date = str(date.toString('yyyy-MM-dd'))
def endHandle(self, date):
self.end_date = str(date.toString('yyyy-MM-dd'))
completedSignal = QtCore.pyqtSignal()
@QtCore.pyqtSlot()
def completeLakeThread(self):
if self.tableCheckbox.isChecked():
table_water_level(self.selected_lake, self.start_date, self.end_date, result_dir='results', output_file=self.table_output_file)
if self.graphCheckbox.isChecked():
plot_water_level(self.selected_lake, self.start_date, self.end_date, result_dir='results')
self.popup.close()
def okHandle(self):
if self.algaeCheckbox.isChecked():
self.faiState = True
else:
self.faiState = False
if self.turbidityCheckbox.isChecked():
self.ndtiState = True
else:
self.ndtiState = False
# Heat map checkbox is not functioning. Add under here:
# if self.lake_areaCheckbox.isChecked():
if self.tableCheckbox.isChecked():
self.table_output_file = QtGui.QFileDialog.getSaveFileName(self, 'Choose Output File', 'results/' + self.selected_lake + '.csv', 'CSV File (*.csv *.txt)')
self.popup = ProgressPopup(Lake_Level_Cancel)
self.lake_thread = Thread(target=Lake_Level_Run, args=(self.selected_lake, self.start_date, self.end_date, \
'results', self.faiState, self.ndtiState, self.popup.update_function, self.completedSignal.emit))
self.popup.show()
self.lake_thread.start()
# CHANGE THIS. NEED TO MAKE THESE PARTS WAIT UNTIL LAKE_THREAD IS FINISHED.
def main():
app = QtGui.QApplication(sys.argv) # A new instance of QApplication
form = Lake_Level_App() # We set the form to be our ExampleApp (design)
form.show() # Show the form
app.exec_() # and execute the app
if __name__ == '__main__': # if we're running file directly and not importing it
main()
|
run_proto_exp.py
|
renmengye/inc-few-shot-attractor-public
| 122 |
61025
|
<reponame>renmengye/inc-few-shot-attractor-public
"""Runs a baseline for prototype networks for incremental few-shot learning.
Author: <NAME> (<EMAIL>)
See run_exp.py for usage.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import os
import six
import tensorflow as tf
from tqdm import tqdm
from fewshot.utils import logger
from run_exp import (get_config, get_restore_saver, get_datasets, get_model,
save_config, get_exp_logger, get_saver, restore_model,
final_log)
from train_lib import get_metadata
log = logger.get()
FLAGS = tf.flags.FLAGS
def calculate_protos(sess, model, num_classes_a, task_a_it, num_steps):
"""Calculates the prototypes of the entire training set."""
prototypes = []
for idx in six.moves.xrange(num_classes_a):
prototypes.append([])
for step in six.moves.xrange(num_steps):
x, y = task_a_it.next()
h = sess.run(model.h_a, feed_dict={model.inputs: x})
for jj, idx in enumerate(y):
prototypes[idx].append(h[jj])
for idx in six.moves.xrange(num_classes_a):
prototypes[idx] = np.array(prototypes[idx]).mean(axis=0)
return np.array(prototypes)
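# Minimal numpy sketch added for clarity (not part of the original code): the loop
# above accumulates one prototype per class as the mean embedding of that class.
# The toy embeddings and labels below are hypothetical.
def _prototype_example():
  embeddings = np.array([[1., 0.], [3., 0.], [0., 2.], [0., 4.]])
  labels = np.array([0, 0, 1, 1])
  protos = np.stack([embeddings[labels == c].mean(axis=0) for c in range(2)])
  # protos == [[2., 0.], [0., 3.]]
  return protos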
def calculate_episode_protos(sess, model, num_classes_a, nway, episode,
old_and_new):
"""Caluclates the prototypes of a single episode."""
prototypes = []
for idx in six.moves.xrange(nway):
prototypes.append([])
h = sess.run(model.h_a, feed_dict={model.inputs: episode.x_train})
for idx in six.moves.xrange(episode.x_train.shape[0]):
if old_and_new:
prototypes[episode.y_train[idx] - num_classes_a].append(h[idx])
else:
prototypes[episode.y_train[idx]].append(h[idx])
for idx in six.moves.xrange(nway):
prototypes[idx] = np.array(prototypes[idx]).mean(axis=0)
return np.array(prototypes)
def cosine(h, protos):
"""Cosine similarity."""
proto_t = protos.T
result = np.dot(h, proto_t) / np.sqrt(np.sum(
h**2, axis=1, keepdims=True)) / np.sqrt(
np.sum(proto_t**2, axis=0, keepdims=True))
return result
def euclidean(h, protos):
"""Euclidean similarity."""
h_ = np.expand_dims(h, 1)
protos_ = np.expand_dims(protos, 0)
return -np.sum((h_ - protos_)**2, axis=2)
def dot(h, protos):
"""Dot product."""
return np.dot(h, protos.T)
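# Shape sketch added for clarity (not part of the original code): each similarity
# function maps an [n, d] batch of embeddings and [k, d] prototypes to [n, k] logits,
# which evaluate_b below turns into class predictions via argmax.
def _similarity_shape_example():
  h = np.random.randn(5, 8)
  protos = np.random.randn(3, 8)
  for fn in (cosine, euclidean, dot):
    assert fn(h, protos).shape == (5, 3)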
def evaluate_b(sess,
model,
task_it,
num_steps,
num_classes_a,
num_classes_b,
prototypes_a=None,
old_and_new=False,
similarity='euclidean'):
"""Evaluate the model on task A."""
acc_list = np.zeros([num_steps])
if old_and_new:
acc_list_old = np.zeros([num_steps])
acc_list_new = np.zeros([num_steps])
acc_list_old2 = np.zeros([num_steps])
acc_list_new2 = np.zeros([num_steps])
it = tqdm(six.moves.xrange(num_steps), ncols=0)
for tt in it:
task_data = task_it.next()
prototypes_b = calculate_episode_protos(
sess, model, num_classes_a, num_classes_b, task_data, old_and_new)
if old_and_new:
all_prototypes = np.concatenate([prototypes_a, prototypes_b])
else:
all_prototypes = prototypes_b
h_test = sess.run(model.h_a, feed_dict={model.inputs: task_data.x_test})
if similarity == 'cosine':
logits = cosine(h_test, all_prototypes)
elif similarity == 'euclidean':
logits = euclidean(h_test, all_prototypes)
elif similarity == 'dot':
logits = dot(h_test, all_prototypes)
else:
raise ValueError('Unknown similarity function')
correct = np.equal(np.argmax(logits, axis=1),
task_data.y_test).astype(np.float32)
_acc = correct.mean()
acc_list[tt] = _acc
if old_and_new:
is_new = task_data.y_test >= num_classes_a
is_old = np.logical_not(is_new)
_acc_old = correct[is_old].mean()
_acc_new = correct[is_new].mean()
correct_new = np.equal(
np.argmax(logits[is_new, num_classes_a:], axis=1),
task_data.y_test[is_new] - num_classes_a).astype(np.float32)
_acc_new2 = correct_new.mean()
correct_old = np.equal(
np.argmax(logits[is_old, :num_classes_a], axis=1),
task_data.y_test[is_old]).astype(np.float32)
_acc_old2 = correct_old.mean()
acc_list_old[tt] = _acc_old
acc_list_new[tt] = _acc_new
acc_list_new2[tt] = _acc_new2
acc_list_old2[tt] = _acc_old2
it.set_postfix(
acc_b=u'{:.3f}±{:.3f}'.format(
np.array(acc_list).sum() * 100.0 / float(tt + 1),
np.array(acc_list).std() / np.sqrt(float(tt + 1)) * 100.0),
acc_b_old=u'{:.3f}±{:.3f}'.format(
np.array(acc_list_old).sum() * 100.0 / float(tt + 1),
np.array(acc_list_old).std() / np.sqrt(float(tt + 1)) * 100.0),
acc_b_old2=u'{:.3f}±{:.3f}'.format(
np.array(acc_list_old2).sum() * 100.0 / float(tt + 1),
np.array(acc_list_old2).std() / np.sqrt(float(tt + 1)) * 100.0),
acc_b_new=u'{:.3f}±{:.3f}'.format(
np.array(acc_list_new).sum() * 100.0 / float(tt + 1),
np.array(acc_list_new).std() / np.sqrt(float(tt + 1)) * 100.0),
acc_b_new2=u'{:.3f}±{:.3f}'.format(
np.array(acc_list_new2).sum() * 100.0 / float(tt + 1),
np.array(acc_list_new2).std() / np.sqrt(float(tt + 1)) * 100.0))
else:
it.set_postfix(acc_b=u'{:.3f}±{:.3f}'.format(
np.array(acc_list).sum() * 100.0 / float(tt + 1),
np.array(acc_list).std() / np.sqrt(float(tt + 1)) * 100.0))
results_dict = {
'acc': acc_list.mean(),
'acc_se': acc_list.std() / np.sqrt(float(acc_list.size))
}
if old_and_new:
results_dict['acc_old'] = acc_list_old.mean()
results_dict['acc_old_se'] = acc_list_old.std() / np.sqrt(
float(acc_list_old.size))
results_dict['acc_old2'] = acc_list_old2.mean()
results_dict['acc_old2_se'] = acc_list_old2.std() / np.sqrt(
float(acc_list_old2.size))
results_dict['acc_new'] = acc_list_new.mean()
results_dict['acc_new_se'] = acc_list_new.std() / np.sqrt(
float(acc_list_new.size))
results_dict['acc_new2'] = acc_list_new2.mean()
results_dict['acc_new2_se'] = acc_list_new2.std() / np.sqrt(
float(acc_list_new2.size))
results_dict['delta_a'] = results_dict['acc_old'] - results_dict['acc_old2']
results_dict['delta_b'] = results_dict['acc_new'] - results_dict['acc_new2']
results_dict['delta'] = 0.5 * (
results_dict['delta_a'] + results_dict['delta_b'])
return results_dict
def main():
# ------------------------------------------------------------------------
# Flags
nshot = FLAGS.nshot
dataset = FLAGS.dataset
nclasses_train = FLAGS.nclasses_b
nclasses_val = FLAGS.nclasses_b
nclasses_test = FLAGS.nclasses_b
num_test = FLAGS.ntest
is_eval = FLAGS.eval
nepisode_final = FLAGS.nepisode_final
run_test = FLAGS.test
pretrain = FLAGS.pretrain
retest = FLAGS.retest
tag = FLAGS.tag
# ------------------------------------------------------------------------
# Configuration
config = get_config(FLAGS.config)
opt_config = config.optimizer_config
old_and_new = config.transfer_config.old_and_new
similarity = config.protonet_config.similarity
# ------------------------------------------------------------------------
# Log folder
assert tag is not None, 'Please add a name for the experiment'
log_folder = os.path.join(FLAGS.results, dataset, 'n{}w{}'.format(
nshot, nclasses_val), tag)
log.info('Experiment ID {}'.format(tag))
if not os.path.exists(log_folder):
os.makedirs(log_folder)
elif not is_eval:
assert False, 'Folder {} exists. Pick another tag.'.format(log_folder)
# ------------------------------------------------------------------------
# Model
metadata = get_metadata(dataset)
with log.verbose_level(2):
model_dict = get_model(
config,
metadata['num_classes_a'],
nclasses_train,
nclasses_val,
nclasses_test,
is_eval=is_eval)
model = model_dict['val']
modelv = model_dict['val']
# ------------------------------------------------------------------------
# Dataset
seed = 0
with log.verbose_level(2):
data = get_datasets(dataset, metadata, nshot, num_test,
opt_config.batch_size, opt_config.num_gpu,
metadata['num_classes_a'], nclasses_train, nclasses_val,
nclasses_test, old_and_new, seed, True)
# ------------------------------------------------------------------------
# Save configurations
save_config(config, log_folder)
# ------------------------------------------------------------------------
# Log outputs
restore_saver = get_restore_saver(
retest=retest,
cosine_a=modelv.config.protonet_config.cosine_a,
reinit_tau=modelv.config.protonet_config.reinit_tau)
logger = get_exp_logger(log_folder)
saver = get_saver(log_folder)
# ------------------------------------------------------------------------
# Create a TensorFlow session
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess = tf.Session(config=sess_config)
# ------------------------------------------------------------------------
# Initialize model
restore_model(
sess, model, modelv, restore_saver, is_eval=is_eval, pretrain=pretrain)
# ------------------------------------------------------------------------
# Calculate prototypes A.
if old_and_new:
prototypes_a = calculate_protos(sess, model, model.num_classes_a,
data['a_train'], nepisode_final)
else:
prototypes_a = None
# ------------------------------------------------------------------------
# Run on val set.
results = {}
results['val_b'] = evaluate_b(
sess,
model,
data['b_val'],
nepisode_final,
model.num_classes_a,
nclasses_val,
prototypes_a=prototypes_a,
old_and_new=old_and_new,
similarity=similarity)
# ------------------------------------------------------------------------
# Run on test set.
if run_test:
results['test_b'] = evaluate_b(
sess,
model,
data['b_test'],
nepisode_final,
model.num_classes_a,
nclasses_val,
prototypes_a=prototypes_a,
old_and_new=old_and_new,
similarity=similarity)
# ------------------------------------------------------------------------
# Log results.
final_log(log_folder, results, old_and_new=old_and_new)
if __name__ == '__main__':
main()
|
plot_parametric_plot.py
|
vinnamkim/large-batch-training
| 122 |
61046
|
<filename>plot_parametric_plot.py<gh_stars>100-1000
'''
Train CNNs on the CIFAR10/CIFAR100
Plots a parametric plot between SB and LB
minimizers demonstrating the relative sharpness
of the two minima.
Requirements:
- Keras (with Theano)
- Matplotlib
- Numpy
GPU run command:
KERAS_BACKEND=theano python plot_parametric_plot.py --network C[1-4]
'''
from __future__ import print_function
from keras.datasets import cifar10, cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import numpy
import matplotlib.pyplot as plt
import argparse
import network_zoo
parser = argparse.ArgumentParser(description=
'''This code first trains the user-specific network (C[1-4])
using small-batch ADAM and large-batch ADAM, and then plots
the parametric plot connecting the two minimizers
illustrating the sharpness difference.''')
parser.add_argument('-n', '--network', help='''Selects which network
to plot the parametric plots for.
Choices are C1, C2, C3 and C4.''', required=True)
network_choice = vars(parser.parse_args())['network']
nb_epoch = 20
# the data, shuffled and split between train and test sets
if network_choice in ['C1', 'C2']:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
nb_classes = 10
elif network_choice in ['C3', 'C4']:
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
nb_classes = 100
else:
raise ValueError('''Invalid choice of network.
Please choose one of C1, C2, C3 or C4.
Refer to the paper for details regarding these networks''')
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# build the network
if network_choice in ['C1', 'C3']:
model = network_zoo.shallownet(nb_classes)
elif network_choice in ['C2', 'C4']:
model = network_zoo.deepnet(nb_classes)
# let's train the model using Adam
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.save_weights('x0.h5')
# let's first find the small-batch solution
model.fit(X_train, Y_train,
batch_size=256,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
shuffle=True)
sb_solution = [p.get_value() for p in model.trainable_weights]
# re-compiling to reset the optimizer accumulators
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# setting the initial (starting) point
model.load_weights('x0.h5')
# now, let's train the large-batch solution
model.fit(X_train, Y_train,
batch_size=5000,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
lb_solution = [p.get_value() for p in model.trainable_weights]
# parametric plot data collection
# we discretize the interval [-1,2] into 25 pieces
alpha_range = numpy.linspace(-1, 2, 25)
data_for_plotting = numpy.zeros((25, 4))
i = 0
for alpha in alpha_range:
for p in range(len(sb_solution)):
model.trainable_weights[p].set_value(lb_solution[p]*alpha +
sb_solution[p]*(1-alpha))
train_xent, train_acc = model.evaluate(X_train, Y_train,
batch_size=5000, verbose=0)
test_xent, test_acc = model.evaluate(X_test, Y_test,
batch_size=5000, verbose=0)
data_for_plotting[i, :] = [train_xent, train_acc, test_xent, test_acc]
i += 1
# finally, let's plot the data
# we plot the XENT loss on the left Y-axis
# and accuracy on the right Y-axis
# if you don't have Matplotlib, simply print
# data_for_plotting to file and use a different plotter
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(alpha_range, data_for_plotting[:, 0], 'b-')
ax1.plot(alpha_range, data_for_plotting[:, 2], 'b--')
ax2.plot(alpha_range, data_for_plotting[:, 1]*100., 'r-')
ax2.plot(alpha_range, data_for_plotting[:, 3]*100., 'r--')
ax1.set_xlabel('alpha')
ax1.set_ylabel('Cross Entropy', color='b')
ax2.set_ylabel('Accuracy', color='r')
ax1.legend(('Train', 'Test'), loc=0)
ax1.grid(b=True, which='both')
plt.savefig('Figures/'+network_choice+'.pdf')
print('Plot saved as ' + network_choice + '.pdf in the Figures/ folder')
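# Illustrative sketch added for clarity (not part of the original script): the
# parametric plot evaluates the network at linear interpolations
# theta(alpha) = alpha * theta_LB + (1 - alpha) * theta_SB of the two minimizers.
# With toy 1-D "weights" the interpolation itself is just:
def _interpolation_example():
    sb = numpy.array([0.0, 1.0])
    lb = numpy.array([2.0, 3.0])
    alphas = numpy.linspace(-1, 2, 25)
    path = numpy.stack([lb * a + sb * (1 - a) for a in alphas])
    assert path.shape == (25, 2)
    return path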
|
tensorflow_graphics/math/tests/feature_representation_test.py
|
Liang813/graphics
| 2,759 |
61049
|
<reponame>Liang813/graphics
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feature representations."""
from absl.testing import parameterized
from tensorflow_graphics.math import feature_representation
from tensorflow_graphics.util import test_case
class FeatureRepresentationTest(test_case.TestCase):
@parameterized.parameters(
(3, (3,)),
(4, (2, 3)),
(8, (5, 3, 6)),
)
def test_random_rays_exception_exception_not_raised(self,
num_frequencies,
*shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
feature_representation.positional_encoding, shapes,
num_frequencies=num_frequencies)
if __name__ == "__main__":
test_case.main()
|
applications/DEMApplication/tests/test_DEM_search_tolerance.py
|
lkusch/Kratos
| 778 |
61073
|
import os
import KratosMultiphysics
from KratosMultiphysics import Logger
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage
import numpy as np
import auxiliary_functions_for_tests
this_working_dir_backup = os.getcwd()
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class DEM3D_SearchToleranceMain(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage, KratosUnittest.TestCase):
def Initialize(self):
super().Initialize()
for node in self.spheres_model_part.Nodes:
self.initial_normal_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Z)
@classmethod
def GetMainPath(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
def GetProblemNameWithPath(self):
return os.path.join(self.main_path, self.DEM_parameters["problem_name"].GetString())
def FinalizeSolutionStep(self):
super().FinalizeSolutionStep()
for node in self.spheres_model_part.Nodes:
#reference data with freq=1 searchtolerance=0.0
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.86502139707038
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3859516373258987
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5929799879392164
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
def Finalize(self):
self.procedures.RemoveFoldersWithResults(str(self.main_path), str(self.problem_name), '')
super().Finalize()
class DEM3D_SearchTolerance1(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.8654458179811835
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3861319639727263
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.594495289987086
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class DEM3D_SearchTolerance2(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.865445816566027
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.386128017385994
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5941551772701182
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class DEM3D_SearchTolerance3(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.86502139707038
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3859516373258987
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5929799879392164
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class TestSearchTolerance(KratosUnittest.TestCase):
@classmethod
def test_SearchA(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(0.0)
project_parameters["search_tolerance_against_walls"].SetDouble(0.0)
project_parameters["NeighbourSearchFrequency"].SetInt(1)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchToleranceMain, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchB(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(0.0)
project_parameters["search_tolerance_against_walls"].SetDouble(0.0)
project_parameters["NeighbourSearchFrequency"].SetInt(10)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance1, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchC(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(1e-04)
project_parameters["search_tolerance_against_walls"].SetDouble(1e-04)
project_parameters["NeighbourSearchFrequency"].SetInt(20)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance2, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchD(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(1e-03)
project_parameters["search_tolerance_against_walls"].SetDouble(1e-03)
project_parameters["NeighbourSearchFrequency"].SetInt(20)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance3, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
if __name__ == "__main__":
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
KratosUnittest.main()
|
mmdet/core/bbox/assigners/__init__.py
|
yanglinGEM/ReDet
| 270 |
61107
|
from .base_assigner import BaseAssigner
from .max_iou_assigner import MaxIoUAssigner
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .max_iou_assigner_hbb_cy import MaxIoUAssignerCy
from .max_iou_assigner_rbbox import MaxIoUAssignerRbbox
from .approx_max_iou_assigner_cy import ApproxMaxIoUAssignerCy
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
    'MaxIoUAssignerCy', 'MaxIoUAssignerRbbox', 'ApproxMaxIoUAssignerCy'
]
|
soccer3d/instancesegm/utils.py
|
ngerstle/soccerontable
| 465 |
61114
|
import numpy as np
import scipy
import scipy.sparse
import cv2
def get_pixel_neighbors(height, width):
"""
Estimate the 4 neighbors of every pixel in an image
:param height: image height
:param width: image width
:return: pixel index - neighbor index lists
"""
pix_id = []
neighbor_id = []
for i in range(height):
for j in range(width):
n = []
if i == 0:
n = n + [(i + 1) * width + j]
elif i == height - 1:
n = n + [(i - 1) * width + j]
else:
n = n + [(i + 1) * width + j, (i - 1) * width + j]
if j == 0:
n = n + [i * width + j + 1]
elif j == width - 1:
n = n + [i * width + j - 1]
else:
n = n + [i * width + j + 1, i * width + j - 1]
for k in n:
pix_id.append(i*width+j)
neighbor_id.append(k)
return pix_id, neighbor_id
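# Illustrative sketch added for clarity (not part of the original module): on a
# 2x2 grid every pixel has exactly two 4-neighbors, so the edge list has 8 pairs.
def _pixel_neighbors_example():
    pix_id, neighbor_id = get_pixel_neighbors(2, 2)
    assert len(pix_id) == len(neighbor_id) == 8
    # pixel 0 (row 0, col 0) is linked to pixel 2 (below it) and pixel 1 (to its right)
    assert (pix_id[0], neighbor_id[0]) == (0, 2)
    return list(zip(pix_id, neighbor_id))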
limps = np.array(
[[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 11], [11, 12], [12, 13], [1, 8],
[8, 9], [9, 10], [14, 15], [16, 17], [0, 14], [0, 15], [14, 16], [15, 17]])
def get_instance_skeleton_buffer(h, w, poses):
output = np.zeros((h, w, 3), dtype=np.float32) - 1
for i in range(len(poses)):
keypoints = poses[i]
lbl = i
for k in range(limps.shape[0]):
kp1, kp2 = limps[k, :].astype(int)
bone_start = keypoints[kp1, :]
bone_end = keypoints[kp2, :]
bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
if bone_start[2] > 0.0:
output[int(bone_start[1]), int(bone_start[0])] = 1
cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
if bone_end[2] > 0.0:
output[int(bone_end[1]), int(bone_end[0])] = 1
cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
if bone_start[2] > 0.0 and bone_end[2] > 0.0:
cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
return output[:, :, 0]
def get_poseimg_for_opt(sel_pose, poseimg, init_mask, n_bg=50):
h, w = init_mask.shape[:2]
bg_label = 1
output = np.zeros((h, w, 3), dtype=np.float32) - 1
II, JJ = (poseimg > 0).nonzero()
Isel, J_sel = (poseimg == sel_pose).nonzero()
output[II, JJ] = 0
output[Isel, J_sel] = 2
init_mask[Isel, J_sel] = 1
# Sample also from points in the field
init_mask = cv2.dilate(init_mask, np.ones((25, 25), np.uint8), iterations=1)
I_bg, J_bg = (init_mask == 0).nonzero()
rand_index = np.random.permutation(len(I_bg))[:n_bg]
bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
for k in range(bg_points.shape[0]):
cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
return output[:, :, 0]
def draw_poses_for_optimization(sel_pose, keypoints_list, init_mask, n_bg=50):
h, w = init_mask.shape[:2]
bg_label = 0
output = np.zeros((h, w, 3), dtype=np.float32)-1
for i in range(len(keypoints_list)):
keypoints = keypoints_list[i]
if i == sel_pose:
lbl = 2
else:
lbl = 1
for k in range(limps.shape[0]):
kp1, kp2 = limps[k, :].astype(int)
bone_start = keypoints[kp1, :]
bone_end = keypoints[kp2, :]
bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
if bone_start[2] > 0.0:
output[int(bone_start[1]), int(bone_start[0])] = 1
cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
if bone_end[2] > 0.0:
output[int(bone_end[1]), int(bone_end[0])] = 1
cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
if bone_start[2] > 0.0 and bone_end[2] > 0.0:
cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
# Draw circles for the bg players keypoints
# for k in range(bg_keypoints.shape[0]):
# cv2.circle(output, (int(bg_keypoints[k, 0]), int(bg_keypoints[k, 1])), 2, (bg_keypoint_lable, 0, 0), -1)
# Sample also from points in the field
init_mask = cv2.dilate(init_mask, np.ones((5, 5), np.uint8), iterations=1)
I_bg, J_bg = (init_mask == 0).nonzero()
rand_index = np.random.permutation(len(I_bg))[:n_bg]
bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
for k in range(bg_points.shape[0]):
cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
return output[:, :, 0]
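# Illustrative sketch (assumed usage) of the label image produced above for a
# downstream segmentation/optimization step: after `draw_poses_for_optimization`,
# pixels labelled 2 are seeds for the selected player, 1 marks the other
# players' skeletons, 0 marks randomly sampled background points, and -1 is
# unconstrained. The poses and mask below are hypothetical.
def _demo_pose_labels(h=480, w=640):
    rng = np.random.RandomState(1)
    poses = [rng.uniform(0, min(h, w), (18, 3)).astype(np.float32) for _ in range(2)]
    for p in poses:
        p[:, 2] = 1.0  # mark every keypoint as confidently detected
    init_mask = np.zeros((h, w), dtype=np.uint8)
    labels = draw_poses_for_optimization(0, poses, init_mask, n_bg=50)
    fg_seeds = np.argwhere(labels == 2)  # selected player's skeleton pixels
    bg_seeds = np.argwhere(labels == 0)  # sampled background points
    return fg_seeds, bg_seeds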
def set_U(strokes, h, w, dim):
N = h*w
y = np.zeros((N, dim))
U = scipy.sparse.lil_matrix((N, N))
for p in range(strokes.shape[0]):
i = strokes[p, 1]
j = strokes[p, 0]
index = int(i * w + j)
for ii in range(dim):
y[index, ii] = strokes[p, ii+2]
U[index, index] = 1
return U, y
def set_DW(image, edges=None, sigma1=1000., sigma2=0.01):
    image = image.astype(float)
    h, w = image.shape[0:2]
    # Guard against the default: with no edge map, treat edge strength as zero
    # everywhere so that the edge-based weight below reduces to 1.
    if edges is None:
        edges = np.zeros((h, w))
    N = h * w
pixd, neighborid = get_pixel_neighbors(h, w)
i, j = np.unravel_index(pixd, (h, w))
ii, jj = np.unravel_index(neighborid, (h, w))
pix_diff = np.squeeze((image[i, j, :] - image[ii, jj, :]) ** 2)
if len(pix_diff.shape) == 1:
pix_diff = pix_diff[:, np.newaxis]
weight0 = np.exp(-(np.sum(pix_diff, axis=1)) / sigma1)
weight1 = np.exp(-((edges[i, j]) ** 2) / sigma2)
# neighbor_info = np.vstack((pixd, neighborid, weight0)).T
M = len(pixd)
D = scipy.sparse.lil_matrix((M, N))
W = scipy.sparse.lil_matrix((M, M))
p = np.arange(0, M, 1)
D[p, pixd] = 1
D[p, neighborid] = -1
W[p, p] = weight1
return D, W
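# Illustrative sketch (assumed usage, not defined in this file) of how `set_U`
# and `set_DW` can be combined: U selects the stroke-constrained pixels, D and
# W form an edge-aware graph Laplacian L = D^T W D, and values are propagated
# to all pixels by solving the sparse system (U + lam * L) x = y per channel.
# `lam` is an assumed smoothness weight; `image`, `edges` and `strokes` follow
# the formats expected by the two functions above.
def _demo_propagate(image, edges, strokes, lam=1.0):
    from scipy.sparse.linalg import spsolve
    h, w = image.shape[:2]
    dim = strokes.shape[1] - 2           # stroke rows are (x, y, value_1, ..., value_dim)
    U, y = set_U(strokes, h, w, dim)
    D, W = set_DW(image, edges=edges)
    L = D.T @ (W @ D)                    # weighted graph Laplacian, shape (N, N)
    A = (U + lam * L).tocsr()
    x = np.column_stack([spsolve(A, y[:, c]) for c in range(dim)])
    return x.reshape(h, w, dim)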
|
src/einsteinpy/geodesic/geodesic.py
|
epsilonethan/einsteinpy
| 485 |
61138
|
import warnings
import numpy as np
from einsteinpy.integrators import GeodesicIntegrator
from .utils import _P, _kerr, _kerrnewman, _sch
class Geodesic:
"""
Base Class for defining Geodesics
Working in Geometrized Units (M-Units),
with :math:`c = G = M = k_e = 1`
"""
def __init__(
self,
metric,
metric_params,
position,
momentum,
time_like=True,
return_cartesian=True,
**kwargs,
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
time_like : bool, optional
Determines type of Geodesic
``True`` for Time-like geodesics
``False`` for Null-like geodesics
Defaults to ``True``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
This only affects the coordinates. Momenta are dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
# Contravariant Metrics, defined so far
_METRICS = {
"Schwarzschild": _sch,
"Kerr": _kerr,
"KerrNewman": _kerrnewman,
}
if metric not in _METRICS:
raise NotImplementedError(
f"'{metric}' is unsupported. Currently, these metrics are supported:\
\n1. Schwarzschild\n2. Kerr\n3. KerrNewman"
)
self.metric_name = metric
self.metric = _METRICS[metric]
self.metric_params = metric_params
if metric == "Schwarzschild":
self.metric_params = (0.0,)
self.position = np.array([0.0, *position])
        self.momentum = _P(
            self.metric, self.metric_params, self.position, momentum, time_like
        )
self.time_like = time_like
self.kind = "Time-like" if time_like else "Null-like"
self.coords = "Cartesian" if return_cartesian else "Spherical Polar"
self._trajectory = self.calculate_trajectory(**kwargs)
def __repr__(self):
return f"""Geodesic Object:(\n\
Type : ({self.kind}),\n\
Metric : ({self.metric_name}),\n\
Metric Parameters : ({self.metric_params}),\n\
Initial 4-Position : ({self.position}),\n\
Initial 4-Momentum : ({self.momentum}),\n\
Trajectory = (\n\
{self.trajectory}\n\
),\n\
Output Position Coordinate System = ({self.coords})\n\
))"""
def __str__(self):
return self.__repr__()
@property
def trajectory(self):
"""
Returns the trajectory of the test particle
"""
return self._trajectory
def calculate_trajectory(self, **kwargs):
"""
Calculate trajectory in spacetime
Parameters
----------
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Returns
-------
~numpy.ndarray
            N-element numpy array, containing the step indices
~numpy.ndarray
Shape-(N, 8) numpy array, containing
(4-Position, 4-Momentum) for each step
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
g, g_prms = self.metric, self.metric_params
q0, p0 = self.position, self.momentum
tl = self.time_like
N = kwargs.get("steps", 50)
dl = kwargs.get("delta", 0.5)
rtol = kwargs.get("rtol", 1e-2)
atol = kwargs.get("atol", 1e-2)
order = kwargs.get("order", 2)
omega = kwargs.get("omega", 1.0)
sw = kwargs.get("suppress_warnings", False)
steps = np.arange(N)
geodint = GeodesicIntegrator(
metric=g,
metric_params=g_prms,
q0=q0,
p0=p0,
time_like=tl,
steps=N,
delta=dl,
rtol=rtol,
atol=atol,
order=order,
omega=omega,
suppress_warnings=sw,
)
for i in steps:
geodint.step()
vecs = np.array(geodint.results, dtype=float)
q1 = vecs[:, 0]
p1 = vecs[:, 1]
results = np.hstack((q1, p1))
# Ignoring
# q2 = vecs[:, 2]
# p2 = vecs[:, 3]
if self.coords == "Cartesian":
# Converting to Cartesian from Spherical Polar Coordinates
# Note that momenta cannot be converted this way,
# due to ambiguities in the signs of v_r and v_th (velocities)
t, r, th, ph = q1.T
pt, pr, pth, pph = p1.T
x = r * np.sin(th) * np.cos(ph)
y = r * np.sin(th) * np.sin(ph)
z = r * np.cos(th)
cart_results = np.vstack((t, x, y, z, pt, pr, pth, pph)).T
return steps, cart_results
return steps, results
class Nulllike(Geodesic):
"""
Class for defining Null-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
            This only affects the coordinates. Momenta are dimensionless
            quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
super().__init__(
metric=metric,
metric_params=metric_params,
position=position,
momentum=momentum,
time_like=False,
return_cartesian=return_cartesian,
**kwargs,
)
class Timelike(Geodesic):
"""
Class for defining Time-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
            This only affects the coordinates. Momenta are dimensionless
            quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
super().__init__(
metric=metric,
metric_params=metric_params,
position=position,
momentum=momentum,
time_like=True,
return_cartesian=return_cartesian,
**kwargs,
)
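# Illustrative usage sketch, based only on the constructor signature and the
# `trajectory` property defined above; the numerical values are arbitrary and
# chosen purely for demonstration.
if __name__ == "__main__":
    geod = Timelike(
        metric="Kerr",
        metric_params=(0.9,),            # assumed spin parameter ``a`` for Kerr
        position=[4.0, np.pi / 3, 0.0],  # (r, theta, phi), per the spherical convention used above
        momentum=[0.0, 0.0, 3.0],        # (p_r, p_theta, p_phi); p_t is computed automatically
        steps=100,
        delta=0.5,
        return_cartesian=True,
    )
    lambdas, vecs = geod.trajectory  # step indices and (t, x, y, z, p_t, p_r, p_theta, p_phi) rows
    print(geod)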
|
components/isceobj/Orbit/PRC.py
|
vincentschut/isce2
| 1,133 |
61155
|
<reponame>vincentschut/isce2
import os
import logging
import datetime
from isceobj.Orbit.Orbit import Orbit
from isceobj.Orbit.Orbit import StateVector
from isceobj.Util.decorators import type_check, logged, pickled
class PRC(object):
"""A class to parse orbit data from D-PAF"""
logging_name = "isce.orbit.PRC.PRC"
@logged
def __init__(self, file=None):
self.filename = file
self.firstEpoch = 0
self.lastEpoch = 0
self.tdtOffset = 0
self.orbit = Orbit()
self.orbit.configure()
self.orbit.setOrbitQuality('Precise')
self.orbit.setOrbitSource('PRC')
return None
def getOrbit(self):
return self.orbit
def parse(self):
#People still seem to be using the old .Z format
#Adding support for it - PSA
if os.path.splitext(self.filename)[1] == '.Z':
from subprocess import Popen, PIPE
fp = Popen(["zcat", self.filename], stdout=PIPE).stdout
else:
            fp = open(self.filename, 'rb')
data = fp.read()
fp.close()
numLines = int(len(data)/130)
for i in range(numLines):
line = data[i*130:(i+1)*130]
self.__parseLine(line)
def __parseLine(self,line):
"""Parse a line from a PRC orbit file"""
referenceFrame = line[0:6].decode('utf-8')
if (referenceFrame == 'STATE '):
self.__parseStateLine(line)
if (referenceFrame == 'STTERR'):
self.__parseTerrestrialLine(line)
def __parseTerrestrialLine(self,line):
j2000Day = float(line[14:20])/10.0 + 0.5
tdt = float(line[20:31])/1e6
x = float(line[31:43])/1e3
y = float(line[43:55])/1e3
z = float(line[55:67])/1e3
vx = float(line[67:78])/1e6
vy = float(line[78:89])/1e6
vz = float(line[89:100])/1e6
quality = line[127]
tdt = tdt - self.tdtOffset
dt = self.__j2000ToDatetime(j2000Day,tdt)
sv = StateVector()
sv.configure()
sv.setTime(dt)
sv.setPosition([x,y,z])
sv.setVelocity([vx,vy,vz])
self.orbit.addStateVector(sv)
def __parseStateLine(self,line):
self.firstEpoch = self.__j2000ToDatetime(float(line[6:12])/10.0,0.0)
self.lastEpoch = self.__j2000ToDatetime(float(line[12:18])/10.0,0.0)
self.tdtOffset = float(line[47:52])
self.tdtOffset = self.tdtOffset/1e3
def __j2000ToDatetime(self,j2000Day,tdt):
"""Convert the number of days since 1 Jan. 2000 to a datetime object"""
j2000 = datetime.datetime(year=2000,month=1,day=1)
dt = j2000 + datetime.timedelta(days=j2000Day,seconds=tdt)
return dt
pass
@pickled
class Arclist(object):
"""A class for parsing the old ROI_PAC PRC arclist file"""
logging_name = 'isce.Orbit.PRC.Arclist'
@logged
def __init__(self, file=None):
self.filename = file
self.arclist = []
return None
def parse(self):
fp = open(self.filename,'r')
for line in fp.readlines():
data = line.split()
start = float(data[1])/10.0
end = float(data[2])/10.0
arc = Arc()
arc.filename = data[0]
arc.setStart(self.__j2000ToDatetime(start, 86400.0/2.0))
arc.setStop(self.__j2000ToDatetime(end,86400.0/2.0))
self.arclist.append(arc)
def getArc(self,time):
"""Given a datetime object, determine the first arc number that contains precise ephemeris"""
inRange = []
# Make a list containing all of the
# arcs that span <code>time</code>
for arc in self.arclist:
if (arc.inRange(time)):
inRange.append(arc)
if (len(inRange) == 0):
self.logger.error("No valid arcs found spanning %s" % (time))
if (len(inRange) > 0):
self.logger.info("%s valid arcs found spanning %s" % (len(inRange),time))
return inRange[0].filename
def getOrbitFile(self,time):
filename = self.getArc(time)
return filename
def __j2000ToDatetime(self,j2000Day,tdt):
"""Convert the number of days since 1 Jan. 2000 to a datetime object"""
j2000 = datetime.datetime(year=2000,month=1,day=1)
dt = j2000 + datetime.timedelta(days=j2000Day,seconds=tdt)
return dt
class Arc(object):
"""A class representing an orbital arc segment"""
def __init__(self):
self.filename = None
self._start = None
self._stop = None
def getStart(self):
return self._start
@type_check(datetime.datetime)
def setStart(self,start):
self._start = start
def getStop(self):
return self._stop
@type_check(datetime.datetime)
def setStop(self,stop):
self._stop = stop
def inRange(self, time):
"""Determine whether a time stamp lies within the
start and stop times"""
return self._start <= time <= self._stop
start = property(fget=getStart,fset=setStart)
stop = property(fget=getStop,fset=setStop)
pass
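# Illustrative usage sketch based only on the classes defined above; the file
# names and the acquisition time are placeholders.
if __name__ == "__main__":
    # Find the precise-orbit arc covering an acquisition time, then parse it.
    arclist = Arclist(file='arclist')
    arclist.parse()
    acquisition = datetime.datetime(2005, 6, 1, 12, 0, 0)
    prcFile = arclist.getOrbitFile(acquisition)
    prc = PRC(file=prcFile)
    prc.parse()
    orbit = prc.getOrbit()  # isceobj Orbit populated from the STTERR records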
|
authx/middleware/__init__.py
|
theoohoho/authx
| 141 |
61179
|
<gh_stars>100-1000
from authx.middleware.Oauth2 import MiddlewareOauth2
__all__ = ["MiddlewareOauth2"]
|