Dataset schema (one column per field; ⌀ marks a nullable column):

hexsha: string (length 40)
size: int64 (6 to 782k)
ext: string (7 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 to 237)
max_stars_repo_name: string (length 6 to 72)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list
max_stars_count: int64 (1 to 53k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 4 to 184)
max_issues_repo_name: string (length 6 to 72)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list
max_issues_count: int64 (1 to 27.1k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 4 to 184)
max_forks_repo_name: string (length 6 to 72)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list
max_forks_count: int64 (1 to 12.2k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 6 to 782k)
avg_line_length: float64 (2.75 to 664k)
max_line_length: int64 (5 to 782k)
alphanum_fraction: float64 (0 to 1)

One record per source file follows.
hexsha: f788f931d4a28f803c07b6e69b05c5823cc5c0a2 | size: 7,785 | ext: py | lang: Python
max_stars:  paddlenlp/utils/tools.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | count: null | events: null
max_issues: paddlenlp/utils/tools.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | count: null | events: null
max_forks:  paddlenlp/utils/tools.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | count: null | events: null
content:
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from .log import logger
def static_params_to_dygraph(model, static_tensor_dict):
    """Simple tool to convert static graph parameters to a dygraph parameter dict.
    **NOTE** The model must support both static graph and dygraph mode.
    Args:
        model (nn.Layer): the model of a neural network.
        static_tensor_dict (dict): dict of parameters saved in static mode,
            usually loaded by `paddle.static.load_program_state`.
Returns:
[tensor dict]: a state dict the same as the dygraph mode.
"""
state_dict = model.state_dict()
# static_tensor_dict = paddle.static.load_program_state(static_params_path)
ret_dict = dict()
for n, p in state_dict.items():
if p.name not in static_tensor_dict:
            logger.info("%s parameter is missing from your state dict." % n)
continue
ret_dict[n] = static_tensor_dict[p.name]
return ret_dict
def dygraph_params_to_static(model, dygraph_tensor_dict, topo=None):
    """Simple tool to convert dygraph parameters to a static graph parameter dict.
    **NOTE** The model must support both static graph and dygraph mode.
    Args:
        model (nn.Layer): the model of a neural network.
        dygraph_tensor_dict (dict): state dict of parameters saved in dygraph mode.
    Returns:
        [tensor dict]: a state dict keyed by the static graph parameter names.
"""
state_dict = model.state_dict()
ret_dict = dict()
for name, parm in state_dict.items():
if name not in dygraph_tensor_dict:
            logger.info("%s parameter is missing from your state dict." % name)
continue
tensor = dygraph_tensor_dict[name]
if parm.is_distributed:
assert topo is not None
for dim, v in enumerate(tensor.shape):
if parm.shape[dim] != v:
break
splited = np.split(tensor, topo.mp_info.size,
axis=dim)[topo.mp_info.rank]
ret_dict[parm.name] = splited
else:
ret_dict[parm.name] = tensor
return ret_dict
class TimeCostAverage(object):
"""
    Simple tool for calculating the average time cost during training and inference.
"""
def __init__(self):
self.reset()
def reset(self):
"""
        Reset the recorder state and set `cnt` back to zero.
"""
self.cnt = 0
self.total_time = 0
def record(self, usetime):
"""
        Record the time cost of the current step and increment `cnt`.
"""
self.cnt += 1
self.total_time += usetime
def get_average(self):
"""
        Return the average time cost recorded since the last reset.
"""
if self.cnt == 0:
return 0
return self.total_time / self.cnt
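# Illustrative usage sketch for TimeCostAverage (not part of the original file); assumes
# `time` is imported and `run_step()` is a hypothetical placeholder for one training step.
#
#     timer = TimeCostAverage()
#     for _ in range(num_steps):
#         start = time.time()
#         run_step()
#         timer.record(time.time() - start)
#     avg_cost = timer.get_average()   # returns 0 if nothing was recorded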
def get_env_device():
"""
    Return the device name of the running environment.
"""
if paddle.is_compiled_with_cuda():
return 'gpu'
elif paddle.is_compiled_with_npu():
return 'npu'
elif paddle.is_compiled_with_rocm():
return 'rocm'
elif paddle.is_compiled_with_xpu():
return 'xpu'
return 'cpu'
def compare_version(version, pair_version):
"""
Args:
version (str): The first version string needed to be compared.
The format of version string should be as follow : "xxx.yyy.zzz".
pair_version (str): The second version string needed to be compared.
The format of version string should be as follow : "xxx.yyy.zzz".
Returns:
        int: The result of the comparison. 1 means version > pair_version; 0 means
version = pair_version; -1 means version < pair_version.
Examples:
        >>> compare_version("2.2.1", "2.2.0")
        1
        >>> compare_version("2.2.0", "2.2.0")
        0
        >>> compare_version("2.2.0-rc0", "2.2.0")
        -1
        >>> compare_version("2.3.0-rc0", "2.2.0")
        1
"""
version = version.strip()
pair_version = pair_version.strip()
if version == pair_version:
return 0
version_list = version.split(".")
pair_version_list = pair_version.split(".")
for version_code, pair_version_code in zip(version_list, pair_version_list):
if not version_code.isnumeric():
return -1
if not pair_version_code.isnumeric():
return 1
if int(version_code) > int(pair_version_code):
return 1
elif int(version_code) < int(pair_version_code):
return -1
return 0
def get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):
"""
    Get indices along the last dimension of probability arrays whose values exceed the limit.
    Args:
        probs (List[List[float]]): The input probability arrays.
        limit (float): The probability threshold.
        return_prob (bool): Whether to also return the probability values.
    Returns:
        List[List[int]]: The indices along the last dimension that meet the condition.
"""
probs = np.array(probs)
dim_len = len(probs.shape)
if dim_len > 1:
result = []
for p in probs:
result.append(get_bool_ids_greater_than(p, limit, return_prob))
return result
else:
result = []
for i, p in enumerate(probs):
if p > limit:
if return_prob:
result.append((i, p))
else:
result.append(i)
return result
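# Illustrative behaviour of get_bool_ids_greater_than (sketch, not part of the original file):
#
#     get_bool_ids_greater_than([[0.1, 0.7], [0.9, 0.2]], limit=0.5)        -> [[1], [0]]
#     get_bool_ids_greater_than([0.1, 0.7], limit=0.5, return_prob=True)    -> [(1, 0.7)]  # (index, probability) pairs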
def get_span(start_ids, end_ids, with_prob=False):
"""
    Get the span set from start and end position lists.
    Args:
        start_ids (List[int]/List[tuple]): The start index list.
        end_ids (List[int]/List[tuple]): The end index list.
        with_prob (bool): If True, each element of start_ids and end_ids is a tuple like (index, probability).
    Returns:
        set: The span set without overlapping; every id can only be used once.
"""
if with_prob:
start_ids = sorted(start_ids, key=lambda x: x[0])
end_ids = sorted(end_ids, key=lambda x: x[0])
else:
start_ids = sorted(start_ids)
end_ids = sorted(end_ids)
start_pointer = 0
end_pointer = 0
len_start = len(start_ids)
len_end = len(end_ids)
couple_dict = {}
while start_pointer < len_start and end_pointer < len_end:
if with_prob:
start_id = start_ids[start_pointer][0]
end_id = end_ids[end_pointer][0]
else:
start_id = start_ids[start_pointer]
end_id = end_ids[end_pointer]
if start_id == end_id:
couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]
start_pointer += 1
end_pointer += 1
continue
if start_id < end_id:
couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]
start_pointer += 1
continue
if start_id > end_id:
end_pointer += 1
continue
result = [(couple_dict[end], end) for end in couple_dict]
result = set(result)
return result
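# Illustrative behaviour of get_span (sketch, not part of the original file): starts and ends
# are matched in sorted order to form non-overlapping spans.
#
#     get_span([1, 5], [3, 7])  ->  {(1, 3), (5, 7)}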
avg_line_length: 32.169421 | max_line_length: 114 | alphanum_fraction: 0.618754

hexsha: 581a1467384ed3c7465f96d2a89e8062848df008 | size: 4,811 | ext: py | lang: Python
max_stars:  pacman-arch/test/pacman/util.py | Maxython/pacman-for-termux | 3b208eb9274cbfc7a27fca673ea8a58f09ebad47 | ["MIT"] | count: 23 | events: 2021-05-21T19:11:06.000Z to 2022-03-31T18:14:20.000Z
max_issues: source/pacman-6.0.1/test/pacman/util.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | ["Unlicense"] | count: 11 | events: 2021-05-21T12:08:44.000Z to 2021-12-21T08:30:08.000Z
max_forks:  source/pacman-6.0.1/test/pacman/util.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | ["Unlicense"] | count: 1 | events: 2021-09-26T08:44:40.000Z to 2021-09-26T08:44:40.000Z
content:
# Copyright (c) 2006 by Aurelien Foret <[email protected]>
# Copyright (c) 2006-2021 Pacman Development Team <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import hashlib
import tap
SELFPATH = os.path.abspath(os.path.dirname(__file__))
# ALPM
PM_ROOT = "/"
PM_DBPATH = "var/lib/pacman"
PM_SYNCDBPATH = "var/lib/pacman/sync"
PM_LOCK = "var/lib/pacman/db.lck"
PM_CACHEDIR = "var/cache/pacman/pkg"
PM_EXT_PKG = ".pkg.tar.gz"
PM_HOOKDIR = "etc/pacman.d/hooks"
# Pacman
PACCONF = "etc/pacman.conf"
# Pactest
TMPDIR = "tmp"
SYNCREPO = "var/pub"
LOGFILE = "var/log/pactest.log"
verbose = 0
def vprint(msg):
if verbose:
tap.diag(msg)
#
# Methods to generate files
#
def getfileinfo(filename):
data = {
'changed': False,
'isdir': False,
'islink': False,
'link': None,
'hasperms': False,
'perms': None,
}
if filename[-1] == "*":
data["changed"] = True
filename = filename.rstrip("*")
if filename.find(" -> ") != -1:
filename, link = filename.split(" -> ")
data["islink"] = True
data["link"] = link
elif filename.find("|") != -1:
filename, perms = filename.split("|")
data["hasperms"] = True
data["perms"] = int(perms, 8)
if filename[-1] == "/":
data["isdir"] = True
data["filename"] = filename
return data
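# Examples of the filename mini-syntax handled above (illustrative sketch, not part of the original file):
#
#     getfileinfo("usr/share/doc*")           -> changed file:   {'changed': True,  'filename': 'usr/share/doc', ...}
#     getfileinfo("bin/foo -> /usr/bin/foo")  -> symlink:        {'islink': True,   'link': '/usr/bin/foo', 'filename': 'bin/foo', ...}
#     getfileinfo("etc/profile|644")          -> explicit perms: {'hasperms': True, 'perms': 0o644, 'filename': 'etc/profile', ...}
#     getfileinfo("etc/rc.d/")                -> directory:      {'isdir': True,    'filename': 'etc/rc.d/', ...}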
def mkfile(base, name, data=""):
info = getfileinfo(name)
filename = info["filename"]
path = os.path.join(base, filename)
if info["isdir"]:
if not os.path.isdir(path):
os.makedirs(path, 0o755)
return path
dir_path = os.path.dirname(path)
if dir_path and not os.path.isdir(dir_path):
os.makedirs(dir_path, 0o755)
if info["islink"]:
os.symlink(info["link"], path)
else:
writedata(path, data)
if info["perms"]:
os.chmod(path, info["perms"])
return path
def writedata(filename, data):
if isinstance(data, list):
data = "\n".join(data)
fd = open(filename, "w")
if data:
fd.write(data)
if data[-1] != "\n":
fd.write("\n")
fd.close()
def mkcfgfile(filename, root, option, db):
# Options
data = ["[options]"]
for key, value in option.items():
data.extend(["%s = %s" % (key, j) for j in value])
# Repositories
# sort by repo name so tests can predict repo order, rather than be
# subjects to the whims of python dict() ordering
for key in sorted(db.keys()):
if key != "local":
value = db[key]
data.append("[%s]\n" % (value.treename))
data.append("SigLevel = %s\n" % (value.getverify()))
if value.syncdir:
data.append("Server = file://%s" % (os.path.join(root, SYNCREPO, value.treename)))
for optkey, optval in value.option.items():
data.extend(["%s = %s" % (optkey, j) for j in optval])
mkfile(root, filename, "\n".join(data))
#
# MD5 helpers
#
def getmd5sum(filename):
if not os.path.isfile(filename):
return ""
fd = open(filename, "rb")
checksum = hashlib.md5()
while 1:
block = fd.read(32 * 1024)
if not block:
break
checksum.update(block)
fd.close()
return checksum.hexdigest()
def mkmd5sum(data):
checksum = hashlib.md5()
checksum.update(("%s\n" % data).encode('utf8'))
return checksum.hexdigest()
#
# Miscellaneous
#
def which(filename, path=None):
if not path:
path = os.environ["PATH"].split(os.pathsep)
for p in path:
f = os.path.join(p, filename)
if os.access(f, os.F_OK):
return f
return None
def grep(filename, pattern):
pat = re.compile(pattern)
myfile = open(filename, 'r')
for line in myfile:
if pat.search(line):
myfile.close()
return True
myfile.close()
return False
def mkdir(path):
if os.path.isdir(path):
return
elif os.path.isfile(path):
raise OSError("'%s' already exists and is not a directory" % path)
os.makedirs(path, 0o755)
avg_line_length: 25.727273 | max_line_length: 98 | alphanum_fraction: 0.594887

hexsha: b72fe19d01acee6f62a8e04b5b867719df5a113e | size: 2,562 | ext: py | lang: Python
max_stars:  tests/onegov/core/test_elements.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null | events: null
max_issues: tests/onegov/core/test_elements.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null | events: null
max_forks:  tests/onegov/core/test_elements.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | count: null | events: null
content:
from onegov.core.utils import Bunch
from onegov.core.elements import Link, Confirm, Intercooler
def test_link(render_element):
# text is translated
result = render_element(Link(text="Settings", url='/settings'))
assert result.pyquery('a').text() == "Settings"
assert result.pyquery('a').attr('href') == '/settings'
# other attributes are rendered
result = render_element(Link(text='foo', url='#', attrs={
'data-foo': 'bar'
}))
assert result.pyquery('a').attr('data-foo') == 'bar'
# we show a hint if the link is hidden from public
result = render_element(Link(text='hidden', url='#', model=Bunch(
access='private'
)))
def test_confirm_link(render_element):
result = render_element(Link(text="Delete", url='#', traits=(
Confirm(
"Confirm?",
"Extra...",
"Yes",
"No"
),
), attrs={'class': 'foo'}))
assert result.pyquery('a').attr('data-confirm') == "Confirm?"
assert result.pyquery('a').attr('data-confirm-extra') == "Extra..."
assert result.pyquery('a').attr('data-confirm-yes') == "Yes"
assert result.pyquery('a').attr('data-confirm-no') == "No"
assert result.pyquery('a').attr('class') in ('foo confirm', 'confirm foo')
def test_link_slots():
# make sure that the Link class as well as all its parents have
# __slots__ defined (for some lookup speed and memory improvements)
assert not hasattr(Link("Slots", '#'), '__dict__')
def test_intercooler_link(render_element):
result = render_element(Link(text="Delete", traits=Intercooler(
request_method="POST", redirect_after='#redirect', target='#target'
)))
assert result.pyquery('a').attr('ic-post-to') == '#'
assert result.pyquery('a').attr('ic-target') == '#target'
assert result.pyquery('a').attr('redirect-after') == '#redirect'
assert result.pyquery('a').attr('href') is None
def test_class_attributes(render_element):
result = render_element(Link(text="Delete", attrs={
'class': 'foo'
}))
assert result.pyquery('a').attr('class') == 'foo'
result = render_element(Link(text="Delete", attrs={
'class': ('foo', 'bar')
}))
assert result.pyquery('a').attr('class') in ('foo bar', 'bar foo')
result = render_element(Link(text="Delete", attrs={
'class': ('foo', 'bar')
}))
assert result.pyquery('a').attr('class') in ('foo bar', 'bar foo')
result = render_element(Link(text="Delete"))
assert result.pyquery('a').attr('class') is None
avg_line_length: 34.16 | max_line_length: 78 | alphanum_fraction: 0.62178

hexsha: b7d6b61bd5c672ea3b72fcf0504562145ddd5f77 | size: 6,503 | ext: py | lang: Python
max_stars:  src/test/tests/hybrid/ddf_vs_dbinning.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | count: 226 | events: 2018-12-29T01:13:49.000Z to 2022-03-30T19:16:31.000Z
max_issues: src/test/tests/hybrid/ddf_vs_dbinning.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | count: 5,100 | events: 2019-01-14T18:19:25.000Z to 2022-03-31T23:08:36.000Z
max_forks:  src/test/tests/hybrid/ddf_vs_dbinning.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | count: 84 | events: 2019-01-24T17:41:50.000Z to 2022-03-10T10:01:46.000Z
content:
from visit_utils import *
import math
def setup_plot():
DeleteAllPlots()
OpenDatabase(silo_data_path("rect3d.silo"))
exprs.define("coords", "coord(quadmesh3d)",etype="vector")
exprs.define("mesh_x_zonal","recenter(coords[0])")
exprs.define("mesh_y_zonal","recenter(coords[1])")
exprs.define("mass","d * volume(quadmesh3d)")
AddPlot("Pseudocolor","mass")
DrawPlots()
def ddf(opts):
# work around quirks related to the ddf pipeline expecting
# vars to already exist
predraw_vars = [ opts["codomain"]]
predraw_vars.extend(opts["varnames"])
for v in predraw_vars:
ChangeActivePlotsVar(v)
atts = visit.ConstructDDFAttributes()
ddf_op_map = {"avg": atts.Average,
"min": atts.Minimum,
"max": atts.Maximum,
"stddev": atts.StandardDeviation,
"var": atts.Variance,
"sum": atts.Sum,
"count": atts.Count,
"rms": atts.RMS,
"pdf": atts.PDF}
atts.ddfName = opts["name"]
atts.codomainName = opts["codomain"]
atts.varnames = opts["varnames"]
atts.ranges = opts["ranges"]
atts.numSamples = opts["samples"]
atts.statisticalOperator = ddf_op_map[opts["op"]]
visit.ConstructDDF(atts)
ndims = len(atts.numSamples)
ddf_varname = "%s_%s_%dd" % (opts["codomain"],opts["op"],ndims)
if len(atts.numSamples) == 1:
src_fname = "%s.ultra" % atts.ddfName
des_fname = "%s.ult" % (atts.ddfName)
common.sexe("mv %s %s" % (src_fname, des_fname))
lines = open(des_fname).readlines()
f = open(des_fname, "w")
f.write("# %s\n" % (ddf_varname))
for l in lines[1:]:
f.write(l)
f.close()
else:
ofname = "%s.vtk" % atts.ddfName
orig_vtk_var = "SCALARS %s float" % opts["codomain"]
ddf_vtk_var = "SCALARS %s float" % ddf_varname
data = open(ofname).read()
f = open(ofname, "w")
data = data.replace(orig_vtk_var,ddf_vtk_var)
        f.write(data)
        f.close()
print("[ddf output: %s]" % ofname)
return ofname
def test_orig_mass():
setup_plot()
Test("ddf_vs_dbinning_input_plot")
res = query("Variable Sum")
DeleteAllPlots()
return res
def test_dbinning_using_coords():
setup_plot()
AddOperator("DataBinning")
datts = DataBinningAttributes()
datts.numDimensions = datts.Two
datts.dim1BinBasedOn = datts.X
datts.dim1SpecifyRange = 0
datts.dim1NumBins = 10
datts.dim2BinBasedOn = datts.Y
datts.dim2SpecifyRange = 0
datts.dim2NumBins = 10
datts.outOfBoundsBehavior = datts.Clamp
datts.reductionOperator = datts.Sum
datts.varForReduction = "mass"
datts.emptyVal = 0
datts.outputType = datts.OutputOnBins
SetOperatorOptions(datts)
DrawPlots()
# we have to export b/c we can't query the
# result of the operated created expr ...
ofname = "dbin_mass_sum_using_coords"
eatts = ExportDBAttributes()
eatts.db_type = "VTK"
eatts.filename = ofname
ExportDatabase(eatts)
DeleteAllPlots()
dbin_varname = "%s_%s_%dd" % ("mass","sum",2)
ofname += ".vtk"
orig_vtk_var = "SCALARS %s float" % "operators/DataBinning"
ddf_vtk_var = "SCALARS %s float" % dbin_varname
data = open(ofname).read()
f = open(ofname, "w")
data = data.replace(orig_vtk_var,ddf_vtk_var)
f.write(data)
f.close()
OpenDatabase(ofname)
AddPlot("Pseudocolor","mass_sum_2d")
DrawPlots()
Test("ddf_vs_dbinning_dbin_coords_result")
res = query("Variable Sum")
DeleteAllPlots()
CloseDatabase(ofname)
return res
def test_dbinning_using_coords_exprs():
setup_plot()
AddOperator("DataBinning")
datts = DataBinningAttributes()
datts.numDimensions = datts.Two
datts.dim1BinBasedOn = datts.Variable
datts.dim1Var = "mesh_x_zonal"
datts.dim1SpecifyRange = 0
datts.dim1NumBins = 10
datts.dim2BinBasedOn = datts.Variable
datts.dim2Var = "mesh_y_zonal"
datts.dim2SpecifyRange = 0
datts.dim2NumBins = 10
datts.outOfBoundsBehavior = datts.Clamp
datts.reductionOperator = datts.Sum
datts.varForReduction = "mass"
datts.emptyVal = 0
datts.outputType = datts.OutputOnBins
SetOperatorOptions(datts)
DrawPlots()
# we have to export b/c we can't query the
# result of the operated created expr ...
ofname = "dbin_mass_sum_using_coords_exprs"
eatts = ExportDBAttributes()
eatts.db_type = "VTK"
eatts.filename = ofname
ExportDatabase(eatts)
DeleteAllPlots()
dbin_varname = "%s_%s_%dd" % ("mass","sum",2)
ofname += ".vtk"
orig_vtk_var = "SCALARS %s float" % "operators/DataBinning"
ddf_vtk_var = "SCALARS %s float" % dbin_varname
data = open(ofname).read()
f = open(ofname, "w")
data = data.replace(orig_vtk_var,ddf_vtk_var)
f.write(data)
f.close()
OpenDatabase(ofname)
AddPlot("Pseudocolor","mass_sum_2d")
DrawPlots()
Test("ddf_vs_dbinning_dbin_coords_exprs_result")
res = query("Variable Sum")
DeleteAllPlots()
CloseDatabase(ofname)
return res
def test_ddf():
setup_plot()
ddf_opts = {"name": "ddf_mass_sum",
"op" : "sum",
"codomain" : "mass",
"varnames" : ("mesh_x_zonal",
"mesh_y_zonal"),
"ranges" : (0,1,
0,1),
"samples" : (10,10)}
ddf(ddf_opts)
DeleteAllPlots()
OpenDatabase("ddf_mass_sum.vtk")
AddPlot("Pseudocolor","mass_sum_2d")
DrawPlots()
Test("ddf_vs_dbinning_ddf_result")
res = query("Variable Sum")
DeleteAllPlots()
CloseDatabase("ddf_mass_sum.vtk")
return res
orig_val = test_orig_mass()
ddf_val = test_ddf()
dbin_coords_val = test_dbinning_using_coords()
dbin_cexprs_val = test_dbinning_using_coords_exprs()
TestText("Orig","Mass Sum = %s" % orig_val)
TestText("DDF","Mass Sum = %s" % ddf_val)
TestText("DBIN with Coords","Mass Sum = %s" % dbin_coords_val)
TestText("DBIN with Coords Exprs","Mass Sum = %s" % dbin_cexprs_val)
TestValueLT("Orig Equals DDF",abs(orig_val - ddf_val), 1e-4 )
TestValueLT("Orig Equals DBIN with Coords",abs(orig_val - dbin_coords_val), 1e-4 )
TestValueLT("Orig Equals DBIN with Coords Exprs",abs(orig_val - dbin_cexprs_val), 1e-4 )
Exit()
avg_line_length: 30.530516 | max_line_length: 88 | alphanum_fraction: 0.627403

hexsha: 4d101470bc24f374c184b991e70cd6bf397529a6 | size: 4,136 | ext: py | lang: Python
max_stars:  python/en/archive/topics/command_line_arguments/command_line_arguments.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | count: null | events: null
max_issues: python/en/archive/topics/command_line_arguments/command_line_arguments.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | count: null | events: null
max_forks:  python/en/archive/topics/command_line_arguments/command_line_arguments.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
command_line_arguments.py
Draft: 2019-11-13 (Wed)
This is a class version of test-getopt_full.py at
https://github.com/aimldl/python3/blob/master/topics/command_line_arguments/test-getopt_full.py
Example:
$ python command_line_arguments.py -c conf.txt -i in.txt -o out.txt
argc=7
argv=['-c', 'conf.txt', '-i', 'in.txt', '-o', 'out.txt']
sys.argv[0]=/home/aimldl/command_line_arguments.py
sys.argv[1]=-c
sys.argv[2]=conf.txt
sys.argv[3]=-i
sys.argv[4]=in.txt
sys.argv[5]=-o
sys.argv[6]=out.txt
opts=[('-c', 'conf.txt'), ('-i', 'in.txt'), ('-o', 'out.txt')]
args=[]
config_file=conf.txt
input_file=in.txt
output_file=out.txt
$
"""
import sys, getopt
class CommandLineArguments:
# This class is assumed to be a singleton.
# Constructor
def __init__(self):
pass
# Member functions
def print_inputs( self, argc, argv ):
print(f"argc={argc}" )
print(f"argv={argv}" )
for index in range(argc):
sys_argv = sys.argv[ index ]
print(f"sys.argv[{index}]={sys_argv}" )
def main( self, argc, argv, debug=False ):
if debug:
self.print_inputs( argc, argv )
# When "$ python test-getopt_more.py" is run, bypass parse_arguments.
# Otherwise TypeError occurs in "opts, args = getopt.getopt( argv, ... )".
# TypeError: 'NoneType' object is not iterable
if argc > 1:
#parse_arguments( argc, argv )
self.parse_arguments( argc, argv, debug=True )
def parse_arguments( self, argc, argv, debug=False ):
'''
opts,args = getopt.getopt( argv,"",[] )
Input
argv is the (entire) argument list
"" is a short option starting with a hyphen -. Example: -h
An argument should be followed by a colon (:).
[] is a long option start with two hyphens --. Example: --help
An argument should be followed by an equal sign ('=').
Output
opts is a list of (option, value) pairs.
args is the list of program arguments left after the option list was stripped.
'''
assert isinstance(argc, int), 'argc must be an integer'
assert isinstance(argv, list), 'argv must be a list'
try:
# YOU MAY CHANGE THIS PART
short_options = "hc:i:o:" # Note : is used.
long_options = ["help", "config=", "input=", "output="] # Note = is used.
# YOU MAY CHANGE THIS PART
opts,args = getopt.getopt( argv, short_options, long_options )
if debug:
print(f"opts={opts}" )
print(f"args={args}" )
except getopt.GetoptError:
self.usage()
sys.exit(2)
# YOU MAY CHANGE THIS PART
config_file = ''
input_file = ''
output_file = ''
# YOU MAY CHANGE THIS PART
for opt, arg in opts:
if opt in ("-h", "--help"):
self.usage()
sys.exit()
# YOU MAY CHANGE THIS PART
elif opt in ("-c", "--config"):
config_file = arg
if debug:
print(f"config_file={config_file}" )
elif opt in ("-i", "--input"):
input_file = arg
if debug:
print(f"input_file={input_file}" )
elif opt in ("-o","--output"):
output_file = arg
if debug:
print(f"output_file={output_file}" )
# YOU MAY CHANGE THIS PART
else :
self.usage()
sys.exit(2)
def usage( self ):
print("usage: $ python command_line_arguments.py -h")
if __name__ == "__main__":
cla = CommandLineArguments()
# Process the command line arguments
argc = len( sys.argv )
argv = sys.argv[1:]
#cla.main( argc, argv )
cla.main( argc, argv, debug=True )
# EOF
avg_line_length: 31.815385 | max_line_length: 97 | alphanum_fraction: 0.53119

hexsha: 4d18936f76d5e7b98eeb6d17d2257da2a066fdc2 | size: 385 | ext: py | lang: Python
max_stars:  py-basics/src/lectures/lists/exercise2.py | AndrasTarlos/s4f | bfe2d631a9a2715953d8ac5ddc8ef97d3cefb426 | ["CC0-1.0"] | count: null | events: null
max_issues: py-basics/src/lectures/lists/exercise2.py | AndrasTarlos/s4f | bfe2d631a9a2715953d8ac5ddc8ef97d3cefb426 | ["CC0-1.0"] | count: null | events: null
max_forks:  py-basics/src/lectures/lists/exercise2.py | AndrasTarlos/s4f | bfe2d631a9a2715953d8ac5ddc8ef97d3cefb426 | ["CC0-1.0"] | count: 4 | events: 2021-12-13T15:52:00.000Z to 2022-03-28T13:54:53.000Z
content:
"""
List Exercise 2
Implement the function includes() to check whether a given element 'search_element' is
contained in the list. Use the tests below to verify your solution.
"""
def includes(my_list, search_element):
return search_element in my_list
# Tests
print(includes([1, 2, 3, 4], 3)) # -> True
print(includes([1, 2, 3, 4], 5)) # -> False
avg_line_length: 24.0625 | max_line_length: 105 | alphanum_fraction: 0.716883

hexsha: 421dc039646aa97978bd6e781f4546797741d924 | size: 645 | ext: py | lang: Python
max_stars:  tests/test_datatypes.py | MZH-bust/genutil | f17190ec484d5844f8950908cc07556a5b1429e7 | ["MIT"] | count: null | events: null
max_issues: tests/test_datatypes.py | MZH-bust/genutil | f17190ec484d5844f8950908cc07556a5b1429e7 | ["MIT"] | count: null | events: null
max_forks:  tests/test_datatypes.py | MZH-bust/genutil | f17190ec484d5844f8950908cc07556a5b1429e7 | ["MIT"] | count: null | events: null
content:
import pytest
from genutil import datatypes
class TestIsListOfStrings:
@pytest.mark.parametrize(
"test_parameter,expected",
[
pytest.param(["this", "is", "a", "list", "of", "strings"], True, id="Param1 - List of Strings"),
pytest.param("no list, but string", False, id="Param2 - String only"),
pytest.param(["this", "is", "a", 9], False, id="Param3 - List contains int"),
pytest.param(10, False, id="Param4 - only int"),
],
)
def test_is_list_of_strings(self, test_parameter, expected):
assert datatypes.is_list_of_strings(test_parameter) == expected
avg_line_length: 37.941176 | max_line_length: 108 | alphanum_fraction: 0.615504

hexsha: c4398fc2571aa2bf3c82a1ee5c5fd0508ff75b59 | size: 1,425 | ext: py | lang: Python
max_stars:  7-assets/past-student-repos/data_struct_and_algo-master/max_sum_on_rotation.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | count: null | events: null
max_issues: 7-assets/past-student-repos/data_struct_and_algo-master/max_sum_on_rotation.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | count: null | events: null
max_forks:  7-assets/past-student-repos/data_struct_and_algo-master/max_sum_on_rotation.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | count: null | events: null
content:
# Input: arr[] = {1, 20, 2, 10}
# Output: 72
def single_rotation(arr,l):
temp=arr[0]
for i in range(l-1):
arr[i]=arr[i+1]
arr[l-1]=temp
def sum_calculate(arr,l):
sum=0
for i in range(l):
sum=sum+arr[i]*(i)
return sum
def max_finder(arr, l):
    # find the largest element, then rotate the array so that it ends up last
    largest = arr[0]
    for i in range(l):
        if largest < arr[i]:
            largest = arr[i]
    for i in range(l):
        if largest == arr[i]:
            temp = i
    index = temp + 1
    for j in range(index):
        single_rotation(arr, len(arr))
arr=[10, 1, 2, 3, 4, 5, 6, 7, 8, 9]
max_finder(arr,len(arr))
result=sum_calculate(arr,len(arr))
print("Max sum is: "+ str(result))
#optimized approach
# '''Python program to find maximum value of Sum(i*arr[i])'''
# # returns max possible value of Sum(i*arr[i])
# def maxSum(arr):
# # stores sum of arr[i]
# arrSum = 0
# # stores sum of i*arr[i]
# currVal = 0
# n = len(arr)
# for i in range(0, n):
# arrSum = arrSum + arr[i]
# currVal = currVal + (i*arr[i])
# # initialize result
# maxVal = currVal
# # try all rotations one by one and find the maximum
# # rotation sum
# for j in range(1, n):
# currVal = currVal + arrSum-n*arr[n-j]
# if currVal > maxVal:
# maxVal = currVal
# # return result
# return maxVal
# # test maxsum(arr) function
# arr = [10, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# print("Max sum is: ", maxSum(arr))
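# Worked check of the commented recurrence above (illustrative, not in the original file),
# using the example from the header comment: arr = [1, 20, 2, 10], arrSum = 33, n = 4.
#   R0 = 0*1 + 1*20 + 2*2 + 3*10               = 54
#   R1 = R0 + arrSum - n*arr[3] = 54 + 33 - 40 = 47
#   R2 = R1 + arrSum - n*arr[2] = 47 + 33 - 8  = 72
#   R3 = R2 + arrSum - n*arr[1] = 72 + 33 - 80 = 25
# The maximum is 72, matching the expected output stated at the top of this file.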
avg_line_length: 19 | max_line_length: 61 | alphanum_fraction: 0.555789

hexsha: 6717d9c142e9411315a2a1880908e9b395f56901 | size: 14,833 | ext: py | lang: Python
max_stars:  app/models.py | jkopka/price_tracker | 370dd320a3d54a3bd955b62df337dfe87b58f7ee | ["MIT"] | count: null | events: null
max_issues: app/models.py | jkopka/price_tracker | 370dd320a3d54a3bd955b62df337dfe87b58f7ee | ["MIT"] | count: 2 | events: 2020-07-04T18:44:37.000Z to 2020-08-10T06:29:53.000Z
max_forks:  app/models.py | jkopka/price_tracker | 370dd320a3d54a3bd955b62df337dfe87b58f7ee | ["MIT"] | count: null | events: null
content:
from bs4 import BeautifulSoup
import requests
from urllib.parse import urljoin
from re import sub
from decimal import Decimal
import numpy as np
import matplotlib.pyplot as plt
from flask import Markup
from urllib.parse import urlparse
import logging
import time
# Object holding the state of a single search
class SearchItem:
def __init__(self, url):
self.url = url
self.all_prices = []
self.quantity = 0
self.quantity_ignored = 0
self.search_query = ""
self.url_next_page = ""
self.searched = False
self.error = ""
def get_search_query(self):
return self.search_query
def get_percentile(self, perc):
# rint(self.all_prices)
return np.percentile(self.all_prices, perc).round(2)
def get_quantity(self):
return self.quantity
def get_quantity_ignored(self):
return self.quantity_ignored
# Plattform
class Plattform:
"""
    Central class for crawling.
    Configure it via __init__, then crawl via .fetch().
"""
def __init__(self, urls=[], keywords=[]):
"""
        Initializes the class.
        Parameters to pass: urls<list>, keywords<list>
"""
logging.basicConfig(
format="%(asctime)s %(message)s", filename="logging.log", level=logging.INFO
)
self.base_url_ebay_kleinanzeigen = "https://www.ebay-kleinanzeigen.de/"
self.base_url_ebay_de = "https://www.ebay.de/"
self.max_articles = 1000
self.urls = urls
self.keywords = [element.lower() for element in keywords]
# print(self.keywords)
self.headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
self.proxies = {
"http": None,
"https": None,
}
search_items = []
for url in urls:
            # A SearchItem is created for every URL passed in. Here we also check right away
            # whether the URL is valid and whether it points to the mobile website.
if self.uri_validator(url) == True:
print("--------")
logging.info("URL: " + url)
print("--------")
search_items.append(SearchItem(self.get_web_version(url)))
self.search_items = search_items
def get_web_version(self, url):
"""
        Checks whether the link points to the mobile website. If so, the link to the
        desktop version is fetched instead.
        Todo: the eBay.de part is still missing.
"""
# print(url)
if "m.ebay-kleinanzeigen" in url:
print("Mobile version detected!")
r = requests.get(url, headers=self.headers, proxies=self.proxies)
doc = BeautifulSoup(r.text.replace("​", ""), "html.parser")
url = urljoin(
self.base_url_ebay_kleinanzeigen,
doc.find(id="footer-webversion-link").get("href"),
)
return url
def uri_validator(self, x):
"""
        Validates a URL.
"""
try:
result = urlparse(x)
return all([result.scheme, result.netloc, result.path])
except:
return False
def set_max_articles(self, max_articles):
"""
        Sets the maximum number of articles to crawl.
"""
self.max_articles = max_articles if max_articles > 0 else 1000
def fetch_url(self, url):
"""
        Fetches a URL via requests and returns the response object.
"""
try:
# print('...fetching with headers',url)
r = requests.get(url, headers=self.headers, proxies=self.proxies)
r.raise_for_status()
return r
        except requests.exceptions.HTTPError:
            # raise_for_status() signalled an error status; return the response so the
            # callers can still inspect status_code (e.g. the 503 handling below)
            print(r.status_code)
            return r
        except requests.exceptions.RequestException:
            # the request itself failed (connection error, timeout, ...)
            return False
def fetch(self):
"""
        .fetch() crawls every URL.
        Takes no parameters. Returns True on success, False on error.
"""
if len(self.search_items) == 0:
return False
result = []
for search_item in self.search_items:
# https://www.ebay-kleinanzeigen.de/s-boote-bootszubehoer/detmold/jolle/k0c211l1792r30
if "ebay-kleinanzeigen.de" in search_item.url:
result.append(self.fetch_page_ebay_kleinanzeigen(search_item))
elif "ebay.de" in search_item.url:
result.append(self.fetch_page_ebay_de(search_item))
else:
print("Link unbekannt! -> ", search_item.url)
            # Not implemented yet!
# elif search_item.site == 'ebay.de':
# result.append(self.fetch_page_ebay_de(search_item))
# print(result)
for res in result:
if res == False:
return False
return True
    def fetch_page_ebay_kleinanzeigen(self, search_item):
        """Fetch the articles of the page.
        Takes the URL to fetch plus the current number of articles.
        Further pages are handled via recursion.
        Returns: all article prices as a list, plus the number of processed articles.
"""
keywords = self.keywords
        # fetch the article page
article = self.fetch_url(search_item.url)
if article == False:
return False
doc = BeautifulSoup(article.text.replace("​", ""), "html.parser")
doc_search_query = doc.find(id="site-search-query")
        # if the title is 'Security Violation', return False
if article.status_code == 503:
search_item.error = doc.select_one("title").text.strip()
print("Error-Code: ", article.status_code)
# print(doc)
return False
if doc.select_one("title").text.strip() == "Security Violation (503)":
print("Security Violation (503)")
# print(doc)
search_item.error = doc.select_one("title").text.strip()
return False
elif doc_search_query is None:
print("None")
# print(doc)
search_item.error = "None"
return False
        # store the search query
search_item.search_query = doc_search_query.get("value")
all_prices = []
for element in doc.select(".aditem"):
            # link to the article
# link = element.select_one('.ellipsis').get('href')
            # get the title
title = element.select_one(".ellipsis").text.strip().lower()
            # skip articles whose title matches a keyword
if [title for keyword in keywords if (keyword in title)]:
# print('Keyword!Title')
search_item.quantity_ignored += 1
continue
            # skip articles whose teaser description matches a keyword
descr = element.select_one(".aditem-main p").text.strip().lower()
if [descr for keyword in keywords if (keyword in descr)]:
# print('Keyword!Descr')
search_item.quantity_ignored += 1
continue
            # get the price
price = element.select_one(".aditem-details").strong.text.strip()
            # clean the price
price = self.clean_price( price)
if price == False:
search_item.quantity_ignored += 1
continue
# print(" # ", title, price)
search_item.quantity += 1
all_prices.append(price)
        # go to the next page
next_page = doc.select_one(".pagination-next")
# print(next_page)
        # if there is a link to a next page and the number of ads does not exceed self.max_articles...
if next_page and search_item.quantity < self.max_articles:
search_item.url_next_page = urljoin(
self.base_url_ebay_kleinanzeigen, next_page.get("href")
)
# print(url_next_page)
time.sleep(0.4)
print("next page!", search_item.quantity)
self.fetch_page_ebay_kleinanzeigen(search_item)
if doc_search_query.get("value") in search_item.all_prices:
print("alle_preise: url schon vorhanden!", doc_search_query.get("value"))
search_item.all_prices.extend(all_prices)
else:
print(
"alle_preise: url noch nicht vorhanden!", doc_search_query.get("value")
)
search_item.all_prices = all_prices
search_item.searched = True
self.searched = True
return True
    def fetch_page_ebay_de(self, search_item):
        """Fetch the articles of the page.
        Takes the URL to fetch plus the current number of articles.
        Further pages are handled via recursion.
        Returns: all article prices as a list, plus the number of processed articles.
"""
keywords = self.keywords
        # fetch the article page
article = self.fetch_url(search_item.url)
if article == False:
return False
doc = BeautifulSoup(article.text.replace("​", ""), "html.parser")
doc_search_query = doc.find(id="gh-ac")
        # if the title is 'Security Violation', return False
if article.status_code == 503:
search_item.error = doc.select_one("title").text.strip()
print("Error-Code: ", article.status_code)
# print(doc)
return False
if doc.select_one("title").text.strip() == "Security Violation (503)":
print("Security Violation (503)")
# print(doc)
search_item.error = doc.select_one("title").text.strip()
return False
elif doc_search_query is None:
print("None")
# print(doc)
search_item.error = "None"
return False
        # store the search query
search_item.search_query = doc_search_query.get("value")
all_prices = []
for element in doc.select(".sresult"):
            # link to the article
# link = element.select_one('.ellipsis').get('href')
            # get the title
title = (
element.select_one(".lvtitle")
.text.replace("Neues Angebot", "")
.strip()
.lower()
)
            # skip articles whose title matches a keyword
if [title for keyword in keywords if (keyword in title)]:
# print('Keyword!Title')
search_item.quantity_ignored += 1
continue
            # get the price
price = element.select_one(".lvprice").text.strip()
            # clean the price
price = self.clean_price( price)
if price == False:
search_item.quantity_ignored += 1
continue
# print(' # ', title, price)
search_item.quantity += 1
all_prices.append(price)
# print(title,': ', price)
        # go to the next page
next_page = doc.select_one(".pagn-next .gspr")
# print(next_page)
        # if there is a link to a next page and the number of ads does not exceed self.max_articles...
if next_page and search_item.quantity < self.max_articles:
search_item.url_next_page = urljoin(
self.base_url_ebay_de, next_page.get("href")
)
# print(url_next_page)
time.sleep(0.4)
print("next page!", search_item.quantity)
            # recurse on the eBay.de fetcher for the following result pages
            self.fetch_page_ebay_de(search_item)
if doc_search_query.get("value") in search_item.all_prices:
print("alle_preise: url schon vorhanden!", doc_search_query.get("value"))
search_item.all_prices.extend(all_prices)
else:
print(
"alle_preise: url noch nicht vorhanden!", doc_search_query.get("value")
)
search_item.all_prices = all_prices
search_item.searched = True
self.searched = True
return True
def clean_price( self, price):
'''
        Takes the raw price string and filters out the various special cases. Returns False
        if the price cannot be determined unambiguously.
'''
cleaning_strings_cut = ('UVP','(','Bisher')
if price == "VB" or price.strip() == "" or "bis" in price or "Zu verschenken" in price:
return False
for string_cut in cleaning_strings_cut:
if string_cut in price:
price = price[:price.index(string_cut)].strip()
try:
if '.' in price:
price = price.replace('.','')
price = float(
price.replace(" €", "")
.replace("EUR", "")
.replace(',','.')
.replace(" VB", "")
.strip()
)
except:
return False
return price
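    # Illustrative inputs and outputs for clean_price (sketch, not part of the original module):
    #     clean_price("1.200 € VB")      -> 1200.0
    #     clean_price("15,50 €")         -> 15.5
    #     clean_price("UVP 20 €")        -> False   (everything from "UVP" on is cut, leaving an empty string)
    #     clean_price("Zu verschenken")  -> False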
def get_error(self):
"""
        Returns all errors collected so far.
"""
error = ""
for search_item in self.search_items:
if not search_item.error == "":
error += Markup(search_item.url + ": " + search_item.error)
return error
def get_search_querys(self):
"""
        Returns the search queries for display.
"""
if len(self.search_items) > 1:
search_querys_text = ""
for search_item in self.search_items:
if not search_querys_text == "":
search_querys_text += " - "
search_querys_text += search_item.search_query
else:
search_querys_text = self.search_items[0].search_query
return search_querys_text
def get_plot(self):
"""
        Generates the box plot for the URLs.
        Returns a PNG (as a base64 data URI string).
"""
import io
import base64
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
matplotlib.use("agg")
fig, axs = plt.subplots()
all_prices_list = []
labels_list = []
for search_item in self.search_items:
all_prices_list.append(search_item.all_prices)
labels_list.append(search_item.search_query)
axs.boxplot(all_prices_list, labels=labels_list)
# Convert plot to PNG image
pngImage = io.BytesIO()
FigureCanvas(fig).print_png(pngImage)
# Encode PNG image to base64 string
pngImageB64String = "data:image/png;base64,"
pngImageB64String += base64.b64encode(pngImage.getvalue()).decode("utf8")
return pngImageB64String
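# Minimal usage sketch for Plattform (not part of the original module); the URL and keyword
# below are placeholders, not values from the original code:
#
#     crawler = Plattform(urls=["https://www.ebay-kleinanzeigen.de/s-jolle/k0"], keywords=["defekt"])
#     crawler.set_max_articles(200)
#     if crawler.fetch():
#         print(crawler.get_search_querys())
#         png_data_uri = crawler.get_plot()   # base64-encoded boxplot of the collected prices
#     else:
#         print(crawler.get_error())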
avg_line_length: 34.819249 | max_line_length: 149 | alphanum_fraction: 0.57035

hexsha: 3f3fa889de296183a0378e06a3c8af385a29f4c5 | size: 1,557 | ext: py | lang: Python
max_stars:  Utils/MatlabWhistleDetection/python/extract_wav.py | tarsoly/NaoTH | dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52 | ["ECL-2.0", "Apache-2.0"] | count: 15 | events: 2015-01-12T10:46:29.000Z to 2022-03-28T05:13:14.000Z
max_issues: Utils/MatlabWhistleDetection/python/extract_wav.py | tarsoly/NaoTH | dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52 | ["ECL-2.0", "Apache-2.0"] | count: 2 | events: 2019-01-20T21:07:50.000Z to 2020-01-22T14:00:28.000Z
max_forks:  Utils/MatlabWhistleDetection/python/extract_wav.py | tarsoly/NaoTH | dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52 | ["ECL-2.0", "Apache-2.0"] | count: 5 | events: 2018-02-07T18:18:10.000Z to 2019-10-15T17:01:41.000Z
content:
import os
import sys
import getopt
"""
Extracts the audio from our game videos. This script expects that ffmpeg is installed and in the PYTHONPATH.
Usage: python extract_wav.py -i <path_to_folder_where_mp4_files_are>
"""
def parse_arguments(argv):
input_path = ''
try:
opts, args = getopt.getopt(argv, "hi:", ["ifile="])
except getopt.GetoptError:
print('python extract_wav.py -i <path>')
sys.exit(2)
    if not opts:  # getopt returns an empty list (never None) when no options are given
print('python extract_wav.py -i <path>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('python extract_wav.py -i <path>')
sys.exit()
elif opt in ("-i", "--ifile"):
input_path = arg
return input_path
def extract_wav(input_path):
for file in os.listdir(input_path):
if file.endswith(".mp4") or file.endswith(".MP4"):
file = os.path.join(input_path, file)
filename = os.path.splitext(file)[0]
print("Filename: ", filename)
"""
-map_channel:
The first 0 is the input file id
The next 1 is the stream specifier - should be the audio stream, 0 is video
The next 0 is the channel id
-ar 8000 resamples the channel to 8kHz
"""
os.system("ffmpeg -i {0} -map_channel 0.1.0 -ar 8000 {1}.wav".format(file, filename))
else:
continue
if __name__ == '__main__':
path = parse_arguments(sys.argv[1:])
extract_wav(path)
avg_line_length: 27.803571 | max_line_length: 111 | alphanum_fraction: 0.572897

hexsha: 58fb641ddef7dd56129d8590322e1acc160f4372 | size: 170 | ext: py | lang: Python
max_stars:  exercises/ja/solution_02_10_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | count: 2,085 | events: 2019-04-17T13:10:40.000Z to 2022-03-30T21:51:46.000Z
max_issues: exercises/ja/solution_02_10_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | count: 79 | events: 2019-04-18T14:42:55.000Z to 2022-03-07T08:15:43.000Z
max_forks:  exercises/ja/solution_02_10_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | count: 361 | events: 2019-04-17T13:34:32.000Z to 2022-03-28T04:42:45.000Z
content:
import spacy
nlp = spacy.load("ja_core_news_md")
doc1 = nlp("暖かい夏の日です")
doc2 = nlp("外は晴れています")
# Get the similarity between doc1 and doc2
similarity = doc1.similarity(doc2)
print(similarity)
avg_line_length: 15.454545 | max_line_length: 35 | alphanum_fraction: 0.747059

hexsha: 18dd95fdf486775b427fa0e34f665f471886c16e | size: 2,883 | ext: py | lang: Python
max_stars:  ods2md.py | tuksik/kennytm-ods2md | cf5e322aa3e3d5eb4dcd72e9531ddb277854ea02 | ["MIT"] | count: null | events: null
max_issues: ods2md.py | tuksik/kennytm-ods2md | cf5e322aa3e3d5eb4dcd72e9531ddb277854ea02 | ["MIT"] | count: null | events: null
max_forks:  ods2md.py | tuksik/kennytm-ods2md | cf5e322aa3e3d5eb4dcd72e9531ddb277854ea02 | ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python3
# Copyright 2015, 2017 Kenny Chan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import ezodf
import sys
import unicodedata
# Ref: http://stackoverflow.com/a/31666966/224671
DISPLAY_WIDTH = {
'A': 1,
'F': 2,
'H': 1,
'N': 1,
'Na': 1,
'W': 2,
}
def display_text(cell):
v = cell.value
if isinstance(v, float):
return '{:g}'.format(v)
elif v is None:
return ''
else:
return str(v)
def display_len(s):
return sum(DISPLAY_WIDTH[unicodedata.east_asian_width(c)] for c in s)
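# Illustrative behaviour (sketch, not part of the original file): wide ("W") and fullwidth ("F")
# characters count as two terminal columns, so
#     display_len("abc") == 3
#     display_len("日本語") == 6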
def main(odf_path, out_file):
ods = ezodf.opendoc(odf_path)
for sheet in ods.sheets:
column_widths = [max(display_len(display_text(cell)) for cell in column) for column in sheet.columns()]
if not any(column_widths):
continue
print('##', sheet.name, file=out_file)
printed_header = False
for row in sheet.rows():
contents = [display_text(cell) for cell in row]
if not any(contents):
continue
print('|', end='', file=out_file)
for m, content in enumerate(contents):
column_width = column_widths[m]
if not column_width:
continue
disp_len = column_width + len(content) - display_len(content)
print(' {0:<{1}}'.format(content, disp_len), end=' |', file=out_file)
print(file=out_file)
if not printed_header:
printed_header = True
print('|', end='', file=out_file)
for w in column_widths:
if w:
print(':', '-' * (w+1), '|', sep='', end='', file=out_file)
print(file=out_file)
if __name__ == '__main__':
main(sys.argv[1], sys.stdout)
avg_line_length: 34.73494 | max_line_length: 111 | alphanum_fraction: 0.635796

hexsha: e1409b6ab26fffacafd213ca4b4a376b80aed345 | size: 1,514 | ext: py | lang: Python
max_stars:  14 Server, PDF Text extraction/scraper.py | manuelapaganini/20_21_Workfile | 5ec3637d18cbd73256b56682d9b99547e21a24d9 | ["MIT"] | count: 6 | events: 2019-08-06T14:53:34.000Z to 2020-10-16T19:44:16.000Z
max_issues: 14 Server, PDF Text extraction/scraper.py | manuelapaganini/20_21_Workfile | 5ec3637d18cbd73256b56682d9b99547e21a24d9 | ["MIT"] | count: 1 | events: 2020-06-25T09:46:58.000Z to 2020-06-25T09:46:58.000Z
max_forks:  14 Server, PDF Text extraction/scraper.py | manuelapaganini/20_21_Workfile | 5ec3637d18cbd73256b56682d9b99547e21a24d9 | ["MIT"] | count: 2 | events: 2019-09-16T13:05:51.000Z to 2019-09-27T09:07:49.000Z
content:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import time
import pandas as pd
from bs4 import BeautifulSoup
# Start the browser
driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
# Now tell the browser which page it should visit.
driver.get('https://www.zefix.ch')
# Give it some time
time.sleep(10)
# Now enter our search term. First we locate the right field, using the web inspector.
# https://selenium-python.readthedocs.io/locating-elements.html
search = driver.find_element_by_id('firm-name-fomfield')
# Now send the term we want to search for
search.send_keys('bäckerei')
# And now we look for the submit button
click = driver.find_element_by_id('submit-search-btn')
# And we click it
click.click()
# This can take a while, so build in enough time just in case.
time.sleep(5)
# Now we save the whole page; extracting the content comes later.
page = driver.page_source.encode('utf-8')
button = driver.find_elements_by_class_name('btn')[14]
with open("page.htm", "wb+") as file:
file.write(page)
file.close()
# the element at index 14 is the 'next page' button
for elem in range(35):
driver.find_elements_by_class_name('btn')[14].click()
time.sleep(3)
page = driver.page_source.encode('utf-8')
with open("pages/page"+str(elem)+".htm", "wb+") as file:
file.write(page)
file.close()
avg_line_length: 36.926829 | max_line_length: 113 | alphanum_fraction: 0.758256

hexsha: beca66fd1093873b130ccbeef5e67429df005b04 | size: 315 | ext: py | lang: Python
max_stars:  Packs/CaseManagement-Generic/Scripts/LinkIncidentsButton/LinkIncidentsButton.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 799 | events: 2016-08-02T06:43:14.000Z to 2022-03-31T11:10:11.000Z
max_issues: Packs/CaseManagement-Generic/Scripts/LinkIncidentsButton/LinkIncidentsButton.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 9,317 | events: 2016-08-07T19:00:51.000Z to 2022-03-31T21:56:04.000Z
max_forks:  Packs/CaseManagement-Generic/Scripts/LinkIncidentsButton/LinkIncidentsButton.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 1,297 | events: 2016-08-04T13:59:00.000Z to 2022-03-31T23:43:06.000Z
content:
import demistomock as demisto
action = demisto.getArg('action')
if action not in ['link', 'unlink']:
action = 'link'
demisto.results(demisto.executeCommand("linkIncidents", {"linkedIncidentIDs": demisto.getArg("linkedIncidentIDs"),
"action": action}))
avg_line_length: 35 | max_line_length: 114 | alphanum_fraction: 0.606349

hexsha: 3639e32504c69a680e54972a052608364a7f5904 | size: 3,532 | ext: py | lang: Python
max_stars:  research/cv/ssd_resnet50/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 77 | events: 2021-10-15T08:32:37.000Z to 2022-03-30T13:09:11.000Z
max_issues: research/cv/ssd_resnet50/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 3 | events: 2021-10-30T14:44:57.000Z to 2022-02-14T06:57:57.000Z
max_forks:  research/cv/ssd_resnet50/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 24 | events: 2021-10-15T08:32:45.000Z to 2022-03-24T18:45:20.000Z
content:
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import argparse
import numpy as np
from PIL import Image
from src.config import config
from src.eval_utils import metrics
batch_size = 1
parser = argparse.ArgumentParser(description="ssd acc calculation")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--img_path", type=str, required=True, help="image file path.")
parser.add_argument("--anno_file", type=str, required=True, help="annotation file.")
parser.add_argument("--drop", action="store_true", help="drop iscrowd images or not.")
args = parser.parse_args()
def get_imgSize(file_name):
img = Image.open(file_name)
return img.size
def get_result(result_path, img_id_file_path):
"""print the mAP"""
if args.drop:
from pycocotools.coco import COCO
train_cls = config.classes
train_cls_dict = {}
for i, cls in enumerate(train_cls):
train_cls_dict[cls] = i
coco = COCO(args.anno_file)
classs_dict = {}
cat_ids = coco.loadCats(coco.getCatIds())
for cat in cat_ids:
classs_dict[cat["id"]] = cat["name"]
files = os.listdir(img_id_file_path)
pred_data = []
for file in files:
img_ids_name = file.split('.')[0]
img_id = int(np.squeeze(img_ids_name))
if args.drop:
anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = coco.loadAnns(anno_ids)
annos = []
iscrowd = False
for label in anno:
bbox = label["bbox"]
class_name = classs_dict[label["category_id"]]
iscrowd = iscrowd or label["iscrowd"]
if class_name in train_cls:
x_min, x_max = bbox[0], bbox[0] + bbox[2]
y_min, y_max = bbox[1], bbox[1] + bbox[3]
annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])
if iscrowd or (not annos):
continue
img_size = get_imgSize(os.path.join(img_id_file_path, file))
image_shape = np.array([img_size[1], img_size[0]])
result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")
boxes = np.fromfile(result_path_0, dtype=np.float32).reshape(config.num_ssd_boxes, 4)
box_scores = np.fromfile(result_path_1, dtype=np.float32).reshape(config.num_ssd_boxes, config.num_classes)
pred_data.append({
"boxes": boxes,
"box_scores": box_scores,
"img_id": img_id,
"image_shape": image_shape
})
mAP = metrics(pred_data, args.anno_file)
print(f" mAP:{mAP}")
if __name__ == '__main__':
get_result(args.result_path, args.img_path)
avg_line_length: 39.244444 | max_line_length: 115 | alphanum_fraction: 0.635617

hexsha: 367948e4f4cad432e628a0a5718fa9052acf05a5 | size: 2,538 | ext: py | lang: Python
max_stars:  tests/test_tipc/bigru_crf/export_model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | count: null | events: null
max_issues: tests/test_tipc/bigru_crf/export_model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | count: null | events: null
max_forks:  tests/test_tipc/bigru_crf/export_model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | count: null | events: null
content:
# -*- coding: UTF-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import paddle
from paddle.static import InputSpec
import paddlenlp as ppnlp
from paddlenlp.data import Vocab
from data import load_vocab
from model import BiGruCrf
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--data_dir", type=str, default=None, help="The folder where the dataset is located.")
parser.add_argument("--params_path", type=str, default='./checkpoints/final.pdparams', help="The path of model parameter to be loaded.")
parser.add_argument("--output_path", type=str, default='./infer_model', help="The path of model parameter in static graph to be saved.")
parser.add_argument("--emb_dim", type=int, default=128, help="The dimension in which a word is embedded.")
parser.add_argument("--hidden_size", type=int, default=128, help="The number of hidden nodes in the GRU layer.")
args = parser.parse_args()
# yapf: enable
def main():
word_vocab = load_vocab(os.path.join(args.data_dir, 'word.dic'))
label_vocab = load_vocab(os.path.join(args.data_dir, 'tag.dic'))
model = BiGruCrf(args.emb_dim, args.hidden_size, len(word_vocab),
len(label_vocab))
state_dict = paddle.load(args.params_path)
model.set_dict(state_dict)
model.eval()
model = paddle.jit.to_static(model,
input_spec=[
InputSpec(shape=[None, None],
dtype="int64",
name='token_ids'),
InputSpec(shape=[None],
dtype="int64",
name='length')
])
# Save in static graph model.
paddle.jit.save(model, os.path.join(args.output_path, "inference"))
if __name__ == "__main__":
main()
avg_line_length: 39.65625 | max_line_length: 136 | alphanum_fraction: 0.639086

hexsha: 62d4a793bd739cd33232d2134880a8d12117672b | size: 914 | ext: py | lang: Python
max_stars:  tf/clasificador2/clasificador_dir.py | alffore/lokroids-python | ac3bbc328140e53ab181034d2e3d5d5d17dc9203 | ["MIT"] | count: null | events: null
max_issues: tf/clasificador2/clasificador_dir.py | alffore/lokroids-python | ac3bbc328140e53ab181034d2e3d5d5d17dc9203 | ["MIT"] | count: null | events: null
max_forks:  tf/clasificador2/clasificador_dir.py | alffore/lokroids-python | ac3bbc328140e53ab181034d2e3d5d5d17dc9203 | ["MIT"] | count: null | events: null
content:
# coding=UTF-8
import cv2
import sys
import os
import tensorflow as tf
import filetype
CATEGORIAS = ['dormido', 'despierto', 'otro']
IMG_SIZE = int(sys.argv[3])
def preparaimg(filepath):
img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
model = tf.keras.models.load_model(sys.argv[1])
path = sys.argv[2]
for img in os.listdir(path):
tipo_archivo = filetype.guess(os.path.join(path, img))
if tipo_archivo is not None and tipo_archivo.mime == 'image/jpeg':
prediction = model.predict([preparaimg(os.path.join(path, img))])
print(img + " " + str(prediction[0]))
# print(img + " " + str(np.dot(prediction[0], [1, 0, 0])))
top_k = prediction[0].argsort()[-len(prediction[0]):][::-1]
print(img+' '+str(top_k[0])+" "+CATEGORIAS[top_k[0]])
| 28.5625 | 73 | 0.658643 |
3dab09c9f18895b904125d9ba2ec741590c1da28
| 1,372 |
py
|
Python
|
Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
COLORS = {
'1 - New': '#00CD33', # (success green)
'2 - In Progress': '#7995D4', # (royal blue)
'3 - On Hold': '#FF9000', # (warning orange)
'4 - Awaiting Caller': '#FF9000', # (warning orange)
'5 - Awaiting Evidence': '#FF9000', # (warning orange)
'6 - Resolved': '#89A5C1', # (polo)
'7 - Closed': '#9AA0A3', # (natural grey)
'8 - Canceled': '#FF1744' # (alert-red)
}
TEXT = {
'1 - New': 'New',
'2 - In Progress': 'In Progress',
'3 - On Hold': 'On-Hold',
'4 - Awaiting Caller': 'Awaiting Caller',
'5 - Awaiting Evidence': 'Awaiting Evidence',
'6 - Resolved': 'Resolved',
'7 - Closed': 'Closed',
'8 - Canceled': 'Canceled'
}
incident = demisto.incidents()
service_now_state = (incident[0].get('CustomFields', {}).get('servicenowstate'))
try:
text_color = COLORS[service_now_state]
text_content = TEXT[service_now_state]
except Exception as e:
demisto.debug(f'SnowIncidentStatus debug - state is: {service_now_state}\n{e}')
text_color = '#000000'
text_content = 'Pending Update'
html = f"<div style='color:{text_color};text-align:center;'><h2>{text_content}</h2></div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html
})
| 30.488889 | 90 | 0.61516 |
3dd1f074801fdda6a493b5523fc6da4e546d091d
| 14,609 |
py
|
Python
|
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/train/train_G.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/train/train_G.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/train/train_G.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-10-14T07:30:18.000Z
|
2019-10-14T07:30:18.000Z
|
from __future__ import print_function
import argparse
import os
import random
import sys
sys.path.append(os.getcwd())
import pdb
import time
import numpy as np
import json
import progressbar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, \
decode_txt, sample_batch_neg, l2_norm
import misc.dataLoader as dl
import misc.model as model
from misc.encoder_QIH import _netE
from misc.netG import _netG
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--input_img_h5', default='data/vdl_img_vgg.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_ques_h5', default='data/visdial_data.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_json', default='data/visdial_params.json', help='path to dataset, now hdf5 file')
parser.add_argument('--outf', default='./save', help='folder to output images and model checkpoints')
parser.add_argument('--encoder', default='G_QIH_VGG', help='what encoder to use.')
parser.add_argument('--model_path', default='', help='folder to output images and model checkpoints')
parser.add_argument('--num_val', default=0, help='number of image split out as validation set.')
parser.add_argument('--niter', type=int, default=50, help='number of epochs to train for')
parser.add_argument('--start_epoch', type=int, default=0, help='start of epochs to train for')
parser.add_argument('--negative_sample', type=int, default=20, help='folder to output images and model checkpoints')
parser.add_argument('--neg_batch_sample', type=int, default=30, help='folder to output images and model checkpoints')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=6)
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
parser.add_argument('--save_iter', type=int, default=1, help='save a checkpoint every N epochs')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--lr', type=float, default=0.0004, help='learning rate for, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.8, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--verbose' , action='store_true', help='show the sampled caption')
parser.add_argument('--conv_feat_size', type=int, default=512, help='input batch size')
parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--ninp', type=int, default=300, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=512, help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1, help='number of layers')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout probability')
parser.add_argument('--clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--margin', type=float, default=2, help='number of epochs to train for')
parser.add_argument('--log_interval', type=int, default=50, help='how many iterations show the log info')
opt = parser.parse_args()
print(opt)
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.model_path != '':
print("=> loading checkpoint '{}'".format(opt.model_path))
checkpoint = torch.load(opt.model_path)
model_path = opt.model_path
opt = checkpoint['opt']
opt.start_epoch = checkpoint['epoch']
opt.model_path = model_path
opt.batchSize = 128
opt.niter = 100
else:
t = datetime.datetime.now()
cur_time = '%s-%s-%s' %(t.day, t.month, t.hour)
save_path = os.path.join(opt.outf, opt.encoder + '.' + cur_time)
try:
os.makedirs(save_path)
except OSError:
pass
####################################################################################
# Data Loader
####################################################################################
dataset = dl.train(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
input_json=opt.input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'train')
dataset_val = dl.validate(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
input_json=opt.input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'test')
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=5,
shuffle=False, num_workers=int(opt.workers))
####################################################################################
# Build the Model
####################################################################################
vocab_size = dataset.vocab_size
ques_length = dataset.ques_length
ans_length = dataset.ans_length + 1
his_length = dataset.ques_length + dataset.ans_length
itow = dataset.itow
img_feat_size = opt.conv_feat_size
netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW = model._netW(vocab_size, opt.ninp, opt.dropout)
netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
critG = model.LMCriterion()
sampler = model.gumbel_sampler()
if opt.cuda:
netW.cuda()
netE.cuda()
netG.cuda()
critG.cuda()
sampler.cuda()
if opt.model_path != '':
netW.load_state_dict(checkpoint['netW'])
netE.load_state_dict(checkpoint['netE'])
netG.load_state_dict(checkpoint['netG'])
# training function
def train(epoch):
netW.train()
netE.train()
netG.train()
lr = adjust_learning_rate(optimizer, epoch, opt.lr)
data_iter = iter(dataloader)
ques_hidden = netE.init_hidden(opt.batchSize)
hist_hidden = netE.init_hidden(opt.batchSize)
average_loss = 0
count = 0
i = 0
total_loss = 0
while i < len(dataloader):
data = data_iter.next()
image, history, question, answer, answerT, answerLen, answerIdx, \
questionL, negAnswer, negAnswerLen, negAnswerIdx = data
batch_size = question.size(0)
image = image.view(-1, img_feat_size)
img_input.data.resize_(image.size()).copy_(image)
for rnd in range(10):
ques = question[:,rnd,:].t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
ans, tans = answer[:,rnd,:].t(), answerT[:,rnd,:].t()
his_input.data.resize_(his.size()).copy_(his)
ques_input.data.resize_(ques.size()).copy_(ques)
ans_input.data.resize_(ans.size()).copy_(ans)
ans_target.data.resize_(tans.size()).copy_(tans)
ques_emb = netW(ques_input, format = 'index')
his_emb = netW(his_input, format = 'index')
ques_hidden = repackage_hidden(ques_hidden, batch_size)
hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))
encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input, \
ques_hidden, hist_hidden, rnd+1)
_, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)
ans_emb = netW(ans_input)
logprob, ques_hidden = netG(ans_emb, ques_hidden)
loss = critG(logprob, ans_target.view(-1, 1))
loss = loss / torch.sum(ans_target.data.gt(0))
average_loss += loss.data[0]
total_loss += loss.data[0]
# do backward.
netW.zero_grad()
netE.zero_grad()
netG.zero_grad()
loss.backward()
optimizer.step()
count += 1
i += 1
if i % opt.log_interval == 0:
average_loss /= count
print("step {} / {} (epoch {}), g_loss {:.3f}, lr = {:.6f}"\
.format(i, len(dataloader), epoch, average_loss, lr))
average_loss = 0
count = 0
return total_loss / (10 * i), lr
def val():
netE.eval()
netW.eval()
netG.eval()
data_iter_val = iter(dataloader_val)
ques_hidden = netE.init_hidden(opt.batchSize)
hist_hidden = netE.init_hidden(opt.batchSize)
i = 0
average_loss = 0
rank_all_tmp = []
while i < len(dataloader_val):
data = data_iter_val.next()
image, history, question, answer, answerT, questionL, opt_answer, \
opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data
batch_size = question.size(0)
image = image.view(-1, img_feat_size)
img_input.data.resize_(image.size()).copy_(image)
for rnd in range(10):
# get the corresponding round QA and history.
ques, tans = question[:,rnd,:].t(), opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
ans = opt_answer[:,rnd,:,:].clone().view(-1, ans_length).t()
gt_id = answer_ids[:,rnd]
his_input.data.resize_(his.size()).copy_(his)
ques_input.data.resize_(ques.size()).copy_(ques)
ans_input.data.resize_(ans.size()).copy_(ans)
ans_target.data.resize_(tans.size()).copy_(tans)
gt_index.data.resize_(gt_id.size()).copy_(gt_id)
ques_emb = netW(ques_input, format = 'index')
his_emb = netW(his_input, format = 'index')
ques_hidden = repackage_hidden(ques_hidden, batch_size)
hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))
encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input, \
ques_hidden, hist_hidden, rnd+1)
_, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)
hidden_replicated = []
for hid in ques_hidden:
hidden_replicated.append(hid.view(opt.nlayers, batch_size, 1, \
opt.nhid).expand(opt.nlayers, batch_size, 100, opt.nhid).clone().view(opt.nlayers, -1, opt.nhid))
hidden_replicated = tuple(hidden_replicated)
ans_emb = netW(ans_input, format = 'index')
output, _ = netG(ans_emb, hidden_replicated)
logprob = - output
logprob_select = torch.gather(logprob, 1, ans_target.view(-1,1))
mask = ans_target.data.eq(0) # generate the mask
if isinstance(logprob, Variable):
mask = Variable(mask, volatile=logprob.volatile)
logprob_select.masked_fill_(mask.view_as(logprob_select), 0)
prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1,100)
for b in range(batch_size):
gt_index.data[b] = gt_index.data[b] + b*100
gt_score = prob.view(-1).index_select(0, gt_index)
sort_score, sort_idx = torch.sort(prob, 1)
count = sort_score.lt(gt_score.view(-1,1).expand_as(sort_score))
rank = count.sum(1) + 1
rank_all_tmp += list(rank.view(-1).data.cpu().numpy())
i += 1
return rank_all_tmp, average_loss
####################################################################################
# Main
####################################################################################
img_input = torch.FloatTensor(opt.batchSize, 49, 512)
ques_input = torch.LongTensor(ques_length, opt.batchSize)
his_input = torch.LongTensor(his_length, opt.batchSize)
ans_input = torch.LongTensor(ans_length, opt.batchSize)
ans_target = torch.LongTensor(ans_length, opt.batchSize)
ans_sample = torch.LongTensor(1, opt.batchSize)
noise_input = torch.FloatTensor(opt.batchSize)
gt_index = torch.LongTensor(opt.batchSize)
if opt.cuda:
img_input, his_input = img_input.cuda(), his_input.cuda()
ques_input, ans_input = ques_input.cuda(), ans_input.cuda()
ans_target, ans_sample = ans_target.cuda(), ans_sample.cuda()
noise_input = noise_input.cuda()
gt_index = gt_index.cuda()
ques_input = Variable(ques_input)
ans_input = Variable(ans_input)
ans_target = Variable(ans_target)
ans_sample = Variable(ans_sample)
noise_input = Variable(noise_input)
img_input = Variable(img_input)
his_input = Variable(his_input)
gt_index = Variable(gt_index)
optimizer = optim.Adam([{'params': netW.parameters()},
{'params': netG.parameters()},
{'params': netE.parameters()}], lr=opt.lr, betas=(opt.beta1, 0.999))
history = []
for epoch in range(opt.start_epoch+1, opt.niter):
t = time.time()
train_loss, lr = train(epoch)
print ('Epoch: %d learningRate %4f train loss %4f Time: %3f' % (epoch, lr, train_loss, time.time()-t))
print('Evaluating ... ')
rank_all, val_loss = val()
R1 = np.sum(np.array(rank_all)==1) / float(len(rank_all))
R5 = np.sum(np.array(rank_all)<=5) / float(len(rank_all))
R10 = np.sum(np.array(rank_all)<=10) / float(len(rank_all))
ave = np.sum(np.array(rank_all)) / float(len(rank_all))
mrr = np.sum(1/(np.array(rank_all, dtype='float'))) / float(len(rank_all))
print ('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))
train_his = {'loss': train_loss}
val_his = {'R1': R1, 'R5':R5, 'R10': R10, 'Mean':ave, 'mrr':mrr}
history.append({'epoch':epoch, 'train': train_his, 'val': val_his})
# saving the model.
if epoch % opt.save_iter == 0:
torch.save({'epoch': epoch,
'opt': opt,
'netW': netW.state_dict(),
'netG': netG.state_dict(),
'netE': netE.state_dict()},
'%s/epoch_%d.pth' % (save_path, epoch))
json.dump(history, open('%s/log.json' %(save_path), 'w'))
| 39.590786 | 118 | 0.630912 |
9abde9590954dcb678193a7a66faed82267b7746
| 192 |
py
|
Python
|
top/clearlight/base/runoob/error/error_01.py
|
ClearlightY/Python_learn
|
93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232
|
[
"Apache-2.0"
] | 1 |
2020-01-16T09:23:43.000Z
|
2020-01-16T09:23:43.000Z
|
top/clearlight/base/runoob/error/error_01.py
|
ClearlightY/Python_learn
|
93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232
|
[
"Apache-2.0"
] | null | null | null |
top/clearlight/base/runoob/error/error_01.py
|
ClearlightY/Python_learn
|
93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232
|
[
"Apache-2.0"
] | null | null | null |
def mye(level):
if level < 1:
raise Exception("Invalid level!")
    # Once the exception is raised, the code after it will not run
try:
    mye(0) # trigger the exception
except Exception as err:
print(1, err)
else:
print(2)
| 14.769231 | 41 | 0.578125 |
772f6e8f3bcedc9cbec66eab050585305eca1dc8
| 3,318 |
py
|
Python
|
thread_float_bbs.py
|
ikeikeikeike/scrapy-2ch-summary-spider
|
7142693f25025a09390377649a727cfd33d15af3
|
[
"MIT"
] | 2 |
2015-01-12T08:23:35.000Z
|
2017-07-28T15:02:26.000Z
|
thread_float_bbs.py
|
ikeikeikeike/scrapy-2ch-summary-spider
|
7142693f25025a09390377649a727cfd33d15af3
|
[
"MIT"
] | null | null | null |
thread_float_bbs.py
|
ikeikeikeike/scrapy-2ch-summary-spider
|
7142693f25025a09390377649a727cfd33d15af3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import itertools
from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.http.request import Request
import feedparser
from scrapy_mongodb import MongoDBPipeline
collection = None
def _request_ignores(url, settings=None):
""" すでに登録済みはリクエストしない
"""
global collection
if not collection and settings:
collection = MongoDBPipeline(settings).collection
row = collection.find_one({'url': url})
return row and len(row.get('contents', [])) > 0
class ThreadFloatBbsSpider(BaseSpider):
""" For 2ch summary site.
"""
def __init__(self, *args, **kwargs):
super(BaseSpider, self).__init__(*args, **kwargs)
self.feeds = None
def parse(self, response):
""" main
"""
return self._parse_response(response, self._rdf_to_links)
def _rdf_to_links(self, response):
""" rdf fileからlinkを抽出する
"""
self.feeds = feedparser.parse(response.url)
for feed in self.feeds['entries']:
yield feed['link']
def _parse_response(self, response, rdf_to_links):
""" 処理を単体にする
"""
links = rdf_to_links(response)
for link in links:
if not _request_ignores(link, self.settings):
yield self._move_to_spider_page(response, link)
def _move_to_spider_page(self, response, link):
""" move to spider page(scrape page)
"""
return Request(link, callback=self.spider_page, method="GET")
def request_title(self, url, item):
""" Request url with item.
"""
if url:
request = Request(url, callback=self._parse_title,
method="GET", dont_filter=True)
request.meta['item'] = item
yield request
else:
yield item
def _parse_title(self, response):
""" Scraping title from url.
"""
sel = Selector(response)
item = response.request.meta['item']
item['source_title'] = self.get_text(sel.xpath('//h1'))
yield item
def get_text(self, selector):
""" textが存在すれば値を返す
"""
text = selector.xpath('text()').extract()
if len(text) < 1:
return
elif not text[0]:
return
else:
return text[0].strip()
def get_feed(self, url):
""" feedを返す
"""
predicate = lambda f: f['link'] == url
return itertools.ifilter(predicate, self.feeds['entries']).next()
class SequenceAppend(object):
""" 数字の場合indexを進める
"""
def __init__(self, template):
self.template = template
self.items = []
def append(self, item):
if not self.items:
base = self.template.copy()
else:
base = self.items[-1].copy()
self._sequence_loop(base, base)
self.items.append(dict(base, **item))
def result(self):
return self.items
def _sequence_loop(self, base, item):
for key, value in item.iteritems():
if value is int:
value = 0
elif isinstance(value, int):
value += 1
elif isinstance(value, long):
value += 1L
base.update({key: value})
| 25.921875 | 73 | 0.57173 |
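A hedged usage sketch for the SequenceAppend helper above (Python 2, like the rest of the file): fields whose template value is the int type start at 0, integer fields are auto-incremented on every append, and everything else is carried over from the previous item. The field names below are illustrative only.

# Python 2 sketch, since the class relies on dict.iteritems and long literals.
seq = SequenceAppend({'id': int, 'res': int, 'body': ''})
seq.append({'body': 'first post'})
seq.append({'body': 'second post'})
print(seq.result())
# roughly: [{'id': 0, 'res': 0, 'body': 'first post'},
#           {'id': 1, 'res': 1, 'body': 'second post'}]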
62724a923dd489807922b5dc845a52a4c8ed8c56
| 394 |
py
|
Python
|
sakf/db/nosql/nosql.py
|
spdir/sakf
|
9a07c5f90765201a42d524dc6d4554f4ccd3c750
|
[
"Apache-2.0"
] | null | null | null |
sakf/db/nosql/nosql.py
|
spdir/sakf
|
9a07c5f90765201a42d524dc6d4554f4ccd3c750
|
[
"Apache-2.0"
] | null | null | null |
sakf/db/nosql/nosql.py
|
spdir/sakf
|
9a07c5f90765201a42d524dc6d4554f4ccd3c750
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import jdb2
from sakf.conf import globalConfig
def nosqlDB():
"""
  Create a NoSQL database object.
:return:
"""
_nosql_conf = globalConfig.config.get('nodb')
_nosqlFile = _nosql_conf.get('file')
_dump_time = _nosql_conf.get('dump_time')
_dump = _nosql_conf.get('dump', False)
noSql = jdb2.NoSql(dump=_dump, nosqlFile=_nosqlFile, dumpTime=_dump_time)
return noSql
| 23.176471 | 75 | 0.700508 |
6593b826e62099da56eb2eadb02bfa96e0211a8d
| 787 |
py
|
Python
|
Problems/Depth-First Search/easy/CousinsBT/cousins_in_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Depth-First Search/easy/CousinsBT/cousins_in_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Depth-First Search/easy/CousinsBT/cousins_in_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
compare = []
def check(cur_node: Optional[TreeNode], depth: int, prev_node: int):
if not cur_node:
return
if cur_node.val == x:
compare.append((depth, prev_node))
if cur_node.val == y:
compare.append((depth, prev_node))
check(cur_node.left, depth + 1, cur_node.val)
check(cur_node.right, depth + 1, cur_node.val)
check(root, 0, -1)
return compare[0][0] == compare[1][0] and compare[0][1] != compare[1][1]
| 26.233333 | 76 | 0.597205 |
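A small usage sketch for the code above; because isCousins is defined on TreeNode itself rather than on a separate Solution class, it is called on the root node. The tree below is chosen purely for illustration: 4 and 5 share a depth but not a parent, while 5 and 6 share a parent.

root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3, TreeNode(5), TreeNode(6)))
print(root.isCousins(root, 4, 5))  # True
print(root.isCousins(root, 5, 6))  # False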
b2384000bdcd943fb13098761f1cc4096d467b87
| 3,740 |
py
|
Python
|
models/syntaxsql/net_utils.py
|
inyukwo1/qgm_decoder
|
70e60afec140ec3e2ee04f980a384e1cf28d761c
|
[
"MIT"
] | null | null | null |
models/syntaxsql/net_utils.py
|
inyukwo1/qgm_decoder
|
70e60afec140ec3e2ee04f980a384e1cf28d761c
|
[
"MIT"
] | null | null | null |
models/syntaxsql/net_utils.py
|
inyukwo1/qgm_decoder
|
70e60afec140ec3e2ee04f980a384e1cf28d761c
|
[
"MIT"
] | null | null | null |
import random
from copy import deepcopy


def to_batch_seq(batch):
q_seq = []
history = []
label = []
for item in batch:
q_seq.append(item['question_tokens'])
history.append(item["history"])
label.append(item["label"])
return q_seq, history, label
# CHANGED
def to_batch_tables(batch, table_type):
# col_lens = []
col_seq = []
tname_seqs = []
par_tnum_seqs = []
foreign_keys = []
for item in batch:
ts = item["ts"]
tname_toks = [x.split(" ") for x in ts[0]]
col_type = ts[2]
cols = [x.split(" ") for xid, x in ts[1]]
tab_seq = [xid for xid, x in ts[1]]
cols_add = []
for tid, col, ct in zip(tab_seq, cols, col_type):
col_one = [ct]
if tid == -1:
tabn = ["all"]
else:
if table_type == "no":
tabn = []
elif table_type == "struct":
tabn = []
else:
tabn = tname_toks[tid]
for t in tabn:
if t not in col:
col_one.append(t)
col_one.extend(col)
cols_add.append(col_one)
col_seq.append(cols_add)
tname_seqs.append(tname_toks)
par_tnum_seqs.append(tab_seq)
foreign_keys.append(ts[3])
return col_seq, tname_seqs, par_tnum_seqs, foreign_keys
def to_batch_from_candidates(par_tab_nums, batch):
from_candidates = []
for idx, item in enumerate(batch):
table_candidate = item["from"]
col_candidates = [0]
for col, par in enumerate(par_tab_nums[idx]):
if str(par) in table_candidate:
col_candidates.append(col)
from_candidates.append(col_candidates)
return from_candidates
def make_compound_table(dev_db_compound_num, table_dict, my_db_id, db_ids):
if dev_db_compound_num == 0:
return table_dict[my_db_id]
selected_db_ids = random.sample(db_ids, dev_db_compound_num)
if my_db_id in selected_db_ids:
selected_db_ids.remove(my_db_id)
compound_table = deepcopy(table_dict[my_db_id])
for dev_db_id in selected_db_ids:
new_table = table_dict[dev_db_id]
if random.randint(0, 10) < 5:
new_table = compound_table
compound_table = deepcopy(table_dict[dev_db_id])
compound_table = append_table(compound_table, new_table)
return compound_table
def append_table(compound_table, new_table):
for table_name in new_table["table_names"]:
if table_name in compound_table["table_names"]:
return compound_table
new_table_offset = len(compound_table["table_names"])
new_column_offset = len(compound_table["column_names"]) - 1
compound_table["table_names"].extend(new_table["table_names"])
compound_table["table_names_original"].extend(new_table["table_names_original"])
for p in new_table["primary_keys"]:
compound_table["primary_keys"].append(p + new_column_offset)
for f, p in new_table["foreign_keys"]:
compound_table["foreign_keys"].append([f + new_column_offset, p + new_column_offset])
compound_table["column_types"].extend(new_table["column_types"])
for t, name in new_table["column_names_original"][1:]:
compound_table["column_names_original"].append([t + new_table_offset, name])
for t, name in new_table["column_names"][1:]:
compound_table["column_names"].append([t + new_table_offset, name])
return compound_table
def index_to_column_name(index, table):
column_name = table["column_names"][index][1]
table_index = table["column_names"][index][0]
table_name = table["table_names"][table_index]
return table_name, column_name, index
| 35.961538 | 93 | 0.631818 |
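A minimal illustration of index_to_column_name from the module above, using a hand-built schema dict; the [table_index, column_name] layout is inferred from how the function indexes its argument, and the table/column names are made up for the example.

table = {
    "table_names": ["singer", "concert"],
    "column_names": [[-1, "*"], [0, "name"], [0, "age"], [1, "venue"]],
}
print(index_to_column_name(2, table))  # ('singer', 'age', 2)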
b2ad2f6ba80c0b860b88095f733e43696a7ffe64
| 718 |
py
|
Python
|
turngen/test_mcts.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
turngen/test_mcts.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
turngen/test_mcts.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
import montecarlo as mc
import state as s
import numpy as np
import alphazero.NeuralNet as nn
def main():
monteCarlo = mc.MCTS(2)
nnet = nn.NeuralNet()
board = np.array([ -1, 1, 1, 1, 1, 1, 1,-1,
0, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 4, 4, 4, 4, 4, 4, 0,
-1, 4, 4, 4, 4, 4, 4,-1]).reshape(8, 8)
player = 0
state = s.State(board, player)
turn = monteCarlo.search(state, nnet)
print(turn)
if __name__ == "__main__":
main()
| 27.615385 | 63 | 0.4039 |
a79de92abde8a036971edb9e164fe877f323fa18
| 554 |
py
|
Python
|
rename.py
|
loublock/Adam-Soundbox
|
ba859411a5c289c4ea61735233906d657785071d
|
[
"MIT"
] | 1 |
2020-08-24T19:27:48.000Z
|
2020-08-24T19:27:48.000Z
|
rename.py
|
loublock/Adam-Soundbox
|
ba859411a5c289c4ea61735233906d657785071d
|
[
"MIT"
] | null | null | null |
rename.py
|
loublock/Adam-Soundbox
|
ba859411a5c289c4ea61735233906d657785071d
|
[
"MIT"
] | null | null | null |
import os
path = 'SD_CARD/MP3/'
count = 1
for filename in os.listdir('SD_CARD/MP3/'):
if count > 9 and count <= 99:
os.rename(r'SD_CARD/MP3/' + filename,r'SD_CARD/MP3/00' + str(count) + '.mp3')
elif count > 99 and count <= 999:
os.rename(r'SD_CARD/MP3/' + filename,r'SD_CARD/MP3/0' + str(count) + '.mp3')
elif count > 999:
os.rename(r'SD_CARD/MP3/' + filename,r'SD_CARD/MP3/' + str(count) + '.mp3')
else:
os.rename(r'SD_CARD/MP3/' + filename,r'SD_CARD/MP3/000' + str(count) + '.mp3')
count += 1
| 32.588235 | 86 | 0.584838 |
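The if/elif ladder above hand-rolls zero padding to four digits; a sketch of the same renaming loop using str.zfill (same SD_CARD/MP3 path assumed) would be:

import os

path = 'SD_CARD/MP3/'
for count, filename in enumerate(os.listdir(path), start=1):
    # zfill(4) pads 1 -> '0001', 10 -> '0010', 100 -> '0100', like the branches above.
    os.rename(os.path.join(path, filename), os.path.join(path, str(count).zfill(4) + '.mp3'))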
ac393dd807b5c59556b193ea8bb84dc1c5bb967f
| 2,716 |
py
|
Python
|
Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/06_voting_example/06_voting_example.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/06_voting_example/06_voting_example.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/06_voting_example/06_voting_example.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: 06_voting_example.py
# Project: Kapitel_07_Sequenzen_Mengen_und_Generatoren
# Created Date: Sunday 03.03.2019, 20:34
# Author: Apop85
# -----
# Last Modified: Monday 04.03.2019, 12:20
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
# -----
# Description: Example Chapter 7. Page 217. Create a list of parties and use the List to vote.
###
import os
os.chdir(os.path.dirname(__file__))
def create_chart_file(item_list):
file_writer=open('.\\charts.txt', 'w')
for i in range(len(item_list)):
file_writer.write(str(i)+',0,'+item_list[i]+'\n')
file_writer.close()
def get_candidates():
file_reader=open('.\\charts.txt', 'r')
lines=file_reader.readlines()
file_reader.close()
overall_list=[]
for line in lines:
overall_list.append([])
for item in line.split(','):
if item.isdecimal():
overall_list[-1]+=[int(item)]
else:
overall_list[-1]+=[item.strip('\n')]
return overall_list
def do_vote(chart_list):
while True:
print('Index'.center(15)+'|'+'Votes'.center(15)+'|'+'Partie'.center(15))
print(''.center(47, '-'))
for item in chart_list:
for value in item:
print(str(value).center(15), end='')
if item.index(value) != len(item)-1:
print('|', end='')
print()
vote=input('Vote for your partie: ')
if vote.isdecimal() and 0 <= int(vote) < len(chart_list):
chart_list[int(vote)][1]+=1
elif vote == '':
return print_winner(chart_list)
else:
print('Invalid choice.')
def print_winner(chart_list):
most_votes=[0,0]
    # Find the party with the highest vote count
for i in range(len(chart_list)):
if chart_list[i][1] > most_votes[1]:
most_votes=[i,chart_list[i][1]]
winner=(chart_list[most_votes[0]][-1],most_votes[1])
# Check if there are other parties with the same amount of votes
for i in range(len(chart_list)):
if chart_list[i][1] == most_votes[1] and i != most_votes[0]:
if type(winner[0]) == str:
winner=[winner[0],chart_list[i][-1]],winner[1]
else:
                winner[0].append(chart_list[i][-1])
if winner[1] == 0:
winner=('No winner',0)
return winner
parties=['CVP','SVP','SP','FDP','Gruene','BDP','EVP']
create_chart_file(parties)
chart_list=get_candidates()
winner=do_vote(chart_list)
print('Winner is: '+str(winner[0])+' with '+str(winner[1])+' votes!')
| 33.530864 | 94 | 0.594256 |
3bfa4b294340d6fee73a1d146009ab06204ed881
| 8,331 |
py
|
Python
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/data/yolo_dataset.py
|
huaweicloud/ModelArts-Lab
|
75d06fb70d81469cc23cd422200877ce443866be
|
[
"Apache-2.0"
] | 1,045 |
2019-05-09T02:50:43.000Z
|
2022-03-31T06:22:11.000Z
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/data/yolo_dataset.py
|
huaweicloud/ModelArts-Lab
|
75d06fb70d81469cc23cd422200877ce443866be
|
[
"Apache-2.0"
] | 1,468 |
2019-05-16T00:48:18.000Z
|
2022-03-08T04:12:44.000Z
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/data/yolo_dataset.py
|
huaweicloud/ModelArts-Lab
|
75d06fb70d81469cc23cd422200877ce443866be
|
[
"Apache-2.0"
] | 1,077 |
2019-05-09T02:50:53.000Z
|
2022-03-27T11:05:32.000Z
|
# Copyright 2018 Deep Learning Service of Huawei Cloud. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from moxing.framework import file
from data.yolo_load.detection_dataset import Detection_dataset
from utils.read_image_to_list import get_image_list
from mxnet import gluon, io, nd
def _pad_arrs_to_max_length(arrs, max_gt_box_number, pad_axis=0, pad_val=-1):
"""Inner Implementation of the Pad batchify"""
if not isinstance(arrs[0], (nd.NDArray, np.ndarray)):
arrs = [np.asarray(ele) for ele in arrs]
max_size = max_gt_box_number
ret_shape = list(arrs[0].shape)
ret_shape[pad_axis] = max_size
ret_shape = (len(arrs), ) + tuple(ret_shape)
ret = nd.full(shape=ret_shape, val=pad_val, dtype=arrs[0].dtype)
for i, arr in enumerate(arrs):
if arr.shape[pad_axis] == max_size:
ret[i] = arr
else:
slices = [slice(None) for _ in range(arr.ndim)]
slices[pad_axis] = slice(0, arr.shape[pad_axis])
slices = [slice(i, i + 1)] + slices
ret[tuple(slices)] = arr
return ret
class _train_batchify_fn(object):
def __init__(self, max_gt_box_number):
self._max_gt_box_number = max_gt_box_number
def __call__(self, data):
"""Collate train data into batch."""
img_data = nd.stack(*[item[0] for item in data])
center_targets = nd.stack(*[item[1] for item in data])
scale_targets = nd.stack(*[item[2] for item in data])
weights = nd.stack(*[item[3] for item in data])
objectness = nd.stack(*[item[4] for item in data])
class_targets = nd.stack(*[item[5] for item in data])
gt_bboxes = _pad_arrs_to_max_length([item[6] for item in data],
self._max_gt_box_number,
pad_axis=0, pad_val=-1)
batch_data = io.DataBatch(data=[img_data],
label=[gt_bboxes, objectness, center_targets,
scale_targets, weights, class_targets])
return batch_data
class _val_batchify_fn(object):
def __init__(self, max_gt_box_number):
self._max_gt_box_number = max_gt_box_number
def __call__(self, data):
"""Collate train data into batch."""
img_data = nd.stack(*[item[0] for item in data])
gt_bboxes = _pad_arrs_to_max_length([item[1] for item in data],
self._max_gt_box_number,
pad_axis=0, pad_val=-1)
batch_data = io.DataBatch(data=[img_data],
label=[gt_bboxes])
return batch_data
def _get_provide_data(next_batch):
next_data = next_batch.data
return [io.DataDesc(name='data', shape=next_data[0].shape)]
def _get_provide_label(next_batch, gt_boxes_shape=(32, 56, 4), is_train=True):
next_label = next_batch.label
if is_train:
provide_label = [io.DataDesc(name='gt_boxes',
shape=next_label[0].shape),
io.DataDesc(name='obj_t', shape=next_label[1].shape),
io.DataDesc(name='centers_t',
shape=next_label[2].shape),
io.DataDesc(name='scales_t',
shape=next_label[3].shape),
io.DataDesc(name='weights_t',
shape=next_label[4].shape),
io.DataDesc(name='clas_t', shape=next_label[5].shape)]
else:
provide_label = None
return provide_label
def _reset():
pass
def get_data_iter(data_path, train_file=None, val_file=None, split_spec=1,
hyper_train={}, hyper_val={}, **kwargs):
train_set = None
val_set = None
train_list = None
val_list = None
if train_file is not None:
assert file.exists(train_file), 'not found train file'
train_path = file.read(train_file).split("\n")[0:-1]
train_list = [path.replace('\r', '').split(' ') for path in train_path]
train_list = [[os.path.join(data_path, path[0]),
os.path.join(data_path, path[1])] for path in train_list]
if val_file is not None:
assert file.exists(val_file), 'not found val file'
val_path = file.read(val_file).split("\n")[0:-1]
val_list = [path.replace('\r', '').split(' ') for path in val_path]
val_list = [[os.path.join(data_path, path[0]),
os.path.join(data_path, path[1])] for path in val_list]
if train_file is None and val_file is None:
train_list, val_list, _ = get_image_list(data_path, split_spec)
if 'anchors' not in kwargs:
kwargs['anchors'] = [[116, 90, 156, 198, 373, 326],
[30, 61, 62, 45, 59, 119],
[10, 13, 16, 30, 33, 23]]
if 'offsets' not in kwargs:
kwargs['offsets'] = [(13, 13), (26, 26), (52, 52)]
if train_list is not None and len(train_list) > 0:
dataset = Detection_dataset(img_list=train_list,
index_file=hyper_train.get(
'index_file', None),
width=hyper_train.get('width', 416),
height=hyper_train.get('height', 416),
is_train=True,
** kwargs)
max_gt_box_number = max([len(item) for item in dataset.label_cache])
batch_size = hyper_train.get('batch_size', 32)
train_set = gluon.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=hyper_train.get('shuffle', True),
batchify_fn=_train_batchify_fn(max_gt_box_number),
last_batch='rollover',
num_workers=hyper_train.get('preprocess_threads', 4))
next_data_batch = next(iter(train_set))
setattr(train_set, 'reset', _reset)
setattr(train_set, 'provide_data', _get_provide_data(next_data_batch))
setattr(train_set, 'provide_label', _get_provide_label(
next_data_batch, (batch_size, max_gt_box_number, 4), is_train=True))
if val_list is not None and len(val_list) > 0:
assert 'index_file' in hyper_val and file.exists(
hyper_val['index_file']), 'not found label name file'
dataset = Detection_dataset(img_list=val_list,
index_file=hyper_val.get(
'index_file'),
width=hyper_val.get('width', 416),
height=hyper_val.get('height', 416),
is_train=False,
** kwargs)
max_gt_box_number = max([len(item) for item in dataset.label_cache])
batch_size = hyper_val.get('batch_size', 32)
val_set = gluon.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=hyper_val.get('shuffle', True),
batchify_fn=_val_batchify_fn(max_gt_box_number),
last_batch='keep',
num_workers=hyper_val.get('preprocess_threads', 4))
next_data_batch = next(iter(val_set))
setattr(val_set, 'reset', _reset)
setattr(val_set, 'provide_data', _get_provide_data(next_data_batch))
setattr(val_set, 'provide_label', _get_provide_label(
next_data_batch, is_train=False))
return train_set, val_set
| 45.774725 | 80 | 0.580483 |
ce2254fb627e25c68c6cea60e4ad2d54c2a44a57
| 145 |
py
|
Python
|
hardware/chip/rtl872xd/hal/hal_test/uart/ucube.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 4,538 |
2017-10-20T05:19:03.000Z
|
2022-03-30T02:29:30.000Z
|
hardware/chip/rtl872xd/hal/hal_test/uart/ucube.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 1,088 |
2017-10-21T07:57:22.000Z
|
2022-03-31T08:15:49.000Z
|
hardware/chip/rtl872xd/hal/hal_test/uart/ucube.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | 1,860 |
2017-10-20T05:22:35.000Z
|
2022-03-27T10:54:14.000Z
|
src = Split('''
uart_test.c
''')
component = aos_component('uart_test', src)
component.add_cflags('-Wall')
component.add_cflags('-Werror')
| 16.111111 | 43 | 0.689655 |
024e111fd4f6f72908ffde93407e9cbee5a5191b
| 124 |
py
|
Python
|
tests/conftest.py
|
Kludex/fastapi-template
|
47256eb8f8c7439a4d669172d94ce84c62cdb25a
|
[
"MIT"
] | 14 |
2021-03-27T22:18:56.000Z
|
2022-03-21T19:04:48.000Z
|
tests/conftest.py
|
Kludex/fastapi-template
|
47256eb8f8c7439a4d669172d94ce84c62cdb25a
|
[
"MIT"
] | 33 |
2021-03-28T21:06:22.000Z
|
2022-03-07T14:18:26.000Z
|
tests/conftest.py
|
Kludex/fastapi-template
|
47256eb8f8c7439a4d669172d94ce84c62cdb25a
|
[
"MIT"
] | null | null | null |
import pathlib
import pytest
@pytest.fixture()
def root_dir() -> pathlib.PosixPath:
return pathlib.Path().absolute()
| 13.777778 | 36 | 0.725806 |
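A hypothetical test sketch that consumes the root_dir fixture defined above; the test module name and assertion are illustrative assumptions, not part of the template.

# tests/test_paths.py (hypothetical)
import pathlib


def test_root_dir_is_absolute(root_dir: pathlib.Path) -> None:
    # The fixture resolves the current working directory to an absolute path.
    assert root_dir.is_absolute()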
ce48e39d702f37a92e96b02b7bc8ff6f0a2fe3c4
| 7,560 |
py
|
Python
|
Sephrasto.py
|
JoergRue/Sephrasto
|
a4fa3c2c1b095b674a9e71416ca448e3be3de225
|
[
"MIT"
] | null | null | null |
Sephrasto.py
|
JoergRue/Sephrasto
|
a4fa3c2c1b095b674a9e71416ca448e3be3de225
|
[
"MIT"
] | null | null | null |
Sephrasto.py
|
JoergRue/Sephrasto
|
a4fa3c2c1b095b674a9e71416ca448e3be3de225
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 21:30:34 2017
@author: Aeolitus
"""
from PyQt5 import QtWidgets, QtCore, QtGui
import sys
import logging
import os.path
import MainWindow
import CharakterEditor
import DatenbankEdit
import CharakterMain
import DatenbankMain
from Wolke import Wolke
import yaml
from EinstellungenWrapper import EinstellungenWrapper
import Version
loglevels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.DEBUG}
logging.basicConfig(filename="sephrasto.log", \
level=loglevels[Wolke.Settings['Logging']], \
format="%(asctime)s | %(levelname)s | %(filename)s::%(funcName)s(%(lineno)d) | %(message)s")
def sephrasto_excepthook(exc_type, exc_value, tb):
traceback = [' Traceback (most recent call last):']
while tb:
filename = tb.tb_frame.f_code.co_filename
name = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno
traceback.append(' File "%.500s", line %d, in %.500s' %(filename, lineno, name))
tb = tb.tb_next
# Exception type and value
exception = ' %s: %s' %(exc_type.__name__, exc_value)
logging.critical(exception + "\n".join(traceback))
    #Try to show a message box, hopefully it's not a crash in Qt
messagebox = QtWidgets.QMessageBox()
messagebox.setWindowTitle("Fehler!")
messagebox.setText("Unerwarteter Fehler:" + exception + ". Bei Fragen zum diesem Fehler bitte sephrasto.log mitsenden.")
messagebox.setIcon(QtWidgets.QMessageBox.Critical)
messagebox.setStandardButtons(QtWidgets.QMessageBox.Ok)
messagebox.exec_()
class MainWindowWrapper(object):
'''
Main Class responsible for running the entire application.
Just shows three buttons and handles the execution of the individual subparts.
'''
def __init__(self):
sys.excepthook = sephrasto_excepthook
'''
Initializes the GUI and connects the buttons.
'''
self._version_ = "v" + str(Version._sephrasto_version_major) + "." + str(Version._sephrasto_version_minor) + "." + str(Version._sephrasto_version_build)
logging.critical("Starte Sephrasto " + self._version_) #critical so it's always printed, independent of the debug level setting
super().__init__()
#Make sure the application scales properly, i.e. in Win10 users can change the UI scale in the display settings
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
self.app = QtCore.QCoreApplication.instance()
if self.app is None:
self.app = QtWidgets.QApplication(sys.argv)
#self.app.setStyleSheet("*[readOnly=\"true\"] { background-color: #F5F5F5 } QAbstractScrollArea #scrollAreaWidgetContents { background-color: #FFFFFF }")
self.app.setStyleSheet("""
*[readOnly=\"true\"]
{
background-color: #FFFFFF;
border: none
}
QAbstractScrollArea #scrollAreaWidgetContents
{
background-color: #FFFFFF
}
""")
self.Form = QtWidgets.QWidget()
self.ui = MainWindow.Ui_Form()
self.ui.setupUi(self.Form)
self.ui.buttonNew.clicked.connect(self.createNew)
self.ui.buttonEdit.clicked.connect(self.editExisting)
self.ui.buttonRules.clicked.connect(self.editRuleset)
self.ui.buttonSettings.clicked.connect(self.editSettings)
self.ui.labelVersion.setText(self._version_ + " - by Aeolitus ")
self.app.setWindowIcon(QtGui.QIcon('icon_large.png'))
# Get the Settings loaded
EinstellungenWrapper.load()
logging.getLogger().setLevel(loglevels[Wolke.Settings['Logging']])
self.Form.show()
sys.exit(self.app.exec_())
def createNew(self):
'''
Creates a new CharakterEditor which is empty and shows it.
'''
self.ed = CharakterEditor.Editor(self.savePathUpdated)
if self.ed.noDatabase:
raise Exception("Konnte datenbank.xml nicht finden")
self.ed.formMain = QtWidgets.QWidget()
self.ed.ui = CharakterMain.Ui_formMain()
self.ed.ui.setupUi(self.ed.formMain)
self.ed.ui.tabs.removeTab(0)
self.ed.ui.tabs.removeTab(0)
self.ed.setupMainForm()
self.savePathUpdated()
self.ed.formMain.show()
def editExisting(self):
'''
Creates a CharakterEditor for an existing character and shows it.
'''
if os.path.isdir(Wolke.Settings['Pfad-Chars']):
startDir = Wolke.Settings['Pfad-Chars']
else:
startDir = ""
spath, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Charakter laden...",startDir,"XML-Datei (*.xml)")
if spath == "":
return
if not spath.endswith(".xml"):
spath = spath + ".xml"
try:
self.ed = CharakterEditor.Editor(self.savePathUpdated, spath)
except Exception as e:
logging.error("Sephrasto Fehlercode " + str(Wolke.Fehlercode) + ". Exception: " + str(e))
infoBox = QtWidgets.QMessageBox()
infoBox.setIcon(QtWidgets.QMessageBox.Information)
if Wolke.Fehlercode <= -40 and Wolke.Fehlercode > -80:
infoBox.setText("Charakterdatei öffnen fehlgeschlagen")
infoBox.setInformativeText("Die XML-Datei konnte nicht gelesen werden.\n\
Fehlercode: " + str(Wolke.Fehlercode) + "\n\
Fehlermeldung: " + Wolke.ErrorCode[Wolke.Fehlercode] + "\n")
infoBox.setWindowTitle("Fehlerhafte Datei")
else:
infoBox.setText("Ein unerwarteter Fehler ist aufgetreten!")
infoBox.setInformativeText("Ein Fehler ist aufgetreten. Versuche, Sephrasto neu zu starten?\n\
Fehlercode: " + str(Wolke.Fehlercode) + "\n")
infoBox.setWindowTitle("Unbekannter Fehler")
infoBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
infoBox.setEscapeButton(QtWidgets.QMessageBox.Close)
infoBox.exec_()
else:
if self.ed.noDatabase:
raise Exception("Konnte datenbank.xml nicht finden")
self.ed.formMain = QtWidgets.QWidget()
self.ed.ui = CharakterMain.Ui_formMain()
self.ed.ui.setupUi(self.ed.formMain)
self.ed.ui.tabs.removeTab(0)
self.ed.ui.tabs.removeTab(0)
self.ed.setupMainForm()
self.savePathUpdated()
self.ed.formMain.show()
def editRuleset(self):
'''
Creates the DatenbankEdit Form and shows the contents of datenbank.xml.
'''
self.D = DatenbankEdit.DatenbankEdit()
self.D.Form = QtWidgets.QWidget()
self.D.ui = DatenbankMain.Ui_Form()
self.D.ui.setupUi(self.D.Form)
self.D.setupGUI()
self.D.Form.show()
def editSettings(self):
EinstellungenWrapper()
def savePathUpdated(self):
file = " - Neuer Charakter"
if self.ed.savepath:
file = " - " + os.path.basename(self.ed.savepath)
rules = ""
if Wolke.DB.datei:
rules = " (" + os.path.basename(Wolke.DB.datei) + ")"
self.ed.formMain.setWindowTitle("Sephrasto" + file + rules)
if __name__ == "__main__":
itm = MainWindowWrapper()
| 40.427807 | 161 | 0.63836 |
0cc57be939cacc9e98dc3136d3ccf6ef1562d646
| 1,912 |
py
|
Python
|
codeit/oop/transports.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/oop/transports.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/oop/transports.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
from vehicle import Vehicle
class Bicycle(Vehicle):
max_speed = 15
def __init__(self, speed):
self._speed = speed
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, new_value):
self._speed = new_value if 0 <= new_value <= Bicycle.max_speed else 0
def start(self):
print('자전거 페달 돌리기 시작합니다.')
self.speed = self.max_speed / 3
def __str__(self):
return f'이 자전거는 현재 {self.speed}km/h로 주행 중입니다.'
class NormalCar(Vehicle):
def __init__(self, speed, max_speed):
self._speed = 0
self.max_speed = max_speed
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, new_value):
self._speed = new_value if 0 <= new_value <= self.max_speed else 0
def start(self):
print('일반 자동차 시동겁니다.')
self.speed = self.max_speed / 2
def __str__(self):
return f'이 일반 자동차는 현재 {self.speed}km/h로 주행 중입니다.'
class SportsCar(Vehicle):
def __init__(self, speed, max_speed):
self._speed = speed
self.max_speed = max_speed
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, new_value):
self._speed = new_value if 0 <= new_value <= self.max_speed else 0
def start(self):
print('스포츠카 시동겁니다.')
self.speed = self.max_speed
def __str__(self):
return f'이 스포츠카는 현재 {self.speed}km/h로 주행 중입니다.'
if __name__ == '__main__':
    # Bicycle instance
    bicycle = Bicycle(0)
    # Normal car instance
    car = NormalCar(0, 100)
    # Sports car instance
    sports_car = SportsCar(0, 200)
    # Start driving all of the instances defined above
    bicycle.start()
    car.start()
    sports_car.start()
    # Print the bicycle's current speed
    print(bicycle)
    # Stop only the bicycle
    bicycle.stop()
    # Print the results
print(bicycle)
print(car)
print(sports_car)
| 20.126316 | 77 | 0.598849 |
b2d2ed6b1a8a1264916646d07cfdf043ed82812c
| 3,914 |
py
|
Python
|
src/onegov/election_day/layouts/default.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/layouts/default.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/layouts/default.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from babel import Locale
from cached_property import cached_property
from datetime import datetime
from onegov.ballot import VoteCollection
from onegov.core.i18n import SiteLocale
from onegov.core.layout import ChameleonLayout
from onegov.core.static import StaticFile
from onegov.election_day import _
from onegov.election_day.collections import ArchivedResultCollection
from onegov.user import Auth
class DefaultLayout(ChameleonLayout):
day_long_format = 'skeleton:MMMMd'
date_long_format = 'long'
datetime_long_format = 'medium'
docs_base_url = 'https://github.com/OneGov/onegov-cloud' \
'/tree/master/docs/api/election_day'
def __init__(self, model, request):
super().__init__(model, request)
self.request.include('common')
self.request.include('chosen')
self.request.include('custom')
if 'headerless' in request.params:
request.browser_session['headerless'] = True
if 'headerful' in request.params:
if request.browser_session.has('headerless'):
del request.browser_session['headerless']
def title(self):
return ''
@cached_property
def principal(self):
return self.request.app.principal
@cached_property
def has_districts(self):
return self.principal.has_districts
@cached_property
def homepage_link(self):
return self.request.link(self.principal)
def get_opendata_link(self, lang):
return f"{self.docs_base_url}/open_data_{lang}.md"
@cached_property
def opendata_link(self):
lang = (self.request.locale or 'en')[:2]
return self.get_opendata_link(lang)
@cached_property
def terms_icon(self):
static_file = StaticFile.from_application(
self.app, 'images/terms_by.svg'
)
return self.request.link(static_file)
@cached_property
def terms_link(self):
lang = (self.request.locale or 'en')[:2]
return "https://opendata.swiss/{}/terms-of-use".format(lang)
@cached_property
def format_description_link(self):
lang = (self.request.locale or 'en')[:2]
return f"{self.docs_base_url}/format__{lang}.md"
@cached_property
def font_awesome_path(self):
static_file = StaticFile.from_application(
self.app, 'font-awesome/css/font-awesome.min.css')
return self.request.link(static_file)
def get_topojson_link(self, id, year):
return self.request.link(
StaticFile('mapdata/{}/{}.json'.format(year, id))
)
@cached_property
def copyright_year(self):
return datetime.utcnow().year
@cached_property
def manage_link(self):
return self.request.link(VoteCollection(self.app.session()))
@cached_property
def login_link(self):
if not self.request.is_logged_in:
return self.request.link(
Auth.from_request(self.request, to=self.manage_link),
name='login'
)
@cached_property
def logout_link(self):
if self.request.is_logged_in:
return self.request.link(
Auth.from_request(self.request), name='logout')
@cached_property
def archive(self):
return ArchivedResultCollection(self.request.session)
@cached_property
def locales(self):
to = self.request.url
def get_name(locale):
return Locale.parse(locale).get_language_name().capitalize()
def get_link(locale):
return self.request.link(SiteLocale(locale, to))
return [
(get_name(locale), get_link(locale))
for locale in sorted(self.app.locales)
]
def format_name(self, item):
if hasattr(item, 'entity_id'):
return item.name if item.entity_id else _("Expats")
return item.name or _("Expats")
| 29.428571 | 72 | 0.655084 |
0bf75ffc14183ea1a2ac1a22331593a7ef86822e
| 1,620 |
py
|
Python
|
PMIa/2014/VINOGRADOVA_J_S/task_7_48.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2014/VINOGRADOVA_J_S/task_7_48.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2014/VINOGRADOVA_J_S/task_7_48.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 6. Variant 48.
# Create a game in which the computer picks the name of one of the twenty
# towers of the Moscow Kremlin, and the player has to guess it.
# Vinogradova J.
# 31.03.2016
import random
name = random.randint(1, 20)
if name == 1 :
name = 'Беклемишевская'
elif name == 2 :
name = 'Константино-Еленинская'
elif name == 3 :
name = 'Набатная'
elif name == 4 :
name = 'Царская'
elif name == 5 :
name = 'Спасская'
elif name == 6 :
name = 'Сенатская'
elif name == 7 :
name = 'Никольская'
elif name == 8 :
name = 'Собакина'
elif name == 9 :
name = 'Граненая'
elif name == 10 :
name = 'Троицкая'
elif name == 11 :
name = 'Кутафья'
elif name == 12 :
name = 'Комендатская'
elif name == 13 :
name = 'Оружейная'
elif name == 14 :
name = 'Боровицкая'
elif name == 15 :
name = 'Водовзводная'
elif name == 16 :
name = 'Благовещенская'
elif name == 17 :
name = 'Тайницкая'
elif name == 18 :
name = 'Первая Безымянная'
elif name == 19 :
name = 'Вторая Безымянная'
else :
name = 'Петровская'
trial = 19
bonus = 11000
while trial > 0 :
answer = input('\nКак Вы думаете, какая башня загадана? ')
if answer == name :
print('\nВы угадали!')
print('Вам начислено', bonus, 'баллов.')
break
else :
print('\nВы не угадали!!!')
if trial > 1 :
print('Попробуйте еще раз.')
else :
print('Правильный ответ: ', name)
trial -= 1
bonus -= 1000
input('\n\nНажмите Enter для выхода.')
| 22.816901 | 75 | 0.558025 |
f0f764b0db8aca5c56a82e63ae276f69198633e6
| 903 |
py
|
Python
|
kernel/blog/migrations/0002_auto_20180605_1353.py
|
sageteam/behpack
|
3b8afb81dc7da70807308af4c8a2d2ab92b1a133
|
[
"MIT"
] | null | null | null |
kernel/blog/migrations/0002_auto_20180605_1353.py
|
sageteam/behpack
|
3b8afb81dc7da70807308af4c8a2d2ab92b1a133
|
[
"MIT"
] | null | null | null |
kernel/blog/migrations/0002_auto_20180605_1353.py
|
sageteam/behpack
|
3b8afb81dc7da70807308af4c8a2d2ab92b1a133
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-05 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='news',
name='sku',
field=models.CharField(default='vxY6mlScUwA', help_text='Unique code for refrence to supervisors', max_length=15),
),
migrations.AlterField(
model_name='newsmovies',
name='sku',
field=models.CharField(default='HXFm4TZBuwI', help_text='Unique code for refrence to supervisors', max_length=15),
),
migrations.AlterField(
model_name='newsphotos',
name='sku',
field=models.CharField(default='xPBjYllJVBs', help_text='Unique code for refrence to supervisors', max_length=15),
),
]
| 31.137931 | 126 | 0.612403 |
0b15922f9b1b565f5c12d8838021c8e30ac2f17a
| 808 |
py
|
Python
|
535-encode-and-decode-tinyurl/535-encode-and-decode-tinyurl.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
535-encode-and-decode-tinyurl/535-encode-and-decode-tinyurl.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
535-encode-and-decode-tinyurl/535-encode-and-decode-tinyurl.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
class Codec:
def __init__(self):
self.codec_dict = dict()
self.codec_reversed = dict()
self.codec_len = 0
def encode(self, longUrl: str) -> str:
"""Encodes a URL to a shortened URL.
"""
if longUrl not in self.codec_dict:
self.codec_dict[longUrl]=self.codec_len
self.codec_reversed[self.codec_len] = longUrl
self.codec_len+=1
return "http://tinyurl.com/{}".format(self.codec_dict[longUrl])
def decode(self, shortUrl: str) -> str:
"""Decodes a shortened URL to its original URL.
"""
val = int(shortUrl.split("/")[-1])
return self.codec_reversed[val]
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(url))
| 32.32 | 71 | 0.600248 |
acbbe6f47814bc2d3c6890dde8a7d1503844ddbb
| 34,894 |
py
|
Python
|
exportmodul.py
|
MaliziaGrimm/Lohnvorerfassung-50a-fuer-DATEV
|
41b99deacc5bfee6562907de109a8ad5af917d01
|
[
"MIT"
] | null | null | null |
exportmodul.py
|
MaliziaGrimm/Lohnvorerfassung-50a-fuer-DATEV
|
41b99deacc5bfee6562907de109a8ad5af917d01
|
[
"MIT"
] | null | null | null |
exportmodul.py
|
MaliziaGrimm/Lohnvorerfassung-50a-fuer-DATEV
|
41b99deacc5bfee6562907de109a8ad5af917d01
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import request, render_template
import os, time, csv
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Text, MetaData, Table, DATE
from sqlalchemy.sql import select, update
import datenbank_obj, funktionen, setting
import pandas as pd
import datetime
def export_steuer(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    # only needed for the month/year selection
return
def export_steuerli(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    # needed for selecting individual records, if required
    # currently all captured records that have not been exported yet are exported,
    # regardless of the month they were entered in
return
def export_steuerliste(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    # the actual Excel export or
    # PDF printing (planned)
var_stmonat=request.form["form_stmonat"]
var_stjahr=request.form["form_stjahr"]
engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
metadata = datenbank_obj.getdbmetadata(engine)
abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
metadata.create_all()
if var_stmonat=="01" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"01\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="02" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"02\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="03" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"03\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="04" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"04\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="05" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"05\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="06" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"06\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="07" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"07\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="08" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"08\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="09" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"09\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="10" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"10\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="11" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"11\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="12" and var_stjahr=="2022":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"12\" AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
elif var_stmonat=="01" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"01\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="02" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"02\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="03" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"03\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="04" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"04\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="05" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"05\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="06" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"06\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="07" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"07\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="08" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"08\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="09" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"09\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="10" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"10\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="11" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"11\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="12" and var_stjahr=="2023":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"12\" AND abrechnungsdaten.abrechnungsjahr==\"2023\" ", engine)
elif var_stmonat=="01" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"01\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="02" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"02\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="03" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"03\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="04" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"04\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="05" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"05\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="06" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"06\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="07" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"07\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="08" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"08\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="09" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"09\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="10" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"10\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="11" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"11\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="12" and var_stjahr=="2024":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"12\" AND abrechnungsdaten.abrechnungsjahr==\"2024\" ", engine)
elif var_stmonat=="01" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"01\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="02" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"02\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="03" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"03\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="04" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"04\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="05" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"05\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="06" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"06\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="07" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"07\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="08" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"08\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="09" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"09\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="10" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"10\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="11" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"11\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
elif var_stmonat=="12" and var_stjahr=="2025":
result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==\"12\" AND abrechnungsdaten.abrechnungsjahr==\"2025\" ", engine)
else:
var_version_titel = setting.Version_Titel
var_version_program = setting.Version_Program
var_text=("Zeitraum nicht verfügbar!")
return render_template('/index.html', v_text=var_text, v_bnr=var_beraternummer, v_mdt=var_mandantennummer, v_heute="Fehler !", v_monat=var_abrmonat, v_jahr=var_abrjahr, v_version_program=var_version_program, v_version_titel=var_version_titel)
### variabler Monat aktuell nicht abfragebar - result = pd.read_sql("SELECT * FROM abrechnungsdaten WHERE abrechnungsdaten.abrechnungsmonat==var_stmonat AND abrechnungsdaten.abrechnungsjahr==\"2022\" ", engine)
result.to_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_Export_Monatsauswertung_16.csv", sep=';', encoding='utf-16', index=False, mode='w')
result.to_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_Export_Monatsauswertung_8.csv", sep=';', encoding='utf-8', index=False, mode='w')
    # create intermediate file for the booking list of the agency commission, employer share
result.to_csv("daten/ZW_Buchungsliste_AGP_AG.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
result.to_csv("daten/ZW_Buchungsliste_AGP_AN.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
    # open source and target file - write agency commission employer values into the booking list
filequelle=open("daten/ZW_Buchungsliste_AGP_AG.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_AGP_AGWerte_Buchungsliste.csv","w", encoding='utf-8')
    # Description of the fields in the source file
    # field 1 = record number; field 2 = BNR; field 3 = Mdt; field 4 = PNR; field 5 = wage type; field 6 = wage type text; field 7 = value; field 8 = cost centre; field 9 = cost unit;
    # field 10 = type of activity; field 11 = free text; field 12 = posting month; field 13 = posting year; field 14 = % agency total; field 15 = % agency employee share; field 16 = agenturprovwert_AN;
    # field 17 = agenturprovwert_AG; field 18 = lohnartustabzug; field 19 = ustwert; field 20 = kontoust; field 21 = exportlodas; field 22 = exportlohnundgehalt; field 23 = exportwiederholung;
    # field 24 = export date; field 25 = agency number
AGP_Gegenkonto = funktionen.fibukonten_dic_lesen("konto_ggagp")
    # Description of the export file
    # AGP contra account (from dict); agency (personal account in accounting, Rewe) is set to 99988 if empty; employer AGP value as a negative amount; posting date as 01MMYYYY; free text as posting text, 120 characters ?????
for x in filequelle:
stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20,stelle21,stelle22,stelle23,stelle24,stelle25=x.split("|")
stelle25 = (stelle25.strip())
if str(stelle17) != "0.0" and str(stelle17) != "0":
if stelle25 == "":
stelle25 = "99988"
fileziel.write(AGP_Gegenkonto+";"+stelle25+";"+stelle17+";01"+stelle12+stelle13+";"+stelle8+";"+stelle9+";PNR: "+stelle4+" AGP %: "+stelle14+" davon AGP AN %: "+stelle15+" Text:"+stelle11+";0\n")
filequelle.close()
fileziel.close()
    # open source and target file - write AGP values into the booking list
filequelle=open("daten/ZW_Buchungsliste_AGP_AN.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_AGP_ANWerte_Buchungsliste.csv","w", encoding='utf-8')
    # write booking list of agency commission employee values
AGP_AN_Gegenkonto = funktionen.fibukonten_dic_lesen("konto_ggagpan")
    # Description of the export file
    # AGP contra account (from dict); agency (personal account in accounting, Rewe) is set to 99988 if empty; employee AGP value as a negative amount; posting date as 01MMYYYY; free text as posting text, 120 characters ?????
for x in filequelle:
stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20,stelle21,stelle22,stelle23,stelle24,stelle25=x.split("|")
stelle25 = (stelle25.strip())
if str(stelle16) != "0.0" and str(stelle16) != "0":
if stelle25 == "":
stelle25 = "99988"
fileziel.write(AGP_AN_Gegenkonto+";"+stelle25+";"+stelle16+";01"+stelle12+stelle13+";"+stelle8+";"+stelle9+";PNR: "+stelle4+" AGP %: "+stelle14+" davon AGP AN %: "+stelle15+" Text:"+stelle11+";0\n")
filequelle.close()
fileziel.close()
    # Description of the fields in the source file
    # field 1 = record number; field 2 = BNR; field 3 = Mdt; field 4 = PNR; field 5 = wage type; field 6 = wage type text; field 7 = value; field 8 = cost centre; field 9 = cost unit;
    # field 10 = type of activity; field 11 = free text; field 12 = posting month; field 13 = posting year; field 14 = % agency total; field 15 = % agency employee share; field 16 = agenturprovwert_AN;
    # field 17 = agenturprovwert_AG; field 18 = lohnartustabzug; field 19 = ustwert; field 20 = kontoust; field 21 = exportlodas; field 22 = exportlohnundgehalt; field 23 = exportwiederholung;
    # field 24 = export date; field 25 = agency number
    # open source and target file - write the employer VAT values into the booking list
filequelle=open("daten/ZW_Buchungsliste_AGP_AG.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_AG_USt_Werte_Buchungsliste.csv","w", encoding='utf-8')
    # write booking list of employer VAT values
AG_USt_konto = funktionen.fibukonten_dic_lesen("konto_ust19")
GG_AG_USt_konto = funktionen.fibukonten_dic_lesen("konto_ggust19")
    # Description of the export file
    # employer VAT contra account (from dict); agency (personal account in accounting, Rewe) is set to "AG unbekannt" if empty; posting date as 01MMYYYY; free text as posting text, 120 characters ?????
for x in filequelle:
stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20,stelle21,stelle22,stelle23,stelle24,stelle25=x.split("|")
stelle25 = (stelle25.strip())
if str(stelle18) == "0" and str(stelle19) != "0" and str(stelle19) != "0.0":
if str(stelle16) != "0.0" and str(stelle16) != "0":
if stelle25 == "":
stelle25 = "AG unbekannt"
fileziel.write(AG_USt_konto+";"+GG_AG_USt_konto+";"+stelle19+";01"+stelle12+stelle13+";"+stelle8+";"+stelle9+";PNR: "+stelle4+" Agentur: "+stelle25+" Text:"+stelle11+";0\n")
filequelle.close()
fileziel.close()
    ##################### PDF block --------------------------- - STILL OPEN
    #
    ##################### removed completely
if result.shape[0] != 0:
var_text = result.shape[0]
var_text="Es wurden "+str(var_text)+" Datensätze in die Datei Export_Steuer exportiert. Weitere Auswertungen stehen zur Verfügung."
else:
var_text="Es wurden keine Datensätze als Steuerwerte exportiert"
return var_text, var_stmonat, var_stjahr
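# Illustrative note (added here, not part of the original module): with purely made-up values,
# one line of the employer AGP booking list written above would look roughly like
#   1890;70001;-25.00;01052022;100;200;PNR: 12345 AGP %: 10 davon AGP AN %: 40 Text:Mai;0
# i.e. contra account; agency account (99988 if empty); employer AGP value; posting date
# 01MMYYYY; cost centre; cost unit; free text; trailing 0.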
def export_csv(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
metadata = datenbank_obj.getdbmetadata(engine)
abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
metadata.create_all()
result = pd.read_sql("SELECT * FROM abrechnungsdaten", engine)
result.to_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Export.csv", sep=';', encoding='utf-16', index=False, mode='w')
if result.shape[0] != 0:
var_text = result.shape[0]
var_text="Es wurden "+str(var_text)+" Datensätze als csv Daten exportiert."
else:
var_text="Es wurden keine Datensätze als csv Daten exportiert"
# Export alle DS nach Excel
return var_text
## sollte nach vielen anpassungen nicht mehr funktionieren - ungeprüft
def export_lohnundgehalt(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantenummer):
engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
metadata = datenbank_obj.getdbmetadata(engine)
abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
metadata.create_all()
if request.method == 'POST':
neuedatei = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", "w")
neuedatei.write(var_beraternummer+";"+var_mandantenummer+";"+var_abrmonat+"/"+var_abrjahr+"\n")
neuedatei.close()
# Export der Lohnarten und Nettobe/abzüge
result = pd.read_sql("SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnart, abrechnungsdaten.wert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.exportlohnundgehalt==\"N\" ", engine)
result.to_csv("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", sep=';', encoding='utf-8', index=False, header=False, mode='a')
### NEU AGP und UST auch in LUG Datei
# Export der USt in Zwischendatei
result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnartustabzug, abrechnungsdaten.ustwert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.ustwert != "0" AND abrechnungsdaten.exportlohnundgehalt==\"N\" ', engine)
result.to_csv("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", sep=';', encoding='utf-8', index=False, header=False, mode='a')
# Export der Agenturprovision in Zwischendatei
result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.agenturprovwert_AN, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.agenturprovwert_AN != "0" AND abrechnungsdaten.exportlohnundgehalt==\"N\" ', engine)
result.to_csv("daten/ZW_LuG_AGP.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
############
# Quell und Zieldatei öffnen - AGP Werte um Lohnart einzufügen
filequelle=open("daten/ZW_LuG_AGP.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt","a", encoding='utf-8')
#Beschreibung der Felder aus der Quelldatei
#stelle 1 = PNR; stelle 2 = Wert; stelle 3 = Kostenstelle; stelle 4 = Kostentraeger
AGP_Lohnart = funktionen.lohnarten_dic_lesen("loa_nb6")
for x in filequelle:
stelle1,stelle2,stelle3,stelle4=x.split("|")
stelle4 = (stelle4.strip())
# stelle2 = stelle2.replace(".", ",")
fileziel.write(stelle1+";"+AGP_Lohnart+";"+stelle2+";"+stelle3+";"+stelle4+"\n")
filequelle.close()
fileziel.close()
hdatum = datetime.datetime.now()
hdatum = hdatum.strftime("%d.%m.%Y")
conn = engine.connect()
abrechnungsdatenupdate = abrechnungsdaten.update().where(abrechnungsdaten.c.exportlohnundgehalt=="N").values(exportlohnundgehalt="J", exportlodas="X", exportwiederholung="X", abrechnungsmonat=var_abrmonat, abrechnungsjahr=var_abrjahr, exportdatum=str(hdatum))
conn.execute(abrechnungsdatenupdate)
abrechnungsdatenupdate = abrechnungsdaten.select()
conn.execute(abrechnungsdatenupdate).fetchall()
if result.shape[0] != 0:
var_text = result.shape[0]
var_text="Es wurden "+str(var_text)+" Datensätze für Lohn und Gehalt exportiert."
filequelle=open("daten/abrechnungszeitraum.txt","r", encoding='utf-8')
for x in filequelle:
var_abrmonat,var_abrjahr=x.split("|")
break
var_abrmonat = int(var_abrmonat)+1
if var_abrmonat < 10:
var_abrmonat = str(var_abrmonat)
var_abrmonat = "0"+var_abrmonat
else:
var_abrmonat = str(var_abrmonat)
if var_abrmonat == "13":
var_abrmonat = "01"
var_abrjahr = int(var_abrjahr)+1
var_abrjahr = str(var_abrjahr)
filequelle=open("daten/abrechnungszeitraum.txt","w")
filequelle.write(var_abrmonat+"|"+var_abrjahr)
filequelle.close()
else:
var_text="Es wurden keine Datensätze für Lohn und Gehalt exportiert"
pass
else:
var_text="Es werden die Datensätze der Monatsübersicht für Lohn und Gehalt exportiert"
pass
return var_text
### Lodas export in this function, current as of 2022-02-14, with AGP and VAT
### tables changed to net and gross
### NEW* 20220402 VAT if the employer covers it - wage type 0 in the SQL DB
### VAT if the employee bears it - net deduction in the SQL DB
def export_lodas(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantenummer):
engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
metadata = datenbank_obj.getdbmetadata(engine)
abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
metadata.create_all()
if request.method == 'POST':
if os.path.exists("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt"):
## Datei öffnen und Daten werden angehangen
fileziel=open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt","a")
fileziel.write("\n* Stunden zur Abrechnung von Mitarbeitern\n")
fileziel.write("[Bewegungsdaten]\n")
else:
## Datei neu öffnen und Kopfdaten schreiben
fileziel=open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt","w")
# schreiben in Lodas Importdatei
fileziel.write("[Allgemein]\nZiel=LODAS\nVersion_SST=1.0\nBeraterNr=")
fileziel.write(var_beraternummer)
fileziel.write("\nMandantenNr=")
fileziel.write(var_mandantenummer)
fileziel.write("\nDatumsformat=JJJJ-MM-TT")
fileziel.write("\nStringbegrenzer='")
fileziel.write("\n\n* LEGENDE:\n* Datei erzeugt mit Tool ARMTool\n* AP: Andreé Rosenkranz; [email protected]\n\n")
fileziel.write("* Satzbeschreibungen zur Übergabe von Bewegungsdaten für Mitarbeiter\n[Satzbeschreibung]\n")
# fileziel.write("\n10;u_lod_bwd_buchung_brutto;abrechnung_zeitraum#bwd;pnr#bwd;la_eigene#bwd;brutto_fest_bez#bwd;kostenstelle#bwd;kostentraeger#bwd;")
# fileziel.write("\n11;u_lod_bwd_buchung_netto;abrechnung_zeitraum#bwd;pnr#bwd;nba_nr#bwd;netto_betrag#bwd;")
fileziel.write("\n10;u_lod_bwd_buchung_standard;abrechnung_zeitraum#bwd;pnr#bwd;la_eigene#bwd;bs_nr#bwd;bs_wert_butab#bwd;kostenstelle#bwd;kostentraeger#bwd;")
fileziel.write("\n\n")
fileziel.write("* Werte zur Abrechnung von Mitarbeitern\n\n")
fileziel.write("[Bewegungsdaten]\n\n")
# Export der USt in Zwischendatei
# Neu* 20220401 ohne USt AG result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnartustabzug, abrechnungsdaten.ustwert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.ustwert != "0" AND abrechnungsdaten.exportlodas==\"N\" ', engine)
result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnartustabzug, abrechnungsdaten.ustwert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.lohnartustabzug != "0" AND abrechnungsdaten.exportlodas==\"N\" ', engine)
result.to_csv("daten/ZW_Lodas_USt.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
# Export der Agenturprovision AN in Zwischendatei
result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.agenturprovwert_AN, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.agenturprovwert_AN != "0" AND abrechnungsdaten.exportlodas==\"N\" ', engine)
result.to_csv("daten/ZW_Lodas_AGP_AN.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
# Export der Agenturprovision AG in Zwischendatei
result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.agenturprovwert_AG, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.agenturprovwert_AG != "0" AND abrechnungsdaten.exportlodas==\"N\" ', engine)
result.to_csv("daten/ZW_Lodas_AGP_AG.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
# Export der Lohnarten und Nettobe/abzüge
result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnart, abrechnungsdaten.wert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.exportlodas==\"N\" ', engine)
result.to_csv("daten/ZW_Lodas.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
############
# Quell und Zieldatei öffnen - AGP Werte
filequelle=open("daten/ZW_Lodas_AGP_AN.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt","a", encoding='utf-8')
#Beschreibung der Felder aus der Quelldatei
#stelle 1 = PNR; stelle 2 = Wert; stelle 3 = Kostenstelle; stelle 4 = Kostentraeger
AGP_Lohnart = funktionen.lohnarten_dic_lesen("loa_nb6")
for x in filequelle:
stelle1,stelle2,stelle3,stelle4=x.split("|")
stelle4 = (stelle4.strip())
var_bs = "3"
stelle2 = stelle2.replace(".", ",")
# fileziel.write("11;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+AGP_Lohnart+";"+stelle2+";"+stelle3+";"+stelle4+";\n")
fileziel.write("10;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+AGP_Lohnart+";"+var_bs+";"+stelle2+";"+stelle3+";"+stelle4+";\n")
filequelle.close()
fileziel.close()
############
# Quell und Zieldatei öffnen - USt Werte
filequelle=open("daten/ZW_Lodas_Ust.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt","a", encoding='utf-8')
#Beschreibung der Felder aus der Quelldatei
#stelle 1 = PNR; stelle 2 = Lohnart; stelle 3 = Wert; stelle 4 = Kostenstelle; stelle 5 = Kostentraeger
for x in filequelle:
stelle1,stelle2,stelle3,stelle4,stelle5=x.split("|")
stelle5 = (stelle5.strip())
if int(stelle2) > 8999:
var_bs = "3"
var_sa = "11"
else:
var_bs = "2"
var_sa = "10"
stelle3 = stelle3.replace(".", ",")
# fileziel.write(var_sa+";"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
fileziel.write("10;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+var_bs+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
filequelle.close()
fileziel.close()
filequelle=open("daten/ZW_Lodas.txt")
fileziel=open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt","a", encoding='utf-8')
#Beschreibung der Felder aus der Quelldatei
#stelle 1 = PNR; stelle 2 = Lohnart; stelle 3 = Wert; stelle 4 = Kostenstelle; stelle 5 = Kostentraeger
for x in filequelle:
stelle1,stelle2,stelle3,stelle4,stelle5=x.split("|")
stelle5 = (stelle5.strip())
if int(stelle2) > 8999:
var_bs = "3"
var_sa = "11"
else:
var_bs = "2"
var_sa = "10"
stelle3 = stelle3.replace(".", ",")
# fileziel.write(var_sa+";"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
fileziel.write("10;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+var_bs+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
fileziel.write("\n\n[Hinweisdaten]\n\nDaten uebernommen aus Erfassungstool ARMTool\nfuer die korrekte Berechnung saemtlicher Werte ist allein der Anwender verantwortlich!\n")
#Dateien schließen
filequelle.close()
fileziel.close()
######################
hdatum = datetime.datetime.now()
hdatum = hdatum.strftime("%d.%m.%Y")
conn = engine.connect()
abrechnungsdatenupdate = abrechnungsdaten.update().where(abrechnungsdaten.c.exportlodas=="N").values(exportlohnundgehalt="X", exportlodas="J", exportwiederholung="X", abrechnungsmonat=var_abrmonat, abrechnungsjahr=var_abrjahr, exportdatum=str(hdatum))
conn.execute(abrechnungsdatenupdate)
abrechnungsdatenupdate = abrechnungsdaten.select()
conn.execute(abrechnungsdatenupdate).fetchall()
if result.shape[0] != 0:
var_text = result.shape[0]
var_text="Es wurden "+str(var_text)+" Datensätze für Lodas exportiert."
filequelle=open("daten/abrechnungszeitraum.txt","r", encoding='utf-8')
for x in filequelle:
var_abrmonat,var_abrjahr=x.split("|")
break
var_abrmonat = int(var_abrmonat)+1
if var_abrmonat < 10:
var_abrmonat = str(var_abrmonat)
var_abrmonat = "0"+var_abrmonat
else:
var_abrmonat = str(var_abrmonat)
if var_abrmonat == "13":
var_abrmonat = "01"
var_abrjahr = int(var_abrjahr)+1
var_abrjahr = str(var_abrjahr)
filequelle=open("daten/abrechnungszeitraum.txt","w")
filequelle.write(var_abrmonat+"|"+var_abrjahr)
filequelle.close()
pass
else:
var_text="Es wurden keine Datensätze für Lodas exportiert"
pass
else:
var_text="Es werden die Datensätze der Monatsübersicht für Lodas exportiert"
pass
return var_text
| 70.635628 | 316 | 0.681865 |
c5ea4da058d255d3e25980e8b5d5bf136c0cc014
| 3,119 |
py
|
Python
|
python/oneflow/nn/modules/in_top_k.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1 |
2021-09-13T02:34:53.000Z
|
2021-09-13T02:34:53.000Z
|
python/oneflow/nn/modules/in_top_k.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/nn/modules/in_top_k.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1 |
2021-01-17T03:34:39.000Z
|
2021-01-17T03:34:39.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
class InTopk(Module):
def __init__(self, k) -> None:
super().__init__()
self._in_top_k = (
flow.builtin_op("in_top_k")
.Input("targets")
.Input("predictions")
.Output("out")
.Attr("k", k)
.Build()
)
def forward(self, targets, predictions):
assert (
targets.shape[0] == predictions.shape[0]
), "The num of targets must equal the num of predictions"
assert len(targets.shape) == 1, "The dimension of targets must be 1"
assert len(predictions.shape) == 2, "The dimension of predictions must be 2"
return self._in_top_k(targets, predictions)
def in_top_k_op(targets, predictions, k):
"""Says whether the targets are in the top K predictions.
Args:
targets (Tensor): the target tensor of type int32 or int64.
predictions (Tensor): the predictions tensor of type float32 .
k (int): Number of top elements to look at for computing precision.
Returns:
oneflow.Tensor: A Tensor of type bool. Computed Precision at k as a bool Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> targets1 = flow.Tensor(np.array([3, 1]), dtype=flow.int32)
>>> predictions1 = flow.Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 0.0],]), dtype=flow.float32)
>>> out1 = flow.in_top_k(targets1, predictions1, k=1)
>>> out1
tensor([1, 0], dtype=oneflow.int8)
>>> out2 = flow.in_top_k(targets1, predictions1, k=2)
>>> out2
tensor([1, 1], dtype=oneflow.int8)
>>> targets2 = flow.Tensor(np.array([3, 1]), dtype=flow.int32, device=flow.device('cuda'))
>>> predictions2 = flow.Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 0.0],]), dtype=flow.float32, device=flow.device('cuda'))
>>> out3 = flow.in_top_k(targets2, predictions2, k=1)
>>> out3
tensor([1, 0], device='cuda:0', dtype=oneflow.int8)
"""
return InTopk(k=k)(targets, predictions)[0]
@register_tensor_op("in_top_k")
def in_top_k_op_tensor(targets, predictions, k):
"""
in_top_k() -> Tensor
See :func:`oneflow.in_top_k`
"""
return InTopk(k=k)(targets, predictions)[0]
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 33.537634 | 143 | 0.639307 |
a8795bb44959370b2560cf2e7355d2e856b814cc
| 1,866 |
py
|
Python
|
infrastructure/cosmosdb.py
|
lizzyTheLizard/homeserver-azure
|
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
|
[
"MIT"
] | null | null | null |
infrastructure/cosmosdb.py
|
lizzyTheLizard/homeserver-azure
|
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
|
[
"MIT"
] | null | null | null |
infrastructure/cosmosdb.py
|
lizzyTheLizard/homeserver-azure
|
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
|
[
"MIT"
] | null | null | null |
from azwrapper import *
def createCosmosDBAccount(group, accountName):
if resourceExists(group, accountName):
print('ComsosDB already exists: ' + accountName)
else:
print('Create database ' + accountName)
createDbAccount = "cosmosdb create -g {} -n {} --capabilities {}"
azSafe(createDbAccount.format(group, accountName, "EnableServerless"))
def createCosmosDBDatabases(group, accountName, configuration):
if configuration is None:
        return
checkDBExistence = "cosmosdb sql database list -g {} -a {}"
resultList = azSafe(checkDBExistence.format(group, accountName))
createDatabase = "cosmosdb sql database create -g {} -a {} -n {}"
for databaseName in configuration:
resourceExists = any(elem["name"] == databaseName for elem in resultList)
if resourceExists:
print('database already exists ' + databaseName)
else:
azSafe(createDatabase.format(group, accountName, databaseName))
_createCosmosDBContainers(group, accountName, databaseName, configuration[databaseName])
def _createCosmosDBContainers(group, accountName, databaseName, containers):
if containers is None:
        return
checkContainerExistence = "cosmosdb sql container list -g {} -a {} -d {}"
resultList = azSafe(checkContainerExistence.format(group, accountName, databaseName))
createContainer = "cosmosdb sql container create -g {} -a {} -d {} -n {} --partition-key-path {}"
for containerName in containers:
resourceExists = any(elem["name"] == containerName for elem in resultList)
if resourceExists:
print('container already exists ' + containerName)
else:
containerKey = containers[containerName]
azSafe(createContainer.format(group, accountName, databaseName, containerName, containerKey))
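# Illustrative usage sketch (added here, not part of the original module); the resource group,
# account, database and container names below are made up and only show how the helpers chain.
if __name__ == "__main__":
    example_configuration = {
        "appdb": {                  # database name -> its containers
            "users": "/userId",     # container name -> partition key path
            "sessions": "/sessionId",
        }
    }
    createCosmosDBAccount("example-rg", "example-cosmos-account")
    createCosmosDBDatabases("example-rg", "example-cosmos-account", example_configuration)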
| 50.432432 | 105 | 0.692926 |
a87f7aed36e7fc9a75ee23496f61ca32906a59c8
| 17,187 |
py
|
Python
|
Packs/CadoResponse/Integrations/CadoResponse/CadoResponse.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/CadoResponse/Integrations/CadoResponse/CadoResponse.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 87 |
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/CadoResponse/Integrations/CadoResponse/CadoResponse.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
''' Cado Response API Integration for the Cortex XSOAR Platform '''
import time
import traceback
from typing import Any, Dict, Optional
from CommonServerPython import *
from CommonServerUserPython import *
import demistomock as demisto
import requests
''' Module Level Declarations '''
requests.packages.urllib3.disable_warnings()
CadoResponseCombinedOutput = Union[Dict[str, Any], List[Dict[str, Any]]]
DATE_FORMAT: str = '%Y-%m-%dT%H:%M:%SZ'
''' Cado Response API Client Code '''
class Client(BaseClient):
''' Client that makes HTTP requests to the Cado Response API '''
def heartbeat(self) -> Dict[str, Any]:
''' Calls the GET /api/v2/system/status endpoint to verify
everything is working
:return JSON response from /system/status endpoint
:rtype Dict[str, Any]
'''
return self._http_request(
method='GET',
url_suffix='/system/status'
)
def create_project(self, project_name: str, project_description: Optional[str]) -> Dict[str, Any]:
''' Calls the POST /api/v2/projects endpoint to create a new
project with given parameters
:param str project_name: Name of the project
:param Optional[str] project_description: Description for the project
:return JSON response from /projects endpoint
:rtype Dict[str, Any]
'''
if not project_name.endswith('_XSOAR'):
project_name += '_XSOAR'
if not project_description:
project_description = 'This is a project in Cado Response created through Cortex XSOAR!'
payload: Dict[str, Any] = {
'caseName': project_name,
'description': project_description
}
return self._http_request(
method='POST',
url_suffix='/projects',
json_data=payload
)
def get_project(self, project_id: Optional[int]) -> Dict[str, Any]:
''' Calls the GET /api/v2/projects endpoint to retrieve a
project with given parameters
:param Optional[int] project_id: ID of the project to retrieve
:return JSON response from /projects endpoint
:rtype Dict[str, Any]
'''
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
return self._http_request(
method='GET',
url_suffix=f'/projects/{project_id}'
)
def list_projects(self, limit: int) -> List[Dict[str, Any]]:
''' Calls the GET /api/v2/projects endpoint to retrieve a list
of created projects
:return JSON response from /projects endpoint
:rtype Dict[str, Any]
'''
data: List[Dict[str, Any]] = self._http_request(
method='GET',
url_suffix='/projects'
)
return data[:limit]
def get_pipeline(self, pipeline_id: Optional[int], project_id: Optional[int]) -> Dict[str, Any]:
''' Calls the GET /api/v2/tasks/pipelines endpoint to
retrieve details about a given pipeline
:param Optional[int] pipeline_id: The id of the pipeline to retrieve
:param Optional[int] project_id: The id of the project the pipeline belongs to
:return JSON response from /tasks/pipelines endpoint
:rtype Dict[str, Any]
'''
if not pipeline_id:
return {}
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
return self._http_request(
method='GET',
url_suffix='/tasks/pipelines',
params={
'project_id': project_id,
'pipeline_id': pipeline_id
}
)
def list_pipelines(self, project_id: Optional[int], limit: int) -> List[Dict[str, Any]]:
''' Calls the GET /api/v2/tasks/pipelines endpoint to
retrieve details about all of a projects pipelines
:param Optional[int] project_id: The id of the project the pipeline belongs to
:return JSON response from /tasks/pipelines endpoint
:rtype Dict[str, Any]
'''
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
data: Dict[str, Any] = self._http_request(
method='GET',
url_suffix='/tasks/pipelines',
params={
'project_id': project_id,
}
)
pipelines: List[Dict[str, Any]] = data['pipelines']
return pipelines[:limit]
def list_instances(self, project_id: Optional[int], region: Optional[str], limit: int) -> List[Dict[str, Any]]:
''' Calls the GET /api/v2/projects/{id}/imports/ec2 endpoint to
retrieve details about a regions EC2 instances
:param Optional[int] project_id: The id of the project to query available instances in
:param Optional[str] region: The AWS region to search instances in
:return JSON response from /projects/{id}/imports/ec2 endpoint
:rtype Dict[str, Any]
'''
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
if not region:
region = demisto.params().get('CadoResponse_DefaultRegion', 'us-east-1')
data: Dict[str, Any] = self._http_request(
method='GET',
url_suffix=f'/projects/{project_id}/imports/ec2',
params={
'region': region
}
)
instances: List[Dict[str, Any]] = data['instances']
return instances[:limit]
def list_buckets(self, project_id: Optional[int], limit: int) -> Dict[str, Any]:
''' Calls the GET /api/v2/projects/{id}/imports/s3 endpoint to
retrieve details about all the available S3 buckets
:param Optional[int] project_id: The id of the project to query available buckets in
:return JSON response from /projects/{id}/imports/s3 endpoint
:rtype Dict[str, Any]
'''
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
data: Dict[str, Any] = self._http_request(
method='GET',
url_suffix=f'/projects/{project_id}/imports/s3'
)
data['buckets'] = data['buckets'][:limit]
return data
def trigger_instance_acquisition(self, project_id: Optional[int], instance_id: Optional[str], region: Optional[str],
bucket: Optional[str], compress: bool = True, include_disks: bool = True,
include_hash: bool = False, include_logs: bool = True,
include_screenshot: bool = True) -> Dict[str, Any]:
''' Calls the POST /api/v2/projects/{id}/imports/ec2 endpoint to
trigger an acquisition of a given instance
:param Optional[int] project_id: The ID of the project you wish to attach the acquisition to
:param str instance_id: ID of the EC2 instance to acquire
:param Optional[str] region: AWS region in which the EC2 instance is located
:param Optional[str] bucket: S3 bucket where the uploaded disk image resides
:param bool compress: Flag indicating if disk compression is enabled
:param bool include_disks: Flag indicating if we include disk image in the acquisition
:param bool include_hash: Flag indicating if we calculate the hash of the disk
:param bool include_logs: Flag indicating if we include system logs in the acquisition
:param bool include_screenshot: Flag indicating if we include a screenshot of the system in the acquisition
:return JSON response from /projects/{id}/imports/ec2 endpoint
:rtype Dict[str, Any]
'''
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
if not region:
region = demisto.params().get('CadoResponse_DefaultRegion', 'us-east-1')
if not bucket:
bucket = demisto.params().get('CadoResponse_DefaultBucket', 'cado-default-bucket')
payload: Dict[str, Any] = {
'bucket': bucket,
'compress': compress,
'include_disks': include_disks,
'include_hash': include_hash,
'include_logs': include_logs,
'include_screenshot': include_screenshot,
'instance_id': instance_id,
'region': region
}
return self._http_request(
method='POST',
url_suffix=f'/projects/{project_id}/imports/ec2',
json_data=payload
)
def trigger_bucket_acquisition(self, project_id: Optional[int], bucket: Optional[str],
file_name: Optional[str]) -> Dict[str, Any]:
''' Calls the POST /api/v2/projects/{id}/imports/s3 endpoint to
trigger an acquisition of a given bucket or file
:param Optional[int] project_id: The ID of the project you wish to attach the acquisition to
:param Optional[str] bucket: The S3 bucket name containing the file
:param str file_name: The name of the file to process
:return JSON response from /projects/{id}/imports/ec2 endpoint
:rtype Dict[str, Any]
'''
if not project_id:
project_id = demisto.params().get('CadoResponse_DefaultProject', 1)
if not bucket:
bucket = demisto.params().get('CadoResponse_DefaultBucket', 'cado-default-bucket')
payload: Dict[str, Any] = {
'bucket': bucket,
'file_name': file_name
}
return self._http_request(
method='POST',
url_suffix=f'/projects/{project_id}/imports/s3',
json_data=payload
)
''' Command Line Handlers '''
def test_module(client: Client) -> str:
''' Command handler for !test-module '''
result: Dict[str, Any] = client.heartbeat()
status: Optional[str] = result['status']
if status is not None and status == 'Running':
return 'ok'
return 'Cado Response is not running'
def create_project_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-create-project '''
unix_timestamp: str = str(int(time.time()))
project_name: str = args.get('project_name', unix_timestamp)
project_description: Optional[str] = args.get('project_description', None)
result: Dict[str, Any] = client.create_project(project_name, project_description)
return CommandResults(
outputs_prefix='CadoResponse.Project',
outputs_key_field='id',
outputs=result
)
def list_project_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-list-project '''
project_id: Optional[int] = args.get('project_id', None)
limit: int = int(args.get('limit', 50))
if project_id:
result: Any = client.get_project(project_id)
else:
result = client.list_projects(limit)
return CommandResults(
outputs_prefix='CadoResponse.Projects',
outputs_key_field='id',
outputs=result
)
def get_pipeline_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-get-pipeline '''
project_id: Optional[int] = args.get('project_id', None)
limit: int = int(args.get('limit', 50))
pipeline_id: Optional[int] = args.get('pipeline_id', None)
if pipeline_id:
result: CadoResponseCombinedOutput = client.get_pipeline(pipeline_id, project_id)
else:
result = client.list_pipelines(project_id, limit)
return CommandResults(
outputs_prefix='CadoResponse.Pipelines',
outputs_key_field='pipeline_id',
outputs=result
)
def list_ec2_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-list-ec2 '''
project_id: Optional[int] = args.get('project_id', None)
region: Optional[str] = args.get('region', None)
limit: int = int(args.get('limit', 100))
result: List[Dict[str, Any]] = client.list_instances(project_id, region, limit)
return CommandResults(
outputs_prefix='CadoResponse.EC2Instances',
outputs_key_field='id',
outputs=result
)
def list_s3_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-list-s3 '''
project_id: Optional[int] = args.get('project_id', None)
limit: int = int(args.get('limit', 100))
result: Dict[str, Any] = client.list_buckets(project_id, limit)
return CommandResults(
outputs_prefix='CadoResponse.S3Buckets',
outputs=result
)
def trigger_ec2_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-trigger-ec2 '''
project_id: Optional[int] = args.get('project_id', None)
instance_id: Optional[str] = args.get('instance_id', None)
region: Optional[str] = args.get('region', None)
bucket: Optional[str] = args.get('bucket', None)
compress: bool = args.get('compress', True)
include_disks: bool = args.get('include_disks', True)
include_hash: bool = args.get('include_hash', False)
include_logs: bool = args.get('include_logs', True)
include_screenshot: bool = args.get('include_screenshot', True)
if not instance_id:
        raise DemistoException('instance_id is a required parameter!')
result: Dict[str, Any] = client.trigger_instance_acquisition(
project_id, instance_id, region, bucket, compress,
include_disks, include_hash, include_logs, include_screenshot
)
return CommandResults(
outputs_prefix='CadoResponse.EC2Acquistion',
outputs_key_field='pipeline_id',
outputs=result
)
def trigger_s3_command(client: Client, args: Dict[str, Any]) -> CommandResults:
''' Command handler for cado-trigger-s3 '''
project_id: Optional[int] = args.get('project_id', None)
bucket: Optional[str] = args.get('bucket', None)
file_name: Optional[str] = args.get('file_name', None)
if not bucket:
raise DemistoException('bucket is a required parameter!')
if not file_name:
raise DemistoException('file_name is a required parameter!')
result: Dict[str, Any] = client.trigger_bucket_acquisition(project_id, bucket, file_name)
return CommandResults(
outputs_prefix='CadoResponse.S3Acquisition',
outputs_key_field='pipeline_id',
outputs=result.get('pipelines')
)
''' Helper Functions '''
def enrich_errors(message: str, command: str) -> str:
''' Helper function to return better error messages.
:param str message: Error message
:param str command: Calling command
:return: A better error message
:rtype str
'''
if command == 'cado-create-project' and 'Project name already exists' in message:
return f'Project name {demisto.args().get("project_name")} already exists!'
else:
return f'Failed to execute {demisto.command()} command.\nError:\n{message}'
''' Entrypoint '''
def main() -> None:
api_key: str = demisto.params().get('apikey')
base_url: str = urljoin(demisto.params()['url'], '/api/v2')
verify_certificate: bool = not demisto.params().get('insecure', False)
proxy: bool = demisto.params().get('proxy', False)
command: str = demisto.command()
args: Dict[str, Any] = demisto.args()
headers: Dict[str, Any] = {
'Authorization': f'Bearer {api_key}'
}
try:
client: Client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy
)
if command == 'test-module':
return_results(test_module(client))
elif command == 'cado-create-project':
return_results(create_project_command(client, args))
elif command == 'cado-list-project':
return_results(list_project_command(client, args))
elif command == 'cado-get-pipeline':
return_results(get_pipeline_command(client, args))
elif command == 'cado-list-ec2':
return_results(list_ec2_command(client, args))
elif command == 'cado-list-s3':
return_results(list_s3_command(client, args))
elif command == 'cado-trigger-ec2':
return_results(trigger_ec2_command(client, args))
elif command == 'cado-trigger-s3':
return_results(trigger_s3_command(client, args))
except Exception as e:
message: str = str(e)
if '404' in message:
return_results(f'Nothing found for {command}')
else:
demisto.error(traceback.format_exc())
return_error(enrich_errors(message, command), error=e)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 34.512048 | 120 | 0.62588 |
a89f298697050a2c424f873ab03e66ee832c4dc3
| 35 |
py
|
Python
|
lib/python3.5/tarfile.py
|
hwroitzsch/BikersLifeSaver
|
469c738fdd6352c44a3f20689b17fa8ac04ad8a2
|
[
"MIT"
] | 1 |
2020-08-16T04:04:23.000Z
|
2020-08-16T04:04:23.000Z
|
lib/python3.5/tarfile.py
|
hwroitzsch/BikersLifeSaver
|
469c738fdd6352c44a3f20689b17fa8ac04ad8a2
|
[
"MIT"
] | 5 |
2020-06-05T18:53:24.000Z
|
2021-12-13T19:49:15.000Z
|
lib/python3.5/tarfile.py
|
hwroitzsch/BikersLifeSaver
|
469c738fdd6352c44a3f20689b17fa8ac04ad8a2
|
[
"MIT"
] | null | null | null |
/usr/local/lib/python3.5/tarfile.py
| 35 | 35 | 0.8 |
76872eea649dcd5c2106cccdf2659c2c3759be61
| 3,024 |
py
|
Python
|
computer-networking-a-top-down-approach/cp2/email.py
|
Jocs/reading-notes
|
26b8331877a2de034b8860bc3e3967893112d52d
|
[
"MIT"
] | 3 |
2021-08-04T07:59:48.000Z
|
2022-03-26T23:58:17.000Z
|
computer-networking-a-top-down-approach/cp2/email.py
|
Jocs/reading-notes
|
26b8331877a2de034b8860bc3e3967893112d52d
|
[
"MIT"
] | null | null | null |
computer-networking-a-top-down-approach/cp2/email.py
|
Jocs/reading-notes
|
26b8331877a2de034b8860bc3e3967893112d52d
|
[
"MIT"
] | null | null | null |
from socket import *
msg = "I love computer networks!"
contenttype = "text/plain"
endmsg = "\r\n.\r\n"
# Choose a mail server (e.g. Google mail server) and call it mailserver
mailserver = 'smtp.126.com' #Fill in start #Fill in end
# Create socket called clientSocket and establish a TCP connection with mailserver
#Fill in start
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((mailserver, 25))
#Fill in end
recv = clientSocket.recv(1024).decode()
print(recv)
if recv[:3] != '220':
    print('220 reply not received from server: connect.')
# Send HELO command and print server response.
heloCommand = 'HELO Alice\r\n'
clientSocket.send(heloCommand.encode())
recv1 = clientSocket.recv(1024).decode()
print(recv1)
if recv1[:3] != '250':
    print('250 reply not received from server.: hello')
# Auth
authCommand = 'AUTH LOGIN\r\n'
clientSocket.send(authCommand.encode())
recv2 = clientSocket.recv(1024).decode()
print(recv2)
if recv2[:3] != '334':
    print('334 reply not received from server.: auth')
# set username and password
username = 'bHVvcmFuMTk4OEAxMjYuY29t\r\n'
password = '******\r\n'
clientSocket.sendall(username.encode())
recv3 = clientSocket.recv(1024).decode()
print(recv3)
if recv3[:3] != '334':
    print('334 reply not received from server.: username')
clientSocket.sendall(password.encode())
recv4 = clientSocket.recv(1024).decode()
print(recv4)
if recv4[:3] != '235':
    print('235 reply not received from server.: password')
# Send MAIL FROM command and print server response.
# Fill in start
fromCommand = 'MAIL FROM: <[email protected]>\r\n'
clientSocket.sendall(fromCommand.encode())
recv5 = clientSocket.recv(1024).decode()
print(recv5)
if recv5[:3] != '250':
    print('250 reply not received from server.: mail from')
# Fill in end
# Send RCPT TO command and print server response.
# Fill in start
toCommand = 'RCPT TO: <[email protected]>\r\n'
clientSocket.sendall(toCommand.encode())
recv6 = clientSocket.recv(1024).decode()
print(recv6)
if recv6[:3] != '250':
    print('250 reply not received from server.: mail to')
# Fill in end
# Send DATA command and print server response.
# Fill in start
dataCommand = 'DATA\r\n'
clientSocket.send(dataCommand.encode())
recv7 = clientSocket.recv(1024).decode()
print(recv7)
if recv7[:3] != '354':
    print('354 reply not received from server.: data')
# Fill in end
# Send message data.
fromaddress = '[email protected]'
toaddress = '[email protected]'
subject = 'email from script'
message = 'from:' + fromaddress + '\r\n'
message += 'to:' + toaddress + '\r\n'
message += 'subject:' + subject + '\r\n'
message += 'Content-Type:' + contenttype + '\r\n'
message += '\r\n' + msg
clientSocket.sendall(message.encode())
# Message ends with a single period.
clientSocket.sendall(endmsg.encode())
recv9 = clientSocket.recv(1024).decode()
print(recv9)
if (recv9[:3] != '250'):
print('250 reply not received from server')
# Send QUIT command and get server response.
# Fill in start
clientSocket.sendall('QUIT\r\n'.encode())
# Fill in end
# Close the connection to the mail server.
# Fill in start
clientSocket.close()
# Fill in end
| 28.8 | 82 | 0.719907 |
4fb1670a959d3fb2101469c3784de01390232933
| 18,818 |
py
|
Python
|
hihope_neptune-oh_hid/00_src/v0.1/third_party/LVM2/daemons/lvmdbusd/cmdhandler.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/daemons/lvmdbusd/cmdhandler.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/daemons/lvmdbusd/cmdhandler.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import time
import threading
from itertools import chain
import collections
import traceback
import os
from lvmdbusd import cfg
from lvmdbusd.utils import pv_dest_ranges, log_debug, log_error, add_no_notify
from lvmdbusd.lvm_shell_proxy import LVMShellProxy
try:
import simplejson as json
except ImportError:
import json
SEP = '{|}'
total_time = 0.0
total_count = 0
# We need to prevent different threads from using the same lvm shell
# at the same time.
cmd_lock = threading.RLock()
class LvmExecutionMeta(object):
def __init__(self, start, ended, cmd, ec, stdout_txt, stderr_txt):
self.lock = threading.RLock()
self.start = start
self.ended = ended
self.cmd = cmd
self.ec = ec
self.stdout_txt = stdout_txt
self.stderr_txt = stderr_txt
def __str__(self):
with self.lock:
return "EC= %d for %s\n" \
"STARTED: %f, ENDED: %f\n" \
"STDOUT=%s\n" \
"STDERR=%s\n" % \
(self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt,
self.stderr_txt)
class LvmFlightRecorder(object):
def __init__(self, size=16):
self.queue = collections.deque(maxlen=size)
def add(self, lvm_exec_meta):
self.queue.append(lvm_exec_meta)
def dump(self):
with cmd_lock:
if len(self.queue):
log_error("LVM dbus flight recorder START")
for c in self.queue:
log_error(str(c))
log_error("LVM dbus flight recorder END")
cfg.blackbox = LvmFlightRecorder()
def _debug_c(cmd, exit_code, out):
log_error('CMD= %s' % ' '.join(cmd))
log_error(("EC= %d" % exit_code))
log_error(("STDOUT=\n %s\n" % out[0]))
log_error(("STDERR=\n %s\n" % out[1]))
def call_lvm(command, debug=False):
"""
Call an executable and return a tuple of exitcode, stdout, stderr
:param command: Command to execute
:param debug: Dump debug to stdout
"""
# print 'STACK:'
# for line in traceback.format_stack():
# print line.strip()
# Prepend the full lvm executable so that we can run different versions
# in different locations on the same box
command.insert(0, cfg.LVM_CMD)
command = add_no_notify(command)
process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True,
env=os.environ)
out = process.communicate()
stdout_text = bytes(out[0]).decode("utf-8")
stderr_text = bytes(out[1]).decode("utf-8")
if debug or process.returncode != 0:
_debug_c(command, process.returncode, (stdout_text, stderr_text))
return process.returncode, stdout_text, stderr_text
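# A minimal usage sketch (the command list is an assumed example, not from this file):
#   ec, out, err = call_lvm(['vgs', '--reportformat', 'json'])
#   if ec == 0:
#       report = json.loads(out)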
# The actual method which gets called to invoke the lvm command, can vary
# from forking a new process to using lvm shell
_t_call = call_lvm
def _shell_cfg():
global _t_call
# noinspection PyBroadException
try:
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
cfg.SHELL_IN_USE = lvm_shell
return True
except Exception:
_t_call = call_lvm
cfg.SHELL_IN_USE = None
log_error(traceback.format_exc())
log_error("Unable to utilize lvm shell, dropping back to fork & exec")
return False
def set_execution(shell):
global _t_call
with cmd_lock:
# If the user requested lvm shell and we are currently setup that
# way, just return
if cfg.SHELL_IN_USE and shell:
return True
else:
if not shell and cfg.SHELL_IN_USE:
cfg.SHELL_IN_USE.exit_shell()
cfg.SHELL_IN_USE = None
_t_call = call_lvm
if shell:
if cfg.args.use_json:
return _shell_cfg()
else:
return False
return True
def time_wrapper(command, debug=False):
global total_time
global total_count
with cmd_lock:
start = time.time()
results = _t_call(command, debug)
ended = time.time()
total_time += (ended - start)
total_count += 1
cfg.blackbox.add(LvmExecutionMeta(start, ended, command, *results))
return results
call = time_wrapper
# Default cmd
# Place default arguments for every command here.
def _dc(cmd, args):
c = [cmd, '--noheading', '--separator', '%s' % SEP, '--nosuffix',
'--unbuffered', '--units', 'b']
c.extend(args)
return c
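# For illustration (assumed arguments), _dc('pvs', ['-o', 'pv_name,pv_uuid']) builds:
#   ['pvs', '--noheading', '--separator', '{|}', '--nosuffix',
#    '--unbuffered', '--units', 'b', '-o', 'pv_name,pv_uuid']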
def parse(out):
rc = []
for line in out.split('\n'):
# This line includes separators, so process them
if SEP in line:
elem = line.split(SEP)
cleaned_elem = []
for e in elem:
e = e.strip()
cleaned_elem.append(e)
if len(cleaned_elem) > 1:
rc.append(cleaned_elem)
else:
t = line.strip()
if len(t) > 0:
rc.append(t)
return rc
def parse_column_names(out, column_names):
lines = parse(out)
rc = []
for i in range(0, len(lines)):
d = dict(list(zip(column_names, lines[i])))
rc.append(d)
return rc
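# Sketch of the two parse helpers on a hypothetical one-row 'pvs' output:
#   parse('/dev/sda1{|}abcd-1234\n') -> [['/dev/sda1', 'abcd-1234']]
#   parse_column_names('/dev/sda1{|}abcd-1234\n', ['pv_name', 'pv_uuid'])
#   -> [{'pv_name': '/dev/sda1', 'pv_uuid': 'abcd-1234'}]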
def options_to_cli_args(options):
rc = []
for k, v in list(dict(options).items()):
if k.startswith("-"):
rc.append(k)
else:
rc.append("--%s" % k)
if v != "":
rc.append(str(v))
return rc
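# For illustration (assumed options dict, relies on dict insertion order):
#   options_to_cli_args({'-f': '', 'profile': 'thin-performance'})
#   -> ['-f', '--profile', 'thin-performance']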
def pv_remove(device, remove_options):
cmd = ['pvremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.append(device)
return call(cmd)
def _qt(tag_name):
return '@%s' % tag_name
def _tag(operation, what, add, rm, tag_options):
cmd = [operation]
cmd.extend(options_to_cli_args(tag_options))
if isinstance(what, list):
cmd.extend(what)
else:
cmd.append(what)
if add:
cmd.extend(list(chain.from_iterable(
('--addtag', _qt(x)) for x in add)))
if rm:
cmd.extend(list(chain.from_iterable(
('--deltag', _qt(x)) for x in rm)))
return call(cmd, False)
def pv_tag(pv_devices, add, rm, tag_options):
return _tag('pvchange', pv_devices, add, rm, tag_options)
def vg_tag(vg_name, add, rm, tag_options):
return _tag('vgchange', vg_name, add, rm, tag_options)
def lv_tag(lv_name, add, rm, tag_options):
return _tag('lvchange', lv_name, add, rm, tag_options)
def vg_rename(vg, new_name, rename_options):
cmd = ['vgrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([vg, new_name])
return call(cmd)
def vg_remove(vg_name, remove_options):
cmd = ['vgremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.extend(['-f', vg_name])
return call(cmd)
def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(snapshot_options))
cmd.extend(["-s"])
if size_bytes != 0:
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--yes'])
return cmd
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', raid_type])
cmd.extend(['--size', str(size_bytes) + 'B'])
if num_stripes != 0:
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name, '--yes'])
return call(cmd)
def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
return _vg_lv_create_raid(vg_name, create_options, name, raid_type,
size_bytes, num_stripes, stripe_size_kb)
def vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'mirror'])
cmd.extend(['--mirrors', str(num_copies)])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
return call(cmd)
def vg_create_cache_pool(md_full_name, data_full_name, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'cache-pool', '--force', '-y',
'--poolmetadata', md_full_name, data_full_name])
return call(cmd)
def vg_create_thin_pool(md_full_name, data_full_name, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'thin-pool', '--force', '-y',
'--poolmetadata', md_full_name, data_full_name])
return call(cmd)
def lv_remove(lv_path, remove_options):
cmd = ['lvremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.extend(['-f', lv_path])
return call(cmd)
def lv_rename(lv_path, new_name, rename_options):
cmd = ['lvrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([lv_path, new_name])
return call(cmd)
def lv_resize(lv_full_name, size_change, pv_dests,
resize_options):
cmd = ['lvresize', '--force']
cmd.extend(options_to_cli_args(resize_options))
if size_change < 0:
cmd.append("-L-%dB" % (-size_change))
else:
cmd.append("-L+%dB" % (size_change))
cmd.append(lv_full_name)
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
def lv_lv_create(lv_full_name, create_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T'])
cmd.extend(['--name', name, lv_full_name, '--yes'])
return call(cmd)
def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options):
# lvconvert --type cache --cachepool VG/CachePoolLV VG/OriginLV
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(cache_options))
cmd.extend(['-y', '--type', 'cache', '--cachepool',
cache_pool_full_name, lv_full_name])
return call(cmd)
def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
cmd = ['lvconvert']
if destroy_cache:
option = '--uncache'
else:
# Currently fairly dangerous
# see: https://bugzilla.redhat.com/show_bug.cgi?id=1248972
option = '--splitcache'
cmd.extend(options_to_cli_args(detach_options))
# needed to prevent interactive questions
cmd.extend(["--yes", "--force"])
cmd.extend([option, lv_full_name])
return call(cmd)
def supports_json():
cmd = ['help']
rc, out, err = call(cmd)
if rc == 0:
if cfg.SHELL_IN_USE:
return True
else:
if 'fullreport' in err:
return True
return False
def lvm_full_report_json():
pv_columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pv_missing']
pv_seg_columns = ['pvseg_start', 'pvseg_size', 'segtype',
'pv_uuid', 'lv_uuid', 'pv_name']
vg_columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
'vg_sysid', 'vg_extent_size', 'vg_extent_count',
'vg_free_count', 'vg_profile', 'max_lv', 'max_pv',
'pv_count', 'lv_count', 'snap_count', 'vg_seqno',
'vg_mda_count', 'vg_mda_free', 'vg_mda_size',
'vg_mda_used_count', 'vg_attr', 'vg_tags']
lv_columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size',
'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'lv_parent', 'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
cmd = _dc('fullreport', [
'-a', # Need hidden too
'--configreport', 'pv', '-o', ','.join(pv_columns),
'--configreport', 'vg', '-o', ','.join(vg_columns),
'--configreport', 'lv', '-o', ','.join(lv_columns),
'--configreport', 'seg', '-o', ','.join(lv_seg_columns),
'--configreport', 'pvseg', '-o', ','.join(pv_seg_columns),
'--reportformat', 'json'
])
rc, out, err = call(cmd)
if rc == 0:
# With the current implementation, if we are using the shell then we
# are using JSON and JSON is returned back to us as it was parsed to
# figure out if we completed OK or not
if cfg.SHELL_IN_USE:
assert(type(out) == dict)
return out
else:
return json.loads(out)
return None
def pv_retrieve_with_segs(device=None):
d = []
err = ""
out = ""
rc = 0
columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pvseg_start', 'pvseg_size', 'segtype', 'pv_missing']
# Lvm has some issues where it returns failure when querying pvs when other
# operations are in process, see:
# https://bugzilla.redhat.com/show_bug.cgi?id=1274085
for i in range(0, 10):
cmd = _dc('pvs', ['-o', ','.join(columns)])
if device:
cmd.extend(device)
rc, out, err = call(cmd)
if rc == 0:
d = parse_column_names(out, columns)
break
else:
time.sleep(0.2)
log_debug("LVM Bug workaround, retrying pvs command...")
if rc != 0:
msg = "We were unable to get pvs to return without error after " \
"trying 10 times, RC=%d, STDERR=(%s), STDOUT=(%s)" % \
(rc, err, out)
log_error(msg)
raise RuntimeError(msg)
return d
def pv_resize(device, size_bytes, create_options):
cmd = ['pvresize']
cmd.extend(options_to_cli_args(create_options))
if size_bytes != 0:
cmd.extend(['--yes', '--setphysicalvolumesize', str(size_bytes) + 'B'])
cmd.extend([device])
return call(cmd)
def pv_create(create_options, devices):
cmd = ['pvcreate', '-ff']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(devices)
return call(cmd)
def pv_allocatable(device, yes, allocation_options):
yn = 'n'
if yes:
yn = 'y'
cmd = ['pvchange']
cmd.extend(options_to_cli_args(allocation_options))
cmd.extend(['-x', yn, device])
return call(cmd)
def pv_scan(activate, cache, device_paths, major_minors, scan_options):
cmd = ['pvscan']
cmd.extend(options_to_cli_args(scan_options))
if activate:
cmd.extend(['--activate', "ay"])
if cache:
cmd.append('--cache')
if len(device_paths) > 0:
for d in device_paths:
cmd.append(d)
if len(major_minors) > 0:
for mm in major_minors:
cmd.append("%s:%s" % (mm))
return call(cmd)
def vg_create(create_options, pv_devices, name):
cmd = ['vgcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.append(name)
cmd.extend(pv_devices)
return call(cmd)
def vg_change(change_options, name):
cmd = ['vgchange']
cmd.extend(options_to_cli_args(change_options))
cmd.append(name)
return call(cmd)
def vg_reduce(vg_name, missing, pv_devices, reduce_options):
cmd = ['vgreduce']
cmd.extend(options_to_cli_args(reduce_options))
if missing:
cmd.append('--removemissing')
elif len(pv_devices) == 0:
cmd.append('--all')
cmd.append(vg_name)
cmd.extend(pv_devices)
return call(cmd)
def vg_extend(vg_name, extend_devices, extend_options):
cmd = ['vgextend']
cmd.extend(options_to_cli_args(extend_options))
cmd.append(vg_name)
cmd.extend(extend_devices)
return call(cmd)
def _vg_value_set(name, arguments, options):
cmd = ['vgchange']
cmd.extend(options_to_cli_args(options))
cmd.append(name)
cmd.extend(arguments)
return call(cmd)
def vg_allocation_policy(vg_name, policy, policy_options):
return _vg_value_set(vg_name, ['--alloc', policy], policy_options)
def vg_max_pv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)],
max_options)
def vg_max_lv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['-l', str(number)], max_options)
def vg_uuid_gen(vg_name, ignore, options):
assert ignore is None
return _vg_value_set(vg_name, ['--uuid'], options)
def activate_deactivate(op, name, activate, control_flags, options):
cmd = [op]
cmd.extend(options_to_cli_args(options))
op = '-a'
if control_flags:
# Autoactivation
if (1 << 0) & control_flags:
op += 'a'
# Exclusive locking (Cluster)
if (1 << 1) & control_flags:
op += 'e'
# Local node activation
if (1 << 2) & control_flags:
op += 'l'
# Activation modes
if (1 << 3) & control_flags:
cmd.extend(['--activationmode', 'complete'])
elif (1 << 4) & control_flags:
cmd.extend(['--activationmode', 'partial'])
# Ignore activation skip
if (1 << 5) & control_flags:
cmd.append('--ignoreactivationskip')
if activate:
op += 'y'
else:
op += 'n'
cmd.append(op)
cmd.append(name)
return call(cmd)
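# Worked example of the bit flags above (values assumed): control_flags set to
# (1 << 0) | (1 << 2) with activate=True produces the short option '-aaly',
# i.e. autoactivation, local node only, activate yes.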
def vg_retrieve(vg_specific):
if vg_specific:
assert isinstance(vg_specific, list)
columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
'vg_sysid', 'vg_extent_size', 'vg_extent_count',
'vg_free_count', 'vg_profile', 'max_lv', 'max_pv',
'pv_count', 'lv_count', 'snap_count', 'vg_seqno',
'vg_mda_count', 'vg_mda_free', 'vg_mda_size',
'vg_mda_used_count', 'vg_attr', 'vg_tags']
cmd = _dc('vgs', ['-o', ','.join(columns)])
if vg_specific:
cmd.extend(vg_specific)
d = []
rc, out, err = call(cmd)
if rc == 0:
d = parse_column_names(out, columns)
return d
def lv_retrieve_with_segments():
columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size',
'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent',
'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
cmd = _dc('lvs', ['-a', '-o', ','.join(columns)])
rc, out, err = call(cmd)
d = []
if rc == 0:
d = parse_column_names(out, columns)
return d
if __name__ == '__main__':
pv_data = pv_retrieve_with_segs()
for p in pv_data:
print(str(p))
| 24.924503 | 78 | 0.691306 |
8b2bff222da0952a2a2b2adcc82956a89e5eecd6
| 679 |
py
|
Python
|
Algorithms/DynamicProgramming/Longest Common Subsequence/lcs_dynamic.py
|
Nidita/Data-Structures-Algorithms
|
7b5198c8d37e9a70dd0885c6eef6dddd9d85d74a
|
[
"MIT"
] | 26 |
2019-07-17T11:05:43.000Z
|
2022-02-06T08:31:40.000Z
|
Algorithms/DynamicProgramming/Longest Common Subsequence/lcs_dynamic.py
|
Nidita/Data-Structures-Algorithms
|
7b5198c8d37e9a70dd0885c6eef6dddd9d85d74a
|
[
"MIT"
] | 7 |
2019-07-16T19:52:25.000Z
|
2022-01-08T08:03:44.000Z
|
Algorithms/DynamicProgramming/Longest Common Subsequence/lcs_dynamic.py
|
Nidita/Data-Structures-Algorithms
|
7b5198c8d37e9a70dd0885c6eef6dddd9d85d74a
|
[
"MIT"
] | 19 |
2020-01-14T02:44:28.000Z
|
2021-12-27T17:31:59.000Z
|
def lcs(s1, s2, x, y):
# Handle the base case before touching the memo table: when x or y is 0,
# arr[x-1][y-1] would wrap around to the last row/column and read or write
# the wrong cell.
if x == 0 or y == 0:
return 0
if arr[x-1][y-1] != -1:
return arr[x-1][y-1]
if s1[x-1] == s2[y-1]:
arr[x-1][y-1] = 1 + lcs(s1, s2, x-1, y-1)
return arr[x-1][y-1]
else:
arr[x-1][y-1] = max(lcs(s1, s2, x-1, y), lcs(s1, s2, x, y-1))
return arr[x-1][y-1]
input_string_1 = 'AGGTABPIXIL'
input_string_2 = 'GXTXAYBPXL'
arr = [[-1 for i in range(len(input_string_2))] for i in range(len(input_string_1))]
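# Sanity check worked out by hand: the longest common subsequence of the two
# strings above is "GTABPXL", so the call below should print 7.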
import time
init = time.time()
print(lcs(input_string_1, input_string_2, len(input_string_1), len(input_string_2)))
end = time.time()
print((end - init) * 1000)
| 21.21875 | 84 | 0.555228 |
50ad6f764c13e48dce605fcce721d08f212c4bdb
| 170 |
py
|
Python
|
exercises/fr/exc_01_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/fr/exc_01_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/fr/exc_01_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
# Import spaCy
import ____
# Create the French nlp object
nlp = ____
# Process a text
doc = nlp("Ceci est une phrase.")
# Print the document text
print(____.text)
| 14.166667 | 33 | 0.717647 |
ba24c1927a095432b0acf43547b2e9f348a098b7
| 38,795 |
py
|
Python
|
Packs/Netskope/Integrations/NetskopeAPIv1/NetskopeAPIv1.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/Netskope/Integrations/NetskopeAPIv1/NetskopeAPIv1.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 61 |
2021-10-07T08:54:38.000Z
|
2022-03-31T10:25:35.000Z
|
Packs/Netskope/Integrations/NetskopeAPIv1/NetskopeAPIv1.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
# type: ignore
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urljoin
import urllib3
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
# disable insecure warnings
urllib3.disable_warnings()
DEFAULT_PAGE = 1
DEFAULT_LIMIT = 50
DEFAULT_MAX_FETCH = DEFAULT_LIMIT
DEFAULT_EVENTS_FETCH = DEFAULT_LIMIT
DEFAULT_EVENT_TYPE = 'application'
DEFAULT_FIRST_FETCH = '7 days'
MAX_LIMIT = 100
MAX_FETCH = 200
MAX_EVENTS_FETCH = 200
TIME_PERIOD_MAPPING = {
'Last 60 Minutes': 3600,
'Last 24 Hours': 86400,
'Last 7 Days': 604800,
'Last 30 Days': 2592000,
'Last 60 Days': 5184000,
'Last 90 Days': 7776000
}
class Client(BaseClient):
"""
Client for Netskope RESTful API.
Args:
base_url (str): The base URL of Netskope.
token (str): The token to authenticate against Netskope API.
use_ssl (bool): Specifies whether to verify the SSL certificate or not.
use_proxy (bool): Specifies if to use XSOAR proxy settings.
"""
def __init__(self, base_url: str, token: str, use_ssl: bool, use_proxy: bool):
super().__init__(urljoin(base_url, '/api/v1/'), verify=use_ssl, proxy=use_proxy)
self._session.params['token'] = token
def list_events_request(self,
query: Optional[str] = None,
event_type: Optional[str] = None,
timeperiod: Optional[int] = None,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
insertion_start_time: Optional[int] = None,
insertion_end_time: Optional[int] = None,
limit: Optional[int] = None,
skip: Optional[int] = None,
unsorted: Optional[bool] = None) -> Dict[str, Any]:
"""
Get events extracted from SaaS traffic and or logs.
Args:
query (Optional[str]): Free query to filter the events.
event_type (Optional[str]): Select events by their type.
timeperiod (Optional[int]): Get all events from a certain time period.
start_time (Optional[int]): Restrict events to those that have timestamps greater than the provided timestamp.
end_time (Optional[int]): Restrict events to those that have timestamps less than or equal to the provided timestamp.
insertion_start_time (Optional[int]): Restrict events to those that were inserted to the system
after the provided timestamp.
insertion_end_time (Optional[int]): Restrict events to those that were inserted to the system
before the provided timestamp.
limit (Optional[int]): The maximum amount of events to retrieve (up to 10000 events).
skip (Optional[int]): The skip number of the events to retrieve (minimum is 1).
unsorted (Optional[bool]): If true, the returned data will not be sorted (useful for improved performance).
Returns:
Dict[str, Any]: Netskope events.
"""
body = remove_empty_elements({
'query': query,
'type': event_type,
'timeperiod': timeperiod,
'starttime': start_time,
'endtime': end_time,
'insertionstarttime': insertion_start_time,
'insertionendtime': insertion_end_time,
'limit': limit,
'skip': skip,
'unsorted': unsorted
})
return self._http_request(method='POST', url_suffix='events', json_data=body)
def list_alerts_request(self,
query: Optional[str] = None,
alert_type: Optional[str] = None,
acked: Optional[bool] = None,
timeperiod: Optional[int] = None,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
insertion_start_time: Optional[int] = None,
insertion_end_time: Optional[int] = None,
limit: Optional[int] = None,
skip: Optional[int] = None,
unsorted: Optional[bool] = None) -> Dict[str, Any]:
"""
Get alerts generated by Netskope, including policy, DLP, and watch list alerts.
Args:
query (Optional[str]): Free query to filter the alerts.
alert_type (Optional[str]): Select alerts by their type.
acked (Optional[bool]): Whether to retrieve acknowledged alerts or not.
timeperiod (Optional[int]): Get alerts from certain time period.
start_time (Optional[int]): Restrict alerts to those that have timestamps greater than the provided timestamp.
end_time (Optional[int]): Restrict alerts to those that have timestamps less than or equal to the provided timestamp.
insertion_start_time (Optional[int]): Restrict alerts which have been inserted into the system
after the provided timestamp.
insertion_end_time (Optional[int]): Restrict alerts which have been inserted into the system
before the provided timestamp.
limit (Optional[int]): The maximum number of alerts to return (up to 10000).
skip (Optional[int]): The skip number of the alerts to retrieve (minimum is 1).
unsorted (Optional[bool]): If true, the returned data will not be sorted (useful for improved performance).
Returns:
Dict[str, Any]: Netskope alerts.
"""
body = remove_empty_elements({
'query': query,
'alert_type': alert_type,
'acked': acked,
'timeperiod': timeperiod,
'starttime': start_time,
'endtime': end_time,
'insertionstarttime': insertion_start_time,
'insertionendtime': insertion_end_time,
'limit': limit,
'skip': skip,
'unsorted': unsorted
})
return self._http_request(method='POST', url_suffix='alerts', json_data=body)
def list_quarantined_files_request(self,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = None,
skip: Optional[int] = None) -> Dict[str, Any]:
"""
List all quarantined files.
Args:
start_time (Optional[int]): Get files last modified within a certain time period.
end_time (Optional[int]): Get files last modified within a certain time period.
limit (Optional[int]): The maximum amount of clients to retrieve (up to 10000).
skip (Optional[int]): The skip number of the clients to retrieve (minimum is 1).
Returns:
Dict[str, Any]: Netskope quarantine files.
"""
body = remove_empty_elements({
'starttime': start_time,
'endtime': end_time,
'limit': limit,
'skip': skip,
'op': 'get-files'
})
return self._http_request(method='POST', url_suffix='quarantine', json_data=body)
def get_quarantined_file_request(self, quarantine_profile_id: str, file_id: str) -> bytes:
"""
Download a quarantined file.
Args:
quarantine_profile_id (str): The ID of quarantine profile.
file_id (str): The ID of the quarantined file.
Returns:
bytes: The quarantined file content.
"""
body = {
'quarantine_profile_id': quarantine_profile_id,
'file_id': file_id,
'op': 'download-url'
}
return self._http_request(method='POST',
url_suffix='quarantine',
json_data=body,
resp_type='content')
def update_quarantined_file_request(self, quarantine_profile_id: str, file_id: str,
action: str) -> None:
"""
Take an action on a quarantined file.
Args:
quarantine_profile_id (str): The profile id of the quarantined file.
file_id (str): The id of the quarantined file.
action (str): Action to be performed on a quarantined.
"""
body = {
'quarantine_profile_id': quarantine_profile_id,
'file_id': file_id,
'action': action,
'op': 'take-action'
}
self._http_request(method='POST', url_suffix='quarantine', json_data=body, resp_type='text')
def update_url_list_request(self, name: str, urls: List[str]) -> None:
"""
Update the URL List with the values provided.
Args:
name (str): Name of an existing URL List shown in the Netskope UI on the URL List skip.
urls (List[str]): The content of the URL list.
"""
body = {'name': name, 'list': ','.join(urls)}
self._http_request(method='POST', url_suffix='updateUrlList', json_data=body)
def update_file_hash_list_request(self, name: str, hashes: List[str]) -> None:
"""
Update file hash list with the values provided.
Args:
name (str): Name of an existing file hash list shown in the Netskope UI on the file hash list skip.
hashes (str): List of file hashes (md5 or sha256).
"""
body = {'name': name, 'list': ','.join(hashes)}
return self._http_request(method='POST', url_suffix='updateFileHashList', json_data=body)
def list_clients_request(self,
query: Optional[str] = None,
limit: Optional[int] = None,
skip: Optional[int] = None) -> Dict[str, Any]:
"""
Get information about the Netskope clients.
Args:
query (Optional[str]): Free query on the clients, based on the client fields.
limit (Optional[int]): The maximum amount of clients to retrieve (up to 10000).
skip (Optional[int]): The skip number of the clients to retrieve (minimum is 1).
Returns:
Dict[str, Any]: The clients information.
"""
body = remove_empty_elements({'query': query, 'limit': limit, 'skip': skip})
return self._http_request(method='POST', url_suffix='clients', params=body)
def _http_request(self, *args, **kwargs):
response = super()._http_request(*args, **kwargs)
if isinstance(response, dict) and 'errors' in response:
errors = '\n'.join(response['errors'])
raise DemistoException(f'Invalid API call: {errors}', res=response)
return response
def arg_to_boolean(arg: Optional[str]) -> Optional[bool]:
"""
Converts an XSOAR argument to a Python boolean or None.
Args:
arg (Optional[str]): The argument to convert.
Returns:
Optional[bool]: A boolean if arg can be converted,
or None if arg is None.
"""
if arg is None:
return None
return argToBoolean(arg)
def arg_to_seconds_timestamp(arg: Optional[str]) -> Optional[int]:
"""
Converts an XSOAR date string argument to a timestamp in seconds.
Args:
arg (Optional[str]): The argument to convert.
Returns:
Optional[int]: A timestamp if arg can be converted,
or None if arg is None.
"""
if arg is None:
return None
return date_to_seconds_timestamp(arg_to_datetime(arg))
def date_to_seconds_timestamp(date_str_or_dt: Union[str, datetime]) -> int:
"""
Converts date string or datetime object to a timestamp in seconds.
Args:
date_str_or_dt (Union[str, datetime]): The datestring or datetime.
Returns:
int: The timestamp in seconds.
"""
return date_to_timestamp(date_str_or_dt) // 1000
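# Quick sketch (assumed input): date_to_timestamp('2020-07-17T20:00:00Z') is the
# epoch time in milliseconds, so date_to_seconds_timestamp returns 1595016000
# rather than 1595016000000.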
def validate_time_arguments(start_time: Optional[int] = None,
end_time: Optional[int] = None,
insertion_start_time: Optional[int] = None,
insertion_end_time: Optional[int] = None,
timeperiod: Optional[int] = None) -> None:
"""
Validates time arguments from the user.
The user must provide one of the following:
- start_time and end_time.
- insertion_start_time and insertion_end_time.
- timeperiod.
Args:
start_time (Optional[int], optional): The start time to fetch from the API.
end_time (Optional[int], optional): The end time to fetch from the API.
insertion_start_time (Optional[int], optional): The insertion start time to fetch from the API.
insertion_end_time (Optional[int], optional): The insertion end time to fetch from the API.
timeperiod (Optional[str], optional): The timeperiod to fetch from the API.
Raises:
DemistoException: The user did not provide valid timestamp.
"""
combination = (all((start_time, end_time)), all(
(insertion_start_time, insertion_end_time)), bool(timeperiod))
if not any(combination):
raise DemistoException('Missing time arguments. Please provide start_time and end_time, '
'or insertion_start_time and insertion_end_time, or timeperiod.')
if combination.count(True) > 1:
raise DemistoException(
'Invalid time arguments. Please provide only start_time and end_time, '
'or insertion_start_time and insertion_end_time, or timeperiod. '
'You must not combine the mentioned options.')
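# Illustrative calls (argument values assumed):
#   validate_time_arguments(start_time=1, end_time=2)                    # passes
#   validate_time_arguments(timeperiod=3600)                             # passes
#   validate_time_arguments(start_time=1, end_time=2, timeperiod=3600)   # raises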
def validate_fetch_params(max_fetch: int, max_events_fetch: int, fetch_events: bool,
first_fetch: str, event_types: List[str]) -> None:
"""
Validates the parameters for fetch incident command.
Args:
max_fetch: (int): The maximum number of incidents for one fetch.
max_events_fetch (int) The maximum number of events per incident for one fetch.
fetch_events (bool): Whether or not fetch events when fetching incident.
first_fetch: (str): First fetch time in words.
"""
if first_fetch:
arg_to_datetime(first_fetch) # verify that it is a date.
if max_fetch > MAX_FETCH:
return_error(f'The Maximum number of incidents per fetch should not exceed {MAX_FETCH}.')
if fetch_events and max_events_fetch > MAX_EVENTS_FETCH:
return_error(
f'The Maximum number of events for each incident per fetch should not exceed {MAX_EVENTS_FETCH}.'
)
if not isinstance(event_types, list):
return_error('The fetched event types must be a list.')
def get_pagination_readable_message(header: str, page: int, limit: int) -> str:
return f'{header}\n Current page size: {limit}\n Showing page {page} out of others that may exist.'
def get_pagination_arguments(args: Dict[str, Any]) -> Tuple[int, int, int]:
"""
Gets and validates pagination arguments for client (skip and limit).
Args:
args (Dict[str, Any]): The command arguments (page and limit).
Returns:
Tuple[int, int]: The page, calculated skip and limit after validation.
"""
page = arg_to_number(args.get('page', DEFAULT_PAGE))
limit = arg_to_number(args.get('limit', DEFAULT_LIMIT))
if page < 1:
raise DemistoException('Page argument must be greater than or equal to 1')
if not 1 <= limit <= MAX_LIMIT:
raise DemistoException(f'Limit argument must be between 1 to {MAX_LIMIT}')
return page, (page - 1) * limit, limit
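# For example (argument values assumed): {'page': '3', 'limit': '50'} yields
# (3, 100, 50), i.e. the API call skips the first two pages of 50 results.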
def list_events_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Get events extracted from SaaS traffic and or logs.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
query = args.get('query')
event_type = args['event_type']
timeperiod = TIME_PERIOD_MAPPING.get(args.get('timeperiod'))
start_time = arg_to_seconds_timestamp(args.get('start_time'))
end_time = arg_to_seconds_timestamp(args.get('end_time'))
insertion_start_time = arg_to_seconds_timestamp(args.get('insertion_start_time'))
insertion_end_time = arg_to_seconds_timestamp(args.get('insertion_end_time'))
page, skip, limit = get_pagination_arguments(args)
unsorted = arg_to_boolean(args.get('unsorted'))
validate_time_arguments(start_time=start_time,
end_time=end_time,
timeperiod=timeperiod,
insertion_start_time=insertion_start_time,
insertion_end_time=insertion_end_time)
response = client.list_events_request(query=query,
event_type=event_type,
timeperiod=timeperiod,
start_time=start_time,
end_time=end_time,
insertion_start_time=insertion_start_time,
insertion_end_time=insertion_end_time,
limit=limit,
skip=skip,
unsorted=unsorted)
outputs = deepcopy(response['data'])
for event in outputs:
event['event_id'] = event['_id']
event['timestamp'] = timestamp_to_datestring(event['timestamp'] * 1000)
readable_output = tableToMarkdown(
get_pagination_readable_message('Events List:', page=page, limit=limit),
outputs,
removeNull=True,
headers=['event_id', 'timestamp', 'type', 'access_method', 'app', 'traffic_type'],
headerTransform=string_to_table_header)
return CommandResults(outputs_prefix='Netskope.Event',
outputs_key_field='event_id',
outputs=outputs,
readable_output=readable_output,
raw_response=response)
def list_alerts_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Get alerts generated by Netskope, including policy, DLP, and watch list alerts.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
query = args.get('query')
alert_type = args.get('alert_type')
acked = arg_to_boolean(args.get('acked'))
timeperiod = TIME_PERIOD_MAPPING.get(args.get('timeperiod'))
start_time = arg_to_seconds_timestamp(args.get('start_time'))
end_time = arg_to_seconds_timestamp(args.get('end_time'))
insertion_start_time = arg_to_seconds_timestamp(args.get('insertion_start_time'))
insertion_end_time = arg_to_seconds_timestamp(args.get('insertion_end_time'))
page, skip, limit = get_pagination_arguments(args)
unsorted = arg_to_boolean(args.get('unsorted'))
validate_time_arguments(start_time=start_time,
end_time=end_time,
timeperiod=timeperiod,
insertion_start_time=insertion_start_time,
insertion_end_time=insertion_end_time)
response = client.list_alerts_request(query=query,
alert_type=alert_type,
acked=acked,
timeperiod=timeperiod,
start_time=start_time,
end_time=end_time,
insertion_start_time=insertion_start_time,
insertion_end_time=insertion_end_time,
limit=limit,
skip=skip,
unsorted=unsorted)
outputs = deepcopy(response['data'])
for alert in outputs:
alert['alert_id'] = alert['_id']
alert['timestamp'] = timestamp_to_datestring(alert['timestamp'] * 1000)
readable_output = tableToMarkdown(
get_pagination_readable_message('Alerts List:', page=page, limit=limit),
outputs,
removeNull=True,
headers=['alert_id', 'alert_name', 'alert_type', 'timestamp', 'action'],
headerTransform=string_to_table_header)
return CommandResults(outputs_prefix='Netskope.Alert',
outputs_key_field='alert_id',
outputs=outputs,
readable_output=readable_output,
raw_response=response)
def list_quarantined_files_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
List all quarantined files.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
start_time = arg_to_seconds_timestamp(args.get('start_time'))
end_time = arg_to_seconds_timestamp(args.get('end_time'))
page, skip, limit = get_pagination_arguments(args)
response = client.list_quarantined_files_request(start_time=start_time,
end_time=end_time,
limit=limit,
skip=skip)
outputs = dict_safe_get(response, ['data', 'quarantined'])
for output in outputs:
for file_output in output['files']:
file_output['quarantine_profile_id'] = output['quarantine_profile_id']
file_output['quarantine_profile_name'] = output['quarantine_profile_name']
outputs = sum((output['files'] for output in outputs), [])
readable_header = get_pagination_readable_message('Quarantined Files List:',
page=page,
limit=limit)
readable_output = tableToMarkdown(readable_header,
outputs,
removeNull=True,
headers=[
'quarantine_profile_id', 'quarantine_profile_name',
'file_id', 'original_file_name', 'policy'
],
headerTransform=string_to_table_header)
return CommandResults(outputs_prefix='Netskope.Quarantine',
outputs_key_field='file_id',
outputs=outputs,
readable_output=readable_output,
raw_response=response)
def get_quarantined_file_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Download a quarantined file.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
quarantine_profile_id = args['quarantine_profile_id']
file_id = args['file_id']
response = client.get_quarantined_file_request(quarantine_profile_id=quarantine_profile_id,
file_id=file_id)
return fileResult(filename=f'{file_id}.zip', data=response, file_type=EntryType.FILE)
def update_quarantined_file_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Take an action on a quarantined file.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
quarantine_profile_id = args['quarantine_profile_id']
file_id = args['file_id']
action = args['action']
client.update_quarantined_file_request(quarantine_profile_id=quarantine_profile_id,
file_id=file_id,
action=action)
readable_output = f'## The file {file_id} was successfully {action}ed!'
return CommandResults(readable_output=readable_output)
def update_url_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Update the URL List with the values provided.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
name = args['name']
urls = argToList(args['urls'])
client.update_url_list_request(name=name, urls=urls)
outputs = {'name': name, 'URL': urls}
readable_output = f'URL List {name}:\n{", ".join(urls)}'
return CommandResults(outputs_prefix='Netskope.URLList',
outputs_key_field='name',
outputs=outputs,
readable_output=readable_output)
def update_file_hash_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Update file hash list with the values provided.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
name = args.get('name')
hashes = argToList(args.get('hash'))
client.update_file_hash_list_request(name=name, hashes=hashes)
outputs = {'name': name, 'hash': hashes}
readable_output = f'Hash List {name}:\n{", ".join(hashes)}'
return CommandResults(outputs_prefix='Netskope.FileHashList',
outputs_key_field='name',
outputs=outputs,
readable_output=readable_output)
def list_clients_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Get information about the Netskope clients.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
query = args.get('query')
page, skip, limit = get_pagination_arguments(args)
response = client.list_clients_request(query=query, limit=limit, skip=skip)
outputs = [client['attributes'] for client in response['data']]
for output in outputs:
output['client_id'] = output['_id']
readable_header = get_pagination_readable_message('Clients List:', page=page, limit=limit)
readable_output = tableToMarkdown(
readable_header,
outputs,
removeNull=True,
headers=['client_id', 'client_version', 'device_id', 'user_added_time'],
headerTransform=string_to_table_header)
return CommandResults(outputs_prefix='Netskope.Client',
outputs_key_field='client_id',
outputs=outputs,
readable_output=readable_output,
raw_response=response)
def list_host_associated_user_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
List all users of certain host by its hostname.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
hostname = args['hostname']
page, skip, limit = get_pagination_arguments(args)
response = client.list_clients_request(query=f'host_info.hostname eq {hostname}',
limit=limit,
skip=skip)
outputs = sum((client['attributes'].get('users') for client in response['data']), [])
for output in outputs:
output['user_id'] = output['_id']
readable_header = get_pagination_readable_message(f'Users Associated With {hostname}:',
page=page,
limit=limit)
readable_output = tableToMarkdown(readable_header,
outputs,
removeNull=True,
headers=['user_id', 'username', 'user_source'],
headerTransform=string_to_table_header)
return CommandResults(outputs_prefix='Netskope.User',
outputs_key_field='user_id',
outputs=outputs,
readable_output=readable_output,
raw_response=response)
def list_user_associated_host_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
List all hosts related to a certain username.
Args:
client (client): The Netskope client.
args (Dict[str, Any]): Command arguments from XSOAR.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
username = args['username']
page, skip, limit = get_pagination_arguments(args)
response = client.list_clients_request(query=f'username eq {username}', limit=limit, skip=skip)
outputs = []
for client in response['data']:
attributes = client['attributes']
agent_status = dict_safe_get(attributes, ['last_event', 'status'])
outputs.append({'agent_status': agent_status, **attributes['host_info']})
readable_header = get_pagination_readable_message(f'Hosts Associated With {username}:',
page=page,
limit=limit)
readable_output = tableToMarkdown(readable_header,
outputs,
removeNull=True,
headers=['hostname', 'os_version', 'agent_status'],
headerTransform=string_to_table_header)
return CommandResults(outputs_prefix='Netskope.Host',
outputs_key_field='nsdeviceuid',
outputs=outputs,
readable_output=readable_output,
raw_response=response)
def test_module(client: Client, max_fetch: int, first_fetch: str, fetch_events: bool,
max_events_fetch: int, event_types: List[str]) -> str:
"""
Validates all integration parameters, and tests connection to Netskope instance.
"""
validate_fetch_params(max_fetch, max_events_fetch, fetch_events, first_fetch, event_types)
client.list_alerts_request(limit=1,
skip=0,
start_time=date_to_seconds_timestamp(datetime.now()),
end_time=date_to_seconds_timestamp(datetime.now()))
return 'ok'
def fetch_multiple_type_events(client: Client, max_fetch: int, start_time: int,
event_types: List[str],
query: Optional[str]) -> List[Dict[str, Any]]:
"""
Fetches events from multiple types.
The function makes an API call for each type, since the API requires
specifying the event type.
Args:
client (Client): The Netskope client.
max_fetch (int): The maximum amount of events to fetch for each type.
start_time (int): The time to fetch the events from.
event_types (List[str]): The event types to fetch as incidents.
query (Optional[str]): Query for filtering the events.
Returns:
List[Dict[str, Any]]: The fetched events.
"""
events = []
if event_types:
max_fetch = max_fetch // len(event_types)
for event_type in event_types:
new_events = client.list_events_request(start_time=start_time,
end_time=date_to_seconds_timestamp(datetime.now()),
limit=max_fetch,
unsorted=False,
event_type=event_type,
query=query)['data']
for event in new_events:
event['event_id'] = event['_id']
event['incident_type'] = event_type
events.extend(new_events)
return events
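# Illustrative numbers only: with max_fetch=200 and two fetched event types
# (for example ['application', 'page']), each type is requested with
# limit 200 // 2 == 100.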
def fetch_incidents(client: Client, max_fetch: int, first_fetch: str, fetch_events: bool,
max_events_fetch: int, event_types: List[str], alerts_query: Optional[str],
events_query: Optional[str]) -> None:
"""
Fetches alerts and events as incidents.
Args:
client (Client): The Netskope client.
max_fetch (int): Maximum number of incidents to fetch.
first_fetch (str): The timestamp to fetch the incidents from.
max_events_fetch (int): Maximum number of events to fetch.
event_types (List[str]): The type of events to fetch.
alerts_query (Optional[str]): Query for filtering the fetched alerts.
events_query (Optional[str]): Query for filtering the fetched events.
"""
validate_fetch_params(max_fetch, max_events_fetch, fetch_events, first_fetch, event_types)
last_run = demisto.getLastRun() or {}
first_fetch = arg_to_seconds_timestamp(first_fetch)
last_alert_time = last_run.get('last_alert_time') or first_fetch
alerts = client.list_alerts_request(start_time=last_alert_time,
end_time=date_to_seconds_timestamp(datetime.now()),
limit=max_fetch,
query=alerts_query,
unsorted=False)['data']
last_event_time = last_run.get('last_event_time') or first_fetch
if fetch_events:
events = fetch_multiple_type_events(client,
max_fetch=max_events_fetch,
start_time=last_event_time,
event_types=event_types,
query=events_query)
else:
events = []
incidents = []
for alert in alerts:
alert['incident_type'] = alert['alert_type']
incidents.append({
'name': alert['alert_name'],
'occurred': timestamp_to_datestring(alert['timestamp']),
'rawJSON': json.dumps(alert)
})
for event in events:
incidents.append({
'name': event['event_id'],
'occurred': timestamp_to_datestring(event['timestamp']),
'rawJSON': json.dumps(event)
})
# The alerts and events are sorted in descending order.
# Also, we increment the timestamp in one second to avoid duplicates.
demisto.setLastRun({
'last_alert_time': alerts[0]['timestamp'] + 1 if alerts else last_alert_time,
'last_event_time': events[0]['timestamp'] + 1 if events else last_event_time
})
demisto.incidents(incidents)
def main():
params = demisto.params()
url = params['url']
token = params['credentials']['password']
use_ssl = not params.get('insecure', False)
use_proxy = params.get('proxy', False)
max_fetch = arg_to_number(params.get('max_fetch', DEFAULT_MAX_FETCH))
first_fetch = params.get('first_fetch', DEFAULT_FIRST_FETCH)
fetch_events = argToBoolean(params.get('fetch_events', False))
event_types = argToList(params.get('fetch_event_types', DEFAULT_EVENT_TYPE))
max_events_fetch = arg_to_number(params.get('max_events_fetch', DEFAULT_EVENTS_FETCH))
client = Client(url, token, use_ssl, use_proxy)
commands = {
'netskope-event-list': list_events_command,
'netskope-alert-list': list_alerts_command,
'netskope-quarantined-file-list': list_quarantined_files_command,
'netskope-quarantined-file-get': get_quarantined_file_command,
'netskope-quarantined-file-update': update_quarantined_file_command,
'netskope-url-list-update': update_url_list_command,
'netskope-file-hash-list-update': update_file_hash_list_command,
'netskope-client-list': list_clients_command,
'netskope-host-associated-user-list': list_host_associated_user_command,
'netskope-user-associated-host-list': list_user_associated_host_command,
}
try:
command = demisto.command()
if command == 'test-module':
return_results(
test_module(client,
max_fetch=max_fetch,
first_fetch=first_fetch,
fetch_events=fetch_events,
max_events_fetch=max_events_fetch,
event_types=event_types))
elif command == 'fetch-incidents':
fetch_incidents(client,
max_fetch=max_fetch,
first_fetch=first_fetch,
fetch_events=fetch_events,
max_events_fetch=max_events_fetch,
event_types=event_types,
alerts_query=demisto.params().get('alert_query'),
events_query=demisto.params().get('events_query'))
elif command in commands:
return_results(commands[command](client, demisto.args()))
else:
raise NotImplementedError(f'The command {command} does not exist!')
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{e}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 40.53814 | 129 | 0.593143 |
e84d456546a7effda0c065864dac6f4667e69a6e
| 1,691 |
py
|
Python
|
Packs/GoogleChronicleBackstory/Scripts/ExtractDomainFromIOCDomainMatchRes/ExtractDomainFromIOCDomainMatchRes_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/GoogleChronicleBackstory/Scripts/ExtractDomainFromIOCDomainMatchRes/ExtractDomainFromIOCDomainMatchRes_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/GoogleChronicleBackstory/Scripts/ExtractDomainFromIOCDomainMatchRes/ExtractDomainFromIOCDomainMatchRes_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from unittest.mock import patch
import demistomock as demisto
import ExtractDomainFromIOCDomainMatchRes
ARGS = {'json_response': "{\"Artifact\": \"e9428.b.akamaiedge.net\", \"IocIngestTime\": \"2020-07-17T20:00:00Z\", "
"\"FirstAccessedTime\": \"2018-11-05T12:01:29Z\", \"LastAccessedTime\": "
"\"2018-11-09T11:51:03Z\", \"Sources\": [{ \"Category\": \"Observed serving executable\", "
"\"IntRawConfidenceScore\": 0, \"NormalizedConfidenceScore\": \"Low\", \"RawSeverity\": "
"\"Low\", \"Source\": \"ET Intelligence Rep List\"}]}"}
def test_main_success(mocker):
"""
When main function is called, get_entry_context should be called.
"""
mocker.patch.object(demisto, 'args', return_value=ARGS)
mocker.patch.object(ExtractDomainFromIOCDomainMatchRes, 'get_entry_context',
return_value={})
ExtractDomainFromIOCDomainMatchRes.main()
assert ExtractDomainFromIOCDomainMatchRes.get_entry_context.called
@patch('ExtractDomainFromIOCDomainMatchRes.return_error')
def test_main_failure(mock_return_error, capfd, mocker):
"""
When main function gets some exception then valid message should be printed.
"""
mocker.patch.object(demisto, 'args', return_value=ARGS)
mocker.patch.object(ExtractDomainFromIOCDomainMatchRes, 'get_entry_context', side_effect=Exception)
with capfd.disabled():
ExtractDomainFromIOCDomainMatchRes.main()
mock_return_error.assert_called_once_with('Error occurred while extracting Domain from IOC Domain Matches '
'response:\n')
| 44.5 | 116 | 0.662921 |
fa24c06f6cb71d0d132ad5eaf12a37f9f961d403
| 3,896 |
py
|
Python
|
examples/relationship/manytoonefield/views.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5 |
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
examples/relationship/manytoonefield/views.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7 |
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
examples/relationship/manytoonefield/views.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1 |
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from django.shortcuts import render, HttpResponse
from .models import Reporter, Article
from datetime import date
def single_create(request):
# Test case 1: create one related-table (Reporter) row and one main-table (Article) row.
# 1. Create the Reporter row.
# INSERT INTO `manytoonefield_reporter` (`first_name`, `last_name`, `email`)
# VALUES ('John', 'Smith', '[email protected]')
# RETURNING `manytoonefield_reporter`.`id`;
r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
r.save()
# 2. Create the Article row, passing the Reporter instance as the foreign key.
# INSERT INTO `manytoonefield_article` (`headline`, `pub_date`, `reporter_id`)
# VALUES ('This is a test', '2005-07-27', 1)
# RETURNING `manytoonefield_article`.`id`;
a = Article(headline="This is a test", pub_date=date(2005, 7, 27), reporter=r)
a.save()
# 3. Forward query (from Article to its Reporter).
# SELECT `manytoonefield_article`.`id`,
# `manytoonefield_article`.`headline`,
# `manytoonefield_article`.`pub_date`,
# `manytoonefield_article`.`reporter_id`
# FROM `manytoonefield_article`
# WHERE `manytoonefield_article`.`id` = 1 LIMIT 21;
af = Article.objects.get(pk=1)
# N+1 query
# SELECT `manytoonefield_reporter`.`id`,
# `manytoonefield_reporter`.`first_name`,
# `manytoonefield_reporter`.`last_name`,
# `manytoonefield_reporter`.`email`
# FROM `manytoonefield_reporter`
# WHERE `manytoonefield_reporter`.`id` = 1 LIMIT 21;
print("af.reporter.id: ", af.reporter.id)
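# A common way to avoid this N+1 (not part of the original example) is a joined
# fetch with select_related:
# af = Article.objects.select_related('reporter').get(pk=1)
# print(af.reporter.id)  # no extra query; the JOIN already pulled the reporter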
# 4. Reverse query (from Reporter back to its Articles).
# SELECT `manytoonefield_reporter`.`id`,
# `manytoonefield_reporter`.`first_name`,
# `manytoonefield_reporter`.`last_name`,
# `manytoonefield_reporter`.`email`
# FROM `manytoonefield_reporter`
# WHERE `manytoonefield_reporter`.`id` = 1 LIMIT 21;
r = Reporter.objects.get(pk=1)
# SELECT `manytoonefield_article`.`id`,
# `manytoonefield_article`.`headline`,
# `manytoonefield_article`.`pub_date`,
# `manytoonefield_article`.`reporter_id`
# FROM `manytoonefield_article`
# WHERE `manytoonefield_article`.`reporter_id` = 1 LIMIT 21;  # TODO: why does all() map to LIMIT 21?
print(r.article_set.all())
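# Note on the TODO above: the LIMIT 21 comes from QuerySet.__repr__, which only
# fetches REPR_OUTPUT_SIZE (20) rows plus one extra so it knows whether to append
# an ellipsis when printing; it is not a property of all() itself.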
return HttpResponse("view_create")
def multi_create(request):
# Test case 2: create one Reporter row and many Article rows.
# 1. Create the Reporter row.
r = Reporter.objects.create(first_name='John', last_name='Smith', email='[email protected]')
# 2. Create 30 Article rows, passing the Reporter instance as the foreign key.
for i in range(30):
Article.objects.create(headline=f"This is a test-{i}", pub_date=date(2005, 7, 27), reporter=r)
# 3. Forward query
af = Article.objects.get(pk=1)  # Article is the "many" side; Reporter is the "one" side.
print("af.reporter.id: ", af.reporter.id)  # triggers the N+1 query
# 4. Reverse query
# SELECT `manytoonefield_reporter`.`id`,
# `manytoonefield_reporter`.`first_name`,
# `manytoonefield_reporter`.`last_name`,
# `manytoonefield_reporter`.`email`
# FROM `manytoonefield_reporter`
# WHERE `manytoonefield_reporter`.`id` = 1 LIMIT 21;
r = Reporter.objects.get(pk=1)
# SELECT `manytoonefield_article`.`id`,
# `manytoonefield_article`.`headline`,
# `manytoonefield_article`.`pub_date`,
# `manytoonefield_article`.`reporter_id`
# FROM `manytoonefield_article`
# WHERE `manytoonefield_article`.`reporter_id` = 1 LIMIT 21
articles = r.article_set.all()
print("articles: ", articles)
# SELECT `manytoonefield_article`.`id`,
# `manytoonefield_article`.`headline`,
# `manytoonefield_article`.`pub_date`,
# `manytoonefield_article`.`reporter_id`
# FROM `manytoonefield_article`
# WHERE `manytoonefield_article`.`reporter_id` = 1;
for article in articles:
print("article.id: ", article.id)
return HttpResponse("view_query")
| 42.813187 | 118 | 0.630903 |
fa4e1e4a8bcd6ba18676178d49cdccda27580841
| 1,657 |
py
|
Python
|
Curso-Em-Video-Python/1Materias/20_Funcoes/#021 função parte 2.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/1Materias/20_Funcoes/#021 função parte 2.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/1Materias/20_Funcoes/#021 função parte 2.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
'''print('Ajuda Interativa')
print('Usando o help(e o comando)')
#help(print)'''
# Example: docstrings
'''def contador(i, f, p):
"""
-> faz uma contagem e mostra na tela.
:param i: Inicio da contagem
:param f: Fim da contagem
:param p: Passo da contagem
:return: sem retorno
"""
c = i
while c <= f:
print(f'{c} ', end='')
c += p
print('FIM!')
resp = notas(10, 9, 8, sit=True)# Notas mostra a doctrings
help(contador)'''
# Example: optional parameters
'''def soma(a=0, b=0, c=0):
"""
-> Faz a soma de três valores e mostra o resultado na tela.
:param a: primeiro valor
:param b: Segundo valor
:param c: terceiro valor
:return:
"""
s = a + b + c
print(f'A soma vale {s}')
soma(3, 2, 9)'''
# Variable scope
'''def teste(b):
global a # faz o A valer oq ta dentro da função
a = 8
b += 4
c = 2
print(f'A dentro vale {a}')
print(f'B dentro vale {b}')
print(f'C dentro vale {c}')
a = 5
teste(a)
print(f'A fora vale {a}')'''
# Returning values
'''def soma(a=0, b=0, c=0):
s = a + b + c
return s
r1 = soma(3, 2, 5)
r2 = soma(2, 2)
r3 = soma(6)
print(f'Os resultados foram {r1}, {r2}, {r3}')'''
# In-class exercise
print('Fatorial')
def fatorial(num=1):
f = 1
for c in range(num, 0, -1):
f *= c
return f
f1 = fatorial(5)
f2 = fatorial(4)
f3 = fatorial()
print(f'Os resultados são {f1}, {f2}, {f3}')
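# Expected output, worked by hand: fatorial(5) -> 120, fatorial(4) -> 24,
# fatorial() -> 1, so the line above prints "Os resultados são 120, 24, 1".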
print('Par e impar')
def par(n=0):
if n % 2 == 0:
return True
else:
return False
num = int(input('Digite um numero: '))
if par(num):
print('É par!')
else:
print('Não é par!')
| 18.411111 | 63 | 0.554013 |
ad4de6af8414028b9dbc8d7ea1bd1128514f7053
| 12,216 |
py
|
Python
|
Beginner/03. Python/buscas-heuristicas.py
|
ankita080208/Hacktoberfest
|
2be849e89285260e7b6672f42979943ad6bbec78
|
[
"MIT"
] | 1 |
2021-10-06T13:55:02.000Z
|
2021-10-06T13:55:02.000Z
|
Beginner/03. Python/buscas-heuristicas.py
|
ankita080208/Hacktoberfest
|
2be849e89285260e7b6672f42979943ad6bbec78
|
[
"MIT"
] | null | null | null |
Beginner/03. Python/buscas-heuristicas.py
|
ankita080208/Hacktoberfest
|
2be849e89285260e7b6672f42979943ad6bbec78
|
[
"MIT"
] | null | null | null |
#'Importa pacote Numpy e renomeia com NP'
import numpy as np
#'Importa módulo base.funcoes e renomeia para FN'
import base.funcoes as fn
from base.grafo import Aresta
#'De Pillow importar Image, ImageDraw'
from PIL import Image, ImageDraw
#'De queue=fila importar Queue, LifoQueue'
from queue import Queue, LifoQueue, PriorityQueue
from base.queues import ReversePriorityQueue
#'De base.funcoes importar addQueue'
from base.funcoes import addQueue
#'Criando classe chamada Buscas(parâmetro{object}):'
class Buscas(object):
#'Definindo função de inicialização para a classe buscas=self'
def __init__(self):
#'buscas=self aponta para visitado que é um array vazio'
self.visitado = []
#'self aponta para marcado que é um array vazio'
self.marcado = []
#'self aponta para resultado que é um arrayvazio'
self.resultado = []
def drawPoint(self, data, aresta, color):
if color == 'branco':
data[aresta.line][aresta.column] = [255, 255, 255, 255]
elif color == 'cinza':
data[aresta.line][aresta.column] = [0, 0, 135, 255]
else:
data[aresta.line][aresta.column] = [255, 69, 0, 255]
class BuscaLargura(Buscas):
# Variaveis que serão utilizadas durante a busca
def __init__(self):
super().__init__()
self.cor = {}
self.pred = {}
self.d = {}
# Nome da busca
self.name = "Busca Largura"
def search(self, data, estado_pai):
for v in fn.list_state(estado_pai, []):
self.d[v] = np.inf
self.cor[v] = 'branco' # branco cinza e preto
# Marca os estados como none, para saber quais os estados que se deve passar novamente
self.pred[v] = None
self.drawPoint(data, v, self.cor[v])
# Marca o estado pai como cinza
self.cor[estado_pai] = 'cinza'
self.d[estado_pai] = 0
self.drawPoint(data, estado_pai, self.cor[estado_pai])
Q = Queue()
Q.put(estado_pai)
# Verifica se tem algum estado pai na minha lista, caso tenha ele entra no while
while not Q.empty():
u = Q.get_nowait()
# Caso o atual "u" que é minha lista de estados pai, contenha o estado pai objetivo, então sai do while
if u.goal:
break
# para cada filho na lista de filhos do pai a ser analisado
for v in u.children:
# se sua cor for branca, eu troco ela pra cinza
if self.cor[v] == 'branco':
self.cor[v] = 'cinza'
self.d[v] = self.d[u] + 1
self.pred[v] = u
self.drawPoint(data, v, self.cor[v])
self.visitado.append((u, v))
Q.put(v)
self.cor[u] = 'preto'
self.drawPoint(data, u, self.cor[u])
self.resultado = [key for key in self.cor if self.cor[key] == 'preto']
# Salva uma imagem com os dados coletados nos passos anteriores e com os estados visitados pintados
fn.save_image(data, "Resolucao-Largura.png")
class BuscaProfundidade(Buscas):
# Variaveis que serão utilizadas durante a busca
def __init__(self):
super().__init__()
self.cor = {}
self.pred = {}
self.d = {}
# Nome da busca
self.name = "Busca Profundidade"
def search(self, data, estado_pai):
for v in fn.list_state(estado_pai, []):
self.d[v] = np.inf
self.cor[v] = 'branco' # branco cinza e preto
# Marca os estados como none, para saber quais os estados que se deve passar novamente
self.pred[v] = None
self.drawPoint(data, v, self.cor[v])
# Marca o estado pai como cinza
self.cor[estado_pai] = 'cinza'
self.d[estado_pai] = 0
self.drawPoint(data, estado_pai, self.cor[estado_pai])
Q = LifoQueue()
Q.put(estado_pai)
# Verifica se tem algum estado pai na minha lista, caso tenha ele entra no while
while not Q.empty():
u = Q.get_nowait()
# Caso o atual "u" que é minha lista de estados pai, contenha o estado pai objetivo, então sai do while
if u.goal:
break
# para cada filho na lista de filhos do pai a ser analisado
for v in u.children:
# se sua cor for branca, eu troco ela pra cinza
if self.cor[v] == 'branco':
self.cor[v] = 'cinza'
self.d[v] = self.d[u] + 1
self.pred[v] = u
self.drawPoint(data, v, self.cor[v])
self.visitado.append((u, v))
Q.put(v)
self.cor[u] = 'preto'
self.drawPoint(data, u, self.cor[u])
self.resultado = [key for key in self.cor if self.cor[key] == 'preto']
# Salva uma imagem com os dados coletados nos passos anteriores e com os estados visitados pintados
        fn.save_image(data, "Resolucao-Profundidade.png")
# NÃO UTILIZADO.
# class BuscaProfundidade(Buscas):
# # Variaveis que serão utilizadas durante a busca
# def __init__(self):
# super().__init__()
# self.cor = {}
# self.pred = {}
# self.d = {}
# self.f = {}
# # Nome da busca
# self.name = "Busca Profundidade"
# def search(self, data, estado_pai):
# # tempo inicial
# tempo = 0
# # Para cada estado possivel a partir do estado pai, ele armazena estes estados em uma lista e os coloca todos como cor branca
# for v in fn.list_state(estado_pai, []):
# # cores possíveis: branco, cinza e preto
# self.cor[v] = 'branco'
# self.pred[v] = None
# for v in fn.list_state(estado_pai, []):
# # para cada filho na lista, verifica-se se ele é branco
# if self.cor[v] == 'branco':
# tempo = self.visit(estado_pai, v, tempo)
# self.resultado = [key for key in self.cor if self.cor[key] == 'preto']
# def visit(self, G, s, tempo):
# tempo = tempo + 1
# self.d[s] = tempo
# self.cor[s] = 'cinza'
# for v in G.children:
# if self.cor[v] == 'branco':
# self.pred[v] = s
# self.visitado.append((s, v))
# tempo = self.visit(G, v, tempo)
# self.cor[s] = 'preto'
# self.tempo = tempo + 1
# self.f[s] = tempo
# return tempo
class BuscaCustoUniforme(Buscas):
"""
Algoritmo Busca - Uniforme
1. Definir um conjunto L de nós iniciais
2. Ordene L em ordem crescente de custo
3. Se L é vazio
Então Busca não foi bem sucedida
Senão seja n o primeiro nó de L;
4. Se n é um nó objetivo
Então
Retornar caminho do nó inicial até N;
Parar
Senão
Remover n de L;
Adicionar em L todos os nós filhos de n, rotulando cada nó com o seu caminho até o nó inicial;
Ordene L em ordem crescente de custo;
Volte ao passo 3.
"""
def __init__(self):
# Variaveis que serão utilizadas durante a busca
super().__init__()
self.cor = {}
# Nome da busca
self.name = "Busca Custo Uniforme"
def geraResultado(self):
self.resultado = [key for key in self.cor if self.cor[key] == 'preto']
def search(self, data, estado_pai):
frontier = PriorityQueue()
frontier.put((0, estado_pai))
# se a fila dos estados pai não estiver vazia, entra no while
while not frontier.empty():
ucs_w, current_node = frontier.get()
#Pega o estado atual da minha lista "frontier", ao andar pega o estaddo atual e o coloca na lista de estados visitados
self.visitado.append(current_node)
# Se o estado atual for o objetivo, finaliza a busca
if current_node.goal:
# print("Cheguei no final! ", current_node)
return
# para cada estado filho
for node in current_node.children:
                custo = current_node.arestas[node].custo
                filho = current_node.arestas[node].g_fim
                if filho not in self.visitado:
                    self.resultado.append((current_node, filho))
                    # custo acumulado desde o estado inicial: a busca de custo uniforme ordena por g(n), não apenas pelo custo da aresta
                    frontier.put(
                        (ucs_w + custo, filho)
                    )
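# Hedged sketch (added, not part of the original solver): the expansion order a uniform-cost
# search produces on a tiny hand-built graph, using only the standard library.  Wrapped in a
# helper so importing this module stays side-effect free; the graph and names are illustrative.
def _ucs_demo_sketch():
    import heapq
    graph = {'A': [('B', 1), ('C', 4)], 'B': [('C', 1), ('D', 5)], 'C': [('D', 1)], 'D': []}
    frontier, best, order = [(0, 'A')], {'A': 0}, []
    while frontier:
        cost, node = heapq.heappop(frontier)
        if cost > best.get(node, float('inf')):
            continue  # stale queue entry, a cheaper path was already found
        order.append((node, cost))
        for nxt, w in graph[node]:
            if cost + w < best.get(nxt, float('inf')):
                best[nxt] = cost + w
                heapq.heappush(frontier, (cost + w, nxt))
    return order  # [('A', 0), ('B', 1), ('C', 2), ('D', 3)] -- always cheapest path first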
class BuscaGreedy(Buscas):
"""
    Algoritmo Busca Gulosa (Greedy): mesmo esquema da busca de custo uniforme, mas L é ordenada apenas pelo custo heurístico h(n) até o objetivo
1. Definir um conjunto L de nós iniciais
2. Ordene L em ordem crescente de custo
3. Se L é vazio
Então Busca não foi bem sucedida
Senão seja n o primeiro nó de L;
4. Se n é um nó objetivo
Então
Retornar caminho do nó inicial até N;
Parar
Senão
Remover n de L;
Adicionar em L todos os nós filhos de n, rotulando cada nó com o seu caminho até o nó inicial;
Ordene L em ordem crescente de custo;
Volte ao passo 3.
"""
def __init__(self):
# Variaveis que serão utilizadas durante a busca
super().__init__()
self.cor = {}
self.H = {}
# Nome da busca
self.name = "Busca Greedy (Gulosa)"
def search(self, data, estado_pai):
frontier = PriorityQueue()
frontier.put((0, estado_pai))
# Se a minha lista de estados pai não estiver vazia, entra no while
while not frontier.empty():
ucs_w, current_node = frontier.get_nowait()
# O estado atual a ser analizado é adcionado na lista de visitados
self.visitado.append(current_node)
# Se o estado atual for o fim, finaliza o while
if current_node.goal:
# print("Cheguei no final! ", current_node)
return
# Para cada estado filho
for node in current_node.children:
# Adiciona seu custo com uma busca heuristica
custo = current_node.arestas[node].custoH
filho = current_node.arestas[node].g_fim
if not filho in self.visitado:
self.resultado.append((current_node, filho))
frontier.put(
(custo, filho)
)
class BuscaAEstrela(Buscas):
# Variaveis que serão utilizadas durante a busca
def __init__(self):
super().__init__()
# Nome da busca
self.name = "Busca A* (A Estrela)"
self.came_from = {}
self.cost_so_far = {}
def search(self, data, estado_pai):
frontier = PriorityQueue()
frontier.put((0, estado_pai))
self.cost_so_far[estado_pai] = 0
# Se minha lista de pais não for vazia, entra no while
while not frontier.empty():
ucs_w, current = frontier.get_nowait()
# adiciona o atual estado a ser analizado na lista de visitados
self.visitado.append(current)
# Se o estado atual for o estado final, sai do while
if current.goal:
# print("Cheguei no final! ", current_node)
return
# Para o proximo estado na lista de filhos
for next in current.children:
                # g(n): distância já percorrida até o filho; a heurística h(n) entra apenas na prioridade f(n)
                new_cost = self.cost_so_far[current] + current.arestas[next].custo
                filho = current.arestas[next].g_fim
                # se ainda não conhecemos este filho, ou encontramos um caminho mais barato até ele,
                # registramos o novo custo e o colocamos na fronteira com f(n) = g(n) + h(n)
                if filho not in self.cost_so_far or new_cost < self.cost_so_far[filho]:
                    self.cost_so_far[filho] = new_cost
                    priority = new_cost + current.arestas[next].custoH
                    self.resultado.append((current, filho))
                    frontier.put((priority, filho))
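# Hedged sketch (added): the only difference from the uniform-cost expansion above is the
# priority -- A* orders the frontier by f(n) = g(n) + h(n).  With an admissible h (one that
# never overestimates the remaining cost) the first time the goal is popped its path is optimal.
def _a_star_priority_sketch(g_cost, step_cost, heuristic_to_goal):
    """Return the priority A* would assign to a child reached via one more edge (illustrative)."""
    g_child = g_cost + step_cost          # distance actually travelled so far
    return g_child + heuristic_to_goal    # f = g + h, used as the queue priority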
| 34.803419 | 135 | 0.563032 |
ada0a3f19ebd4f734d0b9a458bc4f2e1505253c6
| 783 |
py
|
Python
|
UFV---Python/Trabalho Mat. Disc/rascunho.py
|
Vith-MCB/UFV
|
9d96fecdc9ffde2563f9f397bcdb39d95aaf7e69
|
[
"MIT"
] | 1 |
2022-01-25T16:52:26.000Z
|
2022-01-25T16:52:26.000Z
|
UFV---Python/Trabalho Mat. Disc/rascunho.py
|
Vith-MCB/UFV
|
9d96fecdc9ffde2563f9f397bcdb39d95aaf7e69
|
[
"MIT"
] | null | null | null |
UFV---Python/Trabalho Mat. Disc/rascunho.py
|
Vith-MCB/UFV
|
9d96fecdc9ffde2563f9f397bcdb39d95aaf7e69
|
[
"MIT"
] | null | null | null |
# input
n = int(input())
cont1 = int(input())
conttot = 1
# graph (adjacency matrix read from input)
contador = 0
g = [[0 for i in range(n)] for j in range(n)]
lista = input().split()
for col in range(n):
for linha in range(n):
g[col][linha] = int(lista[contador])
contador += 1
if col == linha:
g[col][linha] = 0
# List of contaminated nodes
contaminados = []
contaminados.append(cont1)
# Discovering contaminated nodes: spread the contamination while new neighbours keep appearing
fila = [cont1]
while fila:
    atual = fila.pop(0)
    for vizinho in range(n):
        if g[atual][vizinho] == 1 and vizinho not in contaminados:
            contaminados.append(vizinho)
            fila.append(vizinho)
            conttot += 1
print(conttot)
| 23.029412 | 81 | 0.551724 |
7e949d1be8b4b1624b308071102466425b0db545
| 800 |
py
|
Python
|
undumb.py
|
pierce403/undumb
|
966d15a1010ad675054c1fdde253448c7d50cb09
|
[
"Apache-2.0"
] | 1 |
2020-08-11T05:01:05.000Z
|
2020-08-11T05:01:05.000Z
|
undumb.py
|
pierce403/undumb
|
966d15a1010ad675054c1fdde253448c7d50cb09
|
[
"Apache-2.0"
] | null | null | null |
undumb.py
|
pierce403/undumb
|
966d15a1010ad675054c1fdde253448c7d50cb09
|
[
"Apache-2.0"
] | null | null | null |
import re
import sys
if(len(sys.argv)>1):
file = open(sys.argv[1], encoding = "ISO-8859-1")
else:
file = open(sys.stdin.fileno(), encoding = "ISO-8859-1")
minlength = 8
maxlength = 20
specials = re.compile('[@_!#$%^&*()<>?/\|}{~:]')
lowers = re.compile('[abcdefghijklmnopqrstuvwxyz]')
uppers = re.compile('[ABCDEFGHIJKLMNOPQRSTUVWXYZ]')
numeric = re.compile('[0123456789]')
while 1:
words = file.readlines(100000)
if not words:
break
for word in words:
word=word.strip()
if(len(word)<minlength):
continue
if(len(word)>maxlength):
continue
if(None==specials.search(word)):
continue
if(None==lowers.search(word)):
continue
if(None==uppers.search(word)):
continue
if(None==numeric.search(word)):
continue
print(word)
| 22.222222 | 58 | 0.63375 |
bc1e72531c888ffd6f8a9aa21c25be6502ced3a4
| 99 |
py
|
Python
|
backend/apps/ineedstudent/apps.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 3 |
2020-03-27T20:39:31.000Z
|
2020-03-31T20:24:55.000Z
|
backend/apps/ineedstudent/apps.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 21 |
2020-03-28T09:57:15.000Z
|
2020-03-31T11:38:00.000Z
|
backend/apps/ineedstudent/apps.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class IneedstudentConfig(AppConfig):
name = 'ineedstudent'
| 16.5 | 36 | 0.777778 |
cb2cc562d1c939cd1753a77b41c7d4d1890aa287
| 2,340 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/build/typemap.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/build/typemap.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/build/typemap.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2015-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file typemap.py
# @author Michael Behrisch
# @date 2015-07-06
"""
This script rebuilds "src/netimport/typemap.h" and "src/polyconvert/pc_typemap.h", the files
representing the default typemaps.
It does this by parsing the data from the sumo data dir.
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
from os.path import dirname, exists, getmtime, join
def writeTypeMap(typemapFile, typemap):
with open(typemapFile, 'w') as f:
for format, mapFile in sorted(typemap.items()):
print("const std::string %sTypemap =" % format, file=f)
for line in open(mapFile):
print('"%s"' %
line.replace('"', r'\"').replace('\n', r'\n'), file=f)
print(";", file=f)
def generateTypeMap(typemapFile, formats, suffix):
typemapDataDir = join(dirname(__file__), '..', '..', 'data', 'typemap')
typemap = {}
maxTime = 0
for format in formats:
typemap[format] = join(typemapDataDir, format + suffix)
if exists(typemap[format]):
maxTime = max(maxTime, getmtime(typemap[format]))
if not exists(typemapFile) or maxTime > getmtime(typemapFile):
writeTypeMap(typemapFile, typemap)
if __name__ == "__main__":
srcDir = join(dirname(__file__), '..', '..', 'src')
if len(sys.argv) > 1:
srcDir = sys.argv[1]
generateTypeMap(join(srcDir, 'netimport', 'typemap.h'), ("opendrive", "osm"), "Netconvert.typ.xml")
generateTypeMap(join(srcDir, 'polyconvert', 'pc_typemap.h'), ("navteq", "osm", "visum"), "Polyconvert.typ.xml")
| 40.344828 | 115 | 0.680769 |
3842ea2b47a34f7d49abb2340d79662e97807666
| 472 |
py
|
Python
|
pythonProj/FZPython/taskCenter/dailyTickData.py
|
iHamburg/FZQuant
|
86b750ec33d01badfd3f324d6f1599118b9bf8ff
|
[
"MIT"
] | null | null | null |
pythonProj/FZPython/taskCenter/dailyTickData.py
|
iHamburg/FZQuant
|
86b750ec33d01badfd3f324d6f1599118b9bf8ff
|
[
"MIT"
] | null | null | null |
pythonProj/FZPython/taskCenter/dailyTickData.py
|
iHamburg/FZQuant
|
86b750ec33d01badfd3f324d6f1599118b9bf8ff
|
[
"MIT"
] | 2 |
2019-04-10T10:05:00.000Z
|
2021-11-24T17:17:23.000Z
|
"""
获取实时盘口数据
"""
import tushare as ts
from pymongo import MongoClient
import json
import time
stockList = ['600196','601933','600703']
# 根据stock列表获得实时数据,
df = ts.get_realtime_quotes(stockList) #Single stock symbol
# print(df)
# conn = MongoClient('121.42.26.144', 27017)
while True:
# 每隔3秒执行一次数据
time.sleep(3)
df = ts.get_realtime_quotes(stockList) #Single stock symbol
print(df)
# conn.db.tickdata.insert(json.loads(df.to_json(orient='records')))
| 20.521739 | 71 | 0.713983 |
69b0b6a4eff52152771be7b2f24bd2e2a56e40d2
| 901 |
py
|
Python
|
0088merge-sorted-array.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
0088merge-sorted-array.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
0088merge-sorted-array.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Solution:
def merge(self, nums1, m: int, nums2, n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
for i in range(m):
nums1[-1-i] = nums1[m-1-i]
i = 0
j = 0
k = 0
while i < m and j < n:
if nums1[-m+i] < nums2[j]:
nums1[k] = nums1[-m+i]
i += 1
else:
nums1[k] = nums2[j]
j += 1
k += 1
while i < m:
nums1[k] = nums1[-m+i]
i += 1
k += 1
while j < n:
nums1[k] = nums2[j]
j += 1
k += 1
if __name__ == "__main__":
s = Solution()
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
s.merge(nums1, m, nums2, n)
print(nums1)
| 22.525 | 62 | 0.374029 |
3864e2aa61bdb12eff257d3cbadcdb4edbbe581e
| 114 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE1/SAFE28JUL/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE1/SAFE28JUL/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE1/SAFE28JUL/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from AppOperations import AppOperations as ao
from AppOperations import Rec
ao.reset_slno()
#print(Rec.timestmp())
| 28.5 | 45 | 0.824561 |
aab5aa24a847ec42ce8ae7235d2646ee49fe8cfa
| 5,706 |
py
|
Python
|
Utils/py/PathPlanner/LPG.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/PathPlanner/LPG.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/PathPlanner/LPG.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from __future__ import division
import sys
import math
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
import matplotlib as mpl
import Queue as Q
import copy
import time
base = 1.1789
minimal_cell = 100
angular_part = 16
parameter_s = 0.5
robot_radius = 0
def get_r(coords):
return math.floor(math.log(((math.sqrt(np.power(coords[0], 2) + np.power(coords[1], 2)) * (base - 1)) / minimal_cell) + 1, base))
def get_inv_r(r):
return (np.exp(np.log(base)*r) - 1) * minimal_cell / (base - 1)
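# Hedged sanity check (added, not part of the original planner): get_inv_r maps a ring index
# back to the radius of that ring's inner edge, so it is the (floored) inverse of get_r, and
# ring width grows geometrically with `base` away from the robot.  Numbers are illustrative.
def _ring_roundtrip_example():
    r = get_r((500.0, 0.0))        # ring index of a point 500 mm straight ahead
    return r, get_inv_r(r)         # inner-edge radius of that ring, always <= 500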
def get_a(coords, rot):
return math.floor((angular_part / (2*np.pi)) * (angle(coords) - rot) + 0.5)
def angle(coords):
if (coords[0] == 0):
return np.arctan2(coords[1], 1)
else:
return np.arctan2(coords[1], coords[0])
def get_cell_mid((r, a), rot): # returns cell mid from polar coordinates
prd = (((np.power(base, r+0.5) - 1) * minimal_cell) / (base - 1))
return (np.cos(a * (2*np.pi/16) + rot) * prd, np.sin(a * (2*np.pi/16) + rot) * prd)
def get_cell(coords, rot):
return (get_r(coords), get_a(coords, rot))
def dist(a, b):
(x1, y1) = (a[0], a[1])
(x2, y2) = (b[0], b[1])
return np.sqrt(np.power(x1 - x2, 2) + np.power(y1 - y2, 2))
def dist_cell(a, b, rot):
(x1, y1) = get_cell_mid(a, rot)
(x2, y2) = get_cell_mid(b, rot)
return np.sqrt(np.power(x1 - x2, 2) + np.power(y1 - y2, 2))
def obst_func(cell, obst, rot): # obst is obstacle coordinates in x, y
r_f = obst[2]
cell_mid = get_cell_mid(cell, rot)
dist_to_obst_mid = dist(cell_mid, obst)
obst_dist_to_mid = dist(obst, (0, 0))
r_d = obst_dist_to_mid / 10
# parameters
a = r_f - r_d # cost of constant part
r = r_f + r_d # radius of constant part
s = parameter_s*r # radius of linear decreasing part
return np.maximum(np.minimum(1 - ((dist_to_obst_mid - r) / s), 1), 0) * a
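# Hedged illustration (added): obst_func builds a penalty field around each obstacle -- a flat
# core of cost a = r_f - r_d within radius r = r_f + r_d, then a linear falloff of width
# s = parameter_s * r.  The helper below evaluates it for a hypothetical obstacle 1 m ahead.
def _obst_cost_example():
    obst = (1000.0, 0.0, 200.0)            # x, y and radius r_f of an assumed obstacle
    cell = get_cell((1000.0, 0.0), 0.0)    # grid cell containing the obstacle centre
    return obst_func(cell, obst, 0.0)      # close to r_f - r_d inside the flat core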
# A STAR IMPLEMENTATION
def a_star_search(start, goal, obstacles, rot):
openlist = Q.PriorityQueue()
closedlist = set()
openlist.put((0, start))
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
start = time.time()
while not openlist.empty():
current = openlist.get()[1]
if current == goal:
break
closedlist.add(current)
for r in [0, -1, 1]:
for a in [0, -1, 1]:
the_next = (current[0] + r, current[1] + a)
if the_next in closedlist:
continue
# initialize cost_so_far
if math.isnan(cost_so_far[current]):
cost_so_far[current] = 0
# cell cost without obstacles
new_cost = cost_so_far[current] + dist_cell(current, the_next, rot)
# add obstacle costs to cell
for obst in obstacles:
new_cost += obst_func(the_next, obst, rot)
# add to or update openlist
if the_next not in cost_so_far or new_cost < cost_so_far[the_next]:
cost_so_far[the_next] = new_cost
priority = new_cost + dist_cell(the_next, goal, rot)
openlist.put((priority, the_next))
came_from[the_next] = current
if time.time() - start > 10:
return None, None
return came_from, cost_so_far
def compute_waypoints(tar, obstacles, rot, rot_a):
start = get_cell(tar, rot)
target = (0, rot_a)
(a, b) = a_star_search(start, target, obstacles, rot)
if a is None:
return None
the_next = target
the_path = [the_next]
while a[the_next] in a:
the_next = a[the_next]
the_path.append(the_next)
return the_path
def compute_gait(the_path, target, rot):
(x, y) = (0, 0)
for k in range(0, len(the_path)):
(x, y) = get_cell_mid(the_path[k], rot)
if (np.absolute(x) >= 60) or (np.absolute(y) >= 60):
break
distance = dist((x, y), (0, 0))
max_steplength = min(60, max(-60, distance))
gait = (x / distance * max_steplength, y / distance * max_steplength)
if np.sqrt(np.power(gait[0], 2) + np.power(gait[1], 2)) > np.sqrt(np.power(target[0], 2) + np.power(target[1], 2)):
gait = target
return gait
def draw_LPG(ax, robot_pos, rot):
a_length = 2*math.pi / angular_part
radius = 60000
a = (np.arange(0, angular_part) + 0.5) * a_length
x = np.cos(a + rot) * radius
y = np.sin(a + rot) * radius
# draw rings
for k in range(0, 17):
rad = get_inv_r(k)
ax.add_artist(Circle(xy=(robot_pos[0], robot_pos[1]), radius=rad, fill=False, color='black', alpha=.25))
# draw angular partitions
for k in range(0, len(x)):
ax.plot([0 + robot_pos[0], x[k] + robot_pos[0]], [0 + robot_pos[1], y[k] + robot_pos[1]], 'black', alpha=.25)
def draw_waypoints(ax, waypoints, robot_pos, rot):
# draw waypoint cells
for k in waypoints:
(way_x, way_y) = get_cell_mid(k, rot)
ax.plot(way_x + robot_pos[0], way_y + robot_pos[1], ".", c='blue')
def draw_obstacles(ax, robot_pos, obstacles):
# draw obstacles
if obstacles:
for k in obstacles:
ax.add_artist(Circle(xy=(k[0], k[1]), radius=k[2]+(dist(k, robot_pos)/10) + (k[2]+(dist(k, robot_pos)/10) * parameter_s), fill=True, color='blue', alpha=.25))
ax.add_artist(Circle(xy=(k[0], k[1]), radius=k[2]+dist(k, robot_pos)/10, fill=True, color='red', alpha=.25))
ax.add_artist(Circle(xy=(k[0], k[1]), radius=10, fill=True, color='black'))
| 33.174419 | 170 | 0.578339 |
fadc9bf4539325b36b93f4d31e6ebd90427b62db
| 7,743 |
py
|
Python
|
src/bo4e/bo/marktlokation.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/bo/marktlokation.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/bo/marktlokation.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
"""
Contains Marktlokation class
and corresponding marshmallow schema for de-/serialization
"""
import attr
from marshmallow import fields
from marshmallow_enum import EnumField # type:ignore[import]
from bo4e.bo.geschaeftsobjekt import Geschaeftsobjekt, GeschaeftsobjektSchema
from bo4e.bo.geschaeftspartner import Geschaeftspartner, GeschaeftspartnerSchema
from bo4e.com.adresse import Adresse, AdresseSchema
from bo4e.com.geokoordinaten import Geokoordinaten, GeokoordinatenSchema
from bo4e.com.katasteradresse import Katasteradresse, KatasteradresseSchema
from bo4e.com.messlokationszuordnung import Messlokationszuordnung, MesslokationszuordnungSchema
from bo4e.enum.bilanzierungsmethode import Bilanzierungsmethode
from bo4e.enum.botyp import BoTyp
from bo4e.enum.energierichtung import Energierichtung
from bo4e.enum.gasqualitaet import Gasqualitaet
from bo4e.enum.gebiettyp import Gebiettyp
from bo4e.enum.netzebene import Netzebene
from bo4e.enum.sparte import Sparte
from bo4e.enum.verbrauchsart import Verbrauchsart
from bo4e.validators import validate_marktlokations_id
# pylint: disable=too-many-instance-attributes, too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class Marktlokation(Geschaeftsobjekt):
"""
Object containing information about a Marktlokation
.. HINT::
`Marktlokation JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/bo/MarktlokationSchema.json>`_
"""
# required attributes
bo_typ: BoTyp = attr.ib(default=BoTyp.MARKTLOKATION)
#: Identifikationsnummer einer Marktlokation, an der Energie entweder verbraucht, oder erzeugt wird.
marktlokations_id: str = attr.ib(validator=validate_marktlokations_id)
#: Sparte der Marktlokation, z.B. Gas oder Strom
sparte: Sparte
#: Kennzeichnung, ob Energie eingespeist oder entnommen (ausgespeist) wird
energierichtung: Energierichtung
#: Die Bilanzierungsmethode, RLM oder SLP
bilanzierungsmethode: Bilanzierungsmethode
netzebene: Netzebene
"""
Netzebene, in der der Bezug der Energie erfolgt.
Bei Strom Spannungsebene der Lieferung, bei Gas Druckstufe.
Beispiel Strom: Niederspannung Beispiel Gas: Niederdruck.
"""
# optional attributes
#: Verbrauchsart der Marktlokation.
verbrauchsart: Verbrauchsart = attr.ib(default=None)
#: Gibt an, ob es sich um eine unterbrechbare Belieferung handelt
unterbrechbar: bool = attr.ib(default=None)
#: Codenummer des Netzbetreibers, an dessen Netz diese Marktlokation angeschlossen ist.
netzbetreibercodenr: str = attr.ib(default=None)
#: Typ des Netzgebietes, z.B. Verteilnetz
gebietstyp: Gebiettyp = attr.ib(default=None)
#: Die ID des Gebietes in der ene't-Datenbank
netzgebietsnr: str = attr.ib(default=None) # todo: rename to "id" (see 2021-12-15 update)
#: Bilanzierungsgebiet, dem das Netzgebiet zugeordnet ist - im Falle eines Strom Netzes
bilanzierungsgebiet: str = attr.ib(default=None)
#: Codenummer des Grundversorgers, der für diese Marktlokation zuständig ist
grundversorgercodenr: str = attr.ib(default=None)
#: Die Gasqualität in diesem Netzgebiet. H-Gas oder L-Gas. Im Falle eines Gas-Netzes
gasqualitaet: Gasqualitaet = attr.ib(default=None)
#: Geschäftspartner, dem diese Marktlokation gehört
endkunde: Geschaeftspartner = attr.ib(default=None)
zugehoerige_messlokation: Messlokationszuordnung = attr.ib(default=None) # todo: rename to plural
"""
Aufzählung der Messlokationen, die zu dieser Marktlokation gehören.
Es können 3 verschiedene Konstrukte auftreten:
Beziehung 1 : 0 : Hier handelt es sich um Pauschalanlagen ohne Messung. D.h. die Verbrauchsdaten sind direkt über
die Marktlokation abgreifbar.
Beziehung 1 : 1 : Das ist die Standard-Beziehung für die meisten Fälle. In diesem Fall gibt es zu einer
Marktlokation genau eine Messlokation.
    Beziehung 1 : N : Hier liegt beispielsweise eine Untermessung vor. Der Verbrauch einer Marktlokation berechnet sich
    hier aus mehreren Messungen.
    Es gibt praktisch auch noch die Beziehung N : 1, beispielsweise bei einer Zweirichtungsmessung, bei der durch eine
    Messeinrichtung die Messung sowohl für die Einspeiseseite als auch für die Ausspeiseseite erfolgt.
    Da Abrechnung und Bilanzierung jedoch für beide Marktlokationen getrennt erfolgen, werden nie beide Marktlokationen
    gemeinsam betrachtet. Daher lässt sich dieses Konstrukt auf zwei 1:1-Beziehungen zurückführen,
    wobei die Messlokation in beiden Fällen die gleiche ist.
    In den Zuordnungen ist die arithmetische Operation mit aufgeführt, mit der der Verbrauch einer Messlokation
    zum Verbrauch einer Marktlokation beiträgt.
Der Standard ist hier die Addition.
"""
# only one of the following three optional attributes can be set
#: Die Adresse, an der die Energie-Lieferung oder -Einspeisung erfolgt
lokationsadresse: Adresse = attr.ib(default=None)
geoadresse: Geokoordinaten = attr.ib(default=None)
"""
Alternativ zu einer postalischen Adresse kann hier ein Ort mittels Geokoordinaten angegeben werden
(z.B. zur Identifikation von Sendemasten).
"""
katasterinformation: Katasteradresse = attr.ib(default=None)
"""
Alternativ zu einer postalischen Adresse und Geokoordinaten kann hier eine Ortsangabe mittels Gemarkung und
Flurstück erfolgen.
"""
# todo: add kundengruppe
# pylint:disable=unused-argument
@lokationsadresse.validator
@geoadresse.validator
@katasterinformation.validator
def validate_address_info(self, address_attribute, value):
"""Checks that there is one and only one valid adress given."""
all_address_attributes = [
self.lokationsadresse,
self.geoadresse,
self.katasterinformation,
]
amount_of_given_address_infos = len([i for i in all_address_attributes if i is not None])
if amount_of_given_address_infos != 1:
raise ValueError("No or more than one address information is given.")
class MarktlokationSchema(GeschaeftsobjektSchema):
"""
Schema for de-/serialization of Marktlokation.
Inherits from GeschaeftsobjektSchema.
"""
# class_name is needed to use the correct schema for deserialisation.
# see function `deserialize` in geschaeftsobjekt.py
class_name = Marktlokation
# required attributes
marktlokations_id = fields.Str(data_key="marktlokationsId")
sparte = EnumField(Sparte)
energierichtung = EnumField(Energierichtung)
bilanzierungsmethode = EnumField(Bilanzierungsmethode)
netzebene = EnumField(Netzebene)
# optional attributes
verbrauchsart = EnumField(Verbrauchsart, load_default=None)
unterbrechbar = fields.Bool(load_default=None)
netzbetreibercodenr = fields.Str(load_default=None)
gebietstyp = EnumField(Gebiettyp, load_default=None)
netzgebietsnr = fields.Str(load_default=None)
bilanzierungsgebiet = fields.Str(load_default=None)
grundversorgercodenr = fields.Str(load_default=None)
gasqualitaet = EnumField(Gasqualitaet, load_default=None)
endkunde = fields.Nested(GeschaeftspartnerSchema, load_default=None)
zugehoerige_messlokation = fields.List(
fields.Nested(MesslokationszuordnungSchema), load_default=None, data_key="zugehoerigeMesslokation"
)
# only one of the following three optional attributes can be set
lokationsadresse = fields.Nested(AdresseSchema, load_default=None)
geoadresse = fields.Nested(GeokoordinatenSchema, load_default=None)
katasterinformation = fields.Nested(KatasteradresseSchema, load_default=None)
| 47.213415 | 180 | 0.767661 |
35a4b7c239146705ae98c9ce84c0a611d5d09b7d
| 1,020 |
py
|
Python
|
python/python_backup/wisp_old/archives/test3.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/wisp_old/archives/test3.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/wisp_old/archives/test3.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import tkinter as tk
def populate(frame):
'''Put in some fake data'''
for row in range(100):
tk.Label(frame, text="%s" % row, width=3, borderwidth="1",
relief="solid").grid(row=row, column=0)
t="this is the second column for row %s" %row
tk.Label(frame, text=t).grid(row=row, column=1)
tk.Entry(frame).grid(row=row, column=2)
def onFrameConfigure(canvas):
'''Reset the scroll region to encompass the inner frame'''
canvas.configure(scrollregion=canvas.bbox("all"))
root = tk.Tk()
canvas = tk.Canvas(root, borderwidth=0, background="#ffffff")
frame = tk.Frame(canvas, background="#ffffff")
vsb = tk.Scrollbar(root, orient="vertical", command=canvas.yview)
canvas.configure(yscrollcommand=vsb.set)
vsb.pack(side="right", fill="y")
canvas.pack(side="left", fill="both", expand=True)
canvas.create_window((4,4), window=frame, anchor="nw")
frame.bind("<Configure>", lambda event, canvas=canvas: onFrameConfigure(canvas))
populate(frame)
root.mainloop()
| 34 | 80 | 0.680392 |
ea28cd016775170d9f5df70e42ab1a9b9f1f1670
| 5,533 |
py
|
Python
|
research/audio/fcn-4/infer/utils/mxbase_get_auc.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/audio/fcn-4/infer/utils/mxbase_get_auc.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/audio/fcn-4/infer/utils/mxbase_get_auc.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# coding:utf-8
"""
Copyright 2021 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import numpy as np
import pandas as pd
def str2digit(s):
""" string to digit """
if s.isdigit():
return int(s)
return s
def simplify_tagging_info(info_path="../data/config/", label_file="annotations_final.csv"):
""" simplify_tagging_info """
print("-"*25, "now in function simplify_tagging_info", "-"*25)
T = []
with open(os.path.join(info_path, label_file), 'rb') as info:
data = info.readline()
while data:
T.append([str2digit(i[1:-1]) for i in data.strip().decode('utf-8').split("\t")])
data = info.readline()
annotation = pd.DataFrame(T[1:], columns=T[0])
count = []
for i in annotation.columns[1:-2]:
count.append([annotation[i].sum() / len(annotation), i])
count = sorted(count)
full_label = []
for i in count[-50:]:
full_label.append(i[1])
simplied_tag = []
for i in T[1:]:
index = [k for k, x in enumerate(i) if x == 1]
label = [T[0][k] for k in index]
L = [str(0) for _ in range(50)]
L.append(i[-1])
for j in label:
if j in full_label:
ind = full_label.index(j)
L[ind] = '1'
simplied_tag.append(L)
txt_save_path = os.path.join(info_path, "music_tagging_tmp.txt")
np.savetxt(txt_save_path, np.array(simplied_tag), fmt='%s', delimiter=',')
csv_save_path = os.path.join(info_path, "music_tagging_tmp.csv")
np.savetxt(csv_save_path, np.array(simplied_tag), fmt='%s', delimiter=',')
print("successfully save tagging info in:\n", info_path)
return simplied_tag
def get_labels(info_list, infer_result_path):
""" get_labels """
print("-"*25, "now in function get_labels", "-"*25)
label_list = []
pred_list = []
print("info list length:\n", len(info_list))
for label_info in info_list:
[_, file_name] = os.path.split(label_info[-1])
file_name = file_name[:-4] + ".txt"
rst_file = os.path.join(infer_result_path, file_name)
if os.path.exists(rst_file):
true_label = np.array([str2digit(i) for i in label_info[:-1]])
rst_data = np.loadtxt(rst_file, delimiter=',')
label_list.append(true_label)
pred_list.append(rst_data)
return label_list, pred_list
def compute_auc(labels_list, preds_list):
"""
The AUC calculation function
Input:
labels_list: list of true label
preds_list: list of predicted label
Outputs
Float, means of AUC
"""
print("-"*25, "now in function compute_auc", "-"*25)
auc = []
if labels_list.shape[0] <= 0:
return "label list is None!"
print("shape of labels_list", labels_list.shape)
print("shape of preds_list", preds_list.shape)
n_bins = labels_list.shape[0] // 2
if labels_list.ndim == 1:
labels_list = labels_list.reshape(-1, 1)
preds_list = preds_list.reshape(-1, 1)
for i in range(labels_list.shape[1]):
labels = labels_list[:, i]
preds = preds_list[:, i]
postive_len = labels.sum()
negative_len = labels.shape[0] - postive_len
total_case = postive_len * negative_len
positive_histogram = np.zeros((n_bins))
negative_histogram = np.zeros((n_bins))
bin_width = 1.0 / n_bins
for j, _ in enumerate(labels):
nth_bin = int(preds[j] // bin_width)
if nth_bin == n_bins:
nth_bin = nth_bin - 1
if labels[j]:
positive_histogram[nth_bin] = positive_histogram[nth_bin] + 1
else:
negative_histogram[nth_bin] = negative_histogram[nth_bin] + 1
accumulated_negative = 0
satisfied_pair = 0
for k in range(n_bins):
satisfied_pair += (
positive_histogram[k] * accumulated_negative +
positive_histogram[k] * negative_histogram[k] * 0.5)
accumulated_negative += negative_histogram[k]
auc.append(satisfied_pair / total_case)
return np.mean(auc)
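# Hedged cross-check (added, not part of the original script): a direct pairwise AUC for a
# single label column.  It is O(P*N), so only suitable for small arrays, but handy to verify
# the histogram-based approximation in compute_auc above.
def pairwise_auc_single(labels, preds):
    pos = [p for l, p in zip(labels, preds) if l]
    neg = [p for l, p in zip(labels, preds) if not l]
    if not pos or not neg:
        return float('nan')
    wins = sum(1.0 if pp > pn else 0.5 if pp == pn else 0.0 for pp in pos for pn in neg)
    return wins / (len(pos) * len(neg))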
if __name__ == "__main__":
    if len(sys.argv) < 4:
print("Error-three arguments are required, your command should be like this:")
print(" python mxbase_get_auc.py info_file_path info_filename infer_results_path")
print("For example:")
print(" python mxbase_get_auc.py ../data/config/ annotations_final.csv ../mxbase/results/infer_results")
else:
base_info_path = sys.argv[1]
info_file_name = sys.argv[2]
base_result_path = sys.argv[3]
simp_info_tags = simplify_tagging_info(base_info_path, info_file_name)
_label_list, _pred_list = get_labels(simp_info_tags, base_result_path)
auc_val = compute_auc(np.array(_label_list), np.array(_pred_list))
print("-" * 27 + " Validation Performance " + "-" * 27)
print("AUC: {:.5f}\n".format(auc_val))
| 37.134228 | 114 | 0.628411 |
575d446b1875d8a5d0653247ca05134efd4ea1e2
| 2,409 |
py
|
Python
|
users/serializers.py
|
rocky-roll-call/rrc-backend
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
[
"MIT"
] | null | null | null |
users/serializers.py
|
rocky-roll-call/rrc-backend
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
[
"MIT"
] | null | null | null |
users/serializers.py
|
rocky-roll-call/rrc-backend
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
[
"MIT"
] | null | null | null |
"""
Serializers to convert API data to and from the database
"""
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from rest_framework.serializers import ModelSerializer
from .models import Profile, UserPhoto
class UserSerializer(ModelSerializer):
"""
A serializer for the default auth.User model
"""
class Meta:
model = User
fields = (
"id",
"username",
"email",
"password",
"last_login",
"date_joined",
"profile",
)
read_only_fields = ("id", "date_joined", "profile")
extra_kwargs = {"password": {"write_only": True}}
def create(self, validated_data):
user = User.objects.create(
email=validated_data["email"],
username=validated_data["username"],
password=make_password(validated_data["password"]),
)
user.save()
return user
class ProfileSerializer(ModelSerializer):
"""
A serializer for the users.Profile model
"""
class Meta:
model = Profile
fields = (
"id",
"user",
"name",
"alt",
"display_name",
"age",
"image",
"bio",
"location",
"external_url",
"facebook_url",
"twitter_user",
"instagram_user",
"show_email",
"searchable",
"email_confirmed",
"birth_date",
"photos",
)
read_only_fields = ("id", "user", "display_name", "age", "photos")
class PublicProfileSerializer(ModelSerializer):
"""
A serializer for a user's public profile
"""
class Meta:
model = Profile
fields = (
"id",
"display_name",
"age",
"image",
"bio",
"location",
"external_url",
"facebook_url",
"twitter_user",
"instagram_user",
"photos",
)
class UserPhotoSerializer(ModelSerializer):
"""
A serializer for the users.UserPhoto model
"""
class Meta:
model = UserPhoto
fields = ("id", "profile", "image", "description", "created")
read_only_fields = ("id", "profile", "image", "created")
| 23.851485 | 74 | 0.515982 |
57c8c5e2bf31423c6e4182faf3ddad0eb33fdc06
| 537 |
py
|
Python
|
06.BinarySearch/min/B2805-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1 |
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
06.BinarySearch/min/B2805-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2 |
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
06.BinarySearch/min/B2805-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
import sys
num , need = map(int, input().split())
arr = list(map(int, input().split()))
#sys.stdin.readline().strip()
high = max(arr)
low = 1
def cutTree(mid):
sum = 0
for i in arr:
if(mid < i):
sum += i - mid
return sum
check = 0
while low <= high:
#print("high, low : " , high ,",",low)
mid = (high + low)//2
#print("mid : " , mid)
sum = cutTree(mid)
#print("sum : ",sum)
if sum >= need:
low = mid + 1
elif sum < need:
high = mid - 1
print(high)
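# Hedged note (added, not part of the submission): this is "binary search on the answer" --
# the total yield is non-increasing in the cutter height, so the largest feasible height sits
# at the low/high boundary and is printed as `high`.  Self-contained check with an assumed
# forest [20, 15, 10, 17] and need = 7: yield_at(15) == 7 and yield_at(16) == 5, so the
# answer would be 15.
def yield_at(height, trees=(20, 15, 10, 17)):
    return sum(t - height for t in trees if t > height)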
| 17.322581 | 42 | 0.493482 |
17acd44b2823516e5d98e96db26b6112f10205bc
| 965 |
py
|
Python
|
plugins/tff_backend/migrations/_007_referral_in_user_data.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | null | null | null |
plugins/tff_backend/migrations/_007_referral_in_user_data.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 178 |
2017-08-02T12:58:06.000Z
|
2017-12-20T15:01:12.000Z
|
plugins/tff_backend/migrations/_007_referral_in_user_data.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 2 |
2018-01-10T10:43:12.000Z
|
2018-03-18T10:42:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from framework.bizz.job import run_job
from plugins.tff_backend.bizz.user import store_referral_in_user_data
from plugins.tff_backend.models.user import TffProfile
def migrate(dry_run=False):
run_job(_profiles_with_referrer, [], store_referral_in_user_data, [])
def _profiles_with_referrer():
return TffProfile.query()
| 33.275862 | 74 | 0.767876 |
102ee09bcc7103b2bffb1124afb43dd99b75ca8a
| 5,224 |
py
|
Python
|
MyNaiveBayes.py
|
hakimkt/SAIVS
|
c310bd7c9426f0d21efeea8866cf6b881b7e8530
|
[
"Apache-2.0"
] | 40 |
2018-10-29T02:29:13.000Z
|
2021-11-23T13:14:50.000Z
|
MyNaiveBayes.py
|
5l1v3r1/SAIVS
|
aa62451665b6398ba329d68592bf4313be60a886
|
[
"Apache-2.0"
] | 1 |
2021-02-23T12:27:28.000Z
|
2021-02-23T12:27:28.000Z
|
MyNaiveBayes.py
|
5l1v3r1/SAIVS
|
aa62451665b6398ba329d68592bf4313be60a886
|
[
"Apache-2.0"
] | 29 |
2018-10-29T02:29:17.000Z
|
2022-03-17T06:31:35.000Z
|
#!/usr/bin/python
#coding:utf-8
import os
import sys
import math
import pickle
# Runs the various classification tasks using Naive Bayes
class Classify():
def __init__(self):
# 訓練済みデータを格納するpklファイルパスを定義
self.bin_nb_okng_body_path = os.path.join('.\\data', 'nb_okng_classify_body.pkl')
self.bin_nb_page_type_body_path = os.path.join('.\\data', 'nb_page_type_classify_body.pkl')
# ストップワード辞書のファイルパスを定義
self.txt_stop_words_list_path = os.path.join('.\\data', 'stop_words.txt')
# レスポンスデータから遷移の成否を分類
def classify_flow_okng(self, str_response):
# 訓練済みデータ(pkl)の読み込み
if os.path.exists(self.bin_nb_okng_body_path):
with open(self.bin_nb_okng_body_path, 'rb') as file_read:
obj_naive_bayes = pickle.load(file_read)
# 訓練済みのデータ(pkl)がない場合は処理を修了
else:
print "PKL File NOT FOUND."
return ''
# 分類対象のレスポンスデータを指定し分類を実行
str_category, int_score = obj_naive_bayes.classify(str_response)
return str_category, int_score
# レスポンスデータからページの種類を分類
def classify_page_type(self, lst_page_type):
# 訓練済みデータ(pkl)の読み込み
obj_naive_bayes = None
if os.path.exists(self.bin_nb_page_type_body_path):
with open(self.bin_nb_page_type_body_path, 'rb') as file_read:
obj_naive_bayes = pickle.load(file_read)
# 訓練済みのデータ(pkl)がない場合は処理を修了
else:
print "not found pkl(nb_page_type_classify_body.pkl)."
return ''
# 分類対象のtitleタグの値を指定し分類を実行
str_category, int_score = obj_naive_bayes.classify(lst_page_type)
return str_category
# ストップワードを削除
def remove_stop_words(self, lst_orig_text):
# ストップワード辞書の読み込み
if os.path.exists(self.txt_stop_words_list_path):
with open(self.txt_stop_words_list_path, 'r') as file_read:
str_read_text = file_read.read()
lst_stop_words = str_read_text.split('\n')
file_read.close()
lst_edited_text = []
int_idx = 0
while int_idx < len(lst_orig_text):
int_idx2 = 0
bol_match_flag = False
while int_idx2 < len(lst_stop_words):
if lst_orig_text[int_idx] == lst_stop_words[int_idx2]:
bol_match_flag = True
int_idx2 += 1
# オリジナルwordがストップワードに含まれていない場合
if bol_match_flag is False:
lst_edited_text.append(lst_orig_text[int_idx])
int_idx += 1
return lst_edited_text
# ストップワード辞書がない場合は処理を修了
else:
print "not found stop_words.txt."
return lst_orig_text
class NaiveBayes:
def __init__(self):
self.vocabularies = set()
self.word_count = {}
self.category_count = {}
# カテゴリ単位でカウント(Bag-of-Wordsの作成)
def word_count_up(self, word, category):
self.word_count.setdefault(category, {})
self.word_count[category].setdefault(word, 0)
self.word_count[category][word] += 1
self.vocabularies.add(word)
# カテゴリ数のカウント
def category_count_up(self, category):
self.category_count.setdefault(category, 0)
self.category_count[category] += 1
# 画面名とカテゴリを基に学習
def train(self, doc, category):
#カテゴリ単位でカウントする
self.word_count_up(doc, category)
#カテゴリ数をカウントする
self.category_count_up(category)
    # Prior probability P(category) used in Bayes' theorem
def prior_prob(self, category):
num_of_categories = sum(self.category_count.values())
num_of_docs_of_the_category = self.category_count[category]
return float(num_of_docs_of_the_category) / float(num_of_categories)
def num_of_appearance(self, word, category):
if word in self.word_count[category]:
return self.word_count[category][word]
return 0
    # Word likelihood P(word | category) used in Bayes' theorem
    def word_prob(self, word, category):
        # Laplace (add-one) smoothing so unseen words never get zero probability
numerator = self.num_of_appearance(word, category) + 1
denominator = sum(self.word_count[category].values()) + len(self.vocabularies)
prob = float(numerator) / float(denominator)
return prob
    # Log-probability score of the target text under each category
def score(self, tpl_classify_text, category):
score = math.log(self.prior_prob(category))
for word in tpl_classify_text:
score += math.log(self.word_prob(word, category))
return score
# 分類の実行
def classify(self, lst_classify_text):
best_guessed_category = None
max_prob_before = -sys.maxsize
# カテゴリ単位で類似度のスコアを算出
for category in self.category_count.keys():
# 予測したい文章
prob = self.score(tuple(lst_classify_text), category)
# 予測したい文章を、スコアの最も大きいカテゴリに分類する
if prob > max_prob_before:
max_prob_before = prob
best_guessed_category = category
# 分類したカテゴリとスコアを返却
return best_guessed_category, max_prob_before
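# Hedged usage sketch (added, not part of the original tool): the classifier is trained one
# (word, category) pair at a time and scored with log prior + log likelihoods under Laplace
# smoothing.  The words and category names below are purely illustrative.
def _naive_bayes_usage_sketch():
    nb = NaiveBayes()
    for w in ['login', 'password', 'signin']:
        nb.train(w, 'login_page')
    for w in ['cart', 'checkout', 'price']:
        nb.train(w, 'shop_page')
    # returns ('login_page', <log score>) because those words were seen in that category
    return nb.classify(['password', 'login'])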
| 33.487179 | 100 | 0.605475 |
1088f0cf4fe8f58837847c8d44ed70a295f79b15
| 2,480 |
py
|
Python
|
src/deal_files.py
|
Times125/Emotion-Analyse
|
b5d9f23fdf6c75f57f5cf20d58834a095b0c7e1e
|
[
"Apache-2.0"
] | 11 |
2018-01-16T06:39:00.000Z
|
2021-11-28T11:46:41.000Z
|
src/deal_files.py
|
Times125/Emotion-Analyse
|
b5d9f23fdf6c75f57f5cf20d58834a095b0c7e1e
|
[
"Apache-2.0"
] | null | null | null |
src/deal_files.py
|
Times125/Emotion-Analyse
|
b5d9f23fdf6c75f57f5cf20d58834a095b0c7e1e
|
[
"Apache-2.0"
] | 2 |
2019-08-16T14:53:37.000Z
|
2019-08-17T02:01:22.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author:lch02
@Time: 2017/12/25 14:46
@Description: Process the text in the xlsx workbooks and export the data (1.6 million rows)
"""
import os
import re
import pickle
from nltk import regexp_tokenize
from nltk.corpus import stopwords
from config import test_path
from openpyxl import load_workbook
from multiprocessing import Pool
__author__ = 'lch02'
"""
Export the data from the Excel (xlsx) files
"""
def export_data():
pool = Pool()
files = ['Sentiment0.xlsx', 'Sentiment4.xlsx']
for i in range(2):
pool.apply_async(deal_doc, args=(i, files[i]))
pool.close()
pool.join()
print 'import'
def deal_doc(cat, fn):
file_name = os.path.join(test_path, fn)
wb = load_workbook(file_name, read_only=True)
ws = wb.active
neg = []
pos = []
if cat == 0:
for row in ws.iter_rows('A:B'):
label = row[0].value
content = row[1].value
if content is not None:
content = text_parse(content)
if len(content) == 0:
continue
elif label == 0 and len(content) != 0:
neg.append(content)
        neg_file = os.path.join(test_path, 'neg_review.pkl')  # negative corpus
with open(neg_file, 'wb') as f:
pickle.dump(neg, f)
else:
for row in ws.iter_rows('A:B'):
label = row[0].value
content = row[1].value
if content is not None:
content = text_parse(content)
if len(content) == 0:
continue
elif label == 4 and len(content) != 0:
pos.append(content)
        pos_file = os.path.join(test_path, 'pos_review.pkl')  # positive corpus
with open(pos_file, 'wb') as f:
pickle.dump(pos, f)
"""
Text processing: tokenization, stop-word removal, etc.
"""
def text_parse(input_text, language='en'):
sentence = input_text.strip().lower()
sentence = re.sub(r'@\s*[\w]+ | ?#[\w]+ | ?&[\w]+; | ?[^\x00-\xFF]+', '', sentence)
special_tag = set(
['.', ',', '#', '!', '(', ')', '*', '`', ':', '"', '‘', '’', '“', '”', '@', ':', '^', '/', ']', '[', ';', '=', '_'])
pattern = r""" (?x)(?:[a-z]\.)+
| \d+(?:\.\d+)?%?\w+
| \w+(?:[-']\w+)*"""
word_list = regexp_tokenize(sentence, pattern)
if language == 'en':
filter_word = [w for w in word_list if
                       w not in stopwords.words('english') and w not in special_tag]  # drop stopwords and special punctuation
return filter_word
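# Hedged usage sketch (added): text_parse lower-cases the tweet, strips @mentions, hashtags,
# HTML entities and non-ASCII runs with the regex above, tokenizes, then drops NLTK English
# stopwords and special punctuation.  Requires the NLTK stopwords corpus to be installed.
def _text_parse_example():
    # expected to return roughly ['really', 'love', 'movie', 'happy'];
    # the exact output depends on the stopword list shipped with NLTK
    return text_parse('@bob I really LOVE this movie!!! #happy')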
| 29.879518 | 124 | 0.515726 |
10abffd57deb1a4efdc92c688f38e868db500bfb
| 1,335 |
py
|
Python
|
src/lcdoc/mkdocs/lp/plugs/drawio/__init__.py
|
axiros/docutools
|
f99874a64afba8f5bc740049d843151ccd9ceaf7
|
[
"BSD-2-Clause"
] | 24 |
2021-10-04T22:11:59.000Z
|
2022-02-02T21:51:43.000Z
|
src/lcdoc/mkdocs/lp/plugs/drawio/__init__.py
|
axiros/docutools
|
f99874a64afba8f5bc740049d843151ccd9ceaf7
|
[
"BSD-2-Clause"
] | 2 |
2021-10-04T21:51:30.000Z
|
2021-10-05T14:15:31.000Z
|
src/lcdoc/mkdocs/lp/plugs/drawio/__init__.py
|
axiros/docutools
|
f99874a64afba8f5bc740049d843151ccd9ceaf7
|
[
"BSD-2-Clause"
] | null | null | null |
"""
### `drawio`
Automatically includes an svg, based on .drawio file changes.
"""
import json
import subprocess as sp
from lcdoc import lp
from lcdoc.tools import file_hash, app, dirname, exists, os, read_file, write_file, os
multi_line_to_list = True
req_kw = ['fn', 'src']
def run(cmd, kw):
"""
"""
D = lp.page_dir(kw)
src = kw['abs_src']
if not exists(src):
return 'Not found: %s' % src
fn = kw['fn']
if not fn or fn[0] == '/':
return lp.err('Require relative fn', have=fn)
ffn = D + fn
os.makedirs(dirname(ffn), exist_ok=True)
fn_src_info = ffn + '.src'
if exists(fn_src_info):
oldmtime, oldhsh = json.loads(read_file(fn_src_info))
else:
oldmtime, oldhsh = 0, 0
mtime = os.stat(src).st_mtime
have, hsh = False, None
if mtime == oldmtime:
have = True
else:
hsh = file_hash(src)
if hsh == oldhsh:
have = True
if not have:
create_new_svg(src, ffn, kw)
write_file(fn_src_info, json.dumps([mtime, hsh or file_hash(src)]))
    # NOTE: the original image markup appears to have been lost in extraction; an HTML <img> include is assumed here
    return {'res': '<img src="%s" />' % fn, 'formatted': True}
def create_new_svg(fn_src, fn_svg, kw):
app.info('Exporting drawio', src=fn_src, svg=fn_svg)
d = os.environ.get('drawio', 'drawio')
sp.call([d, '--output', fn_svg, '--export', fn_src])
| 25.188679 | 86 | 0.595506 |
875dde80a08590b75ccd856d48fdea2d4ae6724f
| 2,784 |
py
|
Python
|
02_context.py
|
melandresen/Ich-Daten
|
87362aa8060865d2d33443054297214bd80526fe
|
[
"Apache-2.0"
] | null | null | null |
02_context.py
|
melandresen/Ich-Daten
|
87362aa8060865d2d33443054297214bd80526fe
|
[
"Apache-2.0"
] | null | null | null |
02_context.py
|
melandresen/Ich-Daten
|
87362aa8060865d2d33443054297214bd80526fe
|
[
"Apache-2.0"
] | null | null | null |
import os
import pandas as pd
import re
def update_indices(span_start, span_end, string):
match = re.search("[Ii]ch", string)
start_corr = span_start + match.start(0)
end_corr = span_end - len(string) + match.end(0)
return start_corr, end_corr
def extended_search(text, start_index, end_index, tolerance_window):
while tolerance_window < 30:
span_start = start_index - tolerance_window
span_end = end_index + tolerance_window
span = text[span_start : span_end]
if re.search("Ich| ich|^ich", span):
status = "rough_match_{}".format(tolerance_window)
start_index_corr, end_index_corr = update_indices(span_start, span_end, span)
target = text[start_index_corr:end_index_corr]
context_before = text[start_index_corr - context_size : start_index_corr]
match = target
context_after = text[end_index_corr : end_index_corr + context_size]
return status, context_before, match, context_after
tolerance_window += 5
else:
return "no_match", "None", "None", "None"
def get_context(directory, data_table, context_size):
"""nach den dazugehörigen Textstellen im Korpus suchen und zur Tabelle hinzufügen"""
context_data = pd.DataFrame(columns=["status", "context_before", "match", "context_after"])
for start_index, end_index, file_name, index in zip(
data_table["StartChar"], data_table["EndChar"], data_table["Text_korr"], data_table.index
):
if os.path.isfile(directory + file_name):
with open(directory + file_name, "r") as in_file:
text = in_file.read()
text = re.sub("[\t\n]", " ", text)
target = text[start_index:end_index]
if re.fullmatch("[Ii]ch", target):
context_data.loc[index] = [
"fullmatch",
text[start_index - context_size : start_index],
target,
text[end_index : end_index + context_size],
]
else:
status, context_before, match, context_after = extended_search(
text, start_index, end_index, 5
)
context_data.loc[index] = [status, context_before, match, context_after]
else: # PDFs werden zur Zeit übergangen
context_data.loc[index] = ["PDF", "PDF", "PDF", "PDF"]
result = pd.concat([data_table, context_data], axis=1)
return result
corpus_directory = "data/corpus/"
context_size = 150
data_table = pd.read_csv("results/01_mapping.txt", sep="\t", index_col=0)
data_table = get_context(corpus_directory, data_table, context_size)
data_table.to_csv("results/02_context.txt", sep="\t")
| 33.95122 | 97 | 0.62967 |
5e6cef070a741281952542c67242e44998f24d2d
| 726 |
py
|
Python
|
order/migrations/0010_auto_20201203_1426.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | 1 |
2021-06-18T03:03:42.000Z
|
2021-06-18T03:03:42.000Z
|
order/migrations/0010_auto_20201203_1426.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | null | null | null |
order/migrations/0010_auto_20201203_1426.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.16 on 2020-12-03 06:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0009_auto_20201203_1410'),
]
operations = [
migrations.AlterField(
model_name='order',
name='order_status',
field=models.SmallIntegerField(choices=[(3, '生产中'), (2, '待生产'), (4, '待发货'), (1, '备料中')], default=2, verbose_name='订单状态'),
),
migrations.AlterField(
model_name='order',
name='sn',
field=models.CharField(default=1, max_length=32, unique=True, verbose_name='订单编号'),
preserve_default=False,
),
]
| 29.04 | 134 | 0.559229 |
21b0b6ef09081080c08a08dca17dbd50bf6f03dd
| 72 |
py
|
Python
|
Dinsel/ex3/ex3.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 11 |
2017-04-21T11:39:55.000Z
|
2022-02-11T20:25:18.000Z
|
Dinsel/ex3/ex3.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 69 |
2017-04-26T09:30:38.000Z
|
2017-08-01T11:31:21.000Z
|
Dinsel/ex3/ex3.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 53 |
2017-04-20T16:16:11.000Z
|
2017-07-19T12:53:01.000Z
|
#!/usr/bin/env python
with open(__file__) as fname: print(fname.read())
| 24 | 49 | 0.722222 |
9cae12c7b07e7617324c5978fde17b09cc1eb0e4
| 2,178 |
py
|
Python
|
research/cv/sknet/src/util.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
research/cv/sknet/src/util.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/sknet/src/util.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network operations
"""
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
class GroupConv(nn.Cell):
"""
group convolution operation.
Args:
in_channels (int): Input channels of feature map.
out_channels (int): Output channels of feature map.
kernel_size (int): Size of convolution kernel.
stride (int): Stride size for the group convolution layer.
Returns:
tensor, output tensor.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, pad_mode="pad", pad=0, groups=1, has_bias=False):
super(GroupConv, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.groups = groups
self.convs = nn.CellList()
self.op_split = P.Split(axis=1, output_num=self.groups)
self.op_concat = P.Concat(axis=1)
self.cast = P.Cast()
for _ in range(groups):
self.convs.append(nn.Conv2d(in_channels//groups, out_channels//groups,
kernel_size=kernel_size, stride=stride, has_bias=has_bias,
padding=pad, pad_mode=pad_mode, group=1))
def construct(self, x):
features = self.op_split(x)
outputs = ()
for i in range(self.groups):
outputs = outputs + (self.convs[i](self.cast(features[i], mstype.float32)),)
out = self.op_concat(outputs)
return out
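

# A minimal usage sketch, assuming a configured MindSpore context. The shapes and
# hyper-parameters below are illustrative assumptions, not taken from the original
# SK-Net training code.
if __name__ == "__main__":
    import numpy as np
    from mindspore import Tensor

    conv = GroupConv(in_channels=64, out_channels=64, kernel_size=3, stride=1,
                     pad_mode="pad", pad=1, groups=4)
    out = conv(Tensor(np.ones((1, 64, 32, 32), np.float32)))
    # Each of the 4 groups convolves a 16-channel slice of the input; concatenating
    # the group outputs restores the full 64 channels: out.shape == (1, 64, 32, 32).
    print(out.shape)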
| 39.6 | 120 | 0.636823 |
b48f72baf888034c5ea9cf3c4ad814f4ef5eb7e7
| 6,705 |
py
|
Python
|
shinrl/solvers/base/solver.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | 34 |
2021-12-09T07:12:57.000Z
|
2022-03-11T08:17:20.000Z
|
shinrl/solvers/base/solver.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | null | null | null |
shinrl/solvers/base/solver.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | 4 |
2021-12-11T07:48:01.000Z
|
2022-03-01T23:50:33.000Z
|
"""
Author: Toshinori Kitamura
Affiliation: NAIST & OSX
"""
from __future__ import annotations
import inspect
import random
from abc import ABC, abstractmethod, abstractstaticmethod
from itertools import count
from typing import Dict, Iterator, List, Optional, Type
import gym
import jax
import numpy as np
import structlog
from chex import PRNGKey
from tqdm import tqdm
from shinrl import ShinEnv
from .config import SolverConfig
from .history import History
class BaseSolver(ABC, History):
"""
Base class to implement solvers. The results are treated by the inherited History class.
# MixIn:
    Our Solver interface adopts a "mixin" mechanism to allow for flexible behavior.
    The `make_mixins` method should return mixins that provide the necessary methods, such as the `evaluate` and `step` functions.
See [shinrl/solvers/vi/discrete/solver.py] for an example implementation.
"""
_id: Iterator[int] = count(0)
DefaultConfig = SolverConfig
# ########## YOU NEED TO IMPLEMENT HERE ##########
@abstractstaticmethod
def make_mixins(env: gym.Env, config: SolverConfig) -> List[Type[object]]:
"""Make a list of mixins from env and config"""
pass
@abstractmethod
def evaluate(self) -> Dict[str, float]:
"""Evaluate the solver and return the dict of results. Called every self.config.eval_interval steps."""
pass
@abstractmethod
def step(self) -> Dict[str, float]:
"""Execute the solver by one step and return the dict of results."""
pass
# ################################################
@staticmethod
def factory(
env: gym.Env,
config: SolverConfig,
mixins: List[Type[object]],
) -> BaseSolver:
"""Instantiate a solver with mixins and initialize it."""
class MixedSolver(*mixins):
pass
solver = MixedSolver()
solver.mixins = mixins
methods = inspect.getmembers(solver, predicate=inspect.ismethod)
solver.methods_str = [method[1].__qualname__ for method in methods]
solver.initialize(env, config)
return solver
def __init__(self) -> None:
self.env_id: int = -1
self.solver_id: str = f"{type(self).__name__}-{next(self._id)}"
self.logger = structlog.get_logger(solver_id=self.solver_id, env_id=None)
self.is_initialized: bool = False
self.env = None
self.key: PRNGKey = None
self.mixins: List[Type] = []
self.methods_str: List[str] = []
def initialize(
self,
env: gym.Env,
config: Optional[SolverConfig] = None,
) -> None:
"""Set the env and initialize the history.
Args:
            env (gym.Env): Environment to solve.
config (SolverConfig, optional): Configuration of an algorithm.
"""
self.init_history()
self.set_config(config)
self.set_env(env)
self.seed(self.config.seed)
self.is_initialized = True
if self.config.verbose:
self.logger.info(
"Solver is initialized.", mixins=self.mixins, methods=self.methods_str
)
def seed(self, seed: int = 0) -> None:
self.key = jax.random.PRNGKey(seed)
self.env.seed(seed)
random.seed(seed)
np.random.seed(seed)
@property
def is_shin_env(self) -> bool:
if isinstance(self.env, gym.Wrapper):
return isinstance(self.env.unwrapped, ShinEnv)
else:
return isinstance(self.env, ShinEnv)
def set_env(self, env: gym.Env, reset: bool = True) -> None:
"""Set the environment to self.env.
Args:
env (gym.Env): Environment to solve.
reset (bool): Reset the env if True
"""
if isinstance(env.action_space, gym.spaces.Box):
is_high_normalized = (env.action_space.high == 1.0).all()
is_low_normalized = (env.action_space.low == -1.0).all()
assert_msg = """
Algorithms in ShinRL assume that the env.actions_space is in range [-1, 1].
Please wrap the env by shinrl.NormalizeActionWrapper.
"""
assert is_high_normalized and is_low_normalized, assert_msg
self.env = env
# Check discount factor
if self.is_shin_env:
if self.config.discount != env.config.discount:
self.logger.warning(
f"env.config.discount != solver.config.discount ({env.config.discount} != {self.config.discount}). \
This may cause an unexpected behavior."
)
self.dS, self.dA, self.horizon = env.dS, env.dA, env.config.horizon
# Reset env if necessary
if reset:
if isinstance(self.env, gym.wrappers.Monitor):
# With Monitor, reset() cannot be called unless the episode is over.
if self.env.stats_recorder.steps is None:
self.env.obs = self.env.reset()
else:
done = False
while not done:
_, _, done, _ = self.env.step(self.env.action_space.sample())
self.env.obs = self.env.reset()
else:
self.env.obs = self.env.reset()
else:
assert hasattr(
env, "obs"
), 'env must have attribute "obs". Do env.obs = obs before calling "set_env".'
self.env_id += 1
self.logger = structlog.get_logger(solver_id=self.solver_id, env_id=self.env_id)
if self.config.verbose:
self.logger.info("set_env is called.")
def run(self) -> None:
"""
Run the solver with the step function.
Call self.evaluate() every [eval_interval] steps.
"""
assert self.is_initialized, '"self.initialize" is not called.'
num_steps = self.config.steps_per_epoch
for _ in tqdm(range(num_steps), desc=f"Epoch {self.n_epoch}"):
# Do evaluation
if self.n_step % self.config.eval_interval == 0:
eval_res = self.evaluate()
for key, val in eval_res.items():
self.add_scalar(key, val)
# Do one-step update
step_res = self.step()
for key, val in step_res.items():
self.add_scalar(key, val)
self.n_step += 1
self.n_epoch += 1
if self.config.verbose:
self.logger.info(
f"Epoch {self.n_epoch} has ended.",
epoch_summary=self.recent_summary(num_steps),
data=list(self.data.keys()),
)
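

# A minimal sketch of the mixin assembly described in the class docstring. The solver
# below is hypothetical (it is not part of ShinRL) and only illustrates how `make_mixins`
# and `factory` fit together; a real solver implements meaningful `step`/`evaluate` mixins.
#
#   class DummySolver(BaseSolver):
#       @staticmethod
#       def make_mixins(env, config):
#           return [DummySolver]
#
#       def evaluate(self):
#           return {"return": 0.0}
#
#       def step(self):
#           return {"loss": 0.0}
#
#   config = DummySolver.DefaultConfig()
#   mixins = DummySolver.make_mixins(env, config)   # env: a gym.Env instance
#   solver = DummySolver.factory(env, config, mixins)
#   solver.run()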
| 34.209184 | 120 | 0.587323 |
2590d3bc780b4146ff7f23d380a633901e96a1ff
| 142 |
py
|
Python
|
MiniProjects/Python-Challenges/Password_Gen.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
MiniProjects/Python-Challenges/Password_Gen.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
MiniProjects/Python-Challenges/Password_Gen.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
import random
char = 'qwertyuiopasdfghjklzxcvbnm!@#$%^&*()'
stren = 10
password = "".join(random.sample(char , stren))
print (password)
| 23.666667 | 48 | 0.669014 |
25b53a178f0eaa0c9af5577e4a3ff3f2ba9ddd5f
| 1,251 |
py
|
Python
|
Test.py
|
ShuboshaKuro/SimpleGameEngine
|
01da061fe931ec0ade898b82baa93c591eacbb43
|
[
"MIT"
] | null | null | null |
Test.py
|
ShuboshaKuro/SimpleGameEngine
|
01da061fe931ec0ade898b82baa93c591eacbb43
|
[
"MIT"
] | null | null | null |
Test.py
|
ShuboshaKuro/SimpleGameEngine
|
01da061fe931ec0ade898b82baa93c591eacbb43
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# has to change whenever noise_width and noise_height change in the PerlinNoise.hpp file
DIMENSION1 = 200
DIMENSION2 = 200
# works if the working directory is set
path = os.path.dirname(os.path.realpath(__file__))
FILENAME = os.path.join(path, "input0.txt")
if __name__ == '__main__':
string = open(FILENAME, '+r')
noise = np.fromstring(string.read(), sep=" ", dtype=float).reshape(DIMENSION2, DIMENSION1)
# Build a grid by the 2 dimensions
Xr = np.arange(DIMENSION1)
Yr = np.arange(DIMENSION2)
X, Y = np.meshgrid(Xr, Yr)
# Build a figure with 2 subplots, the first is 3D
fig = plt.figure()
fig.suptitle("3D and 2D heighmap")
colormap = 'coolwarm'
ax = fig.add_subplot(2, 1, 1, projection='3d')
surf = ax.plot_surface(X, Y, noise, rstride=1, cstride=1, cmap=colormap, linewidth=0, antialiased=False)
ax2 = fig.add_subplot(2, 1, 2)
im = ax2.imshow(noise, cmap=colormap, interpolation='nearest')
# swap the Y axis so it aligns with the 3D plot
ax2.invert_yaxis()
# add an explanatory colour bar
plt.colorbar(im, orientation='horizontal')
# Show the image
plt.show()
| 27.8 | 108 | 0.689848 |
d353a668d33712371061f7bfd82f3ba63ccb884e
| 1,877 |
py
|
Python
|
leetcode/serialize-and-deserialize-binary-tree/solution.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 11 |
2019-02-08T06:54:34.000Z
|
2021-08-07T18:57:39.000Z
|
leetcode/serialize-and-deserialize-binary-tree/solution.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 1 |
2019-05-21T08:14:10.000Z
|
2019-05-21T08:14:10.000Z
|
leetcode/serialize-and-deserialize-binary-tree/solution.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | null | null | null |
import struct
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root is None:
return struct.pack('i', 0)
l = self.serialize(root.left)
r = self.serialize(root.right)
return struct.pack('i', len(l)) + struct.pack('i', root.val) + l + r
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
len_l = struct.unpack('i', data[:4])[0]
if len_l == 0:
return None
data = data[4:]
# val
val = struct.unpack('i', data[:4])[0]
data = data[4:]
# left
l = self.deserialize(data[:len_l])
data = data[len_l:]
# right
r = self.deserialize(data)
# build the node
node = TreeNode(val)
node.left = l
node.right = r
return node
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
def node(x, l=None, r=None):
n = TreeNode(x)
n.left = l
n.right = r
return n
def trees_equal(a, b):
if a is None and b is None:
return True
if a is None or b is None:
return False
return (
a.val == b.val and
trees_equal(a.left, b.left) and
trees_equal(a.right, b.right)
)
def test():
root = node(
1,
node(2),
node(
3,
node(4),
node(5),
),
)
codec = Codec()
got = codec.deserialize(codec.serialize(root))
assert trees_equal(root, got)
if __name__ == '__main__':
test()
| 20.626374 | 76 | 0.514651 |
d36fbff74f07f79524ba41fa627d903538351ac5
| 475 |
py
|
Python
|
deprecated/RasaNLU/src/rasaTrain.py
|
th-koeln-intia/ip-sprachassistent-team2
|
b8f8a20011bc766b1937566ee5a8786ee32bb3c5
|
[
"MIT"
] | 1 |
2020-12-09T23:14:19.000Z
|
2020-12-09T23:14:19.000Z
|
deprecated/RasaNLU/src/rasaTrain.py
|
th-koeln-intia/ip-sprachassistent-team2
|
b8f8a20011bc766b1937566ee5a8786ee32bb3c5
|
[
"MIT"
] | 1 |
2020-09-30T08:58:14.000Z
|
2020-10-14T13:55:14.000Z
|
deprecated/RasaNLU/src/rasaTrain.py
|
th-koeln-intia/ip-sprachassistent-team2
|
b8f8a20011bc766b1937566ee5a8786ee32bb3c5
|
[
"MIT"
] | 1 |
2020-09-17T17:04:11.000Z
|
2020-09-17T17:04:11.000Z
|
from rasa_nlu.training_data import load_data
from rasa_nlu import config
from rasa_nlu.model import Trainer
def train(model_dir="./models", project="default", data_dir="./intents"):
training_data = load_data(data_dir)
trainer = Trainer(config.load("nlu_config.yml"))
trainer.train(training_data)
model_directory = trainer.persist(model_dir, fixed_model_name=project)
print(model_directory)
if __name__ == '__main__':
train(project="Info-Projekt")
| 29.6875 | 74 | 0.757895 |
1a3ece3b49e6e7901d088b748a11e4b43a2e9bce
| 138 |
py
|
Python
|
Sketche/title.py
|
kantel/p5
|
2ef14191c35fdb056b44624c6ff0ff764c88cc30
|
[
"MIT"
] | null | null | null |
Sketche/title.py
|
kantel/p5
|
2ef14191c35fdb056b44624c6ff0ff764c88cc30
|
[
"MIT"
] | null | null | null |
Sketche/title.py
|
kantel/p5
|
2ef14191c35fdb056b44624c6ff0ff764c88cc30
|
[
"MIT"
] | null | null | null |
from p5 import *
def setup():
title("🐍 Jörgs Python Sketch 🐍".encode("utf-8"))
def draw():
background(245, 245, 245)
run()
| 13.8 | 52 | 0.586957 |
46cb478c44288acd22123b079969ab88e333de41
| 146 |
py
|
Python
|
saku/auction/apps.py
|
Mehdi-MosTafavi/Saku-Backend
|
348a1a676ffc8ddd9077f8c94733c5f6dce98fbd
|
[
"MIT"
] | null | null | null |
saku/auction/apps.py
|
Mehdi-MosTafavi/Saku-Backend
|
348a1a676ffc8ddd9077f8c94733c5f6dce98fbd
|
[
"MIT"
] | null | null | null |
saku/auction/apps.py
|
Mehdi-MosTafavi/Saku-Backend
|
348a1a676ffc8ddd9077f8c94733c5f6dce98fbd
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AuctionConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'auction'
| 20.857143 | 56 | 0.760274 |
20166dff6b10b36f23bf6275623fcc30a520ef4e
| 244 |
py
|
Python
|
Python-programming-3/ascii.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
Python-programming-3/ascii.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | 1 |
2020-10-24T18:08:27.000Z
|
2020-10-24T18:10:52.000Z
|
Python-programming-3/ascii.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | 4 |
2020-10-24T14:01:29.000Z
|
2020-10-25T09:21:07.000Z
|
# Python program to print the ASCII value of a character.
# Assign to c whichever character you want the ASCII value of.
c = 'g'
# print the ASCII value of the character assigned to c
print("The ASCII value of '" + c + "' is", ord(c))
| 24.4 | 51 | 0.680328 |
6444e8f4d74e1d3a00ed257386acab8f6f38462e
| 897 |
py
|
Python
|
HackerEarth_problems/13 Reasons Why/solution1.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 2 |
2020-10-17T12:50:42.000Z
|
2020-10-17T12:50:49.000Z
|
HackerEarth_problems/13 Reasons Why/solution1.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | null | null | null |
HackerEarth_problems/13 Reasons Why/solution1.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 1 |
2020-12-29T16:46:18.000Z
|
2020-12-29T16:46:18.000Z
|
'''
Problem: 13 Reasons Why
Given 3 integers A, B, C. Do the following steps-
Swap A and B.
Multiply A by C.
Add C to B.
Output new values of A and B.
'''
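# Worked example: for input "1 2 3" (A=1, B=2, C=3) the swap gives A=2, B=1,
# then A = A*C = 6 and B = C+B = 4, so the expected output is "6 4".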
# When run, you will see a blank line, since that is what the submission expects.
# If you are debugging and want a prompt instead, change the input line to
# input = input("Numbers: ")
# Collects the input
input = input()
# Split the input into a list on the spaces between the numbers.
inputList = input.split(" ")
# Since A and B are being swapped, A is given inputList[1], which was B's input. Vice Versa for B.
# C is just given the third input, which was C.
A = int(inputList[1])
B = int(inputList[0])
C = int(inputList[2])
# Multiplies A * C.
A = A * C
# Adds C + B.
B = C + B
# Converts them to strings since the submission needs to be one line.
A = str(A)
B = str(B)
# Prints the answer.
print(A + " " + B)
| 24.916667 | 98 | 0.662207 |
3764765a408de9f3ab9a2e62174d54e08bd084e9
| 527 |
py
|
Python
|
server/apps/movie/adminx.py
|
Mayandev/django_morec
|
8d115f76ad69d7aa78b07dc06aa7047979ad134b
|
[
"MIT"
] | 129 |
2019-04-20T08:23:25.000Z
|
2022-03-14T10:02:23.000Z
|
server/apps/movie/adminx.py
|
heartplus/django_morec
|
8d115f76ad69d7aa78b07dc06aa7047979ad134b
|
[
"MIT"
] | 9 |
2019-05-19T15:06:17.000Z
|
2021-12-14T06:47:14.000Z
|
server/apps/movie/adminx.py
|
heartplus/django_morec
|
8d115f76ad69d7aa78b07dc06aa7047979ad134b
|
[
"MIT"
] | 34 |
2019-05-06T06:37:17.000Z
|
2021-12-09T02:27:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-19 20:45
# @Author : Mayandev
# @Site : https://github.com/Mayandev/
# @File : adminx.py
# @Software: PyCharm
import xadmin
from .models import Movie, Genre
class MovieAdmin(object):
list_display = ['id', 'closest_movie', 'doubanId']
model_icon = 'fa fa-ticket'
class GenreAdmin(object):
list_display = ['id', 'genre']
model_icon = 'fa fa-ticket'
xadmin.site.register(Movie, MovieAdmin)
xadmin.site.register(Genre, GenreAdmin)
| 18.821429 | 54 | 0.660342 |
03bda1d222a249c9fd6e62c3dc9d059b6da67f6a
| 446 |
py
|
Python
|
exercises/pt/solution_01_03_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/solution_01_03_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/solution_01_03_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
# Import the English language class and create an nlp object
from spacy.lang.en import English
nlp = English()
# Process the text
doc = nlp("I like tree kangaroos and narwhals.")
# A slice of the Doc for "tree kangaroos"
tree_kangaroos = doc[2:4]
print(tree_kangaroos.text)
# A slice of the Doc for "tree kangaroos and narwhals" (not including the ".")
tree_kangaroos_and_narwhals = doc[2:6]
print(tree_kangaroos_and_narwhals.text)
| 27.875 | 76 | 0.762332 |
20810687c51cbb1a65e77504b29548d915b2e407
| 171 |
py
|
Python
|
02_Python/functions.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
02_Python/functions.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
02_Python/functions.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
def main():
for i in range(10):
print(f"The square of {i} is {square(i)}")
return
def square(n):
return n**2
if __name__ == '__main__':
main()
| 13.153846 | 50 | 0.54386 |
643e431e28c0c195f6258a3a158f9de4a2b572c0
| 736 |
py
|
Python
|
tests/test_testing.py
|
tkamenoko/spangle
|
068479660a03239aa69c935d7ca0418c491d92da
|
[
"MIT"
] | 2 |
2019-11-17T06:38:56.000Z
|
2019-12-01T15:32:03.000Z
|
tests/test_testing.py
|
tkamenoko/spangle
|
068479660a03239aa69c935d7ca0418c491d92da
|
[
"MIT"
] | null | null | null |
tests/test_testing.py
|
tkamenoko/spangle
|
068479660a03239aa69c935d7ca0418c491d92da
|
[
"MIT"
] | null | null | null |
import asyncio
from asyncio import sleep
from spangle.api import Api
from spangle.handler_protocols import RequestHandlerProtocol
from ward import fixture, raises, test, using
@fixture
def api():
return Api()
@fixture
@using(api=api)
def timeout(api: Api):
@api.route("/timeout")
class Timeout:
async def on_get(self, req, resp):
await sleep(1)
return resp
return Timeout
@test("Client cancells a request after specified seconds") # type: ignore
@using(api=api, timeout=timeout)
async def _(api: Api, timeout: type[RequestHandlerProtocol]):
async with api.client() as client:
with raises(asyncio.TimeoutError):
await client.get("/timeout", timeout=0.001)
| 23 | 74 | 0.691576 |
a68edaa4ffdc8b0f4c9c5edcf020b638b5b5c299
| 1,085 |
py
|
Python
|
Problems/BinarySearch/Hard/SplitArrayLargestSum/split_array_largest_sum.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/BinarySearch/Hard/SplitArrayLargestSum/split_array_largest_sum.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/BinarySearch/Hard/SplitArrayLargestSum/split_array_largest_sum.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
from typing import List
# DP - Top-Down
def splitArray(self, nums: List[int], m: int) -> int:
n = len(nums)
ps = [0]
for num in nums:
ps.append(ps[-1] + num)
@lru_cache(None)
def dp(i: int, c: int) -> int:
if i == n:
return 0
if c == 1:
return ps[-1] - ps[i]
ans = float('inf')
for j in range(i, n):
l, r = ps[j + 1] - ps[i], dp(j + 1, c - 1)
ans = min(ans, max(l, r))
if l > r:
break
return ans
return dp(0, m)
# Binary search over the answer (the minimal feasible largest subarray sum)
def splitArray(self, nums: List[int], m: int) -> int:
def helper(c: int, mid: int):
cur_sum, cuts = 0, 0
for x in nums:
cur_sum += x
if cur_sum > mid:
cur_sum = x
cuts += 1
return cuts + 1 <= c
    l, r, ans = max(nums), sum(nums), -1
while l <= r:
mid = (l + r) >> 1
if helper(m, mid):
ans, r = mid, mid - 1
else:
l = mid + 1
return ans
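

# Worked example (a classic LeetCode case): nums = [7, 2, 5, 10, 8], m = 2.
# The best split is [7, 2, 5] | [10, 8], whose largest subarray sum is 18,
# so both implementations above return 18 for this input.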
| 21.7 | 54 | 0.428571 |
5b300286bbd0afaf046bc7ddfda9a7f00bec8be6
| 1,799 |
py
|
Python
|
testcode/tok.py
|
Cl3V0r/MLSeminar
|
d05f171a9b7d773ea123e1919e07312a7f0c9fe8
|
[
"MIT"
] | null | null | null |
testcode/tok.py
|
Cl3V0r/MLSeminar
|
d05f171a9b7d773ea123e1919e07312a7f0c9fe8
|
[
"MIT"
] | null | null | null |
testcode/tok.py
|
Cl3V0r/MLSeminar
|
d05f171a9b7d773ea123e1919e07312a7f0c9fe8
|
[
"MIT"
] | null | null | null |
#!usr/bin/env python
#coding:utf8
from nltk.tokenize import TweetTokenizer
from nltk.stem.cistem import Cistem
from nltk.corpus import stopwords
import nltk
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from pathlib import Path
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
nltk.download('stopwords')
tknzr= TweetTokenizer()
stemmer = Cistem(True)
file_in = open("../data/postillon.txt", "r")
file_out = open("../build/preprocessed/postillon_stem.txt", "w")
german_stopwords = set(stopwords.words('german'))
for line in file_in:
    tokenized = tknzr.tokenize(line)
    # drop German stopwords and stem the remaining tokens
    tokenized = [stemmer.stem(word) for word in tokenized if word not in german_stopwords]
    token_text = " ".join(tokenized)
    file_out.write(token_text + '\n')
file_in.close()
file_out.close()
data = open("../build/preprocessed/postillon_stem.txt", "r")
vectorizer = CountVectorizer(max_features=1000, ngram_range=(1, 3))
X = vectorizer.fit_transform(data).toarray()
#print(vectorizer.get_feature_names())
#print(X)
contents = Path("../build/preprocessed/postillon_stem.txt").read_text()
wordcloud = WordCloud(background_color='white',
width=1920,
height=1080
).generate(contents)
plt.imshow(wordcloud)
plt.axis('off')
plt.savefig("../build/plots/postillonWordcloud.pdf")
plt.clf()
X_embedded = TSNE(n_components=2).fit_transform(X)
kmeans = KMeans(n_clusters=12)
kmeans.fit(X_embedded)
#print(kmeans.labels_)
plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=kmeans.labels_, cmap='rainbow')
plt.scatter(kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1], color='black')
plt.savefig("../build/plots/tSNE_kNN_postillon.pdf")
| 32.125 | 81 | 0.716509 |
5b91aebc7e18c8a8d4f2c5b8c278295d0821b5e8
| 4,218 |
py
|
Python
|
SichereEntnahme.py
|
ThoEngel/rentenplanung
|
879c9a678ba1ff951a1f92b0c42673a7943a18e6
|
[
"MIT"
] | 3 |
2022-01-01T18:24:46.000Z
|
2022-01-08T15:28:46.000Z
|
SichereEntnahme.py
|
ThoEngel/Finanzen-Simuliert
|
879c9a678ba1ff951a1f92b0c42673a7943a18e6
|
[
"MIT"
] | null | null | null |
SichereEntnahme.py
|
ThoEngel/Finanzen-Simuliert
|
879c9a678ba1ff951a1f92b0c42673a7943a18e6
|
[
"MIT"
] | null | null | null |
'''
Beware of the 4% rule
Safe withdrawal rate
https://www.finanzen-erklaert.de/vorsicht-vor-der-4-regel/
'''
import pandas as pd
import time
from SEsimulation.mDate import mDate
from SEsimulation import SEsimulation
import plotly.express as px
import numpy as np
def optimize(s, probability, loBound, hiBound):
""" Optimiere auf die max. mögliche Entnahme bei einer vorgegebenen Fehlerquote
Returns:
widthdrawal: max. mögliche prozentuale Entnahme
"""
n_ret_months = s.simulation['n_ret_years'] * 12
    accuracy = 0.01  # accuracy of the optimization
    # Prepare the optimization
deltaWidthdrawal = (hiBound - loBound) / 2
percWidthdrawal = loBound + deltaWidthdrawal
cnt = 0
curProb = 0
    # Optimization by successive approximation
while (deltaWidthdrawal > accuracy) or (curProb > probability):
cnt += 1
s.withdrawal['fixed_pct'] = percWidthdrawal
s.init_simulation()
s.simulate()
survival = [trial_dict['exhaustion'] for trial_dict in s.latest_simulation]
curProb = 100 * (len(survival) - survival.count(n_ret_months)) / len(survival)
if s.visualization['textoutput'] == True:
print(cnt, '. Entnahme: ', percWidthdrawal, ' Ausfallwahrscheinlichkeit: ', curProb, '%')
deltaWidthdrawal /= 2
if deltaWidthdrawal <= accuracy / 10:
break
if curProb > probability:
percWidthdrawal -= deltaWidthdrawal
else:
percWidthdrawal += deltaWidthdrawal
return percWidthdrawal
print('Start')
starttime = time.time()
# Read monthly S&P500 data
RETURN_FILE = 'real_return_df.pickle'
real_return_df = pd.read_pickle(RETURN_FILE)
# Configuration of the withdrawal simulation
config = {
    'date': {'start': mDate(1, 2022),              # Start date
             'start_retirement': mDate(1, 2022)},  # Start of the withdrawals
    'assets': {'depot': 500000,                    # Portfolio value at the start
               'fees': 0.00},                      # Annual portfolio fees in %
    'simulation': {'returns_df': real_return_df,   # S&P500 data
                   'n_ret_years': 30},             # Simulation length in years
    'withdrawal': {'fixed_pct': 4.0},              # Withdrawal rate in % per year of the initial portfolio
    'pension': {'point': np.array([0]),            # Number of acquired pension points
                'point_add': np.array([0.0]),      # Pension point increase per year
                'start_date': [mDate(1, 3000)],    # Start of the statutory pension
                'name': {'John Doe'},              # Name of the pension recipient
                'point_value': 0.0,                # Current pension point value
                'point_value_inc': 0.0},           # Increase of the pension point value in %
    'visualization': {'textoutput': True}          # Textual intermediate output as debug info
}
err_rates = [0.0, 0.1, 0.5, 1.0]  # Failure rates [%]
years = [10, 12, 14, 16, 18, 20, 22, 25, 28, 31, 35, 39, 44, 49, 55, 60]  # Withdrawal duration in years
df = pd.DataFrame(columns=err_rates, index=years)
# Optimization bounds for the withdrawal percentage:
loBound = 2  # Lower bound of the optimization
hiBound = 8  # Upper bound of the optimization
column_indexer = 0
for err_rate in err_rates:
row_indexer = 0
hiBound = 8
for year in years:
        # Update the simulation duration
config['simulation']['n_ret_years'] = year
s = SEsimulation.SEsimulation(config)
widthdraw = optimize(s, err_rate, loBound, hiBound)
print('\n', year, ' Jahre, Entnahme: ', widthdraw, '% @Risk: ', err_rate, '%\n')
df.iloc[row_indexer, column_indexer] = widthdraw
row_indexer += 1
hiBound = widthdraw
column_indexer += 1
fig = px.line(df)
fig.update_layout(
title="Sichere jährliche Entnahmerate nach Laufzeit mit Inflationsanpassung",
xaxis_title="Laufzeit [Jahre]",
yaxis_title="Sichere Entnahme [%]",
legend_title="Fehlerquote [%]",
font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
)
)
fig.show()
endTime = time.time()
print('\nSimulationsdauer: %5.2f sec.' % (endTime - starttime))
| 31.477612 | 103 | 0.631342 |
5b9bf844c4b3f56ac874807da7d365032010e1ef
| 295 |
py
|
Python
|
PSA/psaExceptions.py
|
SECURED-FP7/secured-psa-nsm
|
20c8f790ebc2d2aa8c33bda1e047f8f29275a0be
|
[
"Apache-2.0"
] | null | null | null |
PSA/psaExceptions.py
|
SECURED-FP7/secured-psa-nsm
|
20c8f790ebc2d2aa8c33bda1e047f8f29275a0be
|
[
"Apache-2.0"
] | null | null | null |
PSA/psaExceptions.py
|
SECURED-FP7/secured-psa-nsm
|
20c8f790ebc2d2aa8c33bda1e047f8f29275a0be
|
[
"Apache-2.0"
] | null | null | null |
# -*- Mode:Python;indent-tabs-mode:nil; -*-
#
# File: psaExceptions.py
# Created: 05/09/2014
# Author: BSC
#
# Description:
# Custom exception class to manage errors in the PSC
#
class psaExceptions( object ):
class confRetrievalFailed( Exception ):
pass
| 19.666667 | 57 | 0.620339 |
f3a5fab66ceedfb341431e9840acd30ba94bdbc7
| 38 |
py
|
Python
|
python/testlint/testlint/util.py
|
mpsonntag/snippets
|
fc3cc42ea49b885c1f29c0aef1379055a931a978
|
[
"BSD-3-Clause"
] | null | null | null |
python/testlint/testlint/util.py
|
mpsonntag/snippets
|
fc3cc42ea49b885c1f29c0aef1379055a931a978
|
[
"BSD-3-Clause"
] | null | null | null |
python/testlint/testlint/util.py
|
mpsonntag/snippets
|
fc3cc42ea49b885c1f29c0aef1379055a931a978
|
[
"BSD-3-Clause"
] | null | null | null |
def add_yourself(a):
return a + a
| 12.666667 | 20 | 0.631579 |
caf2517dfa6294e5eb94ac336f65b2026a084016
| 14,489 |
py
|
Python
|
scripts/signal_marker_processing/marker_aggregation.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 17 |
2021-01-18T07:27:01.000Z
|
2022-03-10T12:26:21.000Z
|
scripts/signal_marker_processing/marker_aggregation.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 4 |
2021-04-29T11:20:44.000Z
|
2021-12-06T10:19:17.000Z
|
scripts/signal_marker_processing/marker_aggregation.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 47 |
2021-01-21T08:25:22.000Z
|
2022-03-21T14:28:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Daniele Borio
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
# Created on Sun Sep 26 15:34:44 2021
import numpy as np
"""
Summary:
    Class for aggregating dictionaries of markers into a single
list of consecutive composite markers
"""
class marker_aggregator :
"""
Summary:
Class responsible for aggregating markers
"""
def __init__(self, options : dict ) :
"""
Summary:
Object constructor.
Arguments:
options - dictionary with the options specifying the operations
to be performed.
Returns:
Nothing.
"""
if "marker-aggregator" not in options :
raise Exception("marker-aggregator.__init__() - missing options")
else :
self.action_list = options["marker-aggregator"]
return
def aggregate_markers(self, markers : dict) -> list :
"""
Summary:
Function responsible for aggregating markers.
Arguments:
markers - dictionary of the markers to be aggregated
Returns:
            marker_list - list of aggregated markers
"""
marker_dict = markers
marker_list = []
# There is nothing to aggregate
if len(markers) == 0 :
return []
# check if there are actions to perform
if not any(["action" in item for item in self.action_list]) :
# there is nothing do
# just use the fist list in the marker dictionary
marker_list = list(markers.values())[0]
return marker_list
for action in self.action_list :
if action["action"] == "confirm":
marker_dict, marker_list = self.confirm(marker_dict, marker_list, action)
elif action["action"] == "aggregate":
marker_dict, marker_list = self.aggregate(marker_dict, marker_list, action)
elif action["action"] == "merge":
marker_dict, marker_list = self.merge(marker_dict, marker_list, action)
else :
raise Exception("marker-aggregator.aggregate_markers() - unknown action")
return marker_list
def aggregate(self, marker_dict : dict, marker_list : list, action : dict) :
"""
Summary:
            Markers from two time series are aggregated, eventually forming composite
            markers.
        Arguments:
            marker_dict - dictionary of the markers to be aggregated
            marker_list - list of previously processed markers
            action - dictionary specifying the action to be performed
        Returns:
            marker_dict - dictionary of the markers to be aggregated
            marker_list - list of previously processed markers
"""
if len(action["signals"]) == 1 :
marker_list1 = marker_list
if action["signals"][0] in marker_dict :
marker_list2 = marker_dict[action["signals"][0]]
else :
marker_list2 = []
elif len(action["signals"]) == 2 :
marker_list1 = marker_dict[action["signals"][0]]
if action["signals"][0] in marker_dict :
marker_list1 = marker_dict[action["signals"][0]]
else :
marker_list1 = []
if action["signals"][1] in marker_dict :
marker_list2 = marker_dict[action["signals"][1]]
else :
marker_list2 = []
else :
raise Exception("marker-aggregator.aggregate() - signals not specified")
if len(marker_list2) == 0:
if "outname" in action :
marker_dict[action["outname"]] = marker_list1
return marker_dict, marker_list
else :
return marker_dict, marker_list1
if len(marker_list1) == 0:
if "outname" in action :
marker_dict[action["outname"]] = marker_list2
return marker_dict, marker_list
else :
return marker_dict, marker_list2
# overlapping
if "overlap" in action :
overlap_th = action["overlap"]
else :
overlap_th = 0
m1_ov_markers = []
m2_ov_markers = []
m1_markers = []
m2_markers = []
ov = []
for m1 in marker_list1 :
overlapping = [m1.overlap_in_days(x) for x in marker_list2]
max_overlap = max(overlapping)
if max_overlap > overlap_th :
m2 = marker_list2[np.argmax(overlapping)]
m1_ov_markers.append(m1)
m2_ov_markers.append(m2)
ov.append(max_overlap)
jj = 1
while jj < len(m2_ov_markers) :
m2 = m2_ov_markers[jj]
m2_old = m2_ov_markers[jj - 1]
if m2 == m2_old :
if ov[jj] > ov[jj - 1] :
m2_ov_markers.pop(jj - 1)
m1_ov_markers.pop(jj - 1)
ov.pop(jj - 1)
else :
m2_ov_markers.pop(jj)
m1_ov_markers.pop(jj)
ov.pop(jj)
else :
jj += 1
# Now aggregate the markers
overlapping_markers = []
for ii, m1 in enumerate(m1_ov_markers) :
overlapping_markers.append(m1.merge_markers(m2_ov_markers[ii]))
for m1 in marker_list1 :
if m1 not in m1_ov_markers :
m1_markers.append(m1)
for m2 in marker_list2 :
if m2 not in m2_ov_markers :
m2_markers.append(m2)
output_list = marker_aggregator.merge_event_list(overlapping_markers, m1_markers)
output_list = marker_aggregator.merge_event_list(output_list, m2_markers)
# Now generate the output
if "outname" in action :
marker_dict[action["outname"]] = output_list
return marker_dict, marker_list
else :
return marker_dict, output_list
def confirm(self, marker_dict : dict, marker_list, action : dict) :
"""
Summary:
Markers in a time series are confirmed by the markers in
another time series
        Arguments:
            marker_dict - dictionary of the markers to be aggregated
            marker_list - list of previously processed markers
            action - dictionary with the parameters for confirming the markers
        Returns:
            marker_dict - dictionary of the markers to be aggregated
            marker_list - list of previously processed markers
"""
if len(action["signals"]) == 1 :
marker_list1 = marker_list
if action["signals"][0] in marker_dict :
marker_list2 = marker_dict[action["signals"][0]]
else :
marker_list2 = []
elif len(action["signals"]) == 2 :
if action["signals"][0] in marker_dict :
marker_list1 = marker_dict[action["signals"][0]]
else :
marker_list1 = []
if action["signals"][1] in marker_dict :
marker_list2 = marker_dict[action["signals"][1]]
else :
marker_list2 = []
else :
raise Exception("marker-aggregator.confirm() - signals not specified")
if len(marker_list2) == 0 :
# No possibility to confirm
if "outname" in action :
marker_dict[action["outname"]] = []
return marker_dict, marker_list
else :
return marker_dict, []
# convert into a numpy array for convenience
marker_array = np.array(marker_list2)
# overlapping
if "overlap" in action :
overlap_th = action["overlap"]
else :
overlap_th = 0
confirmed_list = []
# now confirm the first marker series with the second
for marker in marker_list1 :
# find the overlapping between the markers of the two list
f = lambda x: marker.overlap_in_days(x)
vf = np.vectorize(f)
overlap = max(vf(marker_array))
if overlap > overlap_th :
confirmed_list.append(marker)
if "outname" in action :
marker_dict[action["outname"]] = confirmed_list
return marker_dict, marker_list
else :
return marker_dict, confirmed_list
def merge(self, marker_dict : dict, marker_list, action : dict) :
"""
Summary:
            Markers from two time series are merged: overlaps are not allowed, and
            the second series takes precedence over the first.
        Arguments:
            marker_dict - dictionary of the markers to be aggregated
            marker_list - list of previously processed markers
            action - dictionary specifying the action to be performed
        Returns:
            marker_dict - dictionary of the markers to be aggregated
            marker_list - list of previously processed markers
"""
if len(action["signals"]) == 1 :
marker_list1 = marker_list
if action["signals"][0] in marker_dict :
marker_list2 = marker_dict[action["signals"][0]]
else :
marker_list2 = []
elif len(action["signals"]) == 2 :
if action["signals"][0] in marker_dict :
marker_list1 = marker_dict[action["signals"][0]]
else :
marker_list1 = []
if action["signals"][1] in marker_dict :
marker_list2 = marker_dict[action["signals"][1]]
else :
marker_list2 = []
else :
raise Exception("marker-aggregator.merge() - signals not specified")
if len(marker_list2) == 0:
if "outname" in action :
marker_dict[action["outname"]] = marker_list1
return marker_dict, marker_list
else :
return marker_dict, marker_list1
if len(marker_list1) == 0:
if "outname" in action :
marker_dict[action["outname"]] = marker_list2
return marker_dict, marker_list
else :
return marker_dict, marker_list2
        # The second time series takes precedence over the first
output_list = []
ii = 0
jj = 0
m1 = marker_list1[ii]
m2 = marker_list2[jj]
while ii < len(marker_list1) and jj < len(marker_list2) :
if m1 < m2 :
output_list.append(m1)
ii += 1
if ii < len(marker_list1) :
m1 = marker_list1[ii]
elif m2 < m1 :
output_list.append(m2)
jj += 1
if jj < len(marker_list2) :
m2 = marker_list2[jj]
else :
# the two markers overlap
if m1.start_date < m2.start_date :
m_new = m1.trim_right(m2.start_date)
output_list.append(m_new)
output_list.append(m2)
jj += 1
if m2.stop_date < m1.stop_date :
# m2 is totally contained in m1 - split in 3 events
m1 = m1.trim_left(m2.stop_date)
else :
ii += 1
if ii < len(marker_list1) :
m1 = marker_list1[ii]
if jj < len(marker_list2) :
m2 = marker_list2[jj]
else :
if m2.stop_date < m1.stop_date :
m1 = m1.trim_left(m2.stop_date)
output_list.append(m2)
jj += 1
if jj < len(marker_list2) :
m2 = marker_list2[jj]
else :
# m1 is completely in m2 - This should not happen
ii += 1
if ii < len(marker_list1) :
m1 = marker_list1[ii]
# Now check if there are remaining markers in the two lists
while ii < len(marker_list1) :
output_list.append(m1)
ii += 1
if ii < len(marker_list1) :
m1 = marker_list1[ii]
while jj < len(marker_list2) :
output_list.append(marker_list2[jj])
jj += 1
# Now generate the output
if "outname" in action :
marker_dict[action["outname"]] = output_list
return marker_dict, marker_list
else :
return marker_dict, output_list
@staticmethod
def merge_event_list(list1, list2) :
output_list = []
ii = 0
jj = 0
while (ii < len(list1)) and (jj < len(list2)) :
m1 = list1[ii]
m2 = list2[jj]
if m1.start_date < m2.start_date :
output_list.append(m1)
ii += 1
else:
output_list.append(m2)
jj += 1
while ii < len(list1) :
output_list.append(list1[ii])
ii += 1
while jj < len(list2) :
output_list.append(list2[jj])
jj += 1
return output_list
| 34.415677 | 99 | 0.494168 |
caf8d2e93123505a305327145631a51379f4be2b
| 693 |
py
|
Python
|
tests/test_cli.py
|
datumbox/model-index
|
a39af5f8aaa2a90b8fc7180744a855282360067a
|
[
"MIT"
] | 12 |
2021-02-26T08:19:00.000Z
|
2022-01-26T14:00:16.000Z
|
tests/test_cli.py
|
datumbox/model-index
|
a39af5f8aaa2a90b8fc7180744a855282360067a
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
datumbox/model-index
|
a39af5f8aaa2a90b8fc7180744a855282360067a
|
[
"MIT"
] | 3 |
2021-03-19T13:51:56.000Z
|
2021-08-25T05:25:52.000Z
|
from click.testing import CliRunner
from modelindex.commands.cli import cli
def test_cli_invocation():
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
def test_cli_check_ok():
runner = CliRunner()
result = runner.invoke(cli, ["check", "tests/test-mi/11_markdown/rexnet.md"])
assert result.exit_code == 0
assert "Checking" in result.output
assert "All good" in result.output
def test_cli_check_fail():
runner = CliRunner()
result = runner.invoke(cli, ["check", "tests/test-mi/01_base"])
assert result.exit_code == 0
assert "Path to README file docs/inception-v3-readme.md is not a valid file" in result.output
| 28.875 | 97 | 0.707071 |
1b56962e5a6d5a63085f2158e015e7d133280d2e
| 82 |
py
|
Python
|
0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.5-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.5-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.5-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# 14.5 Denormalization
# What is denormalization?
# Explain the pros and cons.
| 16.4 | 28 | 0.731707 |
a2173048fd49fa12babfe4478a4385f72c5c1495
| 871 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v8_0/update_supplier_address_in_stock_entry.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v8_0/update_supplier_address_in_stock_entry.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_0/update_supplier_address_in_stock_entry.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
# copy supplier_address to address_display, and set supplier_address to blank
stock_entries = frappe.db.sql(""" select name, purchase_order, supplier_address from `tabStock Entry`
where ifnull(supplier_address, '') <> ''""", as_dict=True)
frappe.reload_doc('stock', 'doctype', 'stock_entry')
for stock_entry in stock_entries:
# move supplier address to address_display, and fetch the supplier address from purchase order
se = frappe.get_doc("Stock Entry", stock_entry.get("name"))
se.address_display = stock_entry.get("supplier_address")
se.supplier_address = frappe.db.get_value("Purchase Order", stock_entry.get("purchase_order"),"supplier_address") or None
se.db_update()
| 37.869565 | 123 | 0.768083 |
bf914edd5e334bdda9f1192d6a3b43c7fe5939d7
| 6,731 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/shopping_cart/test_shopping_cart.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/shopping_cart/test_shopping_cart.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/shopping_cart/test_shopping_cart.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import nowdate, add_months
from erpnext.shopping_cart.cart import _get_cart_quotation, update_cart, get_party
from erpnext.tests.utils import create_test_contact_and_address
# test_dependencies = ['Payment Terms Template']
class TestShoppingCart(unittest.TestCase):
"""
Note:
Shopping Cart == Quotation
"""
def setUp(self):
frappe.set_user("Administrator")
create_test_contact_and_address()
self.enable_shopping_cart()
def tearDown(self):
frappe.set_user("Administrator")
self.disable_shopping_cart()
def test_get_cart_new_user(self):
self.login_as_new_user()
# test if lead is created and quotation with new lead is fetched
quotation = _get_cart_quotation()
self.assertEqual(quotation.quotation_to, "Customer")
self.assertEqual(quotation.contact_person,
frappe.db.get_value("Contact", dict(email_id="[email protected]")))
self.assertEqual(quotation.lead, None)
self.assertEqual(quotation.contact_email, frappe.session.user)
return quotation
def test_get_cart_customer(self):
self.login_as_customer()
# test if quotation with customer is fetched
quotation = _get_cart_quotation()
self.assertEqual(quotation.quotation_to, "Customer")
self.assertEqual(quotation.customer, "_Test Customer")
self.assertEqual(quotation.lead, None)
self.assertEqual(quotation.contact_email, frappe.session.user)
return quotation
def test_add_to_cart(self):
self.login_as_customer()
# remove from cart
self.remove_all_items_from_cart()
# add first item
update_cart("_Test Item", 1)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[0].item_code, "_Test Item")
self.assertEqual(quotation.get("items")[0].qty, 1)
self.assertEqual(quotation.get("items")[0].amount, 10)
# add second item
update_cart("_Test Item 2", 1)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[1].item_code, "_Test Item 2")
self.assertEqual(quotation.get("items")[1].qty, 1)
self.assertEqual(quotation.get("items")[1].amount, 20)
self.assertEqual(len(quotation.get("items")), 2)
def test_update_cart(self):
# first, add to cart
self.test_add_to_cart()
# update first item
update_cart("_Test Item", 5)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[0].item_code, "_Test Item")
self.assertEqual(quotation.get("items")[0].qty, 5)
self.assertEqual(quotation.get("items")[0].amount, 50)
self.assertEqual(quotation.net_total, 70)
self.assertEqual(len(quotation.get("items")), 2)
def test_remove_from_cart(self):
# first, add to cart
self.test_add_to_cart()
# remove first item
update_cart("_Test Item", 0)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[0].item_code, "_Test Item 2")
self.assertEqual(quotation.get("items")[0].qty, 1)
self.assertEqual(quotation.get("items")[0].amount, 20)
self.assertEqual(quotation.net_total, 20)
self.assertEqual(len(quotation.get("items")), 1)
def test_tax_rule(self):
self.login_as_customer()
quotation = self.create_quotation()
from erpnext.accounts.party import set_taxes
tax_rule_master = set_taxes(quotation.customer, "Customer", \
quotation.transaction_date, quotation.company, None, None, \
quotation.customer_address, quotation.shipping_address_name, 1)
self.assertEqual(quotation.taxes_and_charges, tax_rule_master)
self.assertEqual(quotation.total_taxes_and_charges, 1000.0)
self.remove_test_quotation(quotation)
def create_quotation(self):
quotation = frappe.new_doc("Quotation")
values = {
"doctype": "Quotation",
"quotation_to": "Customer",
"order_type": "Shopping Cart",
"customer": get_party(frappe.session.user).name,
"docstatus": 0,
"contact_email": frappe.session.user,
"selling_price_list": "_Test Price List Rest of the World",
"currency": "USD",
"taxes_and_charges" : "_Test Tax 1 - _TC",
"conversion_rate":1,
"transaction_date" : nowdate(),
"valid_till" : add_months(nowdate(), 1),
"items": [{
"item_code": "_Test Item",
"qty": 1
}],
"taxes": frappe.get_doc("Sales Taxes and Charges Template", "_Test Tax 1 - _TC").taxes,
"company": "_Test Company"
}
quotation.update(values)
quotation.insert(ignore_permissions=True)
return quotation
def remove_test_quotation(self, quotation):
frappe.set_user("Administrator")
quotation.delete()
# helper functions
def enable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.update({
"enabled": 1,
"company": "_Test Company",
"default_customer_group": "_Test Customer Group",
"quotation_series": "_T-Quotation-",
"price_list": "_Test Price List India"
})
# insert item price
if not frappe.db.get_value("Item Price", {"price_list": "_Test Price List India",
"item_code": "_Test Item"}):
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item",
"price_list_rate": 10
}).insert()
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item 2",
"price_list_rate": 20
}).insert()
settings.save()
frappe.local.shopping_cart_settings = None
def disable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.enabled = 0
settings.save()
frappe.local.shopping_cart_settings = None
def login_as_new_user(self):
self.create_user_if_not_exists("[email protected]")
frappe.set_user("[email protected]")
def login_as_customer(self):
self.create_user_if_not_exists("[email protected]",
"_Test Contact For _Test Customer")
frappe.set_user("[email protected]")
def remove_all_items_from_cart(self):
quotation = _get_cart_quotation()
quotation.flags.ignore_permissions=True
quotation.delete()
def create_user_if_not_exists(self, email, first_name = None):
if frappe.db.exists("User", email):
return
frappe.get_doc({
"doctype": "User",
"user_type": "Website User",
"email": email,
"send_welcome_email": 0,
"first_name": first_name or email.split("@")[0]
}).insert(ignore_permissions=True)
test_dependencies = ["Sales Taxes and Charges Template", "Price List", "Item Price", "Shipping Rule", "Currency Exchange",
"Customer Group", "Lead", "Customer", "Contact", "Address", "Item", "Tax Rule"]
| 30.876147 | 122 | 0.733769 |
157d19a898890b81f2e5c2fb54100bd500b6c261
| 1,857 |
py
|
Python
|
tests/web.adblockplus.org/pages/landingPage.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 9 |
2016-01-29T18:05:29.000Z
|
2021-10-06T04:21:55.000Z
|
tests/web.adblockplus.org/pages/landingPage.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 9 |
2015-04-06T19:03:32.000Z
|
2019-05-28T13:34:55.000Z
|
tests/web.adblockplus.org/pages/landingPage.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 18 |
2015-04-06T17:42:31.000Z
|
2021-10-06T04:26:29.000Z
|
from pages.basePage import BasePage
DOWNLOAD_BUTTON_HREF = 'a[href*="install"]'
DOWNLOAD_BUTTON_HREF_ANDROID = 'a[href*="https://eyeo.to/adblockbrowser/android/abp-website"]'
DOWNLOAD_BUTTON_HREF_IOS = 'a[href*="https://eyeo.to/adblockplus/ios_safari_install/abp-website"]'
DOWNLOAD_BUTTON_HREF_LANG = 'a[href*="chrome_install"]'
class LandingPage(BasePage):
def __init__(self, driver, is_language_test=False):
self.driver = driver
self._download_button_href = DOWNLOAD_BUTTON_HREF
if is_language_test:
self._download_button_href = DOWNLOAD_BUTTON_HREF_LANG
@property
def get_download_button_link(self):
return self.driver.find_element_by_css_selector(self._download_button_href).get_attribute('href')
@property
def get_download_button_link_android(self):
return self.driver.find_element_by_css_selector(DOWNLOAD_BUTTON_HREF_ANDROID).get_attribute('href')
@property
def get_download_button_link_ios(self):
return self.driver.find_element_by_css_selector(DOWNLOAD_BUTTON_HREF_IOS).get_attribute('href')
@property
def get_download_button_text(self):
return self.driver.find_element_by_css_selector(self._download_button_href).get_attribute('innerText')
@property
def get_download_button_text_android(self):
return self.driver.find_element_by_css_selector(DOWNLOAD_BUTTON_HREF_ANDROID).get_attribute('title')
@property
def get_download_button_text_ios(self):
return self.driver.find_element_by_css_selector(DOWNLOAD_BUTTON_HREF_IOS).get_attribute('title')
def click_download_button(self):
self.driver.find_element_by_css_selector(self._download_button_href).click()
def click_download_button_android(self):
self.driver.find_element_by_css_selector(DOWNLOAD_BUTTON_HREF_ANDROID).click()
| 38.6875 | 110 | 0.777598 |
1734d420c88d48f5713ccdaf11d5cc003b5ad203
| 43,342 |
py
|
Python
|
Packs/CounterTack/Integrations/CounterTack/CounterTack.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CounterTack/Integrations/CounterTack/CounterTack.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CounterTack/Integrations/CounterTack/CounterTack.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import os
import os.path
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# remove proxy if not set to true in params
if not demisto.params().get('proxy'):
del os.environ['HTTP_PROXY']
del os.environ['HTTPS_PROXY']
del os.environ['http_proxy']
del os.environ['https_proxy']
''' GLOBALS/PARAMS '''
USERNAME = demisto.params().get('credentials').get('identifier')
PASSWORD = demisto.params().get('credentials').get('password')
SERVER_URL = demisto.params().get('server')[:-1] if demisto.params().get('server').endswith('/') else \
demisto.params().get('server')
FETCH_TIME = demisto.params().get('fetch_time', '3 days').strip()
FETCH_NOTIFICATIONS = demisto.params().get('fetch_notifications')
FETCH_BEHAVIORS = demisto.params().get('fetch_behviors')
# Should we use SSL
USE_SSL = not demisto.params().get('unsecure', False)
# Service base URL
BASE_PATH = '{}/api/v2/'.format(SERVER_URL)
# Headers to be sent in requests
DEFAULT_HEADERS = {
'Content-Type': 'application/json'
}
def http_request(method, suffix_url, headers=DEFAULT_HEADERS, body=None):
"""
    Sends the HTTP request and returns the parsed JSON response.
"""
url = BASE_PATH + suffix_url
response = requests.request(
method,
url,
auth=(USERNAME, PASSWORD),
headers=headers,
verify=USE_SSL,
data=body
)
# handle request failure
if response.status_code not in {200}:
message = parse_error_response(response)
return_error('Error in API call to CounterTack with status code {}\n{}'.format(response.status_code, message))
try:
response = response.json()
except Exception:
return_error(response.content)
return response
def parse_error_response(response):
try:
res = response.json()
msg = res.get('message')
if res.get('details') is not None and res.get('details')[0].get('message') is not None:
msg = msg + "\n" + json.dumps(res.get('details')[0])
except Exception:
return response.text
return msg
"""
ENDPOINTS
"""
def get_endpoints_request():
"""
This request returns a collection of endpoints.
"""
suffix_url = 'endpoints'
response = http_request('GET', suffix_url)
return response
def get_endpoints():
"""
Returns the information on existing endpoints
"""
data = []
endpoint_standards = []
endpoints = get_endpoints_request()
for endpoint in endpoints:
data.append({
'Id': endpoint.get('id'),
'Name': endpoint.get('name'),
'OS': endpoint.get('product_name'),
'IP': endpoint.get('ips'),
'Status': endpoint.get('status'),
'Threat': endpoint.get('threat')
})
endpoint_standards.append({
'Id': endpoint.get('id'),
'IPAddress': endpoint.get('ips'),
'Domain': endpoint.get('domain'),
'MACAddress': endpoint.get('mac'),
'OS': endpoint.get('product_name'),
'OSVersion': endpoint.get('driver_version'),
'Model': endpoint.get('current_profile'),
'Memory': endpoint.get('memory'),
'Processors': endpoint.get('num_cpus')
})
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(endpoints,
keyTransform=underscoreToCamelCase),
'Endpoint': endpoint_standards
}
headers = ['OS', 'Name', 'Threat', 'Status', 'Id', 'IP']
entry = {
'Type': entryTypes['note'],
'Contents': endpoints,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'CounterTack Endpoints', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def get_endpoint_request(endpoint_id):
"""
Request for a specific endpoint
"""
suffix_url = 'endpoints/' + endpoint_id
response = http_request('GET', suffix_url)
return response
def get_endpoint():
"""
Get the information for the requested endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
returns:
The information about the specified endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
response = get_endpoint_request(endpoint_id)
content = {
'OS': response.get('product_name'),
'Domain': response.get('domain'),
'IP': response.get('ip'),
'Threat': response.get('threat'),
'MaxImpact': response.get('max_impact'),
'TenantID': response.get('tenant'),
'IsQuarantined': response.get('is_quarantined'),
'Profile': response.get('current_profile'),
'Cluster_hosts': response.get('cluster_hosts'),
'Status': response.get('status'),
'Tags': response.get('tags')
}
endpoint_standards = {
'Id': response.get('id'),
'IPAddress': response.get('ips'),
'Domain': response.get('domain'),
'MACAddress': response.get('mac'),
'OS': response.get('product_name'),
'OSVersion': response.get('driver_version'),
'Model': response.get('current_profile'),
'Memory': response.get('memory'),
'Processors': response.get('num_cpus')
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response,
keyTransform=underscoreToCamelCase),
'Endpoint': endpoint_standards
}
    headers = ['OS', 'Domain', 'IP', 'Threat', 'MaxImpact', 'TenantID', 'IsQuarantined',
               'Profile', 'Tags', 'Cluster_hosts', 'Status']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'CounterTack Endpoint information:', content, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
"""
ENDPOINTS TAGS
"""
def endpoint_tags_request(endpoint_id):
"""
This request retrieves tags from specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/tags'
response = http_request('GET', suffix_url)
return response
def get_endpoint_tags():
"""
Get the tags for the specified endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
    tags = endpoint_tags_request(endpoint_id)
    response = {
        'tags': tags
    }
    tags_context = {
        'Id': endpoint_id,
        'tags': tags
    }
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(tags_context,
keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack tags for the specified endpoint:', response, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def add_tags_request(endpoint_id, body):
"""
The request adds tags to specified endpoint
The request gets the endpoint ID and the tags the user wants to add.
"""
suffix_url = 'endpoints/' + endpoint_id + '/tags'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def add_tags():
"""
The command add tags for the specified endpoint.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (array) body
The tags to add to the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
body = argToList(demisto.args().get('tags'))
response = add_tags_request(endpoint_id, body)
response = endpoint_tags_request(endpoint_id)
response = {
'tags': response,
'Id': endpoint_id
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Endpoint tags were added successfully", response),
'EntryContext': context
}
demisto.results(entry)
def delete_tags_request(endpoint_id, body):
"""
This request deletes specific tags from specified endpoint.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (array) body
The tags to delete from the endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/tags'
response = http_request('DELETE', suffix_url, body=json.dumps(body))
return response
def delete_tags():
"""
The command deletes tags for the specified endpoint.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (array) body
The tags to delete from the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
body = argToList(demisto.args().get('tags'))
response = delete_tags_request(endpoint_id, body)
response = endpoint_tags_request(endpoint_id)
response = {
'tags': response,
'Id': endpoint_id
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'Endpoint tags were deleted successfully', response),
'EntryContext': context
}
demisto.results(entry)
"""
ENDPOINTS COMMANDS
"""
def endpoint_quarantine_request(endpoint_id, body):
"""
Request to quarantine a specified endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) type
The type of the command: quarantine
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def endpoint_quarantine():
"""
    Prevents an endpoint from any network communication, but maintains a connection to the Sentinel Cluster
and addresses defined in the Global Whitelist.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) type
The type of the command: quarantine
"""
endpoint_id = demisto.args().get('endpoint_id')
body = {
'type': 'quarantine'
}
response = endpoint_quarantine_request(endpoint_id, body)
quarantine_response = get_endpoint_request(endpoint_id)
quarantine_context = {
'Id': endpoint_id,
        'is_quarantined': quarantine_response.get('is_quarantined')
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(quarantine_context,
keyTransform=underscoreToCamelCase)
}
data = {
'Id': response.get('id'),
'user name': response.get('username'),
'request time': response.get('request_time'),
'endpoint ID': response.get('endpoint_ids'),
'command name': response.get('command_name'),
'status': response.get('status'),
}
entry = {
'Type': entryTypes['note'],
'Contents': quarantine_context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('The command has been applied successfully:', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def disable_quarantine():
"""
Allows a previously quarantined endpoint to communicate with the network.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) type
The type of the command: lift_quarantine
"""
endpoint_id = demisto.args().get('endpoint_id')
body = {
'type': 'lift_quarantine'
}
response = endpoint_quarantine_request(endpoint_id, body)
quarantine_response = get_endpoint_request(endpoint_id)
quarantine_context = {
'Id': endpoint_id,
        'is_quarantined': quarantine_response.get('is_quarantined')
}
data = {
'Id': response.get('id'),
'user name': response.get('username'),
'request time': response.get('request_time'),
'endpoint ID': response.get('endpoint_ids'),
'command name': response.get('command_name'),
'status': response.get('status'),
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(quarantine_context,
keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': quarantine_context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('The command has been applied successfully:', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def file_extract_request(endpoint_id, body):
"""
Request for extracting file from specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def extract_file():
"""
Enables an API consumer to extract the file in addition to some file metadata.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) body
The type of the command: extract file and the file path
"""
endpoint_id = demisto.args().get('endpoint_id')
paths = argToList(demisto.args().get('file_path'))
body = {
'type': 'extract_files',
'paths': paths
}
response = file_extract_request(endpoint_id, body)
data = {
'Id': response.get('id'),
'User Name': response.get('username'),
'Request Time': response.get('request_time'),
'Endpoint ID': response.get('endpoint_ids'),
'Command Name': response.get('command_name'),
'Command Arguments': response.get('command_arg'),
'Status': response.get('status'),
}
context = {
'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
headers = ['Id', 'User Name', 'Request Time', 'Endpoint ID', 'Command Name', 'Command Arguments', 'Status']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The file has been extracted successfully:', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def delete_file_request(endpoint_id, body):
"""
Deletes a file from the specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def delete_file():
"""
Deletes a file from the specified endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) body
The type of the command: delete_file and the file path
"""
endpoint_id = demisto.args().get('endpoint_id')
path = demisto.args().get('file_path')
body = {
'type': 'delete_file',
'path': path
}
delete_file_request(endpoint_id, body)
demisto.results('The file has been deleted successfully')
def kill_process_request(endpoint_id, body):
"""
    Request to terminate all instances of the process identified in the command.
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def kill_process():
"""
Terminates all instances of the process identified in the command.
Processes can be identified by the PID or process name.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) process_id
The ID of the process to terminate
demisto parameter: (string) process_name
The name of the process to terminate
"""
endpoint_id = demisto.args().get('endpoint_id')
pid = demisto.args().get('process_id')
name = demisto.args().get('process_name')
if not pid and not name:
return_error('Please provide either process_id or process_name')
body = {
'type': 'kill_process',
'pid': pid,
'name': name
}
response = kill_process_request(endpoint_id, body)
data = {
'Id': response.get('id'),
'User Name': response.get('username'),
'Request Time': response.get('request_time'),
'Endpoint ID': response.get('endpoint_ids'),
'Command Name': response.get('command_name'),
'Status': response.get('status'),
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response,
keyTransform=underscoreToCamelCase,
removeNull=True)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The process has been terminated', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
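# --- Illustrative sketch (added for clarity; not part of the original integration) ---
# The endpoint actions above (quarantine, lift_quarantine, extract_files, delete_file,
# kill_process) all POST a JSON body of the shape {'type': <command>, ...extra args}
# to 'endpoints/<id>/commands'. A hedged, hypothetical helper showing that shared
# pattern; the function name is an assumption and is not used elsewhere.
def _example_send_endpoint_command(endpoint_id, command_type, **kwargs):
    """Sketch only: build and send an arbitrary endpoint command body."""
    body = dict({'type': command_type}, **kwargs)
    return http_request('POST', 'endpoints/' + endpoint_id + '/commands',
                        body=json.dumps(body))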
"""
ENDPOINT FILES
"""
def file_request():
"""
This request retrieves all extracted files for all endpoints on the cluster
"""
suffix_url = 'endpoints/files'
response = http_request('GET', suffix_url)
return response
def get_all_files():
    """
    Returns the extracted files for all endpoints on the cluster
    """
data = []
files_standards = []
files = file_request()
for file in files:
data.append({
'Id': file.get('id'),
'user': file.get('user'),
'endpoint_id': file.get('endpoint_id'),
'path': file.get('path'),
'extraction_time': file.get('extraction_time'),
'Status': file.get('status')
})
files_standards.append({
'Size': file.get('size'),
'MD5': file.get('md5'),
'SHA256': file.get('sha256'),
'SSDeep': file.get('ssdeep'),
'Path': file.get('path')
})
context = {
'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(files, keyTransform=underscoreToCamelCase),
outputPaths['file']: files_standards
}
headers = ['Status', 'Id', 'path', 'endpoint_id', 'extraction_time', 'user']
entry = {
'Type': entryTypes['note'],
'Contents': files,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'CounterTack Endpoints Files', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def endpoint_files_request(endpoint_id):
"""
This request returns all extracted files from specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/files'
response = http_request('GET', suffix_url)
return response
def get_endpoint_files():
"""
Returns extracted files from specific endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
data = []
files_standards = []
files = endpoint_files_request(endpoint_id)
for file in files:
data.append({
'Id': file.get('id'),
'User': file.get('user'),
'EndpointId': file.get('endpoint_id'),
'Path': file.get('path'),
'ExtractionTime': file.get('extraction_time'),
'Status': file.get('status')
})
files_standards.append({
'Size': file.get('size'),
'MD5': file.get('md5'),
'SHA256': file.get('sha256'),
'SSDeep': file.get('ssdeep'),
'Path': file.get('path')
})
context = {
'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(files, keyTransform=underscoreToCamelCase),
outputPaths['file']: files_standards
}
headers = ['Status', 'Id', 'path', 'endpoint_id', 'extraction_time', 'user']
entry = {
'Type': entryTypes['note'],
'Contents': data,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The extracted files from the endpoint:', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def file_information_request(file_id):
"""
request specific file information
"""
suffix_url = 'endpoints/files/' + file_id
response = http_request('GET', suffix_url)
return response
def get_file_information():
"""
Get the information of a specific file
demisto parameter: (string) file_id
The unique ID of the extracted file
"""
context = {}
files_standards = []
file_id = demisto.args().get('file_id')
response = file_information_request(file_id)
data = {
'endpoint_name': response.get('endpoint_name'),
'path': response.get('path'),
'size': response.get('size'),
'extraction_time': response.get('extraction_time'),
'status': response.get('status')
}
files_standards.append({
'Size': response.get('size'),
'MD5': response.get('md5'),
'SHA256': response.get('sha256'),
'SSDeep': response.get('ssdeep'),
'Path': response.get('path')
})
context['CounterTack.File(val.Id && val.Id === obj.Id)'] = createContext(response,
keyTransform=underscoreToCamelCase)
context[outputPaths['file']] = files_standards
headers = ['endpoint_name', 'path', 'size', 'status', 'extraction_time']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack File Information:', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def download_file_request(file_id):
# This request downloads an extracted file.
suffix_url = 'downloads/extractedfiles/' + file_id
response = http_request('GET', suffix_url)
return response
def download_file():
"""
Download an extracted file in a ZIP format.
demisto parameter: (string) file_id
The unique ID of the extracted file
"""
file_id = demisto.args().get('file_id')
response = download_file_request(file_id)
demisto.results(fileResult(file_id + '.zip', response.content))
"""
BEHAVIORS
"""
def get_behaviors_request():
"""
This request retrieves information on a collection of behaviors.
"""
suffix_url = 'behaviors'
response = http_request('GET', suffix_url)
return response
def get_behaviors():
"""
retrieve information on a collection of behaviors.
"""
data = []
behaviors = get_behaviors_request()
for behavior in behaviors:
data.append({
'Id': behavior.get('id'),
'Name': behavior.get('name'),
'Type': behavior.get('type'),
'ImpactLevel': behavior.get('impact_level'),
'lastReported': behavior.get('last_reported'),
'EndpointId': behavior.get('endpoint_id')
})
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(behaviors,
keyTransform=underscoreToCamelCase)
}
headers = ['Name', 'Id', 'Type', 'ImpactLevel', 'EndpointId', 'lastReported']
entry = {
'Type': entryTypes['note'],
'Contents': behaviors,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack Endpoints Behaviors', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def get_behavior_request(behavior_id):
"""
    Request for getting the specified behavior
"""
suffix_url = 'behaviors/' + behavior_id
response = http_request('GET', suffix_url)
return response
def get_behavior():
"""
Get behavior information
demisto parameter: behavior_id(string)
    The unique ID of the behavior
"""
behavior_id = demisto.args().get('behavior_id')
response = get_behavior_request(behavior_id)
data = {
'Id': response.get('id'),
'Name': response.get('name'),
'ImpactLevel': response.get('impact_level'),
'LastActive': response.get('last_active'),
'EventCount': response.get('event_count'),
'MaxImpact': response.get('max_impact'),
'EndpointId': response.get('endpoint_id'),
'Type': response.get('type'),
}
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
headers = ['Name', 'Id', 'ImpactLevel', 'MaxImpact', 'EventCount', 'Type', 'EndpointId', 'LastActive']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack Behavior information', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
"""
BEHAVIORS TAGS
"""
def behaviour_add_tags_request(behaviour_id, body):
"""
The request adds tags to specified behaviour
"""
suffix_url = 'behaviors/' + behaviour_id + '/tags'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def add_behavior_tags():
"""
Add specific tags to specified behavior
demisto parameter: (string) behavior_id
The unique ID of the behavior
demisto parameter: (Array) Body.
    The tags to add to the behavior. Separate the tags with a comma.
"""
behaviour_id = demisto.args().get('behaviour_id')
body = argToList(demisto.args().get('tags'))
response = behaviour_add_tags_request(behaviour_id, body)
behavior_tags = get_behavior_request(behaviour_id)
response = {
'tags': behavior_tags.get('tags'),
'Id': behaviour_id
}
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Behavior tags were added successfully', response),
'EntryContext': context
}
demisto.results(entry)
def delete_tags_behavior_request(behaviour_id, body):
    """
    This request deletes specific tags from the specified behavior
    """
suffix_url = 'behaviors/' + behaviour_id + '/tags'
response = http_request('DELETE', suffix_url, body=json.dumps(body))
return response
def delete_behavior_tags():
"""
Delete specific tags from behavior
demisto parameter: (string) behavior_id
The unique ID of the behavior
demisto parameter: (Array) Body.
    The tags to delete from the behavior. Separate the tags with a comma.
"""
behaviour_id = demisto.args().get('behaviour_id')
body = argToList(demisto.args().get('tags'))
response = delete_tags_behavior_request(behaviour_id, body)
response = get_behavior_request(behaviour_id)
response = {
'tags': response.get('tags'),
'Id': behaviour_id
}
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Endpoint tags were deleted successfully', response, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
"""
SEARCH
"""
def search_endpoints_request(exp):
"""
Request for endpoints search using CQL expression
"""
suffix_url = 'search/endpoints' + exp
response = http_request('GET', suffix_url)
return response
def search_behaviors_request(exp):
"""
    Request for behaviors search using CQL expression
"""
suffix_url = 'search/behaviors' + exp
response = http_request('GET', suffix_url)
return response
def search_events_request(exp):
"""
Request for events search using CQL expression
"""
suffix_url = 'search/events' + exp
response = http_request('GET', suffix_url)
return response
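# --- Illustrative sketch (added for clarity; not part of the original integration) ---
# The search helpers above expect a pre-built query-string fragment of the form
# '?expression=<CQL>'. A hedged example of composing one; the field name and the
# hostname value are hypothetical and depend on the CounterTack CQL schema.
def _example_build_search_expression(hostname):
    """Sketch only: build the expression fragment the search helpers expect."""
    return '?expression=' + 'endpoints.name = "{}"'.format(hostname)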
def search_events():
"""
Request for events search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
events = search_events_request(exp)
if events.get('results'):
results = events.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('events.', ''): v for k, v in results[i].items()})
events['results'] = results_lst
for event in events.get('results'):
data.append({
'Id': event.get('id'),
'Events Action': event.get('action'),
'Events Impact': event.get('impact'),
'Events EndpointID': event.get('endpoint_id'),
'Event Type': event.get('event_type'),
'Collected time': event.get('time_stamp'),
'Source process PID': event.get('source_process_pid'),
'Source process name': event.get('source_process_name')
})
context = {
'CounterTack.Event(val.Id && val.Id === obj.Id)': createContext(results_lst,
keyTransform=underscoreToCamelCase,
removeNull=True)
}
        headers = ['Id', 'Event Type', 'Events Action', 'Events EndpointID', 'Events Impact',
                   'Collected time', 'Source process PID', 'Source process name']
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the events search', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
def search_endpoints():
"""
Request for endpoints search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
endpoint_standards = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
endpoints = search_endpoints_request(exp)
if endpoints.get('results'):
results = endpoints.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('endpoints.', ''): v for k, v in results[i].items()})
endpoints['results'] = results_lst
for endpoint in endpoints.get('results'):
data.append({
'Id': endpoint.get('id'),
'Name': endpoint.get('name'),
'OS': endpoint.get('product_name'),
'IP': endpoint.get('ips'),
'Status': endpoint.get('status'),
'Threat': endpoint.get('threat')
})
endpoint_standards.append({
'Id': endpoint.get('id'),
'IPAddress': endpoint.get('ips'),
'Domain': endpoint.get('domain'),
'MACAddress': endpoint.get('mac'),
'OS': endpoint.get('product_name'),
'OSVersion': endpoint.get('driver_version'),
'Model': endpoint.get('current_profile'),
'Memory': endpoint.get('memory'),
'Processors': endpoint.get('num_cpus')
})
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(results_lst,
keyTransform=underscoreToCamelCase,
removeNull=True),
'Endpoint': endpoint_standards
}
        headers = ['Status', 'Name', 'Id', 'OS', 'Threat', 'IP']
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the endpoints search', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
def search_behaviors():
"""
Request for behaviors search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
behaviors = search_behaviors_request(exp)
if behaviors.get('results'):
results = behaviors.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('behaviors.', ''): v for k, v in results[i].items()})
behaviors['results'] = results_lst
for behavior in behaviors.get('results'):
data.append({
'Id': behavior.get('id'),
'Name': behavior.get('name'),
'Type': behavior.get('type'),
'Impact_Level': behavior.get('impact_level'),
'lastReported': behavior.get('last_reported'),
'EndpointID': behavior.get('endpoint_id')
})
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(results_lst,
keyTransform=underscoreToCamelCase,
removeNull=True)
}
headers = ['Name', 'Type', 'Impact_Level', 'Id', 'EndpointID', 'lastReported']
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the behaviors search', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
def hashes_search_request(exp):
"""
    Request for hashes search using CQL expression
"""
suffix_url = 'search/hashes' + exp
response = http_request('GET', suffix_url)
return response
def search_hashes():
"""
Request for hashes search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
file_standards = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
hashes = hashes_search_request(exp)
if hashes.get('results'):
results = hashes.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('hashes.', ''): v for k, v in results[i].items()})
hashes['results'] = results_lst
for hash_type in hashes.get('results'):
file_hash_type = hash_type.get('type', '').upper()
if file_hash_type == 'SSDEEP':
file_hash_type = 'SSDeep'
hash_id = hash_type.get('id')
data.append({
file_hash_type: hash_id,
'Type': file_hash_type,
'Impact': hash_type.get('impact'),
'VT report location': hash_type.get('vt_report_location'),
'AV Coverage': hash_type.get('av_coverage')
})
if file_hash_type:
file_standards.append({
file_hash_type: hash_id
})
context = {
'CounterTack.Hash(val.hash_id && val.hash_id === obj.hash_id)': createContext(data),
outputPaths['file']: file_standards
}
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the hashes search:', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
"""
FETCH INCIDENTS
"""
def search_notifications_request(params=''):
"""
Request for notifications search using CQL expression
"""
suffix_url = 'search/notifications?expression=' + params
response = http_request('GET', suffix_url)
return response
def fetch_behaviors_request(params=''):
"""
Request for behaviors search using CQL expression
"""
suffix_url = 'search/behaviors?expression=' + params
response = http_request('GET', suffix_url)
return response
def fetch_incidents():
    """
    Fetches behaviors and/or notifications created since the last run and creates incidents
    """
incidents = []
last_run = demisto.getLastRun()
if last_run and last_run['time_stamp']:
last_update_time = last_run['time_stamp']
else:
# In first run
last_update_time, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%S.%f'[:-3])
max_timestamp = last_update_time
if FETCH_BEHAVIORS:
params = 'behaviors.time_stamp>' + last_update_time
behaviors = fetch_behaviors_request(params)
for behavior in behaviors.get('results'):
incident = behavior_to_incident(behavior)
# 0 corresponds to never triggered
            time_stamp = behavior.get('behaviors.time_stamp')[:-5]  # compare time_stamp
if time_stamp > max_timestamp:
max_timestamp = time_stamp
incidents.append(incident)
if FETCH_NOTIFICATIONS:
params = 'notifications.time_stamp>' + last_update_time
notifications = search_notifications_request(params)
for notification in notifications.get('results'):
incident = notifications_to_incidents(notification)
time_stamp = notification.get('notifications.time_stamp')[:-5]
if time_stamp > max_timestamp:
max_timestamp = time_stamp
incidents.append(incident)
demisto.setLastRun({
'time_stamp': max_timestamp
})
demisto.incidents(incidents)
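# --- Illustrative sketch (added for clarity; not part of the original integration) ---
# fetch_incidents() narrows each poll with a '<collection>.time_stamp>' CQL filter and
# then persists the newest timestamp it saw via demisto.setLastRun. A hedged sketch of
# that windowing step in isolation; the helper name is an assumption.
def _example_fetch_window(collection, last_update_time):
    """Sketch only: build the incremental-fetch expression for one collection."""
    return '{}.time_stamp>{}'.format(collection, last_update_time)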
def behavior_to_incident(behavior):
incident = {}
incident['name'] = 'CounterTack Behavior - ' + behavior.get('behaviors.name')
incident['rawJSON'] = json.dumps(behavior)
return incident
def notifications_to_incidents(notification):
incident = {}
incident['name'] = 'CounterTack Notification - ' + notification.get('notifications.message')
incident['rawJSON'] = json.dumps(notification)
return incident
"""
EXECUTION
"""
command = demisto.command()
LOG('Running command "{}"'.format(command))
try:
if command == 'test-module':
get_endpoints_request()
demisto.results('ok')
elif command == 'fetch-incidents':
fetch_incidents()
elif command == 'countertack-get-endpoints':
get_endpoints()
elif command == 'countertack-get-endpoint':
get_endpoint()
elif command == 'countertack-get-endpoint-tags':
get_endpoint_tags()
elif command == 'countertack-add-tags':
add_tags()
elif command == 'countertack-delete-tags':
delete_tags()
elif command == 'countertack-endpoint-quarantine':
endpoint_quarantine()
elif command == 'countertack-disable-quarantine':
disable_quarantine()
elif command == 'countertack-extract-file':
extract_file()
elif command == 'countertack-delete-file':
delete_file()
elif command == 'countertack-get-all-files':
get_all_files()
elif command == 'countertack-get-endpoint-files':
get_endpoint_files()
elif command == 'countertack-get-file-information':
get_file_information()
elif command == 'countertack-download-file':
download_file()
elif command == 'countertack-get-behaviors':
get_behaviors()
elif command == 'countertack-get-behavior':
get_behavior()
elif command == 'countertack-add-behavior-tags':
add_behavior_tags()
elif command == 'countertack-delete-behavior-tags':
delete_behavior_tags()
elif command == 'countertack-search-events':
search_events()
elif command == 'countertack-search-hashes':
search_hashes()
elif command == 'countertack-search-endpoints':
search_endpoints()
elif command == 'countertack-search-behaviors':
search_behaviors()
elif command == 'countertack-kill-process':
kill_process()
except Exception as e:
    LOG(e)
    return_error(str(e))
| 30.245639 | 120 | 0.604056 |
bdac16655c6ca969dc0bc7dd06b8c0d0dd447a3b
| 482 |
py
|
Python
|
pattern-classification/machine_learning/scikit-learn/tokenizer.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2021-12-13T15:41:48.000Z
|
2021-12-13T15:41:48.000Z
|
pattern-classification/machine_learning/scikit-learn/tokenizer.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 15 |
2021-09-12T15:06:13.000Z
|
2022-03-31T19:02:08.000Z
|
pattern-classification/machine_learning/scikit-learn/tokenizer.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2022-01-29T00:37:52.000Z
|
2022-01-29T00:37:52.000Z
|
from nltk.stem.porter import PorterStemmer
import re
from nltk.corpus import stopwords
stop = stopwords.words('english')
porter = PorterStemmer()
def tokenizer(text):
text = re.sub('<[^>]*>', '', text)
emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
text = re.sub('[\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')
text = [w for w in text.split() if w not in stop]
    tokenized = [porter.stem(w) for w in text]
    return tokenized
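# Minimal usage sketch (assumes the NLTK 'stopwords' corpus has already been
# downloaded, e.g. via nltk.download('stopwords')); the sample text is illustrative.
if __name__ == '__main__':
    print(tokenizer('<a>Runners like running :) and they ran a lot</a>'))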
| 34.428571 | 84 | 0.595436 |
97a8774db190df1aef3f8228788815dfb27dbc2d
| 70 |
py
|
Python
|
research/cv/ICNet/src/models/__init__.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/ICNet/src/models/__init__.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/ICNet/src/models/__init__.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
"""__init__"""
from .icnet import ICNet
from .icnet_dc import ICNetdc
| 17.5 | 29 | 0.757143 |
a9b6354e3ee0809a18fb29ba82e1e3553ac6b3df
| 813 |
py
|
Python
|
novel/crawler/novelcrawler/spiders/jianke.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
novel/crawler/novelcrawler/spiders/jianke.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
novel/crawler/novelcrawler/spiders/jianke.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
# -*- coding: utf-8 -*-
import scrapy
class JiankeItem(scrapy.Item):
title = scrapy.Field()
link = scrapy.Field()
desc = scrapy.Field()
class JiankeSpider(scrapy.Spider):
name = 'jianke'
allowed_domains = ['www.xxbiquge.com']
start_urls = ['http://www.xxbiquge.com/2_2327/']
def parse(self, response):
for href in response.css("#list > dl > dd > a::attr('href')"):
url = response.urljoin(href.extract())
yield scrapy.Request(url, callback=self.parse_dir_contents)
@staticmethod
def parse_dir_contents(response):
item = JiankeItem()
item['title'] = response.css("div.bookname > h1::text").extract_first()
item['link'] = response.url
item['desc'] = response.css("#content").extract_first()
yield item
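# --- Illustrative sketch (not part of the original spider) ---
# One way to run this spider standalone, assuming a reasonably recent Scrapy
# (the FEEDS setting requires Scrapy >= 2.1) and that the target site is reachable;
# the output file name is hypothetical.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={'FEEDS': {'chapters.json': {'format': 'json'}}})
    process.crawl(JiankeSpider)
    process.start()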
| 29.035714 | 79 | 0.619926 |
e7883f622839652116c8434612035980b2af5b57
| 3,514 |
py
|
Python
|
extraction/links_in_context/main.py
|
dbmdz/webarchiv-dh-bestandsausbau
|
98c271a09cdb026d1d58133f49dcb3e1c9fcf9b6
|
[
"MIT"
] | null | null | null |
extraction/links_in_context/main.py
|
dbmdz/webarchiv-dh-bestandsausbau
|
98c271a09cdb026d1d58133f49dcb3e1c9fcf9b6
|
[
"MIT"
] | null | null | null |
extraction/links_in_context/main.py
|
dbmdz/webarchiv-dh-bestandsausbau
|
98c271a09cdb026d1d58133f49dcb3e1c9fcf9b6
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from shutil import rmtree
from aut import WebArchive
from pyspark import SparkContext
from pyspark.sql import SparkSession
from links_in_context.filter_records import (
decode_pages,
get_links_in_context,
filter_links_in_context,
exclude_dest_hosts,
merge_links_in_context,
linkcontext_schema,
keep_valid_pages
)
sc = SparkContext.getOrCreate()
sqlContext = SparkSession.builder.getOrCreate()
spark = SparkSession.builder.appName("ExtractLinkcontext").getOrCreate()
input_path = Path("/in")
output_path = Path("/out")
seed_list = "links_in_context/Collection_Seeds.csv"
exclude_list = "links_in_context/Exclude_Domains.csv"
for file in input_path.iterdir():
if file.is_dir():
warc_pattern = "*.warc.gz"
warc_path = file / "arcs" / warc_pattern
extract_path = output_path / file.name
if Path(extract_path, "_SUCCESS").exists():
continue
else:
if extract_path.exists():
rmtree(str(extract_path))
target_instance_size = sum(
warc.stat().st_size for warc in Path(file / "arcs").glob(warc_pattern)
)
if target_instance_size < 6000000000:
records = WebArchive(sc, sqlContext, str(warc_path)).all()
valid_pages = keep_valid_pages(records)
decoded_pages = decode_pages(valid_pages)
links_in_context = get_links_in_context(decoded_pages)
links_in_context = filter_links_in_context(links_in_context)
links_in_context = exclude_dest_hosts(links_in_context, seed_list)
links_in_context = exclude_dest_hosts(links_in_context, exclude_list)
links_in_context.coalesce(1).write.format("json").save(str(extract_path))
else:
suffix = "_linkcontext"
for warc_path in Path(file / "arcs").iterdir():
tmp_output_path = output_path / (file.name + "_tmp")
extract_path = tmp_output_path / (warc_path.stem + suffix)
records = WebArchive(sc, sqlContext, str(warc_path)).all()
valid_pages = keep_valid_pages(records)
decoded_pages = decode_pages(valid_pages)
links_in_context = get_links_in_context(decoded_pages)
links_in_context = filter_links_in_context(links_in_context)
links_in_context = exclude_dest_hosts(links_in_context, seed_list)
links_in_context = exclude_dest_hosts(links_in_context, exclude_list)
links_in_context.coalesce(1).write.format("json").save(
str(extract_path)
)
extracts_path = Path(tmp_output_path / ("*" + suffix))
merge_path = Path(output_path / file.name)
to_merge = (
spark.read.format("json")
.schema(linkcontext_schema)
.option("path", str(extracts_path))
.load()
)
merged = merge_links_in_context(to_merge)
merged.coalesce(1).write.format("json").save(str(merge_path))
rmtree(str(tmp_output_path))
extracts_path = Path(output_path / "*" / "part-00000-*.json")
merge_path = Path(output_path / "all_links_in_context")
to_merge = (
spark.read.format("json")
.schema(linkcontext_schema)
.option("path", str(extracts_path))
.load()
)
merged = merge_links_in_context(to_merge)
merged.coalesce(1).write.format("json").save(str(merge_path))
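# Illustrative invocation (assumptions: the Archives Unleashed Toolkit package and jar
# are available to Spark, and /in and /out are mounted by the surrounding setup, e.g.
# a Docker container); the artifact names below are hypothetical:
#   spark-submit --py-files aut.zip --jars aut.jar links_in_context/main.py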
| 37.784946 | 85 | 0.653671 |