Dataset schema (one record per source file; ⌀ marks nullable columns):

hexsha: string (len 40) | size: int64 (6 – 782k) | ext: string (7 classes) | lang: string (1 class)
max_stars_repo_path: string (len 4 – 237) | max_stars_repo_name: string (len 6 – 72) | max_stars_repo_head_hexsha: string (len 40) | max_stars_repo_licenses: list | max_stars_count: int64 (1 – 53k) ⌀ | max_stars_repo_stars_event_min_datetime: string (len 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (len 24) ⌀
max_issues_repo_path: string (len 4 – 184) | max_issues_repo_name: string (len 6 – 72) | max_issues_repo_head_hexsha: string (len 40) | max_issues_repo_licenses: list | max_issues_count: int64 (1 – 27.1k) ⌀ | max_issues_repo_issues_event_min_datetime: string (len 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (len 24) ⌀
max_forks_repo_path: string (len 4 – 184) | max_forks_repo_name: string (len 6 – 72) | max_forks_repo_head_hexsha: string (len 40) | max_forks_repo_licenses: list | max_forks_count: int64 (1 – 12.2k) ⌀ | max_forks_repo_forks_event_min_datetime: string (len 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (len 24) ⌀
content: string (len 6 – 782k) | avg_line_length: float64 (2.75 – 664k) | max_line_length: int64 (5 – 782k) | alphanum_fraction: float64 (0 – 1)
hexsha: b9e01858b105fdac329d8866fe7276da05d5cfe6 | size: 1,448 | ext: py | lang: Python
path: 2_Iterables/Lists/lists.py | repo: felixdittrich92/Python3 | head: 16b767465e4bdf0adc652c195d15384bb9faa4cf | licenses: ["MIT"]
stars: 1 (2022-03-02T07:16:30.000Z – 2022-03-02T07:16:30.000Z) | issues: null | forks: null
# basic features for lists: https://docs.python.org/3/tutorial/datastructures.html
# List: mutable
def main():
my_list = list()
my_list = [1, 2, 3]
print("----------Hinzufügen----------")
# Option 1: Single Value
my_list.append(-10)
print(my_list)
    # Option 2: List Concatenation
my_list2 = [4, 5]
my_list += my_list2
print(my_list)
# Option 3: Iterables
it = range(-2, 3, 1)
    my_list.extend(it)  # extend the list with any iterable
print(my_list)
# Option 4: Insert at user-defined index
my_list.insert(0, "hello")
print(my_list)
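    # Note: list.insert clamps out-of-range indexes, so -30 below behaves like 0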
my_list.insert(-30, "hello")
print(my_list)
print("----------Entfernen----------")
# Remove values
my_list.pop()
print(my_list)
while 'hello' in my_list:
idx = my_list.index('hello')
my_list.pop(idx)
print(my_list)
print("----------kopieren----------")
# Copy
my_list_new = my_list
print(hex(id(my_list)))
print(hex(id(my_list_new)))
my_list_new = my_list.copy()
print(hex(id(my_list)))
print(hex(id(my_list_new)))
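    # Note: list.copy() is a shallow copy; nested objects stay shared, e.g.
    # a = [[1]]; b = a.copy(); b[0].append(2)  # -> a == [[1, 2]]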
print("----------umdrehen----------")
# Reverse
my_list.reverse()
print(my_list)
print(my_list[::-1])
print("----------zählen----------")
# Count
print(my_list.count(1))
print("----------sortieren----------")
# Sort
my_list.sort(reverse=False)
print(my_list)
if __name__ == "__main__":
main()
avg_line_length: 20.985507 | max_line_length: 82 | alphanum_fraction: 0.560083
hexsha: e00b645841cf57d5da57ccbf53f47615349870c7 | size: 28,552 | ext: py | lang: Python
path: Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/coremodules/network/routing.py | repo: uruzahe/carla | head: 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | licenses: ["MIT"]
stars: 4 (2020-11-13T02:35:56.000Z – 2021-03-29T20:15:54.000Z) | issues: 9 (2020-12-09T02:12:39.000Z – 2021-02-18T00:15:28.000Z) | forks: 1 (2020-11-20T19:31:26.000Z – 2020-11-20T19:31:26.000Z)
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file routing.py
# @author Joerg Schweizer
# @date
import os
import numpy as np
from agilepy.lib_base.processes import Process, CmlMixin, ff, call
class priorityDictionary(dict):
def __init__(self):
'''Initialize priorityDictionary by creating binary heap
of pairs (value,key). Note that changing or removing a dict entry will
not remove the old pair from the heap until it is found by smallest() or
until the heap is rebuilt.'''
self.__heap = []
dict.__init__(self)
def smallest(self):
'''Find smallest item after removing deleted items from heap.'''
if len(self) == 0:
raise IndexError, "smallest of empty priorityDictionary"
heap = self.__heap
while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:
lastItem = heap.pop()
insertionPoint = 0
while 1:
smallChild = 2*insertionPoint+1
if smallChild+1 < len(heap) and \
heap[smallChild][0] > heap[smallChild+1][0]:
smallChild += 1
if smallChild >= len(heap) or lastItem <= heap[smallChild]:
heap[insertionPoint] = lastItem
break
heap[insertionPoint] = heap[smallChild]
insertionPoint = smallChild
return heap[0][1]
def __iter__(self):
'''Create destructive sorted iterator of priorityDictionary.'''
def iterfn():
while len(self) > 0:
x = self.smallest()
yield x
del self[x]
return iterfn()
def __setitem__(self, key, val):
'''Change value stored in dictionary and add corresponding
pair to heap. Rebuilds the heap if the number of deleted items grows
too large, to avoid memory leakage.'''
dict.__setitem__(self, key, val)
heap = self.__heap
if len(heap) > 2 * len(self):
self.__heap = [(v, k) for k, v in self.iteritems()]
self.__heap.sort() # builtin sort likely faster than O(n) heapify
else:
newPair = (val, key)
insertionPoint = len(heap)
heap.append(None)
while insertionPoint > 0 and val < heap[(insertionPoint-1)//2][0]:
heap[insertionPoint] = heap[(insertionPoint-1)//2]
insertionPoint = (insertionPoint-1)//2
heap[insertionPoint] = newPair
def setdefault(self, key, val):
'''Reimplement setdefault to call our customized __setitem__.'''
if key not in self:
self[key] = val
return self[key]
def update(self, other):
for key in other.keys():
self[key] = other[key]
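# Usage sketch (illustrative, not part of the original module): iteration over
# a priorityDictionary is destructive and yields keys in increasing priority
# order, which is exactly what the Dijkstra variants below rely on, e.g.:
#     pq = priorityDictionary()
#     pq['b'] = 2; pq['a'] = 1; pq['c'] = 3
#     for key in pq:
#         pass  # visits 'a', then 'b', then 'c', deleting each entry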
def dijkstra(id_node_start, nodes, edges, ids_node_target=None, weights={}):
"""
    OUTDATED!!! See edgedijkstra.
    Calculates the minimum cost tree and minimum route costs from
    id_node_start to all nodes of the network, or to the
    target nodes given in the set ids_node_target.
    Attention: does not take missing connectors into consideration!!
"""
# print '\n\ndijkstraPlain',id_node_start.getID()
# dictionary of final distances
D = {}
# dictionary of predecessors
P = {}
# est.dist. of non-final vert.
Q = priorityDictionary()
Q[id_node_start] = 0
for v in Q:
D[v] = Q[v]
if ids_node_target is not None:
ids_node_target.discard(v)
# if ids_node_target.discard(v):
if len(ids_node_target) == 0:
return (D, P)
# print ' v=',v.getID(),len(v.getOutgoing())
for id_edge in nodes.ids_outgoing[v]:
# print ' ',edge.getID(),edge._to.getID()
w = edges.ids_tonode[id_edge]
#vwLength = D[v] + weights.get(edge,edge._cost)
vwLength = D[v] + weights.get(id_edge, edges.lengths[id_edge])
if w not in D and (w not in Q or vwLength < Q[w]):
Q[w] = vwLength
P[w] = id_edge
return (D, P)
def edgedijkstra_backwards(id_edge_start, cost_limit,
weights=None, bstar=None):
"""
    Calculates the minimum cost tree and minimum route costs backwards
    from id_edge_start, expanding over predecessor edges until the
    accumulated cost reaches cost_limit, and collects the origin edges found.
"""
ids_origin = set()
# print 'edgedijkstra_backwards id_edge_start',id_edge_start,'cost_limit',cost_limit
# dictionary of final distances
D = {}
# dictionary of predecessors
P = {}
# est.dist. of non-final vert.
if weights[id_edge_start] < 0:
print ' no access id_edge_start, weights', id_edge_start, weights[id_edge_start]
return ([], {}, {})
Q = priorityDictionary()
Q[id_edge_start] = weights[id_edge_start]
ids_edges_nochange = set()
for e in Q:
if (e not in ids_edges_nochange) & (e not in ids_origin):
D[e] = Q[e]
has_changed = False
# print ' --------------'
# print ' toedge',e,'ids_bedge',bstar[e]
# print ' D=',D
# print ' Q=',Q
if not bstar.has_key(e):
print 'WARNING in edgedijkstra: bstar has no edge', e
print 'routes = \n', P
return ([], None, P)
for id_edge in bstar[e]:
if 0:
weight_tot = D[e] + weights[id_edge]
newstate = '|'
if id_edge not in D:
newstate += '*D'
if id_edge not in Q:
newstate += '*Q'
elif weight_tot < Q[id_edge]:
newstate += '<Q'
else:
newstate += '>Q|'
print ' id_bedge', id_edge, 'w=%.2f,w_tot=%.2f' % (
weights[id_edge], weight_tot), weights[id_edge] >= 0, D[e] + weights[id_edge] < cost_limit, id_edge not in D, (id_edge not in Q or weight_tot < Q[id_edge]), newstate
if weights[id_edge] >= 0: # edge accessible?
weight_tot = D[e] + weights[id_edge]
if weight_tot < cost_limit:
if id_edge not in D and (id_edge not in Q or weight_tot < Q[id_edge]):
Q[id_edge] = weight_tot
P[id_edge] = e
has_changed = True
else:
# print ' **found origin',e
ids_origin.add(e)
# print ' has_changed',e,has_changed
if not has_changed:
# break
ids_edges_nochange.add(e)
# print ' P',P
# print ' D',D
    return (ids_origin, D, P)  # returns the tree with all reachable destinations
def edgedijkstra(id_edge_start, ids_edge_target=None,
weights=None, fstar=None):
"""
Calculates minimum cost tree and minimum route costs from
id_edge_start to all edges of the network or to
target edges given in set ids_edge_target.
"""
ids_target = ids_edge_target.copy()
# print 'edgedijkstra'
# dictionary of final distances
D = {}
# dictionary of predecessors
P = {}
# est.dist. of non-final vert.
if weights[id_edge_start] < 0:
print ' WARNING in edgedijkstra: no access id_edge_start, weights', id_edge_start, weights[id_edge_start]
return ({}, {})
Q = priorityDictionary()
Q[id_edge_start] = weights[id_edge_start]
for e in Q:
D[e] = Q[e]
if ids_target is not None:
ids_target.discard(e)
if len(ids_target) == 0:
return (D, P)
if not fstar.has_key(e):
print 'WARNING in edgedijkstra: fstar has no edge', e
print 'routes = \n', P
return (None, P)
for id_edge in fstar[e]:
if weights[id_edge] >= 0: # edge accessible?
weight_tot = D[e] + weights[id_edge]
if id_edge not in D and (id_edge not in Q or weight_tot < Q[id_edge]):
Q[id_edge] = weight_tot
P[id_edge] = e
    return (D, P)  # returns the tree with all reachable destinations
def get_mincostroute_edge2edge(id_rootedge, id_targetedge, D=None, P=None,
weights=None, fstar=None):
"""
Returns cost and shortest path from rootedge to a specific targetedge.
D, P must be precalculated for rootnode with function dijkstraPlainEdge
"""
if D is None:
D, P = edgedijkstra(id_rootedge, set([id_targetedge, ]),
weights=weights, fstar=fstar)
route = [id_targetedge]
if not P.has_key(id_targetedge):
return 0.0, []
e = id_targetedge
while e != id_rootedge:
id_edge = P[e]
route.append(id_edge)
e = id_edge
# route.append(e)
route.reverse()
return D[id_targetedge], route
def get_mincostroute_node2node(id_rootnode, id_targetnode, D, P, edges):
"""
Returns cost and shortest path from rootnode to a specific targetnode.
D, P must be precalculated for rootnode with function dijkstraPlain
"""
# print 'getMinCostRoute node_start=%s, edge_end =%s node_end=%s'%(rootnode.getID(),P[targetnode].getID(),targetnode.getID())
id_node = id_targetnode
route = []
if not P.has_key(id_targetnode):
return 0.0, []
while id_node != id_rootnode:
id_edge = P[id_node]
route.append(id_edge)
id_node = edges.ids_fromnode[id_edge]
# for edge in route:
# print ' ',edge.getID()
route.reverse()
return D[id_targetnode], route
def duaroute(tripfilepath, netfilepath, routefilepath, options='-v --ignore-errors'):
"""
    Simple shortest-path duarouter function
"""
# do not use options: --repair --remove-loops
cmd = 'duarouter '+options+' --trip-files %s --net-file %s --output-file %s'\
% (ff(tripfilepath), ff(netfilepath), ff(routefilepath))
return call(cmd)
def init_random(self, **kwargs):
optiongroup = 'random'
self.add_option('is_timeseed', kwargs.get('is_timeseed', False),
groupnames=[optiongroup, 'options', ],
name='Time seed',
perm='rw',
info='Initialises the random number generator with the current system time.',
cml='--random',
)
self.add_option('seed', kwargs.get('seed', 23423),
groupnames=[optiongroup, 'options', ],
name='Random seed',
perm='rw',
info='Initialises the random number generator with the given value.',
cml='--seed',
)
class RouterMixin(CmlMixin, Process):
def init_tripsrouter(self, ident, net,
trips,
netfilepath=None,
outfilepath=None,
name='Duarouter',
info='Generates routes from trips, flows or previous routes',
is_export_net=True,
logger=None, cml='duarouter'):
self._init_common(ident, name=name,
parent=net,
logger=logger,
info=info,
)
self.init_cml(cml) # pass main shell command
self.is_export_net = is_export_net
self._trips = trips
if netfilepath is None:
netfilepath = net.get_filepath()
self.add_option('netfilepath', netfilepath,
groupnames=['_private'],
cml='--net-file',
perm='r',
name='Net file',
wildcards='Net XML files (*.net.xml)|*.net.xml',
metatype='filepath',
info='SUMO Net file in XML format.',
)
if outfilepath is None:
outfilepath = trips.get_routefilepath()
self.add_option('outfilepath', outfilepath,
groupnames=['_private'],
cml='--output-file',
perm='r',
name='Out routefile',
wildcards='Route XML files (*.rou.xml)|*.rou.xml',
metatype='filepath',
info='Output file of the routing process, which is a SUMO route file in XML format.',
)
def init_options_time(self, **kwargs):
optiongroup = 'time'
self.add_option('time_begin', kwargs.get('time_begin', -1),
groupnames=[optiongroup, 'options', ],
name='Start time',
perm='rw',
info='Defines the begin time; Previous trips will be discarded. The value of -1 takes all routes from the beginning.',
unit='s',
cml='--begin',
is_enabled=lambda self: self.time_begin >= 0.0,
)
self.add_option('time_end', kwargs.get('time_end', -1),
groupnames=[optiongroup, 'options', ],
name='End time',
perm='rw',
info='Defines the end time; Later trips will be discarded; The value of -1 takes all routes to the end.',
unit='s',
cml='--end',
is_enabled=lambda self: self.time_end >= 0.0,
)
def init_options_processing_common(self, **kwargs):
optiongroup = 'processing'
self.add_option('n_alternatives_max', kwargs.get('n_alternatives_max', 5),
name='Max. alternatives',
info='Maximum number of considered route alternatives.',
cml='--max-alternatives',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('is_ignore_errors', kwargs.get('is_ignore_errors', True),
name='Ignore disconnected',
                        info='Continue if a route could not be built.',
cml='--ignore-errors',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('n_threads', kwargs.get('n_threads', 0),
name='Parallel threads',
info="The number of parallel execution threads used for routing.",
cml='--routing-threads',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
def init_options_processing_dua(self, **kwargs):
optiongroup = 'processing'
self.add_option('time_preload', kwargs.get('time_preload', 200),
name='Preload time',
unit='s',
info='Load routes for the next number of seconds ahead.',
cml='--route-steps',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
# self.add_option('is_randomize_flows',kwargs.get('is_randomize_flows',False),
# name = 'Preload time',
# info = 'generate random departure times for flow input.',
# cml = '--randomize-flows',
# groupnames = [optiongroup,'options',],#
# perm='rw',
# )
self.add_option('is_remove_loops', kwargs.get('is_remove_loops', False),
name='Remove loops',
info='Remove loops within the route; Remove turnarounds at start and end of the route. May cause errors!',
cml='--remove-loops',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('is_repair', kwargs.get('is_repair', False),
name='Repair',
info='Tries to correct a false route. May cause errors!',
cml='--repair',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('is_repair_from', kwargs.get('is_repair_from', False),
name='Repair start',
info='Tries to correct an invalid starting edge by using the first usable edge instead.',
cml='--repair.from',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
        self.add_option('is_repair_to', kwargs.get('is_repair_to', False),
name='Repair end',
info='Tries to correct an invalid destination edge by using the last usable edge instead.',
cml='--repair.to',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('is_bulkrouting', kwargs.get('is_bulkrouting', False),
name='Bulk routing?',
info="Aggregate routing queries with the same origin.",
cml='--bulk-routing',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
# --weights.interpolate <BOOL> Interpolate edge weights at interval boundaries; default: false
# --weight-period <TIME> Aggregation period for the given weight files; triggers rebuilding of Contraction Hierarchy; default: 3600
# --weights.expand <BOOL> Expand weights behind the simulation's end; default: false
# --with-taz <BOOL> Use origin and destination zones (districts) for in- and output; default: false
def init_options_methods(self, **kwargs):
optiongroup = 'methods'
self.add_option('method_routechoice', kwargs.get('method_routechoice', 'gawron'),
name='Routechoice method',
choices=['gawron', 'logit', 'lohse'],
info="Mathematical model used for route choice.",
cml='--route-choice-method',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('beta_gawron', kwargs.get('beta_gawron', 0.3),
name="Gawron's 'beta'",
info="Gawron's 'beta' parameter.",
cml='--gawron.beta',
groupnames=[optiongroup, 'options', ],
perm='rw',
                        is_enabled=lambda self: self.method_routechoice == 'gawron',
)
self.add_option('a_gawron', kwargs.get('a_gawron', 0.05),
name="Gawron's 'a'",
info="Gawron's 'a' parameter.",
cml='--gawron.a',
groupnames=[optiongroup, 'options', ],
perm='rw',
                        is_enabled=lambda self: self.method_routechoice == 'gawron',
)
self.add_option('beta_logit', kwargs.get('beta_logit', 0.15),
name="Logit's 'beta'",
info="C-Logit's 'beta' parameter.",
cml='--logit.beta',
groupnames=[optiongroup, 'options', ],
perm='rw',
                        is_enabled=lambda self: self.method_routechoice == 'logit',
)
self.add_option('gamma_logit', kwargs.get('gamma_logit', 1.0),
name="Logit's 'gamma'",
info="C-Logit's 'gamma' parameter.",
cml='--logit.gamma',
groupnames=[optiongroup, 'options', ],
perm='rw',
                        is_enabled=lambda self: self.method_routechoice == 'logit',
)
self.add_option('theta_logit', kwargs.get('theta_logit', 0.01),
name="Logit's 'theta'",
info="C-Logit's 'theta' parameter.",
cml='--logit.theta',
groupnames=[optiongroup, 'options', ],
perm='rw',
                        is_enabled=lambda self: self.method_routechoice == 'logit',
)
self.add_option('algorithm_routing', kwargs.get('algorithm_routing', 'dijkstra'),
name='Routing algorithm',
choices=['dijkstra', 'astar', 'CH', 'CHWrapper'],
info="Select among routing algorithms.",
cml='--routing-algorithm',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('is_keep_all_routes', kwargs.get('is_keep_all_routes', False),
name='Keep all routes?',
info="Save even routes with near zero probability.",
cml='--keep-all-routes',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('is_skip_new_routes', kwargs.get('is_skip_new_routes', False),
name='Skip new routes?',
info="Only reuse routes from input, do not calculate new ones.",
cml='--skip-new-routes',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
def do(self):
if self.is_export_net:
# first export current net
self.parent.export_netxml(self.netfilepath)
if self.is_export_trips:
self._trips.export_trips_xml(self.tripfilepaths)
self.update_params()
cml = self.get_cml()
# print 'SumonetImporter.do',cml
self.run_cml(cml)
if self.status == 'success':
print ' Routing done.'
if os.path.isfile(self.outfilepath):
# print ' outfile exists, start importing routes'
self._trips.import_routes_xml(self.outfilepath,
is_clear_trips=False,
is_generate_ids=False,
is_add=True)
return True
return False
return False
class DuaRouter(RouterMixin):
def __init__(self, net, trips,
tripfilepaths=None,
outfilepath=None,
is_export_net=True,
logger=None,
**kwargs):
print 'DuaRouter.__init__ net, trips', net, trips
self.init_tripsrouter('duarouter', net, # net becomes parent
trips,
outfilepath=outfilepath,
logger=logger,
is_export_net=is_export_net,
)
if tripfilepaths is None:
if trips is not None:
tripfilepaths = trips.get_tripfilepath()
self.is_export_trips = True
else:
self.is_export_trips = False
else:
self.is_export_trips = False
print ' tripfilepaths', tripfilepaths
if tripfilepaths is not None:
self.add_option('tripfilepaths', tripfilepaths,
groupnames=['_private'],
cml='--trip-files',
perm='r',
name='Trip file(s)',
wildcards='Trip XML files (*.trip.xml)|*.trip.xml',
metatype='filepaths',
info='SUMO Trip files in XML format.',
)
self.init_options_time(**kwargs)
self.init_options_methods(**kwargs)
self.init_options_processing_common(**kwargs)
self.init_options_processing_dua(**kwargs)
init_random(self, **kwargs)
class MacroRouter(RouterMixin):
"""
Macroscopic router
in development
"""
def __init__(self, net, trips,
tripfilepaths=None,
netfilepath=None,
outfilepath=None,
is_export_net=True,
logger=None,
**kwargs):
print 'MacroRouter.__init__ net, trips', net, trips
self.init_tripsrouter('macrorouter', net, # net becomes parent
trips,
netfilepath=netfilepath,
outfilepath=outfilepath,
name='Macroscopic router',
info='Generates routes from trips, flows or previous routes',
is_export_net=is_export_net,
logger=logger,
cml='marouter'
)
if tripfilepaths is None:
if trips is not None:
tripfilepaths = trips.get_tripfilepath()
self.is_export_trips = True
else:
self.is_export_trips = False
else:
self.is_export_trips = False
print ' tripfilepaths', tripfilepaths
if tripfilepaths is not None:
self.add_option('tripfilepaths', tripfilepaths,
groupnames=['_private'],
cml='--route-files',
perm='r',
name='Trip file(s)',
wildcards='Trip XML files (*.trip.xml)|*.trip.xml',
metatype='filepaths',
info='SUMO Trip files in XML format.',
)
self.init_options_time(**kwargs)
self.init_options_methods(**kwargs)
# marouter specific
optiongroup = 'methods'
self.add_option('n_iter_max', kwargs.get('n_iter_max', 20),
name='Max. Iterations',
info="maximal number of iterations for new route searching in incremental and stochastic user assignment.",
cml='--max-iterations',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.add_option('n_iter_inner_max', kwargs.get('n_iter_inner_max', 1000),
name='Max. inner Iter.',
info="maximal number of inner iterations for user equilibrium calcuation in the stochastic user assignment.",
cml='--max-inner-iterations',
groupnames=[optiongroup, 'options', ],
perm='rw',
)
self.init_options_processing_common(**kwargs)
init_random(self, **kwargs)
avg_line_length: 40.614509 | max_line_length: 189 | alphanum_fraction: 0.504973
hexsha: 16141fc7912eff2630ee2716a0513f817bff7308 | size: 2,480 | ext: py | lang: Python
path: exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/net_tools/lldp.py | repo: tr3ck3r/linklight | head: 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lldp
requirements: [ lldpctl ]
short_description: get details reported by lldp
description:
- Reads data out of lldpctl
options: {}
author: "Andy Hill (@andyhky)"
notes:
- Requires lldpd running and lldp enabled on switches
'''
EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
with_items: "{{ lldp.keys() }}"
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
'''
from ansible.module_utils.basic import AnsibleModule
def gather_lldp(module):
cmd = ['lldpctl', '-f', 'keyvalue']
rc, output, err = module.run_command(cmd)
if output:
output_dict = {}
current_dict = {}
lldp_entries = output.split("\n")
for entry in lldp_entries:
if entry.startswith('lldp'):
path, value = entry.strip().split("=", 1)
path = path.split(".")
path_components, final = path[:-1], path[-1]
else:
value = current_dict[final] + '\n' + entry
current_dict = output_dict
for path_component in path_components:
current_dict[path_component] = current_dict.get(path_component, {})
current_dict = current_dict[path_component]
current_dict[final] = value
return output_dict
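# Sketch of the expected 'lldpctl -f keyvalue' input (inferred from the parser
# above, shown for illustration): lines such as
#     lldp.eth0.chassis.name=switch1.example.com
#     lldp.eth0.port.ifname=Gi0/24
# are folded into the nested dict
#     {'lldp': {'eth0': {'chassis': {'name': ...}, 'port': {'ifname': ...}}}}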
def main():
module = AnsibleModule({})
lldp_output = gather_lldp(module)
try:
data = {'lldp': lldp_output['lldp']}
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed. is lldpd running?")
if __name__ == '__main__':
main()
avg_line_length: 29.176471 | max_line_length: 92 | alphanum_fraction: 0.592339
hexsha: 161b3b9572ae019879eb06a9654e3af95be5891d | size: 197 | ext: py | lang: Python
path: hello-world/hello/hello.py | repo: crackedcd/Intern.MT | head: 36398837af377a7e1c4edd7cbb15eabecd2c3103 | licenses: ["MIT"]
stars: 1 (2019-07-05T03:42:17.000Z – 2019-07-05T03:42:17.000Z) | issues: null | forks: 1 (2019-06-24T05:56:55.000Z – 2019-06-24T05:56:55.000Z)
def hello():
return "Hello, pobby!"
def main():
s = hello()
print(s)
"""hahahahahah
sdjfkadsjflksdjfkldsajfklsda
fjdaslkfjdsakl"""
if __name__ == '__main__':
main()
avg_line_length: 13.133333 | max_line_length: 32 | alphanum_fraction: 0.598985
hexsha: 1697cbbada3ad4216b2abe9509ecca194c6b89b3 | size: 389 | ext: py | lang: Python
path: exercises/fr/exc_01_09.py | repo: tuanducdesign/spacy-course | head: f8d092c5fa2997fccb3f367d174dce8667932b3d | licenses: ["MIT"]
stars: null | issues: null | forks: null
import spacy
nlp = spacy.load("fr_core_news_sm")
text = "Apple : le nouveau modèle X Pro attendu pour l'été."
# Process the text
doc = ____
# Iterate over the entities
for ____ in ____.____:
    # Print the entity text and its label
print(____.____, ____.____)
# Get the span for "X Pro"
x_pro = ____
# Print the span text
print("Entité manquante :", x_pro.text)
avg_line_length: 19.45 | max_line_length: 60 | alphanum_fraction: 0.706941
hexsha: bc457aa6f2c9f86fd48baf479a195ae9e64bc729 | size: 5,559 | ext: py | lang: Python
path: frappe-bench/apps/erpnext/erpnext/accounts/report/budget_variance_report/budget_variance_report.py | repo: Semicheche/foa_frappe_docker | head: a186b65d5e807dd4caf049e8aeb3620a799c1225 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.utils import formatdate
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
cost_centers = get_cost_centers(filters)
period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
cam_map = get_cost_center_account_month_map(filters)
data = []
for cost_center in cost_centers:
cost_center_items = cam_map.get(cost_center)
if cost_center_items:
for account, monthwise_data in cost_center_items.items():
row = [cost_center, account]
totals = [0, 0, 0]
for relevant_months in period_month_ranges:
period_data = [0, 0, 0]
for month in relevant_months:
month_data = monthwise_data.get(month, {})
for i, fieldname in enumerate(["target", "actual", "variance"]):
value = flt(month_data.get(fieldname))
period_data[i] += value
totals[i] += value
period_data[2] = period_data[0] - period_data[1]
row += period_data
totals[2] = totals[0] - totals[1]
row += totals
data.append(row)
return columns, data
def get_columns(filters):
columns = [_(filters.get("budget_against")) + ":Link/%s:120"%(filters.get("budget_against")), _("Account") + ":Link/Account:120"]
group_months = False if filters["period"] == "Monthly" else True
for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
for label in [_("Target") + " (%s)", _("Actual") + " (%s)", _("Variance") + " (%s)"]:
if group_months:
label = label % (formatdate(from_date, format_string="MMM") + " - " + formatdate(to_date, format_string="MMM"))
else:
label = label % formatdate(from_date, format_string="MMM")
columns.append(label+":Float:120")
return columns + [_("Total Target") + ":Float:120", _("Total Actual") + ":Float:120",
_("Total Variance") + ":Float:120"]
def get_cost_centers(filters):
cond = "and 1=1"
if filters.get("budget_against") == "Cost Center":
cond = "order by lft"
return frappe.db.sql_list("""select name from `tab{tab}` where company=%s
{cond}""".format(tab=filters.get("budget_against"), cond=cond), filters.get("company"))
# Get cost center & target details
def get_cost_center_target_details(filters):
return frappe.db.sql("""
select b.{budget_against} as budget_against, b.monthly_distribution, ba.account, ba.budget_amount
from `tabBudget` b, `tabBudget Account` ba
where b.name=ba.parent and b.docstatus = 1 and b.fiscal_year=%s
and b.budget_against = %s and b.company=%s
""".format(budget_against=filters.get("budget_against").replace(" ", "_").lower()),
(filters.fiscal_year, filters.budget_against, filters.company), as_dict=True)
# Get target distribution details of accounts of cost center
def get_target_distribution_details(filters):
target_details = {}
for d in frappe.db.sql("""select md.name, mdp.month, mdp.percentage_allocation
from `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
where mdp.parent=md.name and md.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
return target_details
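# The returned shape is {distribution_name: {month_name: percentage_allocation}},
# e.g. {'Seasonal': {'January': 8.33, ...}} (names illustrative).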
# Get actual details from GL entry
def get_actual_details(name, filters):
cond = "1=1"
budget_against=filters.get("budget_against").replace(" ", "_").lower()
if filters.get("budget_against") == "Cost Center":
cc_lft, cc_rgt = frappe.db.get_value("Cost Center", name, ["lft", "rgt"])
cond = "lft>='{lft}' and rgt<='{rgt}'".format(lft = cc_lft, rgt=cc_rgt)
ac_details = frappe.db.sql("""select gl.account, gl.debit, gl.credit,
MONTHNAME(gl.posting_date) as month_name, b.{budget_against} as budget_against
from `tabGL Entry` gl, `tabBudget Account` ba, `tabBudget` b
where
b.name = ba.parent
and b.docstatus = 1
and ba.account=gl.account
and b.{budget_against} = gl.{budget_against}
and gl.fiscal_year=%s
and b.{budget_against}=%s
and exists(select name from `tab{tab}` where name=gl.{budget_against} and {cond})
""".format(tab = filters.budget_against, budget_against = budget_against, cond = cond),
(filters.fiscal_year, name), as_dict=1)
cc_actual_details = {}
for d in ac_details:
cc_actual_details.setdefault(d.account, []).append(d)
return cc_actual_details
def get_cost_center_account_month_map(filters):
import datetime
cost_center_target_details = get_cost_center_target_details(filters)
tdd = get_target_distribution_details(filters)
cam_map = {}
for ccd in cost_center_target_details:
actual_details = get_actual_details(ccd.budget_against, filters)
for month_id in range(1, 13):
month = datetime.date(2013, month_id, 1).strftime('%B')
cam_map.setdefault(ccd.budget_against, {}).setdefault(ccd.account, {})\
.setdefault(month, frappe._dict({
"target": 0.0, "actual": 0.0
}))
tav_dict = cam_map[ccd.budget_against][ccd.account][month]
month_percentage = tdd.get(ccd.monthly_distribution, {}).get(month, 0) \
if ccd.monthly_distribution else 100.0/12
tav_dict.target = flt(ccd.budget_amount) * month_percentage / 100
for ad in actual_details.get(ccd.account, []):
if ad.month_name == month:
tav_dict.actual += flt(ad.debit) - flt(ad.credit)
return cam_map
avg_line_length: 38.337931 | max_line_length: 130 | alphanum_fraction: 0.715956
hexsha: bce44273fbafba0c45a7f5fb57bd3ef037cbf29d | size: 77 | ext: py | lang: Python
path: multimeter/globvar.py | repo: tropi-frutti/facharbeit-fhg | head: e162014a9d12e60c616d78c14166c75414acda7b | licenses: ["MIT"]
stars: null | issues: null | forks: null
'''
Created on 15.02.2017
@author: emillokal
'''
measure0=0
measure1=0
avg_line_length: 11 | max_line_length: 22 | alphanum_fraction: 0.649351
hexsha: 4c0c6a9313e6ab7e161fe3bd6fedac7c6ff23a0a | size: 558 | ext: py | lang: Python
path: frappe-bench/apps/erpnext/erpnext/patches/v7_0/update_maintenance_module_in_doctype.py | repo: Semicheche/foa_frappe_docker | head: a186b65d5e807dd4caf049e8aeb3620a799c1225 | licenses: ["MIT"]
stars: 1 (2021-04-29T14:55:29.000Z – 2021-04-29T14:55:29.000Z) | issues: null | forks: 1 (2021-04-29T14:39:01.000Z – 2021-04-29T14:39:01.000Z)
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
from __future__ import unicode_literals
import frappe
def execute():
frappe.db.set_value("DocType", "Maintenance Schedule", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Schedule Detail", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Schedule Item", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Visit", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Visit Purpose", "module", "Maintenance")
avg_line_length: 50.727273 | max_line_length: 87 | alphanum_fraction: 0.756272
hexsha: 4c37765fce61d94d70c7d552470b1e8946d11cdd | size: 1,682 | ext: py | lang: Python
path: 0-notes/job-search/SamplesDSAlgos/data structures/datastructures-stack.py | repo: eengineergz/Lambda | head: 1fe511f7ef550aed998b75c18a432abf6ab41c5f | licenses: ["MIT"]
stars: null | issues: null | forks: null
from singly_linked_list import LinkedList
"""
What is the difference between using an array vs. a linked list when
implementing a Stack?
    The major difference is that an array is an index-based data structure:
    each element is associated with an index and can be reached directly.
    A linked list instead relies on pointers: each node holds its data
    plus pointers to the previous and/or next node.
    Arrays can be searched with binary search (when sorted) or linear
    search; linked lists can only be traversed linearly.
    Arrays support direct, random access to any element; a linked list
    is accessed only through its head (or tail) pointer.
"""
# STACK
# A stack is a data structure whose primary purpose is to store and
# return elements in Last-In-First-Out (LIFO) order.
# time complexity: Avg | Worst
# Access: O(n) | O(n)
# Search: O(n) | O(n)
# Insertion: O(1) | O(1)
# Deletion: O(1) | O(1)
# space complexity: O(n)
class Stack:
def __init__(self):
self.size = 0
self.storage = LinkedList()
# get stack length/size
def __len__(self):
return self.size
def push(self, value):
# add one to size
self.size += 1
# add item to top of stack
self.storage.add_to_head(value)
def pop(self):
# if stack empty, can't remove anything
if self.size == 0:
return None
else:
# subtract one from size
self.size -= 1
# remove from top of stack & return item
return self.storage.remove_from_head()
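# Minimal usage sketch (assuming the LinkedList API imported above):
#     s = Stack()
#     s.push(1); s.push(2)
#     s.pop()   # -> 2 (last in, first out)
#     len(s)    # -> 1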
avg_line_length: 28.508475 | max_line_length: 74 | alphanum_fraction: 0.595719
hexsha: d5f08c2dd826a82b3823ae489dd2c6241b800607 | size: 7,234 | ext: py | lang: Python
path: scriptsForPreprocessing/remove_bad_mask_manual.py | repo: fishial/Object-Detection-Model | head: 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | licenses: ["CC0-1.0"]
stars: 1 (2022-01-03T14:00:17.000Z – 2022-01-03T14:00:17.000Z) | issues: null | forks: 1 (2021-12-21T09:50:53.000Z – 2021-12-21T09:50:53.000Z)
import os, json
import random
import cv2
import numpy as np
from PIL import Image, ImageDraw
def resize_image_if_big(image):
h, w, _ = image.shape
scale_X = 1
scale_Y = 1
if w > 1024 or h > 768:
scale_X = 1024 / w
scale_Y = 768 / h
        image = cv2.resize(image, (int(w * scale_X), int(h * scale_Y)))  # Resize image
cv2.imshow("img", image)
return scale_X, scale_Y
def click_and_crop(event, x, y, flags, param):
global points, cropping
if event == cv2.EVENT_LBUTTONDOWN:
cropping = True
elif event == cv2.EVENT_LBUTTONUP:
cropping = False
points.append([x, y])
def random_color():
levels = range(32, 256, 32)
return tuple(random.choice(levels) for _ in range(3))
def calc_area(axis):
return (axis[2] - axis[0]) * (axis[3] - axis[1])
def bubble_sort(arr):
n = len(arr)
for i in range(n - 1):
for j in range(0, n - i - 1):
if calc_area(arr[j][0]) > calc_area(arr[j + 1][0]):
arr[j], arr[j + 1] = arr[j + 1], arr[j]
def draw_mask_on_image(img_src, i, target_mask):
h, w, _ = img_src.shape
dst = img_src.copy()
mask_general = np.zeros((h, w))
current_idx_on_image = []
cnt = 0
for idx_, i_idx in enumerate(i):
if i_idx[1] in target_mask: continue
color = random_color()
polygon_calc = []
for polygon_idx in range(len(i_idx[0]['regions']['0']['shape_attributes']['all_points_x'])):
polygon_calc.append((i_idx[0]['regions']['0']['shape_attributes']['all_points_x'][polygon_idx],
i_idx[0]['regions']['0']['shape_attributes']['all_points_y'][polygon_idx]))
if polygon_idx == 0: continue
cv2.line(dst, (i_idx[0]['regions']['0']['shape_attributes']['all_points_x'][polygon_idx - 1],
i_idx[0]['regions']['0']['shape_attributes']['all_points_y'][polygon_idx - 1]),
(i_idx[0]['regions']['0']['shape_attributes']['all_points_x'][polygon_idx],
i_idx[0]['regions']['0']['shape_attributes']['all_points_y'][polygon_idx]), color, thickness=2)
if len(polygon_calc) < 8:
json_tmp_copy[i_idx[1]]['verified'] = True
json_tmp_copy[i_idx[1]]['correct'] = False
continue
img = Image.new('L', (w, h), 0)
ImageDraw.Draw(img).polygon(list(map(tuple, polygon_calc)), outline=1, fill=255)
tmp_mask = np.array(img)
mask_general = cv2.addWeighted(tmp_mask, 1, mask_general, 1, 0, dtype=cv2.CV_8UC1)
current_idx_on_image.append([[min(i_idx[0]['regions']['0']['shape_attributes']['all_points_x']),
min(i_idx[0]['regions']['0']['shape_attributes']['all_points_y']),
max(i_idx[0]['regions']['0']['shape_attributes']['all_points_x']),
max(i_idx[0]['regions']['0']['shape_attributes']['all_points_y'])], i_idx[1]])
cv2.rectangle(dst, (min(i_idx[0]['regions']['0']['shape_attributes']['all_points_x']),
min(i_idx[0]['regions']['0']['shape_attributes']['all_points_y'])),
(max(i_idx[0]['regions']['0']['shape_attributes']['all_points_x']),
max(i_idx[0]['regions']['0']['shape_attributes']['all_points_y'])), color, 3)
        cnt += 1
mask_stack = np.dstack([mask_general] * 3)
mask_stack_arr = np.asarray(mask_stack)
dst = cv2.addWeighted(dst, 0.5, mask_stack_arr, 0.5, 0, dtype=cv2.CV_8UC3)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(dst, 'Mask: {} '.format(cnt), (10, 50),
font, 1, (0, 255, 0), 2, cv2.LINE_AA)
if len(current_idx_on_image) != 1: bubble_sort(current_idx_on_image)
return dst, current_idx_on_image
points = []
cropping = False
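# Interactive controls (as handled in the event loop below): left-click marks
# the mask under the cursor for removal, 'x' undoes the last removal, 's'
# saves the JSON, SPACE confirms and advances to the next image, ESC saves
# and exits.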
# import data
path_to_json = r'resources/fishial/val/via_region_data.json'
# path to augmented dataset
path_to_aug_dataset = r'resources/fishial/val'
json_tmp = json.load(open(path_to_json))
unique_img_array = []
json_tmp_copy = json_tmp.copy()
while len(json_tmp) != 0:
keys = list(json_tmp.keys())
single_img = [[json_tmp[keys[len(keys) - 1]], keys[len(keys) - 1]]]
img_name = json_tmp[keys[len(keys) - 1]]['filename']
del json_tmp[keys[len(keys) - 1]]
for idx in range(len(json_tmp) - 1, -1, -1):
if json_tmp[keys[idx]]['filename'] == img_name:
single_img.append([json_tmp[keys[idx]], keys[idx]])
del json_tmp[keys[idx]]
unique_img_array.append(single_img)
result_dict = {}
ok = 0
nok = 0
for leave, current_collection in enumerate(unique_img_array):
print("Score: ", len(unique_img_array) - leave, len(result_dict))
# if current_collection[0][0]['verified']:
# continue
img_main = os.path.join(path_to_aug_dataset, current_collection[0][0]['filename'])
image = cv2.imread(img_main)
idx_to_remove = []
dst, current_idx_on_image = draw_mask_on_image(image, current_collection, idx_to_remove)
original_mask_on_image = current_idx_on_image.copy()
cv2.namedWindow("img")
cv2.setMouseCallback("img", click_and_crop)
points = []
scale_X, scale_Y = resize_image_if_big(dst)
while True:
key = cv2.waitKey(1) & 0xFF
if key == ord("s"):
print("Wait for save ...")
with open(path_to_json, 'w') as fp:
json.dump(json_tmp_copy, fp)
print("Change saved ! ")
elif key == ord("x"):
print("Undo")
try:
del idx_to_remove[-1]
dst, current_idx_on_image = draw_mask_on_image(image, current_collection, idx_to_remove)
scale_X, scale_Y = resize_image_if_big(dst)
except:
print("error")
elif key == 32:
print("Next image")
for zx in original_mask_on_image:
idx_record = zx[1]
if idx_record in idx_to_remove:
json_tmp_copy[idx_record]['verified'] = True
json_tmp_copy[idx_record]['correct'] = False
else:
json_tmp_copy[idx_record]['verified'] = True
json_tmp_copy[idx_record]['correct'] = True
break
elif key == 27:
print("Wait for save ...")
with open(path_to_json, 'w') as fp:
json.dump(json_tmp_copy, fp)
print("Change saved ! ")
exit(0)
if len(points) == 0: continue
idx_to_remove_tmp = []
for point in points:
for i in current_idx_on_image:
if i[0][0] < point[0] / scale_X < i[0][2] and i[0][1] < point[1]/scale_Y < i[0][3]:
idx_to_remove_tmp.append(i[1])
break
if len(idx_to_remove_tmp) != 0:
idx_to_remove += idx_to_remove_tmp
dst, current_idx_on_image = draw_mask_on_image(image, current_collection, idx_to_remove)
scale_X, scale_Y = resize_image_if_big(dst)
points = []
print("Wait for save ...")
with open(path_to_json, 'w') as fp:
json.dump(json_tmp_copy, fp)
print("Change saved ! ")
avg_line_length: 39.530055 | max_line_length: 117 | alphanum_fraction: 0.581836
hexsha: 9104ecc2d4835f85068234be7d3cfbc689345b0c | size: 42,358 | ext: py | lang: Python
path: tests/onegov/election_day/screen_widgets/test_election_widgets.py | repo: politbuero-kampagnen/onegov-cloud | head: 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from chameleon import PageTemplate
from datetime import date
from lxml import etree
from onegov.ballot import Election
from onegov.ballot import ElectionCompound
from onegov.ballot import ProporzElection
from onegov.core.widgets import inject_variables
from onegov.core.widgets import transform_structure
from onegov.election_day.layouts import ElectionLayout
from onegov.election_day.layouts import ElectionCompoundLayout
from onegov.election_day.screen_widgets import (
ColumnWidget,
CountedEntitiesWidget,
ElectionCandidatesByEntityTableWidget,
ElectionCandidatesChartWidget,
ElectionCandidatesTableWidget,
ElectionCompoundCandidatesTableWidget,
ElectionCompoundDistrictsTableWidget,
ElectionCompoundListsChartWidget,
ElectionCompoundListsTableWidget,
ElectionListsChartWidget,
ElectionListsTableWidget,
ProgressWidget,
RowWidget,
TitleWidget,
)
from tests.onegov.election_day.common import DummyRequest
def test_majorz_election_widgets(election_day_app, import_test_datasets):
structure = """
<row>
<column span="1">
<title class="my-class-1"/>
</column>
<column span="1">
<progress class="my-class-2"/>
</column>
<column span="1">
<counted-entities class="my-class-3"/>
</column>
<column span="1">
<election-candidates-table class="my-class-4"
lists="SP,
Grüne,"/>
</column>
<column span="1">
<election-candidates-chart class="my-class-5"/>
</column>
<column span="1">
<election-candidates-chart class="my-class-6" limit="2"
lists="x,y" elected="True"/>
</column>
<column span="1">
<election-candidates-by-entity-table class="my-class-7"/>
</column>
</row>
"""
widgets = [
RowWidget(),
ColumnWidget(),
CountedEntitiesWidget(),
ProgressWidget(),
TitleWidget(),
ElectionCandidatesChartWidget(),
ElectionCandidatesTableWidget(),
ElectionCandidatesByEntityTableWidget(),
]
# Empty
session = election_day_app.session()
session.add(
Election(title='Election', domain='canton', date=date(2015, 6, 18))
)
session.flush()
model = session.query(Election).one()
request = DummyRequest(app=election_day_app, session=session)
layout = ElectionLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'candidates': [],
'candidates_by_entites': ([], []),
'election': model,
'embed': False,
'entities': '',
'layout': layout,
'model': model,
'request': request,
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert '>Election</span>' in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
# Add intermediate results
model, errors = import_test_datasets(
'internal',
'election',
'zg',
'canton',
'majorz',
date_=date(2015, 10, 18),
number_of_mandates=2,
dataset_name='staenderatswahl-2015-intermediate',
app_session=session
)
assert not errors
session.add(model)
session.flush()
layout = ElectionLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'candidates': [
('Hegglin', 'Peter', True, 'CVP', 10693, None, None),
('Eder', 'Joachim', True, 'FDP', 10103, None, None),
('Brandenberg', 'Manuel', False, 'SVP', 4845, None, None),
('Gysel', 'Barbara', False, 'SP', 2890, None, None),
('Lustenberger', 'Andreas', False, 'Grüne', 2541, None, None),
('Thöni', 'Stefan', False, 'Piraten', 746, None, None)
],
'candidates_by_entites': (
[
('Brandenberg', 'Manuel', 4845),
('Eder', 'Joachim', 10103),
('Gysel', 'Barbara', 2890),
('Hegglin', 'Peter', 10693),
('Lustenberger', 'Andreas', 2541),
('Thöni', 'Stefan', 746)
],
[
('Baar', [
('Baar', 'Brandenberg', 'Manuel', 2100),
('Baar', 'Eder', 'Joachim', 4237),
('Baar', 'Gysel', 'Barbara', 1264),
('Baar', 'Hegglin', 'Peter', 4207),
('Baar', 'Lustenberger', 'Andreas', 1269),
('Baar', 'Thöni', 'Stefan', 320)
]),
('Cham', [
('Cham', 'Brandenberg', 'Manuel', 1404),
('Cham', 'Eder', 'Joachim', 2726),
('Cham', 'Gysel', 'Barbara', 888),
('Cham', 'Hegglin', 'Peter', 2905),
('Cham', 'Lustenberger', 'Andreas', 685),
('Cham', 'Thöni', 'Stefan', 232)
]),
('Hünenberg', [
('Hünenberg', 'Brandenberg', 'Manuel', 881),
('Hünenberg', 'Eder', 'Joachim', 2098),
('Hünenberg', 'Gysel', 'Barbara', 540),
('Hünenberg', 'Hegglin', 'Peter', 2205),
('Hünenberg', 'Lustenberger', 'Andreas', 397),
('Hünenberg', 'Thöni', 'Stefan', 140)
]),
('Menzingen', [
('Menzingen', 'Brandenberg', 'Manuel', 460),
('Menzingen', 'Eder', 'Joachim', 1042),
('Menzingen', 'Gysel', 'Barbara', 198),
('Menzingen', 'Hegglin', 'Peter', 1376),
('Menzingen', 'Lustenberger', 'Andreas', 190),
('Menzingen', 'Thöni', 'Stefan', 54)
])
]
),
'election': model,
'embed': False,
'entities': 'Baar, Cham, Hünenberg, Menzingen',
'layout': layout,
'model': model,
'request': request,
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert 'majorz_internal_staenderatswahl-2015-intermediate' in result
assert '4 of 11' in result
assert 'Baar, Cham, Hünenberg, Menzingen' in result
assert 'election-candidates-table' in result
assert 'data-text="10693"' not in result
assert 'data-text="2890"' in result
assert 'data-text="2541"' in result
assert '>n.a.</td>' in result
assert (
'data-dataurl="Election/candidates-data'
'?limit=0&lists=&elected="'
) in result
assert (
'data-dataurl="Election/candidates-data'
'?limit=02&lists=x,y&elected=True"'
) in result
assert 'election-candidates-by-entity-table' in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
# Add final results
model, errors = import_test_datasets(
'internal',
'election',
'zg',
'canton',
'majorz',
date_=date(2015, 10, 18),
number_of_mandates=2,
dataset_name='staenderatswahl-2015',
app_session=session
)
assert not errors
session.add(model)
session.flush()
layout = ElectionLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'candidates': [
('Hegglin', 'Peter', True, 'CVP', 24132, None, None),
('Eder', 'Joachim', True, 'FDP', 23620, None, None),
('Brandenberg', 'Manuel', False, 'SVP', 10997, None, None),
('Gysel', 'Barbara', False, 'SP', 6612, None, None),
('Lustenberger', 'Andreas', False, 'Grüne', 5691, None, None),
('Thöni', 'Stefan', False, 'Piraten', 1709, None, None)
],
'candidates_by_entites': (
[
('Brandenberg', 'Manuel', 10997),
('Eder', 'Joachim', 23620),
('Gysel', 'Barbara', 6612),
('Hegglin', 'Peter', 24132),
('Lustenberger', 'Andreas', 5691),
('Thöni', 'Stefan', 1709)
],
[
('Baar', [
('Baar', 'Brandenberg', 'Manuel', 2100),
('Baar', 'Eder', 'Joachim', 4237),
('Baar', 'Gysel', 'Barbara', 1264),
('Baar', 'Hegglin', 'Peter', 4207),
('Baar', 'Lustenberger', 'Andreas', 1269),
('Baar', 'Thöni', 'Stefan', 320)
]),
('Cham', [
('Cham', 'Brandenberg', 'Manuel', 1404),
('Cham', 'Eder', 'Joachim', 2726),
('Cham', 'Gysel', 'Barbara', 888),
('Cham', 'Hegglin', 'Peter', 2905),
('Cham', 'Lustenberger', 'Andreas', 685),
('Cham', 'Thöni', 'Stefan', 232)
]),
('Hünenberg', [
('Hünenberg', 'Brandenberg', 'Manuel', 881),
('Hünenberg', 'Eder', 'Joachim', 2098),
('Hünenberg', 'Gysel', 'Barbara', 540),
('Hünenberg', 'Hegglin', 'Peter', 2205),
('Hünenberg', 'Lustenberger', 'Andreas', 397),
('Hünenberg', 'Thöni', 'Stefan', 140)
]),
('Menzingen', [
('Menzingen', 'Brandenberg', 'Manuel', 460),
('Menzingen', 'Eder', 'Joachim', 1042),
('Menzingen', 'Gysel', 'Barbara', 198),
('Menzingen', 'Hegglin', 'Peter', 1376),
('Menzingen', 'Lustenberger', 'Andreas', 190),
('Menzingen', 'Thöni', 'Stefan', 54)
]),
('Neuheim', [
('Neuheim', 'Brandenberg', 'Manuel', 235),
('Neuheim', 'Eder', 'Joachim', 453),
('Neuheim', 'Gysel', 'Barbara', 92),
('Neuheim', 'Hegglin', 'Peter', 511),
('Neuheim', 'Lustenberger', 'Andreas', 94),
('Neuheim', 'Thöni', 'Stefan', 26)
]),
('Oberägeri', [
('Oberägeri', 'Brandenberg', 'Manuel', 656),
('Oberägeri', 'Eder', 'Joachim', 1380),
('Oberägeri', 'Gysel', 'Barbara', 191),
('Oberägeri', 'Hegglin', 'Peter', 1276),
('Oberägeri', 'Lustenberger', 'Andreas', 150),
('Oberägeri', 'Thöni', 'Stefan', 72)
]),
('Risch', [
('Risch', 'Brandenberg', 'Manuel', 1041),
('Risch', 'Eder', 'Joachim', 1797),
('Risch', 'Gysel', 'Barbara', 391),
('Risch', 'Hegglin', 'Peter', 1730),
('Risch', 'Lustenberger', 'Andreas', 362),
('Risch', 'Thöni', 'Stefan', 137)
]),
('Steinhausen', [
('Steinhausen', 'Brandenberg', 'Manuel', 789),
('Steinhausen', 'Eder', 'Joachim', 1827),
('Steinhausen', 'Gysel', 'Barbara', 523),
('Steinhausen', 'Hegglin', 'Peter', 1883),
('Steinhausen', 'Lustenberger', 'Andreas', 490),
('Steinhausen', 'Thöni', 'Stefan', 171)
]),
('Unterägeri', [
('Unterägeri', 'Brandenberg', 'Manuel', 860),
('Unterägeri', 'Eder', 'Joachim', 2054),
('Unterägeri', 'Gysel', 'Barbara', 320),
('Unterägeri', 'Hegglin', 'Peter', 1779),
('Unterägeri', 'Lustenberger', 'Andreas', 258),
('Unterägeri', 'Thöni', 'Stefan', 85)
]),
('Walchwil', [
('Walchwil', 'Brandenberg', 'Manuel', 416),
('Walchwil', 'Eder', 'Joachim', 756),
('Walchwil', 'Gysel', 'Barbara', 151),
('Walchwil', 'Hegglin', 'Peter', 801),
('Walchwil', 'Lustenberger', 'Andreas', 93),
('Walchwil', 'Thöni', 'Stefan', 39)
]),
('Zug', [
('Zug', 'Brandenberg', 'Manuel', 2155),
('Zug', 'Eder', 'Joachim', 5250),
('Zug', 'Gysel', 'Barbara', 2054),
('Zug', 'Hegglin', 'Peter', 5459),
('Zug', 'Lustenberger', 'Andreas', 1703),
('Zug', 'Thöni', 'Stefan', 433)
])
]
),
'election': model,
'embed': False,
'entities': (
'Baar, Cham, Hünenberg, Menzingen, Neuheim, Oberägeri, Risch, '
'Steinhausen, Unterägeri, Walchwil, Zug'
),
'layout': layout,
'model': model,
'request': request,
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert 'majorz_internal_staenderatswahl-2015' in result
assert '11 of 11' in result
assert (
'Baar, Cham, Hünenberg, Menzingen, Neuheim, Oberägeri, Risch, '
'Steinhausen, Unterägeri, Walchwil, Zug'
) in result
assert 'election-candidates-table' in result
assert 'data-text="24132"' not in result
assert 'data-text="6612"' in result
assert 'data-text="5691"' in result
assert '>n.a.</td>' not in result
assert (
'data-dataurl="Election/candidates-data'
'?limit=0&lists=&elected="'
) in result
assert (
'data-dataurl="Election/candidates-data'
'?limit=02&lists=x,y&elected=True"'
) in result
assert 'election-candidates-by-entity-table' in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
def test_proporz_election_widgets(election_day_app, import_test_datasets):
structure = """
<row>
<column span="1">
<title class="my-class-1"/>
</column>
<column span="1">
<progress class="my-class-2"/>
</column>
<column span="1">
<counted-entities class="my-class-3"/>
</column>
<column span="1">
<election-candidates-table class="my-class-4"
lists="SP Migrant.,,,,SVP Int."/>
</column>
<column span="1">
<election-candidates-chart class="my-class-5"/>
</column>
<column span="1">
<election-candidates-chart class="my-class-6" limit="2"
lists="x,y" elected="True"/>
</column>
<column span="1">
<election-lists-table class="my-class-7"
names="SP Männer, SP Frauen, ALG Junge "/>
</column>
<column span="1">
<election-lists-chart class="my-class-8"/>
</column>
<column span="1">
<election-lists-chart class="my-class-9" limit="3"
names="a,b"/>
</column>
</row>
"""
widgets = [
RowWidget(),
ColumnWidget(),
CountedEntitiesWidget(),
ProgressWidget(),
TitleWidget(),
ElectionCandidatesChartWidget(),
ElectionCandidatesTableWidget(),
ElectionListsChartWidget(),
ElectionListsTableWidget(),
]
# Empty
session = election_day_app.session()
session.add(
ProporzElection(
title='Election', domain='canton', date=date(2015, 6, 18)
)
)
model = session.query(ProporzElection).one()
request = DummyRequest(app=election_day_app, session=session)
layout = ElectionLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'candidates': [],
'election': model,
'embed': False,
'entities': '',
'layout': layout,
'lists': [],
'model': model,
'request': request
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert '>Election</span>' in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
assert 'my-class-8' in result
assert 'my-class-9' in result
# Add intermediate results
model, errors = import_test_datasets(
'internal',
'election',
'zg',
'canton',
'proporz',
date_=date(2015, 10, 18),
number_of_mandates=1,
dataset_name='nationalratswahlen-2015-intermediate',
app_session=session
)
assert not errors
session.add(model)
session.flush()
layout = ElectionLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'candidates': [
('Lustenberger', 'Andreas', False, '', 1514, 'ALG', '1'),
('Estermann', 'Astrid', False, '', 491, 'ALG', '1'),
('Schriber-Neiger', 'Hanni', False, '', 423, 'ALG', '1'),
('Schuler', 'Hubert', False, '', 1918, 'SP', '10'),
('Bürgi Dellsperger', 'Christina', False, '', 1202, 'SP', '10'),
('Sivaganesan', 'Rupan', False, '', 691, 'SP', '10'),
('Hutter Elsener', 'Simone', False, '', 412, 'SP Frauen', '11'),
('Hug', 'Malaika', False, '', 340, 'SP Frauen', '11'),
('Mäder Beglinger', 'Anne', False, '', 237, 'SP Frauen', '11'),
('Krasnici', 'Denis', False, '', 258, 'SP Juso', '12'),
('Spescha', 'Anna', False, '', 202, 'SP Juso', '12'),
('Koepfli', 'Virginia', False, '', 102, 'SP Juso', '12'),
('Dzaferi', 'Zari', False, '', 1355, 'SP Männer', '13'),
('Freimann', 'Fabian', False, '', 218, 'SP Männer', '13'),
('Suter', 'Guido', False, '', 188, 'SP Männer', '13'),
('Sönmez', 'Sehriban', False, '', 54, 'SP Migrant.', '14'),
('Coralic', 'Fadila', False, '', 50, 'SP Migrant.', '14'),
('Simsek', 'Deniz', False, '', 38, 'SP Migrant.', '14'),
('Aeschi', 'Thomas', True, '', 7731, 'SVP', '15'),
('Werner', 'Thomas', False, '', 2914, 'SVP', '15'),
('Villiger', 'Thomas', False, '', 2571, 'SVP', '15'),
('Pfisterer', 'Luc', False, '', 105, 'SVP Int.', '16'),
('Bucher', 'Rinaldo', False, '', 69, 'SVP Int.', '16'),
('Hornickel', 'Alexander', False, '', 46, 'SVP Int.', '16'),
('Risi', 'Adrian', False, '', 1153, 'SVP WuG', '17'),
('Brunner', 'Philip C.', False, '', 471, 'SVP WuG', '17'),
('Gertsch', 'Beat', False, '', 268, 'SVP WuG', '17'),
('Widmer', 'Fabienne', False, '', 101, 'ALG Junge', '2'),
('Gut', 'Christina', False, '', 74, 'ALG Junge', '2'),
('Perucchi', 'Alessandro', False, '', 66, 'ALG Junge', '2'),
('Haas', 'Esther', False, '', 301, 'ALG Bildung', '3'),
('Odermatt', 'Anastas', False, '', 221, 'ALG Bildung', '3'),
('Zimmermann Gibson', 'Tabea', False, '', 207, 'ALG Bildung', '3'),
('Pfister', 'Gerhard', True, '', 6719, 'CVP', '4'),
('Barmet-Schelbert', 'Monika', False, '', 1996, 'CVP', '4'),
('Hausheer', 'Andreas', False, '', 1340, 'CVP', '4'),
('Bieri', 'Anna', False, '', 2407, 'CVP Junge', '5'),
('Iten', 'Christoph', False, '', 587, 'CVP Junge', '5'),
('Kremmel', 'Corina', False, '', 525, 'CVP Junge', '5'),
('Pezzatti', 'Bruno', True, '', 4309, 'FDP Ost', '6'),
('Ingold', 'Gabriela', False, '', 1083, 'FDP Ost', '6'),
('Mollet', 'Patrick', False, '', 705, 'FDP Ost', '6'),
('Grüter', 'Arno', False, '', 897, 'FDP West', '7'),
('Gygli', 'Daniel', False, '', 717, 'FDP West', '7'),
('Siegrist', 'Birgitt', False, '', 493, 'FDP West', '7'),
('Stadlin', 'Daniel', False, '', 731, 'glp', '8'),
('Kottelat Schloesing', 'Michèle', False, '', 508, 'glp', '8'),
('Soltermann', 'Claus', False, '', 451, 'glp', '8'),
('Mauchle', 'Florian', False, '', 260, 'Piraten', '9'),
('Thöni', 'Stefan', False, '', 211, 'Piraten', '9')
],
'election': model,
'embed': False,
'entities': 'Baar, Cham, Hünenberg, Menzingen',
'layout': layout,
'lists': [
('SVP', 13532, '15', 1),
('CVP', 10247, '4', 1),
('FDP Ost', 6219, '6', 1),
('SP', 3866, '10', 0),
('CVP Junge', 3549, '5', 0),
('ALG', 2459, '1', 0),
('FDP West', 2143, '7', 0),
('SVP WuG', 1933, '17', 0),
('SP Männer', 1814, '13', 0),
('glp', 1718, '8', 0),
('SP Frauen', 998, '11', 0),
('ALG Bildung', 735, '3', 0),
('SP Juso', 567, '12', 0),
('Piraten', 475, '9', 0),
('ALG Junge', 245, '2', 0),
('SVP Int.', 223, '16', 0),
('SP Migrant.', 146, '14', 0)
],
'model': model,
'request': request
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert 'proporz_internal_nationalratswahlen-2015-intermediate' in result
assert '4 of 11' in result
assert 'Baar, Cham, Hünenberg, Menzingen' in result
assert 'election-candidates-table' in result
assert 'data-text="1514"' not in result
assert 'data-text="54"' in result
assert 'data-text="50"' in result
assert 'data-text="105"' in result
assert 'data-text="69"' in result
assert '>n.a.</td>' in result
assert (
'data-dataurl="ProporzElection/candidates-data'
'?limit=0&lists=&elected="'
) in result
assert (
'data-dataurl="ProporzElection/candidates-data'
'?limit=02&lists=x,y&elected=True"'
) in result
assert 'election-lists-table' in result
assert 'data-text="13532"' not in result
assert 'data-text="1814"' in result
assert 'data-text="998"' in result
assert 'data-text="245"' in result
assert (
'data-dataurl="ProporzElection/lists-data?limit=0&names="'
) in result
assert (
'data-dataurl="ProporzElection/lists-data?limit=03&names=a,b"'
) in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
assert 'my-class-8' in result
assert 'my-class-9' in result
# Add final results
model, errors = import_test_datasets(
'internal',
'election',
'zg',
'canton',
'proporz',
date_=date(2015, 10, 18),
number_of_mandates=1,
dataset_name='nationalratswahlen-2015',
app_session=session
)
assert not errors
session.add(model)
session.flush()
layout = ElectionLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'candidates': [
('Lustenberger', 'Andreas', False, '', 3240, 'ALG', '1'),
('Estermann', 'Astrid', False, '', 1327, 'ALG', '1'),
('Schriber-Neiger', 'Hanni', False, '', 1206, 'ALG', '1'),
('Schuler', 'Hubert', False, '', 3859, 'SP', '10'),
('Bürgi Dellsperger', 'Christina', False, '', 2987, 'SP', '10'),
('Sivaganesan', 'Rupan', False, '', 1874, 'SP', '10'),
('Hutter Elsener', 'Simone', False, '', 929, 'SP Frauen', '11'),
('Hug', 'Malaika', False, '', 684, 'SP Frauen', '11'),
('Mäder Beglinger', 'Anne', False, '', 561, 'SP Frauen', '11'),
('Spescha', 'Anna', False, '', 555, 'SP Juso', '12'),
('Krasnici', 'Denis', False, '', 550, 'SP Juso', '12'),
('Koepfli', 'Virginia', False, '', 218, 'SP Juso', '12'),
('Dzaferi', 'Zari', False, '', 2303, 'SP Männer', '13'),
('Suter', 'Guido', False, '', 545, 'SP Männer', '13'),
('Freimann', 'Fabian', False, '', 394, 'SP Männer', '13'),
('Coralic', 'Fadila', False, '', 144, 'SP Migrant.', '14'),
('Sönmez', 'Sehriban', False, '', 117, 'SP Migrant.', '14'),
('Simsek', 'Deniz', False, '', 82, 'SP Migrant.', '14'),
('Aeschi', 'Thomas', True, '', 17034, 'SVP', '15'),
('Werner', 'Thomas', False, '', 7206, 'SVP', '15'),
('Villiger', 'Thomas', False, '', 5629, 'SVP', '15'),
('Pfisterer', 'Luc', False, '', 269, 'SVP Int.', '16'),
('Bucher', 'Rinaldo', False, '', 168, 'SVP Int.', '16'),
('Hornickel', 'Alexander', False, '', 132, 'SVP Int.', '16'),
('Risi', 'Adrian', False, '', 2607, 'SVP WuG', '17'),
('Brunner', 'Philip C.', False, '', 1159, 'SVP WuG', '17'),
('Gertsch', 'Beat', False, '', 607, 'SVP WuG', '17'),
('Widmer', 'Fabienne', False, '', 345, 'ALG Junge', '2'),
('Gut', 'Christina', False, '', 235, 'ALG Junge', '2'),
('Perucchi', 'Alessandro', False, '', 222, 'ALG Junge', '2'),
('Odermatt', 'Anastas', False, '', 637, 'ALG Bildung', '3'),
('Haas', 'Esther', False, '', 559, 'ALG Bildung', '3'),
('Zimmermann Gibson', 'Tabea', False, '', 490, 'ALG Bildung', '3'),
('Pfister', 'Gerhard', True, '', 16134, 'CVP', '4'),
('Barmet-Schelbert', 'Monika', False, '', 4093, 'CVP', '4'),
('Hausheer', 'Andreas', False, '', 3606, 'CVP', '4'),
('Bieri', 'Anna', False, '', 3908, 'CVP Junge', '5'),
('Iten', 'Christoph', False, '', 1394, 'CVP Junge', '5'),
('Kremmel', 'Corina', False, '', 1163, 'CVP Junge', '5'),
('Pezzatti', 'Bruno', True, '', 10174, 'FDP Ost', '6'),
('Ingold', 'Gabriela', False, '', 3637, 'FDP Ost', '6'),
('Mollet', 'Patrick', False, '', 2190, 'FDP Ost', '6'),
('Grüter', 'Arno', False, '', 1706, 'FDP West', '7'),
('Gygli', 'Daniel', False, '', 1378, 'FDP West', '7'),
('Siegrist', 'Birgitt', False, '', 1142, 'FDP West', '7'),
('Stadlin', 'Daniel', False, '', 1823, 'glp', '8'),
('Kottelat Schloesing', 'Michèle', False, '', 1256, 'glp', '8'),
('Soltermann', 'Claus', False, '', 1043, 'glp', '8'),
('Mauchle', 'Florian', False, '', 629, 'Piraten', '9'),
('Thöni', 'Stefan', False, '', 488, 'Piraten', '9')
],
'election': model,
'embed': False,
'entities': (
'Baar, Cham, Hünenberg, Menzingen, Neuheim, Oberägeri, Risch, '
'Steinhausen, Unterägeri, Walchwil, Zug'
),
'layout': layout,
'lists': [
('SVP', 30532, '15', 1),
('CVP', 24335, '4', 1),
('FDP Ost', 16285, '6', 1),
('SP', 8868, '10', 0),
('CVP Junge', 6521, '5', 0),
('ALG', 5844, '1', 0),
('SVP WuG', 4436, '17', 0),
('FDP West', 4299, '7', 0),
('glp', 4178, '8', 0),
('SP Männer', 3314, '13', 0),
('SP Frauen', 2186, '11', 0),
('ALG Bildung', 1701, '3', 0),
('SP Juso', 1333, '12', 0),
('Piraten', 1128, '9', 0),
('ALG Junge', 807, '2', 0),
('SVP Int.', 575, '16', 0),
('SP Migrant.', 347, '14', 0)
],
'model': model,
'request': request
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert 'proporz_internal_nationalratswahlen-2015' in result
assert '11 of 11' in result
assert (
'Baar, Cham, Hünenberg, Menzingen, Neuheim, Oberägeri, Risch, '
'Steinhausen, Unterägeri, Walchwil, Zug'
) in result
assert 'election-candidates-table' in result
assert 'data-text="3240"' not in result
assert 'data-text="144"' in result
assert 'data-text="117"' in result
assert 'data-text="269"' in result
assert 'data-text="168"' in result
assert '>n.a.</td>' not in result
assert (
'data-dataurl="ProporzElection/candidates-data'
'?limit=0&lists=&elected="'
) in result
assert (
'data-dataurl="ProporzElection/candidates-data'
'?limit=02&lists=x,y&elected=True"'
) in result
assert 'election-lists-table' in result
assert 'data-text="30532"' not in result
assert 'data-text="3314"' in result
assert 'data-text="2186"' in result
assert 'data-text="807"' in result
assert (
'data-dataurl="ProporzElection/lists-data?limit=0&names="'
) in result
assert (
'data-dataurl="ProporzElection/lists-data?limit=03&names=a,b"'
) in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
assert 'my-class-8' in result
assert 'my-class-9' in result
def test_election_compound_widgets(election_day_app_sg, import_test_datasets):
structure = """
<row>
<column span="1">
<title class="my-class-1"/>
</column>
<column span="1">
<progress class="my-class-2"/>
</column>
<column span="1">
<counted-entities class="my-class-3"/>
</column>
<column span="1">
<election-compound-candidates-table class="my-class-4"/>
</column>
<column span="1">
<election-compound-districts-table class="my-class-5"/>
</column>
<column span="1">
<election-compound-lists-table class="my-class-6"
names="GRÜ , FDP_J "/>
</column>
<column span="1">
<election-compound-lists-chart class="my-class-7"/>
</column>
<column span="1">
<election-compound-lists-chart class="my-class-8" limit="2"
names="a,b"/>
</column>
</row>
"""
widgets = [
RowWidget(),
ColumnWidget(),
CountedEntitiesWidget(),
ProgressWidget(),
TitleWidget(),
ElectionCompoundCandidatesTableWidget(),
ElectionCompoundDistrictsTableWidget(),
ElectionCompoundListsChartWidget(),
ElectionCompoundListsTableWidget(),
]
# Empty
session = election_day_app_sg.session()
session.add(
ElectionCompound(
title='Compound', domain='canton', date=date(2020, 3, 8)
)
)
model = session.query(ElectionCompound).one()
request = DummyRequest(app=election_day_app_sg, session=session)
layout = ElectionCompoundLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
assert data == {
'districts': {},
'elected_candidates': [],
'election': model,
'election_compound': model,
'embed': False,
'entities': '',
'layout': layout,
'lists': [],
'model': model,
'request': request
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert '>Compound</span>' in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
assert 'my-class-8' in result
# Add intermediate results
election_1, errors = import_test_datasets(
'internal',
'election',
'sg',
'region',
'proporz',
date_=date(2020, 3, 8),
number_of_mandates=17,
dataset_name='kantonsratswahl-2020-wahlkreis-rheintal-intermediate',
app_session=session
)
assert not errors
election_2, errors = import_test_datasets(
'internal',
'election',
'sg',
'region',
'proporz',
date_=date(2020, 3, 8),
number_of_mandates=10,
dataset_name='kantonsratswahl-2020-wahlkreis-rorschach',
app_session=session
)
assert not errors
session.add(election_1)
session.add(election_2)
model.elections = [election_1, election_2]
session.flush()
layout = ElectionCompoundLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
e_1 = election_1.title
e_2 = election_2.title
assert data == {
'districts': {
e_1: ('Rheintal', f'ProporzElection/{e_1}'),
e_2: ('Rorschach', f'ProporzElection/{e_2}')
},
'elected_candidates': [
('Bruss-Schmidheiny', 'Carmen', '', 'SVP', '01', e_1),
('Eugster', 'Thomas', '', 'SVP', '01', e_1),
('Freund', 'Walter', '', 'SVP', '01', e_1),
('Götte', 'Michael', '', 'SVP', '01', e_2),
('Kuster', 'Peter', '', 'SVP', '01', e_1),
('Luterbacher', 'Mäge', '', 'SVP', '01', e_2),
('Wasserfallen', 'Sandro', '', 'SVP', '01', e_2),
('Willi', 'Christian', '', 'SVP', '01', e_1),
('Wüst', 'Markus', '', 'SVP', '01', e_1),
('Broger', 'Andreas', '', 'CVP', '02', e_1),
('Dürr', 'Patrick', '', 'CVP', '02', e_1),
('Hess', 'Sandro', '', 'CVP', '02', e_1),
('Schöbi', 'Michael', '', 'CVP', '02', e_1),
('Frei', 'Raphael', '', 'FDP', '02a', e_2),
('Raths', 'Robert', '', 'FDP', '02a', e_2),
('Britschgi', 'Stefan', '', 'FDP', '03', e_1),
('Graf', 'Claudia', '', 'FDP', '03', e_1),
('Huber', 'Rolf', '', 'FDP', '03', e_1),
('Bucher', 'Laura', '', 'SP', '04', e_1),
('Gemperli', 'Dominik', '', 'CVP', '04', e_2),
('Krempl-Gnädinger', 'Luzia', '', 'CVP', '04', e_2),
('Maurer', 'Remo', '', 'SP', '04', e_1),
('Etterlin', 'Guido', '', 'SP', '05', e_2),
('Gschwend', 'Meinrad', '', 'GRÜ', '05', e_1),
('Schöb', 'Andrea', '', 'SP', '05', e_2),
('Losa', 'Jeannette', '', 'GRÜ', '06', e_2),
('Mattle', 'Ruedi', '', 'GLP', '06', e_1)
],
'election': model,
'election_compound': model,
'embed': False,
'entities': e_2,
'layout': layout,
'lists': [
('SVP', 9, 31515),
('CVP', 6, 28509),
('FDP', 5, 19546),
('SP', 4, 17381),
('GRÜ', 2, 10027),
('GLP', 1, 7725),
('EVP', 0, 2834),
('FDP_J', 0, 1379)
],
'model': model,
'request': request
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert '>Compound</span>' in result
assert '1 of 2' in result
    assert f'<div>{e_2}</div>' in result
assert 'election-compound-candidates-table' in result
assert 'Bruss-Schmidheiny Carmen' in result
assert 'election-compound-districts-table' in result
assert '10 of 10' in result
assert '9 of 9' in result
assert '0 of 17' in result
assert '1 of 13' in result
assert 'election-compound-lists-table' in result
assert 'data-text="31515"' not in result
assert 'data-text="10027"' in result
assert 'data-text="1379"' in result
assert '>n.a.</td>' in result
assert (
'data-dataurl="ElectionCompound/lists-data?limit=0&names="'
) in result
assert (
'data-dataurl="ElectionCompound/lists-data?limit=02&names=a,b"'
) in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
assert 'my-class-8' in result
# Add final results
election_1, errors = import_test_datasets(
'internal',
'election',
'sg',
'region',
'proporz',
date_=date(2020, 3, 8),
number_of_mandates=17,
dataset_name='kantonsratswahl-2020-wahlkreis-rheintal',
app_session=session
)
assert not errors
session.add(election_1)
model.elections = [election_1, election_2]
session.flush()
layout = ElectionCompoundLayout(model, request)
default = {'layout': layout, 'request': request}
data = inject_variables(widgets, layout, structure, default, False)
e_1 = election_1.title
e_2 = election_2.title
assert data == {
'districts': {
e_1: ('Rheintal', f'ProporzElection/{e_1}'),
e_2: ('Rorschach', f'ProporzElection/{e_2}')
},
'elected_candidates': [
('Bruss-Schmidheiny', 'Carmen', '', 'SVP', '01', e_1),
('Eugster', 'Thomas', '', 'SVP', '01', e_1),
('Freund', 'Walter', '', 'SVP', '01', e_1),
('Götte', 'Michael', '', 'SVP', '01', e_2),
('Kuster', 'Peter', '', 'SVP', '01', e_1),
('Luterbacher', 'Mäge', '', 'SVP', '01', e_2),
('Wasserfallen', 'Sandro', '', 'SVP', '01', e_2),
('Willi', 'Christian', '', 'SVP', '01', e_1),
('Wüst', 'Markus', '', 'SVP', '01', e_1),
('Broger', 'Andreas', '', 'CVP', '02', e_1),
('Dürr', 'Patrick', '', 'CVP', '02', e_1),
('Hess', 'Sandro', '', 'CVP', '02', e_1),
('Schöbi', 'Michael', '', 'CVP', '02', e_1),
('Frei', 'Raphael', '', 'FDP', '02a', e_2),
('Raths', 'Robert', '', 'FDP', '02a', e_2),
('Britschgi', 'Stefan', '', 'FDP', '03', e_1),
('Graf', 'Claudia', '', 'FDP', '03', e_1),
('Huber', 'Rolf', '', 'FDP', '03', e_1),
('Bucher', 'Laura', '', 'SP', '04', e_1),
('Gemperli', 'Dominik', '', 'CVP', '04', e_2),
('Krempl-Gnädinger', 'Luzia', '', 'CVP', '04', e_2),
('Maurer', 'Remo', '', 'SP', '04', e_1),
('Etterlin', 'Guido', '', 'SP', '05', e_2),
('Gschwend', 'Meinrad', '', 'GRÜ', '05', e_1),
('Schöb', 'Andrea', '', 'SP', '05', e_2),
('Losa', 'Jeannette', '', 'GRÜ', '06', e_2),
('Mattle', 'Ruedi', '', 'GLP', '06', e_1)
],
'election': model,
'election_compound': model,
'embed': False,
'entities': f'{e_1}, {e_2}',
'layout': layout,
'lists': [
('SVP', 9, 87135),
('CVP', 6, 71209),
('FDP', 5, 55152),
('SP', 4, 37291),
('GRÜ', 2, 24722),
('GLP', 1, 20644),
('EVP', 0, 2834),
('FDP_J', 0, 1379)
],
'model': model,
'request': request
}
result = transform_structure(widgets, structure)
result = PageTemplate(result)(**data)
etree.fromstring(result.encode('utf-8'))
assert '>Compound</span>' in result
assert '2 of 2' in result
    assert f'<div>{e_1}, {e_2}</div>' in result
assert 'election-compound-candidates-table' in result
assert 'Bruss-Schmidheiny Carmen' in result
assert 'election-compound-districts-table' in result
assert '10 of 10' in result
assert '9 of 9' in result
assert '17 of 17' in result
assert '13 of 13' in result
assert 'election-compound-lists-table' in result
assert 'data-text="87135"' not in result
assert 'data-text="24722"' in result
assert 'data-text="1379"' in result
assert '>n.a.</td>' not in result
assert (
'data-dataurl="ElectionCompound/lists-data?limit=0&names="'
) in result
assert (
'data-dataurl="ElectionCompound/lists-data?limit=02&names=a,b"'
) in result
assert 'my-class-1' in result
assert 'my-class-2' in result
assert 'my-class-3' in result
assert 'my-class-4' in result
assert 'my-class-5' in result
assert 'my-class-6' in result
assert 'my-class-7' in result
assert 'my-class-8' in result
| 39.075646 | 79 | 0.502975 |
fc0323df9c1a2e11a21d12dbb890a153f48fbb4f
| 9,849 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/slxos_linkagg.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/slxos_linkagg.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/slxos_linkagg.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: slxos_linkagg
author: "Matthew Stone (@bigmstone)"
short_description: Manage link aggregation groups on Extreme Networks SLX-OS network devices
description:
- This module provides declarative management of link aggregation groups
on Extreme Networks SLX-OS network devices.
notes:
- Tested against SLX-OS 17s.1.02
options:
group:
description:
- Channel-group number for the port-channel
        link aggregation group. Range 1-1024.
mode:
description:
- Mode of the link aggregation group.
choices: ['active', 'on', 'passive']
members:
description:
- List of members of the link aggregation group.
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent']
purge:
description:
- Purge links not defined in the I(aggregate) parameter.
type: bool
'''
EXAMPLES = """
- name: create link aggregation group
slxos_linkagg:
group: 10
state: present
- name: delete link aggregation group
slxos_linkagg:
group: 10
state: absent
- name: set link aggregation group to members
slxos_linkagg:
group: 200
mode: active
members:
- Ethernet 0/1
- Ethernet 0/2
- name: remove link aggregation group from Ethernet 0/1
slxos_linkagg:
group: 200
mode: active
members:
- Ethernet 0/1
- name: Create aggregate of linkagg definitions
slxos_linkagg:
aggregate:
- { group: 3, mode: on, members: [Ethernet 0/1] }
- { group: 100, mode: passive, members: [Ethernet 0/2] }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface port-channel 30
- interface Ethernet 0/3
- channel-group 30 mode on
- no interface port-channel 30
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import CustomNetworkConfig
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import get_config, load_config
def search_obj_in_list(group, lst):
for o in lst:
if o['group'] == group:
return o
def map_obj_to_commands(updates, module):
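    # Diff the desired state (want) against the device state (have) and
    # emit the SLX-OS CLI commands needed to converge them.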
commands = list()
want, have = updates
purge = module.params['purge']
for w in want:
group = w['group']
mode = w['mode']
members = w.get('members') or []
state = w['state']
del w['state']
obj_in_have = search_obj_in_list(group, have)
if state == 'absent':
if obj_in_have:
commands.append('no interface port-channel {0}'.format(group))
elif state == 'present':
cmd = ['interface port-channel {0}'.format(group),
'exit']
if not obj_in_have:
if not group:
module.fail_json(msg='group is a required option')
commands.extend(cmd)
if members:
for m in members:
commands.append('interface {0}'.format(m))
commands.append('channel-group {0} mode {1}'.format(group, mode))
else:
if members:
if 'members' not in obj_in_have.keys():
for m in members:
commands.extend(cmd)
commands.append('interface {0}'.format(m))
commands.append('channel-group {0} mode {1}'.format(group, mode))
elif set(members) != set(obj_in_have['members']):
missing_members = list(set(members) - set(obj_in_have['members']))
for m in missing_members:
commands.extend(cmd)
commands.append('interface {0}'.format(m))
commands.append('channel-group {0} mode {1}'.format(group, mode))
superfluous_members = list(set(obj_in_have['members']) - set(members))
for m in superfluous_members:
commands.extend(cmd)
commands.append('interface {0}'.format(m))
commands.append('no channel-group')
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['group'], want)
if not obj_in_want:
commands.append('no interface port-channel {0}'.format(h['group']))
return commands
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
d['group'] = str(d['group'])
obj.append(d)
else:
obj.append({
'group': str(module.params['group']),
'mode': module.params['mode'],
'members': module.params['members'],
'state': module.params['state']
})
return obj
def parse_mode(module, config, group, member):
mode = None
netcfg = CustomNetworkConfig(indent=1, contents=config)
parents = ['interface {0}'.format(member)]
body = netcfg.get_section(parents)
match_int = re.findall(r'interface {0}\n'.format(member), body, re.M)
if match_int:
match = re.search(r'channel-group {0} mode (\S+)'.format(group), body, re.M)
if match:
mode = match.group(1)
return mode
def parse_members(module, config, group):
members = []
for line in config.strip().split('!'):
l = line.strip()
if l.startswith('interface'):
match_group = re.findall(r'channel-group {0} mode'.format(group), l, re.M)
if match_group:
match = re.search(r'^interface (\S+\s\S+)$', l, re.M)
if match:
members.append(match.group(1))
return members
def get_channel(module, config, group):
match = re.findall(r'^interface (\S+\s\S+)$', config, re.M)
if not match:
return {}
channel = {}
for item in set(match):
member = item
channel['mode'] = parse_mode(module, config, group, member)
channel['members'] = parse_members(module, config, group)
return channel
def map_config_to_obj(module):
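    # Parse the running configuration into a list of port-channel dicts.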
objs = list()
config = get_config(module)
for line in config.split('\n'):
l = line.strip()
match = re.search(r'interface Port-channel (\S+)', l, re.M)
if match:
obj = {}
group = match.group(1)
obj['group'] = group
obj.update(get_channel(module, config, group))
objs.append(obj)
return objs
def main():
""" main entry point for module execution
"""
element_spec = dict(
group=dict(type='int'),
mode=dict(choices=['active', 'on', 'passive']),
members=dict(type='list'),
state=dict(default='present',
choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['group'] = dict(required=True)
required_one_of = [['group', 'aggregate']]
required_together = [['members', 'mode']]
mutually_exclusive = [['group', 'aggregate']]
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec,
required_together=required_together),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| 30.211656 | 114 | 0.602396 |
4ac9d9ac70f42f3b68a1cf98efa1dbd5ceae6ffb
| 453 |
py
|
Python
|
backend/apps/ineedstudent/converters.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/ineedstudent/converters.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/ineedstudent/converters.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | null | null | null |
class DecimalPointFloatConverter:
"""
Custom Django converter for URLs.
Parses floats with a decimal point (not with a comma!)
Allows for integers too, parses values in this or similar form:
- 100.0
- 100
Will NOT work for these forms:
- 100.000.000
- 100,0
"""
regex = "[0-9]*[.]?[0-9]*"
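    # Note: the pattern also matches the empty string, in which case
    # float('') raises ValueError, which Django treats as a non-match.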
def to_python(self, value):
return float(value)
def to_url(self, value):
return str(value)
| 20.590909 | 67 | 0.604857 |
ab4f0603128a12657e6585224797995588ba699c
| 298 |
py
|
Python
|
exercises/zh/solution_03_16_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/zh/solution_03_16_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/zh/solution_03_16_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
nlp = spacy.load("zh_core_web_sm")
text = (
"在300多年的风雨历程中,历代同仁堂人始终恪守“炮制虽繁必不敢省人工,品味虽贵必不敢减物力”的古训,"
"树立“修合无人见,存心有天知”的自律意识,造就了制药过程中兢兢小心、精益求精的严细精神。"
)
# Disable the tagger and parser
with nlp.disable_pipes("tagger", "parser"):
    # Process the text
doc = nlp(text)
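    # Only the components left enabled (e.g. the ner pipe) run here,
    # so doc.ents is still populated.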
# Print the entities in the doc
print(doc.ents)
| 19.866667 | 56 | 0.691275 |
dbd781c4e49b976114458433160e622316423e66
| 854 |
py
|
Python
|
src/onegov/town6/views/payment.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town6/views/payment.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town6/views/payment.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.security import Private
from onegov.form import merge_forms
from onegov.org.views.payment import view_payments, export_payments
from onegov.town6 import TownApp
from onegov.org.forms import DateRangeForm, ExportForm
from onegov.pay import PaymentCollection
from onegov.town6.layout import PaymentCollectionLayout
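# Reuse the org payment views, but render them with the town6 layout.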
@TownApp.html(
model=PaymentCollection,
template='payments.pt',
permission=Private)
def town_view_payments(self, request):
return view_payments(self, request, PaymentCollectionLayout(self, request))
@TownApp.form(
model=PaymentCollection,
name='export',
template='form.pt',
permission=Private,
form=merge_forms(DateRangeForm, ExportForm))
def town_export_payments(self, request, form):
return export_payments(
self, request, form, PaymentCollectionLayout(self, request))
| 30.5 | 79 | 0.784543 |
91641db7bc9e5c42bb376cf870acf52357d1a86c
| 461 |
py
|
Python
|
Curso_Python/Secao2-Python-Basico-Logica-Programacao/22_operadores_logicos/22_operadores_logicos.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/22_operadores_logicos/22_operadores_logicos.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/22_operadores_logicos/22_operadores_logicos.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
"""
Logical operators - lesson 4
and, or, not
in and not in
"""
# (True and False) = False
# comparison1 and comparison2
# True OR True
# comp1 or comp2
# not inverts an expression
# The in operator is very useful
nome = 'Pedro'
if 'Ped' in nome: # TODO: very useful
    print('The text exists.')
else:
    print('The text does not exist.')
nome1 = 'Pedro'
if 'dfg' not in nome1: # TODO: very useful
    print('Executed.')
else:
    print('The text exists.')
| 16.464286 | 43 | 0.672451 |
919f3e4a49cc077319825dfa2692abc557e4abed
| 5,915 |
py
|
Python
|
demo.py
|
SevenDaysDA/MetaScience_Datenaufbereitung
|
a7a68c59dc96fa0b56c85a29505f8cb4fc06ccb1
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
SevenDaysDA/MetaScience_Datenaufbereitung
|
a7a68c59dc96fa0b56c85a29505f8cb4fc06ccb1
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
SevenDaysDA/MetaScience_Datenaufbereitung
|
a7a68c59dc96fa0b56c85a29505f8cb4fc06ccb1
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from mosestokenizer import MosesDetokenizer
from scipy.stats import pearsonr
def pearson(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
return '{0:.{1}f}'.format(pearson_corr, 3)
reference_list = dict({
"cs-en": 'testset_cs-en.tsv',
"de-en": 'testset_de-en.tsv',
"fi-en": 'testset_fi-en.tsv',
"lv-en": 'testset_lv-en.tsv',
"ru-en": 'testset_ru-en.tsv',
"tr-en": 'testset_tr-en.tsv',
"zh-en": 'testset_zh-en.tsv',
})
import argparse
'''
#'xlm-roberta-base','xlm-clm-enfr-1024'] #'paraphrase-TinyBERT-L6-v2'] #'sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2'] #,'sentence-transformers/paraphrase-TinyBERT-L6-v2'], 'sentence-transformers/paraphrase-xlm-r-multilingual-v1']# ,'bert-base-multilingual-cased', 'distilbert-base-multilingual-cased', ] ]
#
# ####### FAILED ######
# ['xlm-roberta-large'] -> size error;
# zu testende Modelle mit und ohne LM (Successful)
# 'bert-base-multilingual-cased' ,'xlm-roberta-base', 'distilbert-base-multilingual-cased'
#variants = ['bert-base-multilingual-cased' ,'xlm-roberta-base', 'distilbert-base-multilingual-cased']
'''
variants = ['bert-base-multilingual-cased','distilbert-base-multilingual-cased','sentence-transformers/paraphrase-xlm-r-multilingual-v1', 'Tiny1']#'sentence-transformers/paraphrase-multilingual-mpnet-base-v2']#'sentence-transformers/paraphrase-xlm-r-multilingual-v1','sentence-transformers/paraphrase-TinyBERT-L6-v4']
from time import perf_counter
LPS = 'LP'
SCORE = 'Score'
TIME = 'Time'
USAGE = 'Memory'
LMSCORE = 'LM_Score'
LMTIME = 'LM_Time'
LMUSAGE = 'LM_Usage'
results = {
LPS: [],
SCORE: [],
TIME: [],
USAGE: [],
LMSCORE: [],
LMTIME: [],
LMUSAGE: []
}
for model in variants:
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default=model)
parser.add_argument('--do_lower_case', type=bool, default=False)
parser.add_argument('--language_model', type=str, default='gpt2')
parser.add_argument('--alignment', type=str, default='CLP', help='CLP or UMD or None')
parser.add_argument('--ngram', type=int, default=2)
parser.add_argument('--layer', type=int, default=8)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--dropout_rate', type=float, default=0.3, help='Remove the percentage of noisy elements in Word-Mover-Distance')
import json
args = parser.parse_args()
params = vars(args)
print(json.dumps(params, indent = 2))
from scorer import XMOVERScorer
import numpy as np
import torch
import truecase
scorer = XMOVERScorer(args.model_name, args.language_model, args.do_lower_case)
def metric_combination(a, b, alpha):
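        # Weighted linear combination of two score arrays
        # (here: XMoverScore values and language-model scores).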
return alpha[0]*np.array(a) + alpha[1]*np.array(b)
import tracemalloc
import os
from tqdm import tqdm
for pair in tqdm(reference_list.items()):
lp, path = pair
src, tgt = lp.split('-')
device = "cuda" if torch.cuda.is_available() else "cpu"
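        # Load the precomputed cross-lingual remapping tensors for this
        # language pair: a projection matrix (.BAM) and a bias vector (.GBDD).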
temp = np.load('mapping/layer-8/europarl-v7.%s-%s.%s.BAM' % (src, tgt, args.layer), allow_pickle=True)
projection = torch.tensor(temp, dtype=torch.float).to(device)
temp = np.load('mapping/layer-8/europarl-v7.%s-%s.%s.GBDD' % (src, tgt, args.layer), allow_pickle=True)
bias = torch.tensor(temp, dtype=torch.float).to(device)
data = pd.read_csv(os.path.join('WMT17', 'testset', path), sep='\t')
references = data['reference'].tolist()
translations = data['translation'].tolist()
source = data['source'].tolist()
human_score = data['HUMAN_score'].tolist()
sentBLEU = data['sentBLEU'].tolist()
print("Lp: ",lp)
with MosesDetokenizer(src.strip()) as detokenize:
source = [detokenize(s.split(' ')) for s in source]
with MosesDetokenizer(tgt) as detokenize:
references = [detokenize(s.split(' ')) for s in references]
translations = [detokenize(s.split(' ')) for s in translations]
translations = [truecase.get_true_case(s) for s in translations]
tracemalloc.start()
s = perf_counter()
xmoverscores = scorer.compute_xmoverscore(args.alignment, projection, bias, source, translations, ngram=args.ngram, \
layer=args.layer, dropout_rate=args.dropout_rate, bs=args.batch_size)
        results[TIME].append(str(perf_counter() - s))
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
        results[USAGE].append(str(peak / 10 ** 6))
final_score = pearson(human_score, xmoverscores)
results[SCORE].append(str(final_score))
results[LPS].append(lp)
tracemalloc.start()
s = perf_counter()
lm_scores = scorer.compute_perplexity(translations, bs=1)
scores = metric_combination(xmoverscores, lm_scores, [1, 0.1])
final_lm_score = pearson(human_score, scores)
results[LMSCORE].append(str(final_lm_score))
results[LMTIME].append(str(perf_counter() - s))
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
results[LMUSAGE].append(str(peak / 10 ** 6))
print("Time XMoverDistance: \t",results[TIME] )
print("XMOVER Scores: \t\t ", results[SCORE])
print("LM+XMover: ",results[LMSCORE])
print("Plain scores: ",torch.mean(torch.tensor(xmoverscores)))
print('\r\nlp:{} xmovescore:{} '.format(lp, final_score ))
'''
results[BERTTIME] = []
for i in range(len(variants)):
results[BERTTIME].append(f'{results[TIME][i] / results[TIME][0] * 100:.1f}')
'''
df = pd.DataFrame(results, columns=[LPS, SCORE, TIME, USAGE, LMSCORE, LMTIME, LMUSAGE])
df.to_csv("XMOVERScore_FinalBench_vs2.csv", index=False)
| 38.914474 | 347 | 0.651057 |
71df670ac770124d07e65111cce0fed66536bf5f
| 509 |
py
|
Python
|
Python/Exercícios_Python/058_validação_de_dados.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/058_validação_de_dados.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/058_validação_de_dados.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""058 - Validação de Dados
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ZTluph36T-5jWIn1AJP2JN1XKyqGAYep
"""
sexo = ''
while sexo != 'M' and sexo != 'F':
sexo = str(input('DIgite o sexo [M/F]: ')).upper()
if sexo != 'M' and sexo != 'F':
print('Por favor digite a letra correta!!!')
if sexo == 'M':
sexo = 'Masculino'
else:
sexo = 'Feminino'
print('O sexo escolhido foi: {}'.format(sexo))
| 25.45 | 77 | 0.626719 |
1cb9dc6f2b7d7ce39dd3da6ba05cdb37926b58ec
| 1,468 |
py
|
Python
|
setup.py
|
sparks-baird/matbench
|
4424609454286e32fff2bcc724379b2a316c5a76
|
[
"MIT"
] | 15 |
2021-11-01T09:02:19.000Z
|
2022-03-19T10:59:41.000Z
|
setup.py
|
sparks-baird/matbench
|
4424609454286e32fff2bcc724379b2a316c5a76
|
[
"MIT"
] | 62 |
2021-09-20T14:09:59.000Z
|
2022-03-30T19:03:22.000Z
|
setup.py
|
ardunn/matbench
|
7d11a2d63766339ec00e610e2255be29b81544d3
|
[
"MIT"
] | 4 |
2021-03-22T10:37:42.000Z
|
2021-07-20T14:11:28.000Z
|
import os
from setuptools import setup, find_packages
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(MODULE_DIR, "requirements.txt"), "r") as f:
requirements = f.read().replace(" ", "").split("\n")
# source of version is in the constants file
VERSION_FILE = os.path.join(MODULE_DIR, "matbench/constants.py")
token = "VERSION = "
with open(VERSION_FILE, "r") as f:
version = None
for line in f.readlines():
if token in line:
version = line.replace(token, "").strip()
# Double quotes are contained in the read line, remove them
version = version.replace("\"", "")
if __name__ == "__main__":
setup(
name='matbench',
version=version,
description='a machine learning benchmark for materials science',
long_description="A machine learning benchmark for materials science. "
"https://github.com/materialsproject/matbench",
url='https://github.com/materialsproject/matbench',
author=['Alex Dunn', 'Anubhav Jain'],
author_email='[email protected]',
license='modified BSD',
packages=find_packages(where="."),
package_data={
"matbench": ["*.json"],
"matbench.tests": ["*.json"]
},
zip_safe=False,
install_requires=requirements,
extras_require={},
test_suite='matbench',
tests_require='tests',
include_package_data=True
)
| 34.139535 | 79 | 0.626703 |
1cd881f9de0a9712cd0134bcbb0c0a721c25ff49
| 1,872 |
py
|
Python
|
app/views_main.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
app/views_main.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
app/views_main.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
from app import app
from flask import render_template, jsonify, request, url_for, redirect, flash
from app.forms import loginForm
from app.datafile_functions import get_data
from flask_login import login_user, logout_user, current_user
from app.models import User
from werkzeug.urls import url_parse
@app.route('/index')
@app.route('/')
def index():
filename = app.config['DATA_FILENAME']
categories = get_data(filename)
timestamp = categories.get("timestamp", 0)
return render_template('index.html', title='Index',
categories=categories['categories'], timestamp=timestamp)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = loginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password', 'danger')
return redirect(url_for('login', next='/admin'))
login_user(user, remember=False)
next_page = request.args.get('next')
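        # Only follow relative "next" targets to avoid open redirects.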
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('admin')
return redirect(next_page)
return render_template('login.html', title='Login', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/json', methods=['GET', 'POST'])
def get_json():
'''
    Return the categories data as JSON.
    A POST request from an anonymous user returns 403.
'''
if request.method == 'POST':
if current_user.is_anonymous:
return jsonify(message='You must log in to do it'), 403
filename = app.config['DATA_FILENAME']
categories = get_data(filename)
return jsonify(categories)
| 31.2 | 84 | 0.672009 |
c77b4c2e0d65297eaf953b88d80b336b66f02824
| 508 |
py
|
Python
|
fastapi/tutorial-1-8-middleware/main.py
|
pisitj/practice-python-web-framework
|
5f7f60737b1cf9618e73ad8047b6c4f556d1feb0
|
[
"MIT"
] | null | null | null |
fastapi/tutorial-1-8-middleware/main.py
|
pisitj/practice-python-web-framework
|
5f7f60737b1cf9618e73ad8047b6c4f556d1feb0
|
[
"MIT"
] | null | null | null |
fastapi/tutorial-1-8-middleware/main.py
|
pisitj/practice-python-web-framework
|
5f7f60737b1cf9618e73ad8047b6c4f556d1feb0
|
[
"MIT"
] | null | null | null |
# https://fastapi.tiangolo.com/tutorial/middleware/
from fastapi import FastAPI, Request
import time
app = FastAPI()
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
    # before the endpoint runs: start timing the request
start_time = time.time()
    # pass the request on to the next handler (the route endpoint)
response = await call_next(request)
    # after the endpoint returns: attach the elapsed time as a header
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
@app.get("/")
def hello():
return {"message": "Hello World."}
| 22.086957 | 63 | 0.692913 |
c78cbb26046e24b3aadb9f6323af0b191a22ab0e
| 5,894 |
py
|
Python
|
FuncionesGraficas.py
|
Miguel-331/Proyecto-SUN
|
104afd03e05616a297fbda976d377f49a1f905ec
|
[
"Unlicense"
] | null | null | null |
FuncionesGraficas.py
|
Miguel-331/Proyecto-SUN
|
104afd03e05616a297fbda976d377f49a1f905ec
|
[
"Unlicense"
] | null | null | null |
FuncionesGraficas.py
|
Miguel-331/Proyecto-SUN
|
104afd03e05616a297fbda976d377f49a1f905ec
|
[
"Unlicense"
] | null | null | null |
def Menu():
print("Bienvenido, seleccione la operacion que desea realizar")
print("1.) Añadir")
print("2.) Asignar||Adicionar||Cancelar Materias")
print("3.) Modificar")
print("4.) Eliminar usuario")
print("5.) Mostrar bases de datos")
print("6.) Calificar")
print("7.) Buscar")
print("8.) Salir")
while True:
try:
opcion = int(input("Digite la opción: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion
def MenuAñadir():
while True:
print("1.) Estudiante")
print("2.) Profesor")
print("3.) Materia")
print("4.) Cancelar")
while True:
try:
opcion1 = int(input("Digite el tipo de dato a añadir: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion1
def MenuAsignar():
while True:
print("1.) Asignar materia a profesor")
print("2.) Inscribir materias para estudiante")
print("3.) Cancelacion de una materia")
print("4.) Cancelar")
while True:
try:
opcion2 = int(input("Digite la opcion que desea: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion2
def MenuModificar():
while True:
print("1.) Estudiantes")
print("2.) Profesor")
print("3.) Materias")
print("4.) Cancelar")
while True:
try:
opcion3 = int(input("Digite la opcion que desea: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion3
def MenuEstudiantes():
print("1.) Nombre")
print("2.) Apellido")
print("3.) Carrera")
print("4.) Estado")
print("5.) Cancelar")
def MenuProfesor():
print("1.) Nombre")
print("2.) Apellido")
print("3.) Estado")
print("4.) Cancelar")
def MenuMaterias():
print("1.) Nombre")
print("2.) Creditos")
print("3.) Cancelar")
def MenuEliminar():
while True:
print("1.) Estudiantes")
print("2.) Profesor")
print("3.) Cancelar")
while True:
try:
opcion4 = int(input("Digite la opcion que desea: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion4
def MenuMostrar():
while True:
print("1.) Estudiantes")
print("2.) Profesor")
print("3.) Materias")
print("4.) Materias asignadas a profesores")
print("5.) Materias vistas por estudiante")
print("6.) Cancelar")
while True:
try:
opcion5 = int(input("Digite la opcion que desea: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion5
def MenuCalificaciones():
while True:
print("1.) Calificar materias")
print("2.) Calcular PAPA")
print("3.) Cancelar")
while True:
try:
opcion6 = int(input("Digite la opcion que desea: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion6
def MenuBuscar():
while True:
print("1.) Buscar Estudiante")
print("2.) Buscar Profesor")
print("3.) Buscar Materia")
while True:
try:
opcion7 = int(input("Digite la opcion que desea: "))
break
except:
print("Numero no valido, escoga denuevo.")
return opcion7
def ImprimirTabla(tabla):
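    # Print the column header row for the given table key
    # ("ESTUDIANTES", "PROFESORES", "MATERIAS", "MATERIASDOC" or "MATERIASEST").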
if (tabla == "ESTUDIANTES"):
print("Nombres"," ","Apellidos"," ","ID"," ","Codigo Plan de estudios"," ","Estado"," "," P.A.P.A.")
print("")
print("")
if (tabla == "PROFESORES"):
print("Nombres"," ","Apellidos"," ","ID"," ","Estado")
print("")
print("")
if (tabla == "MATERIAS"):
print("Codigo"," ","Nombre"," ","Codigo de Facultad"," ","Codigo plan de estudio"," ","Creditos"," "," Prerequisito"," ","Codigo del prerequisito")
print("")
print("")
if (tabla == "MATERIASDOC"):
print("Codigo"," ","Nombre"," ","Nombre Docente"," ","Apellido Docente"," ","ID"," "," Hora de inicio"," ","Horas clase"," ","Dias"," ","Cupos"," ","Grupo")
print("")
print("")
if (tabla == "MATERIASEST"):
print("Codigo"," ","Nombre"," ","ID Estudiante"," ","Nombre Estudiante"," ","Apellido Estudiante"," "," Grupo"," ","ID Profesor"," "," Nombre Profesor"," ","Apellido Profesor"," ","Hora final"," ","Dias"," ","Estatus"," ","Calificación")
print("")
print("")
def ImprimirTabla2(tabla):
if (tabla == "ESTUDIANTES"):
print("")
print("")
if (tabla == "PROFESORES"):
print("")
print("")
if (tabla == "MATERIAS"):
print("")
print("")
if (tabla == "MATERIASDOC"):
print("")
print("")
if (tabla == "MATERIASEST"):
print("Codigo materia"," ","id estudiante",)
print("")
print("")
def Salir():
condition = True
salida = input("Esta seguro que desea salir Y|N: ")
if (salida == "Y"):
condition = False
else:
pass
return condition
| 29.61809 | 245 | 0.470478 |
1bffa6f7b76480c09b698387a0a15acb09b67803
| 17,767 |
py
|
Python
|
tensorflow/basic-rl/tutorial5/tests/dqn-deer-keras-cartpole.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
tensorflow/basic-rl/tutorial5/tests/dqn-deer-keras-cartpole.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
tensorflow/basic-rl/tutorial5/tests/dqn-deer-keras-cartpole.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-10-14T07:30:18.000Z
|
2019-10-14T07:30:18.000Z
|
'''
Deep Q-learning approach to the cartpole problem
using OpenAI's gym environment.
As part of the basic series on reinforcement learning @
https://github.com/vmayoral/basic_reinforcement_learning
Inspired by https://github.com/VinF/deer
@author: Victor Mayoral Vilches <[email protected]>
'''
import gym
import random
import pandas
import numpy as np
from keras.models import Model
from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape
import theano.tensor as T
import sys
import logging
from theano import config
class QNetwork(object):
""" All the Q-networks classes should inherit this interface.
Parameters
-----------
environment : object from class Environment
The environment linked to the Q-network
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
"""
def __init__(self, environment, batch_size):
self._environment = environment
self._df = 0.9
self._lr = 0.0002
self._input_dimensions = self._environment.inputDimensions()
self._n_actions = self._environment.nActions()
self._batch_size = batch_size
def train(self, states, actions, rewards, nextStates, terminals):
""" This method performs the Bellman iteration for one batch of tuples.
"""
raise NotImplementedError()
def chooseBestAction(self, state):
""" Get the best action for a belief state
"""
raise NotImplementedError()
def qValues(self, state):
""" Get the q value for one belief state
"""
raise NotImplementedError()
def setLearningRate(self, lr):
""" Setting the learning rate
Parameters
-----------
lr : float
The learning rate that has to bet set
"""
self._lr = lr
def setDiscountFactor(self, df):
""" Setting the discount factor
Parameters
-----------
df : float
The discount factor that has to bet set
"""
if df < 0. or df > 1.:
            raise ValueError("The discount factor should be in [0,1]")
self._df = df
def learningRate(self):
""" Getting the learning rate
"""
return self._lr
def discountFactor(self):
""" Getting the discount factor
"""
return self._df
class NN():
"""
Deep Q-learning network using Keras
Parameters
-----------
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
input_dimensions :
n_actions :
random_state : numpy random number generator
"""
def __init__(self, batch_size, input_dimensions, n_actions, random_state):
self._input_dimensions=input_dimensions
self._batch_size=batch_size
self._random_state=random_state
self._n_actions=n_actions
def _buildDQN(self):
"""
Build a network consistent with each type of inputs
"""
layers=[]
outs_conv=[]
inputs=[]
for i, dim in enumerate(self._input_dimensions):
nfilter=[]
# - observation[i] is a FRAME
if len(dim) == 3: #FIXME
input = Input(shape=(dim[0],dim[1],dim[2]))
inputs.append(input)
#reshaped=Reshape((dim[0],dim[1],dim[2]), input_shape=(dim[0],dim[1]))(input)
x = Convolution2D(32, 8, 8, border_mode='valid')(input)
x = MaxPooling2D(pool_size=(4, 4), strides=None, border_mode='valid')(x)
x = Convolution2D(64, 4, 4, border_mode='valid')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid')(x)
x = Convolution2D(64, 3, 3)(x)
out = Flatten()(x)
# - observation[i] is a VECTOR
elif len(dim) == 2 and dim[0] > 3: #FIXME
input = Input(shape=(dim[0],dim[1]))
inputs.append(input)
reshaped=Reshape((1,dim[0],dim[1]), input_shape=(dim[0],dim[1]))(input)
x = Convolution2D(16, 2, 1, border_mode='valid')(reshaped)
x = Convolution2D(16, 2, 2)(x)
out = Flatten()(x)
# - observation[i] is a SCALAR -
else:
if dim[0] > 3:
# this returns a tensor
input = Input(shape=(dim[0],))
inputs.append(input)
reshaped=Reshape((1,1,dim[0]), input_shape=(dim[0],))(input)
x = Convolution2D(8, 1, 2, border_mode='valid')(reshaped)
x = Convolution2D(8, 1, 2)(x)
out = Flatten()(x)
else:
if(len(dim) == 2):
# this returns a tensor
input = Input(shape=(dim[1],dim[0]))
inputs.append(input)
out = Flatten()(input)
if(len(dim) == 1):
input = Input(shape=(dim[0],))
inputs.append(input)
out=input
outs_conv.append(out)
if len(outs_conv)>1:
x = merge(outs_conv, mode='concat')
else:
x= outs_conv [0]
# we stack a deep fully-connected network on top
x = Dense(50, activation='relu')(x)
x = Dense(20, activation='relu')(x)
out = Dense(self._n_actions)(x)
model = Model(input=inputs, output=out)
layers=model.layers
# Grab all the parameters together.
params = [ param
for layer in layers
for param in layer.trainable_weights ]
return model, params
from warnings import warn
from keras.optimizers import SGD,RMSprop
class MyQNetwork(QNetwork):
"""
Deep Q-learning network using Keras
Parameters
-----------
environment : object from class Environment
rho : float
Parameter for rmsprop. Default : 0.9
rms_epsilon : float
Parameter for rmsprop. Default : 0.0001
momentum : float
Default : 0
clip_delta : float
Not implemented.
freeze_interval : int
Period during which the target network is freezed and after which the target network is updated. Default : 1000
batch_size : int
Number of tuples taken into account for each iteration of gradient descent. Default : 32
network_type : str
Not used. Default : None
update_rule: str
{sgd,rmsprop}. Default : rmsprop
batch_accumulator : str
{sum,mean}. Default : sum
random_state : numpy random number generator
double_Q : bool, optional
Activate or not the double_Q learning.
More informations in : Hado van Hasselt et al. (2015) - Deep Reinforcement Learning with Double Q-learning.
neural_network : object, optional
default is deer.qnetworks.NN_keras
"""
def __init__(self, environment, rho=0.9, rms_epsilon=0.0001, momentum=0, clip_delta=0, freeze_interval=1000, batch_size=32, network_type=None, update_rule="rmsprop", batch_accumulator="sum", random_state=np.random.RandomState(), double_Q=False, neural_network=NN):
""" Initialize environment
"""
QNetwork.__init__(self,environment, batch_size)
self._rho = rho
self._rms_epsilon = rms_epsilon
self._momentum = momentum
#self.clip_delta = clip_delta
self._freeze_interval = freeze_interval
self._double_Q = double_Q
self._random_state = random_state
self.update_counter = 0
Q_net = neural_network(self._batch_size, self._input_dimensions, self._n_actions, self._random_state)
self.q_vals, self.params = Q_net._buildDQN()
if update_rule == 'deepmind_rmsprop':
warn("The update_rule used is rmsprop")
update_rule='rmsprop'
if (update_rule=="sgd"):
optimizer = SGD(lr=self._lr, momentum=momentum, nesterov=False)
elif (update_rule=="rmsprop"):
optimizer = RMSprop(lr=self._lr, rho=self._rho, epsilon=self._rms_epsilon)
else:
            raise Exception('The update_rule ' + update_rule + ' is not '
                            'implemented.')
self.q_vals.compile(optimizer=optimizer, loss='mse')
self.next_q_vals, self.next_params = Q_net._buildDQN()
self.next_q_vals.compile(optimizer='rmsprop', loss='mse') #The parameters do not matter since training is done on self.q_vals
self.q_vals.summary()
# self.next_q_vals.summary()
self._resetQHat()
def toDump(self):
# FIXME
return None,None
def train(self, states_val, actions_val, rewards_val, next_states_val, terminals_val):
"""
Train one batch.
1. Set shared variable in states_shared, next_states_shared, actions_shared, rewards_shared, terminals_shared
2. perform batch training
Parameters
-----------
states_val : list of batch_size * [list of max_num_elements* [list of k * [element 2D,1D or scalar]])
actions_val : b x 1 numpy array of integers
rewards_val : b x 1 numpy array
next_states_val : list of batch_size * [list of max_num_elements* [list of k * [element 2D,1D or scalar]])
terminals_val : b x 1 numpy boolean array (currently ignored)
Returns
-------
Average loss of the batch training
Individual losses for each tuple
"""
if self.update_counter % self._freeze_interval == 0:
self._resetQHat()
next_q_vals = self.next_q_vals.predict(next_states_val.tolist())
if(self._double_Q==True):
next_q_vals_current_qnet=self.q_vals.predict(next_states_val.tolist())
argmax_next_q_vals=np.argmax(next_q_vals_current_qnet, axis=1)
max_next_q_vals=next_q_vals[np.arange(self._batch_size),argmax_next_q_vals].reshape((-1, 1))
else:
max_next_q_vals=np.max(next_q_vals, axis=1, keepdims=True)
not_terminals=np.ones_like(terminals_val) - terminals_val
target = rewards_val + not_terminals * self._df * max_next_q_vals.reshape((-1))
q_vals=self.q_vals.predict(states_val.tolist())
# In order to obtain the individual losses, we predict the current Q_vals and calculate the diff
q_val=q_vals[np.arange(self._batch_size), actions_val.reshape((-1,))]#.reshape((-1, 1))
diff = - q_val + target
loss_ind=0.5*pow(diff,2)
q_vals[ np.arange(self._batch_size), actions_val.reshape((-1,)) ] = target
# Is it possible to use something more flexible than this?
        # Only some elements of next_q_vals are actual values that I target.
# My loss should only take these into account.
# Workaround here is that many values are already "exact" in this update
loss=self.q_vals.train_on_batch(states_val.tolist() , q_vals )
self.update_counter += 1
return np.sqrt(loss),loss_ind
def qValues(self, state_val):
""" Get the q values for one belief state
Arguments
---------
state_val : one belief state
Returns
-------
The q value for the provided belief state
"""
return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]
def chooseBestAction(self, state):
""" Get the best action for a belief state
Arguments
---------
state : one belief state
Returns
-------
The best action : int
"""
q_vals = self.qValues(state)
return np.argmax(q_vals)
def _resetQHat(self):
for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
next_param.set_value(param.get_value())
from deer.base_classes import Environment
import copy
class MyEnv(Environment):
def __init__(self, rng):
""" Initialize environment.
Arguments:
rng - the numpy random number generator
"""
# Defining the type of environment
self.env = gym.make('CartPole-v0')
self._last_observation = self.env.reset()
self.is_terminal=False
        self._input_dim = [(1,), (1,), (1,), (1,)]  # self.env.observation_space.shape is (4,)
# and we use only the current value in the belief state
def act(self, action):
""" Simulate one time step in the environment.
"""
self._last_observation, reward, self.is_terminal, info = self.env.step(action)
if (self.mode==0): # Show the policy only at test time
self.env.render()
return reward
def reset(self, mode=0):
""" Reset environment for a new episode.
Arguments:
Mode : int
-1 corresponds to training and 0 to test
"""
# Reset initial observation to a random x and theta
self._last_observation = self.env.reset()
self.is_terminal=False
self.mode=mode
return self._last_observation
def inTerminalState(self):
"""Tell whether the environment reached a terminal state after the last transition (i.e. the last transition
that occured was terminal).
"""
return self.is_terminal
def inputDimensions(self):
return self._input_dim
def nActions(self):
        return 2 # Would be useful to take this directly from gym: self.env.action_space.n
def observe(self):
return copy.deepcopy(self._last_observation)
import deer.experiment.base_controllers as bc
from deer.default_parser import process_args
from deer.agent import NeuralAgent
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 200
EPOCHS = 300
STEPS_PER_TEST = 200
PERIOD_BTW_SUMMARY_PERFS = 10
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'sgd'
BATCH_ACCUMULATOR = 'sum'
LEARNING_RATE = 0.1
LEARNING_RATE_DECAY = 0.99
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.95
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_DELTA = 1.0
EPSILON_START = 1.0
EPSILON_MIN = 0.2
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
NETWORK_TYPE = "General_DQN_0"
FREEZE_INTERVAL = 100
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(12345)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = MyEnv(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_delta,
parameters.freeze_interval,
parameters.batch_size,
parameters.network_type,
parameters.update_rule,
parameters.batch_accumulator,
rng,
double_Q=True)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng)
# --- Bind controllers to the agent ---
# For comments, please refer to run_toy_env.py
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=False,
show_avg_Bellman_residual=False))
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
controllers_to_disable=[0, 1, 2, 3, 4],
periodicity=2,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
| 32.659926 | 268 | 0.592784 |
90fff11e633c824332314f01e6d26dbe6e15cd96
| 949 |
py
|
Python
|
voltmeter.py
|
rickroty/SL1
|
bfa319561ed3ecc64277874be16e028c3a717693
|
[
"MIT"
] | null | null | null |
voltmeter.py
|
rickroty/SL1
|
bfa319561ed3ecc64277874be16e028c3a717693
|
[
"MIT"
] | null | null | null |
voltmeter.py
|
rickroty/SL1
|
bfa319561ed3ecc64277874be16e028c3a717693
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
import time
from time import sleep
import datetime
import socket
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SWITCH = 18
VOLTMETER = 15
GPIO.setmode(GPIO.BOARD)
GPIO.setup(SWITCH, GPIO.IN)
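# NOTE: no internal pull resistor is set here, so the switch input presumably
# relies on an external pull-down.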
GPIO.setup(VOLTMETER, GPIO.OUT)
GPIO.output(VOLTMETER, GPIO.HIGH)
print 'VOLT METER ON'
meteron = True
run = True
while (run):
    if GPIO.input(SWITCH):
if meteron:
meteron = False
            GPIO.output(VOLTMETER, GPIO.LOW)
            # a closed socket cannot be reused, so create a fresh one each time
            clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            clientsocket.connect(('localhost', 5000))
            clientsocket.send('killusb')
            clientsocket.close()
print 'VOLT METER OFF'
print 'USB OFF'
sleep(0.5)
else:
meteron = True
GPIO.output(VOLTMETER, GPIO.HIGH)
print 'VOLT METER ON'
sleep(0.5)
sleep(0.2)
GPIO.output(VOLTMETER, GPIO.LOW)
GPIO.cleanup()
| 21.088889 | 64 | 0.596417 |
293887025869f6804f2b6143c74fef1c04eb5ea7
| 1,821 |
py
|
Python
|
solutions/graph_based_recommend/webserver/src/service/search.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2021-01-11T18:40:22.000Z
|
2021-01-11T18:40:22.000Z
|
solutions/graph_based_recommend/webserver/src/service/search.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
solutions/graph_based_recommend/webserver/src/service/search.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
import logging as log
from common.config import MILVUS_TABLE, OUT_PATH, OUT_DATA
from indexer.index import milvus_client, search_vectors, get_vector_by_ids
from indexer.tools import connect_mysql, search_by_milvus_id
import numpy as np
import torch
import pickle
import dgl
import json
import random
def get_list_info(conn, cursor, table_name, host, list_ids):
if not table_name:
table_name = MILVUS_TABLE
list_info = {}
list_img = []
for ids in list_ids:
ids = ids[:-4]
info, img = get_ids_info(conn, cursor, table_name, host, int(ids))
title = info["Title"]
year = info["Year"]
list_info[ids] = [title, year, img]
return list_info
def get_ids_info(conn, cursor, table_name, host, ids):
if not table_name:
table_name = MILVUS_TABLE
info = search_by_milvus_id(conn, cursor, table_name, str(ids))
info = json.loads(info[1], strict=False)
img = "http://"+ str(host) + "/getImage?img=" + str(ids)
print("============", img)
return info, img
def do_search(index_client, conn, cursor, img_list, search_id, table_name):
if not table_name:
table_name = MILVUS_TABLE
_, vector_item = get_vector_by_ids(index_client, table_name, search_id)
status, results = search_vectors(index_client, table_name, vector_item)
print("-----milvus search status------", status)
results_ids = []
search_num = len(search_id)
num = 100/search_num
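    # spread the ~100 recommendations evenly across the seed ids:
    # each seed contributes at most `num` of its nearest neighbours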
print("-----num:", num)
for results_id in results.id_array:
k = 0
for i in results_id:
if k >= num:
break
img = str(i) +'.jpg'
if img in img_list and i not in search_id:
results_ids.append(img)
k += 1
# print(results_ids)
return results_ids
| 29.370968 | 75 | 0.641955 |
468f77f5a138358b08e4913613f99c872a484350
| 7,475 |
py
|
Python
|
research/cv/retinanet_resnet152/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
research/cv/retinanet_resnet152/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/retinanet_resnet152/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train retinanet and get checkpoint files."""
import os
import argparse
import ast
import mindspore
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.communication.management import init, get_rank
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor, Callback
from mindspore.train import Model
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from src.retinahead import retinanetWithLossCell, TrainingWrapper, retinahead
from src.backbone import resnet152
from src.config import config
from src.dataset import create_retinanet_dataset, create_mindrecord
from src.lr_schedule import get_lr
from src.init_params import init_net_param, filter_checkpoint_parameter
set_seed(1)
class Monitor(Callback):
"""
Monitor loss and time.
Args:
lr_init (numpy array): train lr
Returns:
None
Examples:
        >>> Monitor(lr_init=Tensor([0.05]*100).asnumpy())
"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def step_end(self, run_context):
cb_params = run_context.original_args()
print("lr:[{:8.6f}]".format(self.lr_init[cb_params.cur_step_num-1]), flush=True)
def main():
parser = argparse.ArgumentParser(description="retinanet training")
parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False,
help="If set it true, only create Mindrecord, default is False.")
parser.add_argument("--distribute", type=ast.literal_eval, default=False,
help="Run distribute, default is False.")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
parser.add_argument("--lr", type=float, default=0.1, help="Learning rate, default is 0.1.")
parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink.")
parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
parser.add_argument("--epoch_size", type=int, default=500, help="Epoch size, default is 500.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size, default is 32.")
parser.add_argument("--pre_trained", type=str, default=None, help="Pretrained Checkpoint file path.")
parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.")
parser.add_argument("--save_checkpoint_epochs", type=int, default=1, help="Save checkpoint epochs, default is 1.")
parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
help="Filter weight parameters, default is False.")
parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend"),
help="run platform, only support Ascend.")
args_opt = parser.parse_args()
if args_opt.run_platform == "Ascend":
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
if args_opt.distribute:
if os.getenv("DEVICE_ID", "not_set").isdigit():
context.set_context(device_id=int(os.getenv("DEVICE_ID")))
init()
device_num = args_opt.device_num
rank = get_rank()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
else:
rank = 0
device_num = 1
context.set_context(device_id=args_opt.device_id)
else:
raise ValueError("Unsupported platform.")
mindrecord_file = create_mindrecord(args_opt.dataset, "retina6402.mindrecord", True)
if not args_opt.only_create_dataset:
loss_scale = float(args_opt.loss_scale)
# When create MindDataset, using the fitst mindrecord file, such as retinanet.mindrecord0.
dataset = create_retinanet_dataset(mindrecord_file, repeat_num=1,
batch_size=args_opt.batch_size, device_num=device_num, rank=rank)
dataset_size = dataset.get_dataset_size()
print("Create dataset done!")
backbone = resnet152(config.num_classes)
retinanet = retinahead(backbone, config)
net = retinanetWithLossCell(retinanet, config)
net.to_float(mindspore.float16)
init_net_param(net)
if args_opt.pre_trained:
if args_opt.pre_trained_epoch_size <= 0:
raise KeyError("pre_trained_epoch_size must be greater than 0.")
param_dict = load_checkpoint(args_opt.pre_trained)
if args_opt.filter_weight:
filter_checkpoint_parameter(param_dict)
load_param_into_net(net, param_dict)
lr = Tensor(get_lr(global_step=config.global_step,
lr_init=config.lr_init, lr_end=config.lr_end_rate * args_opt.lr, lr_max=args_opt.lr,
warmup_epochs1=config.warmup_epochs1, warmup_epochs2=config.warmup_epochs2,
warmup_epochs3=config.warmup_epochs3, warmup_epochs4=config.warmup_epochs4,
warmup_epochs5=config.warmup_epochs5, total_epochs=args_opt.epoch_size,
steps_per_epoch=dataset_size))
opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
config.momentum, config.weight_decay, loss_scale)
net = TrainingWrapper(net, opt, loss_scale)
model = Model(net)
print("Start train retinanet, the first epoch will be slower because of the graph compilation.")
cb = [TimeMonitor(), LossMonitor()]
cb += [Monitor(lr_init=lr.asnumpy())]
config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix="retinanet", directory=config.save_checkpoint_path, config=config_ck)
if args_opt.distribute:
if rank == 0:
cb += [ckpt_cb]
model.train(args_opt.epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
else:
cb += [ckpt_cb]
model.train(args_opt.epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
if __name__ == '__main__':
main()
| 48.225806 | 118 | 0.676789 |
4696144662db2061d9a17792e84dcf6ecbd65e72
| 195 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 28/28.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
50-Python-Exercises/Exercises/Exercise 28/28.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
50-Python-Exercises/Exercises/Exercise 28/28.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
#Why is there an error and how do you fix it?
#A: A TypeError means you are using the wrong type in an operation. foo prints a + b but returns None, so foo(2, 3) * 10 multiplies None by 10. Change print(a + b) to return a + b.
def foo(a, b):
print(a + b)
x = foo(2, 3) * 10
| 24.375 | 104 | 0.65641 |
73196498306bcb2ab659c3ad51073f4f5a735e8a
| 3,363 |
py
|
Python
|
HPOTerms/diseaseToHPO.py
|
ecarl-glitch/LeukoDB
|
57c39d431430fae9def5109b4d6aa457ef1a25cf
|
[
"MIT"
] | null | null | null |
HPOTerms/diseaseToHPO.py
|
ecarl-glitch/LeukoDB
|
57c39d431430fae9def5109b4d6aa457ef1a25cf
|
[
"MIT"
] | null | null | null |
HPOTerms/diseaseToHPO.py
|
ecarl-glitch/LeukoDB
|
57c39d431430fae9def5109b4d6aa457ef1a25cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Input files for this script are hpoTermDef.obo (downloaded from HPO, flat file of HPO term defs),
# OMIM_NumList.csv (list of OMIM Nums generated by OMIMdisease scripts, and phenotype_annotation.tab,
# a file downloaded from the online HPO, that maps HPO terms onto OMIM disease ids (OMIM_Num)
# Output is HPOforLeukoDisease.tsv, a tab-delimited file of OMIM_Nums matched to
# their corresponding HPO terms
# pandas used to read in OMIM_NumList and phenotype_annotation as dataframes
# PURPOSE: This file creates the tsv file that is used to create the diseaseHPO table by parsing previously created files
# files downloaded from the online Human Phenotype Ontology
import pandas as pd
from functools import reduce
outputfile = open('outputFile/HPOforLeukoDisease.tsv', 'w')
hpoTermDef = open('inputFile/hpoTermDef.obo', 'r')
HPO_annotation = pd.read_csv('inputFile/phenotype_annotation.tab', sep='\t',
index_col=0, low_memory=False)
OMIMNums = pd.read_csv('inputFile/OMIM_NumList.csv', low_memory=False)
OMIMList = OMIMNums['OMIM_Num'].values.tolist()
# Empty dataframe to which extracted values will be added
df = pd.DataFrame(columns=['OMIM_Num', 'diseaseName', 'HPO_ID'])
# Empty HPOList to keep track of HPO IDs to be searched for
HPOList = []
# Loop extracts values from dataframe, adds them to a temp dataframe, and then appends that to the existing dataframe
for (diseaseID, HPOID, diseaseName) in \
zip(HPO_annotation['disease-identifier'], HPO_annotation['HPO-ID'],
HPO_annotation['disease-name']):
if diseaseID in OMIMList:
# temp dataframe for storing values
df2 = pd.DataFrame([[diseaseID, diseaseName, HPOID]],
columns=['OMIM_Num', 'diseaseName', 'HPO_ID'
])
df = df.append(df2, ignore_index=True)
# add the HPO_ID onto the list, to be used to map HPO IDs to HPO Names
HPOList.append(HPOID)
# Reads in the first line of the hpoTermDef file
line = hpoTermDef.readline()
# Creates an empty dataframe to store HPO ids and names together
hpoDF = pd.DataFrame(columns=['HPO_ID', 'HPO_Name'])
for line in hpoTermDef:
# "id: " indicates that line contains an HPO ID
if line.startswith('id: '):
# Splits line, strips the new line char, then determines if HPO ID is in list
line = line.split(' ')
# Temp dataframe is created to store the HPO name and ID, then is appended to HPODF
if line[1].strip('\n') in HPOList:
HPOID = line[1].strip('\n')
line = hpoTermDef.readline()
            # take the text after the 'name: ' prefix (str.strip removes a set of characters, not a prefix)
            HPOName = line[len('name: '):].strip('\n')
hpoDF_Temp = pd.DataFrame([[HPOID, HPOName]],
columns=['HPO_ID', 'HPO_Name'])
hpoDF = hpoDF.append(hpoDF_Temp, ignore_index=True)
# List of dataframes to be joined together
dfs = [df, hpoDF]
# dataframes are joined based on matching HPO ID
final_df = reduce(lambda left, right: pd.merge(left, right, on=['HPO_ID'
], how='outer'), dfs)
# Sorts Values by OMIM Num (Intially sorted by HPO ID) and then writes dataframe to output file
final_df.sort_values(by=['OMIM_Num'], inplace=True)
final_df.to_csv('outputFile/HPOforLeukoDisease.tsv', sep='\t')
outputfile.close()
hpoTermDef.close()
| 35.03125 | 121 | 0.688373 |
b458199336d3b740b224d4d5647915a21729ba73
| 739 |
py
|
Python
|
musterloesungen/6.3/durchschnitt.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
musterloesungen/6.3/durchschnitt.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
musterloesungen/6.3/durchschnitt.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Course: Python: Grundlagen der Programmierung für Nicht-Informatiker
# Semester: fall semester 2018
# Homepage: http://accaputo.ch/kurs/python-uzh-hs-2018/
# Author: Giuseppe Accaputo
# Exercise: 6.3
def durchschnitt(zahlen):
    # To compute the average, we first need the
    # sum of all the numbers
summe = 0
for zahl in zahlen:
summe = summe + zahl
    # The average is the sum of all numbers
    # divided by how many numbers there are
anzahl_zahlen = len(zahlen)
avg = summe / float(anzahl_zahlen)
return avg
print(durchschnitt([1,2,3,4]))
print(durchschnitt([4,18,30,-20]))
print(durchschnitt([3,3,3,3]))
| 29.56 | 76 | 0.675237 |
c31120c6fcaece42c81e57b6c3655c16b4a9ddb2
| 3,798 |
py
|
Python
|
test/test_tag16.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
test/test_tag16.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
test/test_tag16.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from io import StringIO
import re
try:
from pyventskalender import tag16_loesung as heute
except ImportError:
from pyventskalender import tag16 as heute
class RepliesExhausted(Exception):
pass
def make_fake_input(replies, prompts):
"""
Create a function to use as mock for `input` that will store the prompt
(passed to `input`) in `prompts` and that returns the `replies` as
`input` would return a reply the user types.
"""
def fake_input(prompt):
prompts.append(prompt)
if len(replies) == 0:
raise RepliesExhausted
return replies.pop(0)
return fake_input
EXPECTED_RE = re.compile(r"(<.*?>)")
class Tag16Tests(TestCase):
def test_10_aufgeben(self):
replies = ["ich gebe auf"]
prompts = []
with patch('builtins.input', make_fake_input(replies, prompts)):
with patch('sys.stdout', new=StringIO()) as fake_out:
wort = "asdfasdfasdf"
heute.galgenmannspiel(wort)
ausgegebener_text = fake_out.getvalue()
self.assertTrue(ausgegebener_text.startswith("Gesucht:"))
self.assertIn("Ok. Das Wort wäre gewesen:", ausgegebener_text)
self.assertIn(wort, ausgegebener_text)
def test_20_genau_einen_buchstaben(self):
replies = ["xxx", "x"]
prompts = []
with patch('builtins.input', make_fake_input(replies, prompts)):
with patch('sys.stdout', new=StringIO()) as fake_out:
wort = "asdfasdfasdf"
try:
heute.galgenmannspiel(wort)
except RepliesExhausted:
# If we enter the infinite loop, we abort it by raising
# this exception
pass
ausgegebener_text = fake_out.getvalue()
self.assertTrue(ausgegebener_text.startswith("Gesucht:"))
self.assertIn("Bitte genau einen Buchstaben angeben", ausgegebener_text)
self.assertNotIn(wort, ausgegebener_text)
def test_30_galgenmannspiel_gewonnen(self):
wort = "AAbbAaBb"
replies = ["a", "x", "y", "b"]
prompts = []
with patch('builtins.input', make_fake_input(replies, prompts)):
with patch('sys.stdout', new=StringIO()) as fake_out:
ergebnis = heute.galgenmannspiel(wort)
self.assertTrue(ergebnis)
ausgegebener_text = fake_out.getvalue()
self.assertNotIn("Ok. Das Wort wäre gewesen:", ausgegebener_text)
self.assertNotIn("Bitte genau einen Buchstaben angeben", ausgegebener_text)
self.assertIn("x ist leider falsch; bisher falsch geraten: ['x']", ausgegebener_text)
self.assertIn("y ist leider falsch; bisher falsch geraten: ['x', 'y']", ausgegebener_text)
self.assertIn(wort, ausgegebener_text)
self.assertIn("Erraten", ausgegebener_text)
def test_40_galgenmannspiel_verloren(self):
wort = "AAbbAaBb"
replies = 100 * ["x"]
prompts = []
with patch('builtins.input', make_fake_input(replies, prompts)):
with patch('sys.stdout', new=StringIO()) as fake_out:
ergebnis = heute.galgenmannspiel(wort)
self.assertFalse(ergebnis,
msg=f"Ich muss verlieren, wenn ich immer nur x eingebe aber {wort} gesucht ist.")
ausgegebener_text = fake_out.getvalue()
self.assertIn(wort, ausgegebener_text)
self.assertNotIn("Erraten", ausgegebener_text)
self.assertIn("Leider nicht geklappt", ausgegebener_text)
| 39.154639 | 114 | 0.609005 |
6f608aeed71d46c0547b6891bd3f34718d57f747
| 5,466 |
py
|
Python
|
research/cv/advanced_east/src/vgg.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/advanced_east/src/vgg.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/advanced_east/src/vgg.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Image classifiation.
"""
import math
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.common import initializer as init
from mindspore.common.initializer import initializer, HeNormal
from src.config import config
npy = config['vgg_npy']
def _make_layer(base, args, batch_norm):
"""Make stage network of VGG."""
layers = []
in_channels = 3
layer = []
for v in base:
if v == 'M':
layer += [nn.MaxPool2d(kernel_size=2, stride=2)]
layers.append(layer)
layer = []
else:
weight = 'ones'
if args.initialize_mode == "XavierUniform":
weight_shape = (v, in_channels, 3, 3)
weight = initializer('XavierUniform', shape=weight_shape, dtype=mstype.float32).to_tensor()
conv2d = nn.Conv2d(in_channels=in_channels,
out_channels=v,
kernel_size=3,
padding=args.padding,
pad_mode=args.pad_mode,
has_bias=args.has_bias,
weight_init=weight)
if batch_norm:
layer += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
else:
layer += [conv2d, nn.ReLU()]
in_channels = v
layer1 = nn.SequentialCell(layers[0])
layer2 = nn.SequentialCell(layers[1])
layer3 = nn.SequentialCell(layers[2])
layer4 = nn.SequentialCell(layers[3])
layer5 = nn.SequentialCell(layers[4])
return layer1, layer2, layer3, layer4, layer5
class Vgg(nn.Cell):
"""
VGG network definition.
Args:
base (list): Configuration for different layers, mainly the channel number of Conv layer.
num_classes (int): Class numbers. Default: 1000.
batch_norm (bool): Whether to do the batchnorm. Default: False.
batch_size (int): Batch size. Default: 1.
Returns:
Tensor, infer output tensor.
Examples:
>>> Vgg([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
>>> num_classes=1000, batch_norm=False, batch_size=1)
"""
def __init__(self, base, num_classes=1000, batch_norm=False, batch_size=1, args=None, phase="train"):
super(Vgg, self).__init__()
_ = batch_size
self.layer1, self.layer2, self.layer3, self.layer4, self.layer5 = _make_layer(base, args, batch_norm=batch_norm)
if args.initialize_mode == "KaimingNormal":
#default_recurisive_init(self)
self.custom_init_weight()
def construct(self, x):
l1 = self.layer1(x)
l2 = self.layer2(l1)
l3 = self.layer3(l2)
l4 = self.layer4(l3)
l5 = self.layer5(l4)
return l2, l3, l4, l5
def custom_init_weight(self):
"""
Init the weight of Conv2d and Dense in the net.
"""
for _, cell in self.cells_and_names():
if isinstance(cell, nn.Conv2d):
cell.weight.set_data(init.initializer(
HeNormal(negative_slope=math.sqrt(5), mode='fan_out', nonlinearity='relu'),
cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
elif isinstance(cell, nn.Dense):
cell.weight.set_data(init.initializer(
init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
cfg = {
'11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg16(num_classes=1000, args=None, phase="train"):
"""
Get Vgg16 neural network with batch normalization.
Args:
num_classes (int): Class numbers. Default: 1000.
args(namespace): param for net init.
phase(str): train or test mode.
Returns:
Cell, cell instance of Vgg16 neural network with batch normalization.
Examples:
>>> vgg16(num_classes=1000, args=args)
"""
if args is None:
from src.config import cifar_cfg
args = cifar_cfg
net = Vgg(cfg['16'], num_classes=num_classes, args=args, batch_norm=args.batch_norm, phase=phase)
return net
| 37.183673 | 120 | 0.579949 |
48eb12ff8ba6f2eee2f82f947c0207b7d4128bc2
| 942 |
py
|
Python
|
extract/extract.py
|
AlexTibet/ETL
|
9291712943d0c0f33b288c9fa39be20975f17e72
|
[
"MIT"
] | null | null | null |
extract/extract.py
|
AlexTibet/ETL
|
9291712943d0c0f33b288c9fa39be20975f17e72
|
[
"MIT"
] | null | null | null |
extract/extract.py
|
AlexTibet/ETL
|
9291712943d0c0f33b288c9fa39be20975f17e72
|
[
"MIT"
] | null | null | null |
import os
from extract.readers import CSVReader, JSONReader, XLSReader, Reader
from typing import Type
class Extractor:
path: str
data: list
_readers: dict[str, Type[Reader]]
default_path = os.path.join(os.getcwd(), 'input_files')
def __init__(self, path: str = default_path):
self.path = path
self.data = []
self._readers = {
'.csv': CSVReader,
'.json': JSONReader,
            '.xls': XLSReader,
}
print(self.path)
    def get_data(self, path: str = default_path) -> list[dict]:
files = os.listdir(path)
for file in files:
self._extract_data(file, path)
return self.data
def _extract_data(self, file, path):
filename, file_extension = os.path.splitext(file)
filepath = os.path.join(path, file)
reader = self._readers[file_extension]()
self.data.extend(reader.get_data(filepath))
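# Example usage (a minimal sketch; assumes an ./input_files directory holding
# files with supported extensions):
#     extractor = Extractor()
#     records = extractor.get_data()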
| 25.459459 | 68 | 0.604034 |
48f326fde4b047140b545bb8a9ea086e015c86fc
| 2,043 |
py
|
Python
|
src/week3.py
|
animucki/2mmn40
|
c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc
|
[
"Unlicense"
] | null | null | null |
src/week3.py
|
animucki/2mmn40
|
c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc
|
[
"Unlicense"
] | null | null | null |
src/week3.py
|
animucki/2mmn40
|
c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# =============================================================================
# 2mmn40 week 3 report
# version 2017-12-03 afternoon
# BA
#
#
# for BA: Make sure to run in directory
# C:\Users\20165263\Dropbox\tue\2mmn40\src
#
# =============================================================================
import numpy as np
import matplotlib.pyplot as plt
# Objective: simulate a diatomic bond. So we're just integrating f=ma over t.
# To integrate f=ma, we need f, m, v0 and q0.
# f is obtained from potentials. f = -grad u
# m, v0, x0 are all given.
# Data structure required: molecule geometry. So, a list of lists of molecules.
# Each molecule needs to have a mass, an x0, a v0, and an explicit list of bonded partners.
### part 1: diatomic molecule
#molecule parameters
bondList = [[1],[0]]
kbond = 1.0
rbond = 1.0
m = np.array([1.0, 1.0])
#simulation parameters: choice of integrator
# 0 - forward euler
# 1 - verlet
# 2 - velocity verlet
integrator = 0
maxsteps = 1000
# take a small enough timestep
dt = min(np.sqrt( kbond/m )) /100
#initial values
q0 = np.array([[0.0, 0.1, -0.1],
[1.01, 0.9, 0.95]])
v0 = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
#initialize system state
q = q0.copy()
v = v0.copy()
#find distance: r and dr
dr = q - q[:, np.newaxis]
r = np.linalg.norm(dr, axis=2)
# find bond forces
# will throw a RuntimeWarning due to the dividing by zero along diagonal elements of r.
# However, nan_to_num converts those nan's to zero in the result, so ignore the warning.
# A particle cannot exert a force on itself, so that makes sense
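# Harmonic bond: U = 0.5*kbond*(r - rbond)**2, so the force on particle i due to j
# is kbond*(r - rbond)*(q_j - q_i)/r -- attractive when the bond is stretched (r > rbond).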
fbond = np.nan_to_num( -kbond * dr * (rbond - r[:,:,np.newaxis]) / r[:,:,np.newaxis])
ftotal = np.sum(fbond,axis=1)
#integrate a single step:
if integrator == 0:
q += dt*v + dt**2 /(2*m[:,np.newaxis]) *ftotal
v += dt/m[:,np.newaxis] *ftotal
elif integrator == 1:
#Verlet integration step
q += 0
elif integrator == 2:
    #Velocity Verlet integration step
q += 0
else:
    raise ValueError('Unknown integrator selected')
| 25.860759 | 88 | 0.600587 |
d297ad1765b3c02fc47ba37eb3641c25075edb08
| 2,611 |
py
|
Python
|
custom_user/models.py
|
EE/flexdb
|
08a80b9e56201e678ef055af27bdefa6d52bcbf5
|
[
"MIT"
] | null | null | null |
custom_user/models.py
|
EE/flexdb
|
08a80b9e56201e678ef055af27bdefa6d52bcbf5
|
[
"MIT"
] | null | null | null |
custom_user/models.py
|
EE/flexdb
|
08a80b9e56201e678ef055af27bdefa6d52bcbf5
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from flexdb.utils import get_config, PermissionExeption
class CustomUserManager(BaseUserManager):
def create_user(self, username=None, email=None, password=None):
if not username:
raise ValueError('The given username must be set')
user = self.model(username=username, email=email,
is_staff=False, is_active=True, is_superuser=False)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
user = self.create_user(username, email, password)
user.is_staff = True
user.is_active = True
user.is_superuser = True
user.save(using=self._db)
return user
class CustomUser(AbstractBaseUser, PermissionsMixin):
username = models.CharField("user name", max_length=127, unique=True)
first_name = models.CharField("first name", max_length=63, blank=True)
last_name = models.CharField("last name", max_length=63, blank=True)
email = models.EmailField("email", blank=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
def get_short_name(self):
return self.first_name
def get_full_name(self):
full_name = "{} {}".format(self.first_name.encode('utf-8'), self.last_name.encode('utf-8'))
return full_name.strip()
def __unicode__(self):
return self.username
def has_permission(self, app_name, permission_name):
if self.is_superuser:
return True
perm = UsersPermissions.objects.filter(user=self, app_name=app_name, permission_name=permission_name).count()
if perm > 0:
return True
return False
def add_permission(self, app_name, permission_name):
config = get_config(app_name)
if not config.has_permission(permission_name):
raise PermissionExeption(app_name, permission_name)
perm = UsersPermissions(user=self, app_name=app_name, permission_name=permission_name)
perm.save()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = CustomUserManager()
class UsersPermissions(models.Model):
user = models.ForeignKey(CustomUser)
app_name = models.CharField(max_length=127)
permission_name = models.CharField(max_length=127)
def __unicode__(self):
return "{}, {}, {}".format(self.user.username, self.app_name, self.permission_name)
| 35.767123 | 117 | 0.692455 |
c4f147071f47883e5f9814913f96b77f96c62d94
| 392 |
py
|
Python
|
INBa/2015/Semyenov_A_N/task_1_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Semyenov_A_N/task_1_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Semyenov_A_N/task_1_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
#Task 1. Variant 24
#Write a program that reports the occupation and the pen name behind which John Griffith London hides. After printing the information, the program must wait for the user to press Enter to exit.
#Semyenov A.N.
#09.02.2016
print("Джон Гриффит Лондон более известена, как американский писатель, социалист Джек Лондон")
input('\nНажмите Enter для выхода')
| 56 | 211 | 0.816327 |
6fa2d874e51c88d510e932a00cb5d4888d15dbff
| 16,743 |
py
|
Python
|
src/onegov/org/views/resource.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/views/resource.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/views/resource.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import icalendar
import morepath
import sedate
from collections import OrderedDict, namedtuple
from datetime import datetime, timedelta
from isodate import parse_date, ISO8601Error
from itertools import groupby
from morepath.request import Response
from onegov.core.security import Public, Private
from onegov.core.utils import module_path
from onegov.core.orm import as_selectable_from_path
from onegov.form import FormSubmission
from onegov.org.cli import close_ticket
from onegov.reservation import ResourceCollection, Resource, Reservation
from onegov.org import _, OrgApp, utils
from onegov.org.elements import Link
from onegov.org.forms import (
ResourceForm, ResourceCleanupForm, ResourceExportForm
)
from onegov.org.layout import ResourcesLayout, ResourceLayout
from onegov.org.models.resource import DaypassResource, RoomResource, \
ItemResource
from onegov.org.utils import group_by_column, keywords_first
from onegov.ticket import Ticket, TicketCollection
from purl import URL
from sedate import utcnow, standardize_date
from sqlalchemy import and_, select
from sqlalchemy.orm import object_session
from webob import exc
RESOURCE_TYPES = {
'daypass': {
'success': _("Added a new daypass"),
'title': _("New daypass"),
'class': DaypassResource
},
'room': {
'success': _("Added a new room"),
'title': _("New room"),
'class': RoomResource
},
'daily-item': {
'success': _("Added a new item"),
'title': _("New Item"),
'class': ItemResource
}
}
def get_daypass_form(self, request):
return get_resource_form(self, request, 'daypass')
def get_room_form(self, request):
return get_resource_form(self, request, 'room')
def get_item_form(self, request):
return get_resource_form(self, request, 'daily-item')
def get_resource_form(self, request, type=None):
if isinstance(self, ResourceCollection):
assert type is not None
model = RESOURCE_TYPES[type]['class']()
else:
model = self
return model.with_content_extensions(ResourceForm, request)
@OrgApp.html(model=ResourceCollection, template='resources.pt',
permission=Public)
def view_resources(self, request, layout=None):
return {
'title': _("Reservations"),
'resources': group_by_column(
request=request,
query=self.query(),
group_column=Resource.group,
sort_column=Resource.title
),
'layout': layout or ResourcesLayout(self, request)
}
@OrgApp.json(model=ResourceCollection, permission=Public, name='json')
def view_resources_json(self, request):
def transform(resource):
return {
'name': resource.name,
'title': resource.title,
'url': request.link(resource),
}
@request.after
def cache(response):
# only update once every minute
response.cache_control.max_age = 60
return group_by_column(
request=request,
query=self.query(),
group_column=Resource.group,
sort_column=Resource.title,
transform=transform,
default_group=request.translate(_("Reservations"))
)
@OrgApp.form(model=ResourceCollection, name='new-room',
template='form.pt', permission=Private, form=get_room_form)
def handle_new_room(self, request, form, layout=None):
return handle_new_resource(self, request, form, 'room', layout)
@OrgApp.form(model=ResourceCollection, name='new-daypass',
template='form.pt', permission=Private, form=get_daypass_form)
def handle_new_daypass(self, request, form, layout=None):
return handle_new_resource(self, request, form, 'daypass', layout)
@OrgApp.form(model=ResourceCollection, name='new-daily-item',
template='form.pt', permission=Private, form=get_item_form)
def handle_new_resource_item(self, request, form, layout=None):
return handle_new_resource(self, request, form, 'daily-item', layout)
def handle_new_resource(self, request, form, type, layout=None):
if form.submitted(request):
resource = self.add(
title=form.title.data, type=type, timezone='Europe/Zurich'
)
form.populate_obj(resource)
request.success(RESOURCE_TYPES[type]['success'])
return morepath.redirect(request.link(resource))
layout = layout or ResourcesLayout(self, request)
layout.include_editor()
layout.include_code_editor()
layout.breadcrumbs.append(Link(RESOURCE_TYPES[type]['title'], '#'))
return {
'layout': layout,
'title': _(RESOURCE_TYPES[type]['title']),
'form': form,
'form_width': 'large'
}
@OrgApp.form(model=Resource, name='edit', template='form.pt',
permission=Private, form=get_resource_form)
def handle_edit_resource(self, request, form, layout=None):
if form.submitted(request):
form.populate_obj(self)
request.success(_("Your changes were saved"))
return morepath.redirect(request.link(self))
elif not request.POST:
form.process(obj=self)
layout = layout or ResourceLayout(self, request)
layout.include_editor()
layout.include_code_editor()
layout.breadcrumbs.append(Link(_("Edit"), '#'))
return {
'layout': layout,
'title': self.title,
'form': form,
'form_width': 'large'
}
@OrgApp.html(model=Resource, template='resource.pt', permission=Public)
def view_resource(self, request, layout=None):
return {
'title': self.title,
'resource': self,
'layout': layout or ResourceLayout(self, request),
'feed': request.link(self, name='slots'),
'resources_url': request.class_link(ResourceCollection, name='json')
}
@OrgApp.view(model=Resource, request_method='DELETE', permission=Private)
def handle_delete_resource(self, request):
request.assert_valid_csrf_token()
if not self.deletable:
raise exc.HTTPMethodNotAllowed()
tickets = TicketCollection(request.session)
def handle_reservation_tickets(reservation):
ticket = tickets.by_handler_id(reservation.token.hex)
if ticket:
close_ticket(ticket, request.current_user, request)
ticket.create_snapshot(request)
collection = ResourceCollection(request.app.libres_context)
collection.delete(
self,
including_reservations=True,
handle_reservation=handle_reservation_tickets
)
@OrgApp.form(model=Resource, permission=Private, name='cleanup',
form=ResourceCleanupForm, template='resource_cleanup.pt')
def handle_cleanup_allocations(self, request, form, layout=None):
""" Removes all unused allocations between the given dates. """
if form.submitted(request):
start, end = form.data['start'], form.data['end']
count = self.scheduler.remove_unused_allocations(start, end)
request.success(
_("Successfully removed ${count} unused allocations", mapping={
'count': count
})
)
return morepath.redirect(request.link(self))
if request.method == 'GET':
form.start.data, form.end.data = get_date_range(self, request.params)
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Clean up"), '#'))
layout.editbar_links = None
return {
'layout': layout,
'title': _("Clean up"),
'form': form
}
def predict_next_reservation(resource, request, reservations):
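    # Extrapolate the pattern of the user's existing reservations to guess the
    # next date range, then only suggest it if an allocation exists in that range.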
prediction = utils.predict_next_daterange(
tuple((r.display_start(), r.display_end()) for r in reservations)
)
if not prediction:
return None
allocation = resource.scheduler.allocations_in_range(*prediction).first()
if not allocation:
return None
whole_day = sedate.is_whole_day(*prediction, timezone=resource.timezone)
quota = utils.predict_next_value(tuple(r.quota for r in reservations)) or 1
if whole_day:
time = request.translate(_("Whole day"))
else:
time = utils.render_time_range(*prediction)
return {
'url': request.link(allocation, name='reserve'),
'start': prediction[0].isoformat(),
'end': prediction[1].isoformat(),
'quota': quota,
'wholeDay': whole_day,
'time': time
}
@OrgApp.json(model=Resource, name='reservations', permission=Public)
def get_reservations(self, request):
reservations = tuple(self.bound_reservations(request))
prediction = predict_next_reservation(self, request, reservations)
return {
'reservations': [
utils.ReservationInfo(self, reservation, request).as_dict()
for reservation in reservations
],
'prediction': prediction
}
def get_date(text, default):
try:
date = parse_date(text)
return datetime(date.year, date.month, date.day, tzinfo=default.tzinfo)
except (ISO8601Error, TypeError):
return default
def get_date_range(resource, params):
default_start, default_end = resource.calendar_date_range
start = get_date(params.get('start'), default_start)
end = get_date(params.get('end'), default_end)
start = sedate.replace_timezone(
datetime(start.year, start.month, start.day), resource.timezone)
end = sedate.replace_timezone(
datetime(end.year, end.month, end.day), resource.timezone)
if end < start:
start = end
return sedate.align_range_to_day(start, end, resource.timezone)
@OrgApp.html(model=Resource, permission=Private, name='occupancy',
template='resource_occupancy.pt')
def view_occupancy(self, request, layout=None):
# infer the default start/end date from the calendar view parameters
start, end = get_date_range(self, request.params)
query = self.reservations_with_tickets_query(start, end)
query = query.with_entities(
Reservation.start, Reservation.end, Reservation.quota,
Ticket.subtitle, Ticket.id
)
def group_key(record):
return sedate.to_timezone(record[0], self.timezone).date()
occupancy = OrderedDict()
grouped = groupby(query.all(), group_key)
Entry = namedtuple('Entry', ('start', 'end', 'title', 'quota', 'url'))
count = 0
for date, records in grouped:
occupancy[date] = tuple(
Entry(
start=sedate.to_timezone(r[0], self.timezone),
end=sedate.to_timezone(
r[1] + timedelta(microseconds=1), self.timezone),
quota=r[2],
title=r[3],
url=request.class_link(Ticket, {
'handler_code': 'RSV',
'id': r[4]
})
) for r in records
)
count += len(occupancy[date])
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Occupancy"), '#'))
layout.editbar_links = None
utilisation = 100 - self.scheduler.queries.availability_by_range(
start, end, (self.id, )
)
return {
'layout': layout,
'title': _("Occupancy"),
'occupancy': occupancy,
'resource': self,
'start': sedate.to_timezone(start, self.timezone).date(),
'end': sedate.to_timezone(end, self.timezone).date(),
'count': count,
'utilisation': utilisation
}
@OrgApp.html(model=Resource, template='resource-subscribe.pt',
permission=Private, name='subscribe')
def view_resource_subscribe(self, request, layout=None):
url = URL(request.link(self, 'ical'))
url = url.scheme('webcal')
if url.has_query_param('view'):
url = url.remove_query_param('view')
url = url.query_param('access-token', self.access_token)
url = url.as_string()
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Subscribe"), '#'))
return {
'title': self.title,
'resource': self,
'layout': layout,
'url': url
}
@OrgApp.view(model=Resource, permission=Public, name='ical')
def view_ical(self, request):
assert self.access_token is not None
if request.params.get('access-token') != self.access_token:
raise exc.HTTPForbidden()
s = utcnow() - timedelta(days=30)
e = utcnow() + timedelta(days=30 * 12)
cal = icalendar.Calendar()
cal.add('prodid', '-//OneGov//onegov.org//')
cal.add('version', '2.0')
cal.add('method', 'PUBLISH')
cal.add('x-wr-calname', self.title)
cal.add('x-wr-relcalid', self.id.hex)
# refresh every 120 minutes by default (Outlook and maybe others)
cal.add('x-published-ttl', 'PT120M')
# add allocations/reservations
date = utcnow()
path = module_path('onegov.org', 'queries/resource-ical.sql')
stmt = as_selectable_from_path(path)
records = object_session(self).execute(select(stmt.c).where(and_(
stmt.c.resource == self.id, s <= stmt.c.start, stmt.c.start <= e
)))
for r in records:
start = r.start
end = r.end + timedelta(microseconds=1)
evt = icalendar.Event()
evt.add('uid', r.token)
evt.add('summary', r.title)
evt.add('location', self.title)
evt.add('description', r.description)
evt.add('dtstart', standardize_date(start, 'UTC'))
evt.add('dtend', standardize_date(end, 'UTC'))
evt.add('dtstamp', date)
evt.add('url', request.class_link(Ticket, {
'handler_code': r.handler_code,
'id': r.ticket_id
}))
cal.add_component(evt)
return Response(
cal.to_ical(),
content_type='text/calendar',
content_disposition=f'inline; filename={self.name}.ics'
)
@OrgApp.form(model=Resource, permission=Private, name='export',
template='export.pt', form=ResourceExportForm)
def view_export(self, request, form, layout=None):
layout = layout or ResourceLayout(self, request)
layout.breadcrumbs.append(Link(_("Occupancy"), '#'))
layout.editbar_links = None
# XXX this could be turned into a redirect to a GET view, which would
# make it easier for scripts to get this data, but since we don't have
    # a good API story anyway we shouldn't spend too much energy on it here
# - instead we should do this in a comprehensive fashion
if form.submitted(request):
field_order, results = run_export(
resource=self,
start=form.data['start'],
end=form.data['end'],
nested=form.format == 'json',
formatter=layout.export_formatter(form.format)
)
return form.as_export_response(results, self.title, key=field_order)
if request.method == 'GET':
form.start.data, form.end.data = get_date_range(self, request.params)
return {
'layout': layout,
'title': _("Export"),
'form': form,
'explanation': _("Exports the reservations of the given date range.")
}
def run_export(resource, start, end, nested, formatter):
start = sedate.replace_timezone(
datetime(start.year, start.month, start.day),
resource.timezone
)
end = sedate.replace_timezone(
datetime(end.year, end.month, end.day),
resource.timezone
)
start, end = sedate.align_range_to_day(start, end, resource.timezone)
query = resource.reservations_with_tickets_query(start, end)
query = query.join(FormSubmission, Reservation.token == FormSubmission.id)
query = query.with_entities(
Reservation.start,
Reservation.end,
Reservation.quota,
Reservation.email,
Ticket.number,
Ticket.subtitle,
FormSubmission.data,
)
results = []
keywords = ('start', 'end', 'quota', 'email', 'ticket', 'title')
for record in query:
result = OrderedDict()
start = sedate.to_timezone(record[0], resource.timezone)
end = sedate.to_timezone(record[1], resource.timezone)
end += timedelta(microseconds=1)
result['start'] = formatter(start)
result['end'] = formatter(end)
result['quota'] = formatter(record[2])
result['email'] = formatter(record[3])
result['ticket'] = formatter(record[4])
result['title'] = formatter(record[5])
if nested:
result['form'] = {
k: formatter(v)
for k, v in record[6].items()
}
else:
for key, value in record[6].items():
result['form_' + key] = formatter(value)
results.append(result)
return keywords_first(keywords), results
| 30.834254 | 79 | 0.64851 |
96483dee2a97b0169a6bd756558ad5e63ab945a5
| 183 |
py
|
Python
|
Algorithms/Sorting/CountingSort1.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting/CountingSort1.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting/CountingSort1.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
#coding:utf-8
from collections import Counter
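# Counting sort: tally how many times each value 0..99 occurs, then print the
# tallies in order. The leading n is read but not otherwise needed.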
n = int(raw_input())
counter = Counter(raw_input().split())
print ' '.join(map(str, [counter.get(str(i), '0') for i in xrange(100)]))
| 20.333333 | 73 | 0.677596 |
73d61d32567e2c06ecf3c3780d4a761526c69bbf
| 5,767 |
py
|
Python
|
Python/Sonstige_Uebungen/huffman_codierung.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Sonstige_Uebungen/huffman_codierung.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Sonstige_Uebungen/huffman_codierung.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
####
# File: huffman_codierung_v2.py
# Project: Sonstige_Uebungen
#-----
# Created Date: Friday 01.11.2019, 12:25
# Author: Apop85
#-----
# Last Modified: Wed Nov 06 2019
#-----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
#-----
# Description:
####
from copy import deepcopy as copy
def main():
# Hauptmenü erstellen und Auswahl auswerten
menu_items = {1: "Satz codieren", 2: "Huffmanbaum ausgeben", 3: "Zeichencodierung ausgeben", 4: "Alle Daten ausgeben", 0: "Beenden" }
choice = create_menu(menu_items)
if choice == 0:
exit()
data = get_data()
encoded_data, tree, path = encode_data(data)
saved_space = 100-(100/(8*len(data)))*len(encoded_data)
print("\n"*5)
if choice == 1 or choice == 4:
print(encoded_data, "\nSaved space: "+str(round(saved_space, 1))+"%")
if choice == 2 or choice == 4:
for key in tree.keys():
print(str(key)+str(tree[key]).center(100))
if choice == 3 or choice == 4:
for key in path.keys():
print(str(key)+str(path[key]).center(50))
input("Enter zum fortfahren")
def create_menu(menu_items):
# Erstelle Menü anhand der übergebenen Menü-Liste
while True:
print("█"*80)
for key in menu_items.keys():
item = str(key) + ". "+menu_items[key]
item_lenght = len(item)
if key != 0:
print ("█ "+" "*int(round((76-item_lenght)/2, 0))+item+" "*int((76-item_lenght)/2)+" █")
else:
# 0 für exit soll immer am Schluss kommen
zero_item = (copy(item), item_lenght)
# Ausgabe der exit-option am Schluss
print("█"*80)
print ("█ "+" "*int(round((76-zero_item[1])/2, 0))+zero_item[0]+" "*int((76-zero_item[1])/2)+" █")
print("█"*80)
choice = input(" "*30+"Auswahl: ")
# Prüfe Eingabe ob Zahl und vorhanden in der Menüliste
if choice.isdecimal() and int(choice) in menu_items.keys():
return int(choice)
else:
print("░0"*40)
print("Eingabe ungültig")
print("░0"*40)
def get_data():
print("█"*80)
print("█"+"Zu codierenden Satz eingeben".center(78, " ")+"█")
print("█"*80)
data = ""
while data == "":
data = input("Eingabe: ")
return data
def encode_data(data):
characters, character_path = get_character_list(data)
tree, path = create_huffman_tree(characters, character_path)
encoded_data = ""
for character in data:
encoded_data += path[character]
return encoded_data, tree, path
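def decode_data(encoded_data, path):
    # Hedged companion sketch (not part of the original script): invert the
    # per-character code table built above and walk the bit string greedily.
    # Huffman codes are prefix-free, so the first complete match is correct.
    reverse_path = {code: char for char, code in path.items()}
    decoded, buffer = "", ""
    for bit in encoded_data:
        buffer += bit
        if buffer in reverse_path:
            decoded += reverse_path[buffer]
            buffer = ""
    return decoded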
def get_character_list(data):
    # Build a dictionary with the characters and their counts
char_list = {}
character_path = {}
for character in data:
char_list.setdefault(character, 0)
character_path.setdefault(character, "")
char_list[character] += 1
character_values = {}
key_list = []
    # Collect the count values
for key in char_list.keys():
key_list += [char_list[key]]
key_list = sorted(key_list)
    # Invert the dictionary so that the counts become the keys
for value in key_list:
for key in char_list.keys():
if value == char_list[key]:
character_values.setdefault(value, [])
if key not in character_values[value]:
character_values[value] += [key]
break
return character_values, character_path
def create_huffman_tree(data, char_path, huf_tree=None, rest_data=None):
    # Avoid mutable default arguments: they persist across calls, and main()
    # runs in a loop, so fresh containers must be created per top-level call.
    if huf_tree is None:
        huf_tree = {}
    if rest_data is None:
        rest_data = []
    # Read the current key from data
key_list = list(data.keys())
current_key = key_list[0]
huf_tree.setdefault(current_key, [])
huf_tree[current_key] += data[current_key]
    # Combine the character weights in pairs of two
last_insert = 0
for i in range(0, len(data[current_key]), 2):
key_1 = data[current_key][i]
try:
            # If two values are still available, combine them with each other
key_2 = data[current_key][i+1]
for key in char_path.keys():
                # Prefix the first value's path with 1 and the second's with 0
if key in key_1:
char_path[key] = "1"+char_path[key]
elif key in key_2:
char_path[key] = "0"+char_path[key]
new_key = current_key*2
data.setdefault(new_key, [])
data[new_key].insert(i, key_1+key_2)
except:
            # If a leftover value already exists, combine with it; otherwise store this value as the leftover
if len(rest_data) != 0:
new_key = current_key + rest_data[0]
data.setdefault(new_key, [])
data[new_key].insert(last_insert, rest_data[1]+key_1)
last_insert += 2
for key in char_path.keys():
                        # Prefix the previous leftover's path with 1 and the new value's with 0
if key in rest_data[1]:
char_path[key] = "1"+char_path[key]
elif key in key_1:
char_path[key] = "0"+char_path[key]
rest_data = []
else:
rest_data = [current_key, key_1]
    # Delete the consumed data
del data[current_key]
if len(list(data.keys())) > 0:
        # If data remains, run the function again with the reduced data set.
huf_tree, char_path = create_huffman_tree(data, char_path, huf_tree, rest_data)
    # Once no data is left, the process is complete
return huf_tree, char_path
while True:
main()
| 35.164634 | 137 | 0.583492 |
fb535c44c618c72e411e3a0a86c74ce26ece5d4b
| 610 | py | Python | Algorithms/2_Implementation/56.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | ["MIT"] | 1 | 2021-11-25T13:39:30.000Z | 2021-11-25T13:39:30.000Z | Algorithms/2_Implementation/56.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | ["MIT"] | null | null | null | Algorithms/2_Implementation/56.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | ["MIT"] | null | null | null |
# https://www.hackerrank.com/challenges/flatland-space-stations/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the flatlandSpaceStations function below.
def flatlandSpaceStations(n, c):
return max(c[0], n-c[-1]-1, *[(c[z+1]-c[z])//2 for z in range(len(c)-1)])
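# Hedged worked example: n=5, c=[0, 4] -> max(0, 5-4-1, (4-0)//2) = 2,
# i.e. the city in the middle is two units from its nearest space station.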
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nm = input().split()
n = int(nm[0])
m = int(nm[1])
c = list(map(int, input().rstrip().split()))
result = flatlandSpaceStations(n, sorted(c))
fptr.write(str(result) + '\n')
fptr.close()
| 20.333333 | 77 | 0.634426 |
548e6d981e84074347101289d907692f960e9a8e
| 5,090 | py | Python | Backend/views/ebrake.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | ["Apache-2.0"] | 15 | 2020-04-24T20:18:11.000Z | 2022-01-31T21:05:05.000Z | Backend/views/ebrake.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | ["Apache-2.0"] | 2 | 2021-05-19T07:15:09.000Z | 2022-03-07T08:29:34.000Z | Backend/views/ebrake.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | ["Apache-2.0"] | 4 | 2020-04-27T16:20:13.000Z | 2021-02-23T10:39:42.000Z |
import datetime
import re
from decimal import Decimal
from flask import Blueprint, request
from sqlalchemy import text
from cache import cache, make_cache_key
from db import db
from timer import timer
routes = Blueprint('ebrake', __name__, url_prefix='/federal-emergency-brake')
@routes.route('/', methods=['GET'])
@timer
@cache.cached(key_prefix=make_cache_key)
def get_rki_emergency_brake():
""" Returns the incidences and corresponding emergency brake information based on rki.de/inzidenzen
    The calculation of whether a county is under the federal emergency brake is performed here: https://github.com/dbvis-ukon/coronavis/blob/master/Crawler/crawl_rki_incidences.py#L141
---
parameters:
- name: from
type: string
description: A date in ISO format
required: false
default: 2020-01-01
example: 2021-04-20
- name: to
type: string
description: A date in ISO format
required: false
example: 2021-05-20
- name: ids
type: string[]
description: ids (AGS) of the regions, comma separated
required: false
example: 08335,08336
responses:
200:
description:
schema:
type: object
properties:
last_updated:
type: string
example: 2021-04-25T08:39:47
last_checked:
type: string
example: 2021-04-26T02:28:39.523499+02:00
data:
type: array
items:
type: object
properties:
id:
type: string
example: 08335
description: The AGS of the county
name:
type: string
example: Landkreis Konstanz
description: The name of the county
timestamp:
type: string
example: 2021-04-25T00:00:00
description: The reference date
7_day_incidence:
type: number
format: float
example: 152.2851504514
description: The 7 day incidence based on the excel sheet
7_day_cases:
type: number
format: int
example: 436
description: The 7 day cases based on the excel sheet
ebrake100:
type: boolean
example: true
description: true iff the county is currently in the ebrake(100), false otherwise; may be null
ebrake150:
type: boolean
example: true
description: true iff the county is currently in the ebrake(150), false otherwise; may be null
ebrake165:
type: boolean
example: true
description: true iff the county is currently in the ebrake(165), false otherwise; may be null
holiday:
type: string
example: Erster Mai
description: The name of the holiday (German) or null iff no holiday
"""
from_time = '2020-01-01'
to_time = (datetime.datetime.now() + datetime.timedelta(days=10)).isoformat()
if request.args.get('from'):
from_time = request.args.get('from')
if request.args.get('to'):
to_time = request.args.get('to')
sql_ids = ''
if request.args.get('ids'):
ids = request.args.get('ids').split(',')
sanitized_sql = []
for id in ids:
id = re.sub('[^0-9]+', '', id)
sanitized_sql.append(f"(id LIKE '{id}%')")
sql_ids = f"AND ({' OR '.join(sanitized_sql)})"
sql_stmt = f'''
SELECT
e.datenbestand,
e.updated_at,
e.id,
e.timestamp,
e."7_day_incidence",
e."7_day_cases",
e.ebrake100,
e.ebrake165,
(le.bez || ' ' || le.name) as le_name,
e.ebrake150,
e.holiday
FROM ebrake_data e
JOIN landkreise_extended le ON e.id = le.ids
WHERE e.timestamp >= :fromtime
AND e.timestamp <= :totime
{sql_ids}
'''
res = db.engine.execute(text(sql_stmt), fromtime=from_time, totime=to_time).fetchall()
entries = []
for d in res:
entries.append({
'id': d[2],
'timestamp': d[3].isoformat(),
'holiday': d[10],
'7_day_incidence': float(d[4]) if isinstance(d[4], Decimal) else None,
'7_day_cases': int(d[5]) if isinstance(d[4], Decimal) else None,
'ebrake100': d[6],
'ebrake150': d[9],
'ebrake165': d[7],
'name': d[8]
})
return {
'last_updated': res[0][0].isoformat(),
'last_checked': res[0][1].isoformat(),
'data': entries
}, 200
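# Hedged client sketch (host and parameters are illustrative; the response
# shape follows the docstring above):
# import requests
# r = requests.get("http://localhost:5000/federal-emergency-brake/",
#                  params={"from": "2021-04-20", "ids": "08335,08336"})
# for entry in r.json()["data"]:
#     print(entry["name"], entry["7_day_incidence"], entry["ebrake100"])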
| 32.628205 | 174 | 0.519843 |
b77f4a29b782d4d4a5ce786e6de91a54e14ad7b5 | 78 | py | Python | BeautifulData/Admin/admin.py | zhangyafeii/Flask | 9c9a5ea282f77aabcda838796dad2411af9b519f | ["MIT"] | null | null | null | BeautifulData/Admin/admin.py | zhangyafeii/Flask | 9c9a5ea282f77aabcda838796dad2411af9b519f | ["MIT"] | null | null | null | BeautifulData/Admin/admin.py | zhangyafeii/Flask | 9c9a5ea282f77aabcda838796dad2411af9b519f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Datetime: 2019/1/2
@Author: Zhang Yafei
"""
| 7.090909 | 23 | 0.512821 |
b7fac334c816a9b276870a5d334dcba7b7eee27f
| 430 |
py
|
Python
|
network engineering course practice/medium term practice/front/sort_urls.py
|
MU001999/codeex
|
23e300f9121a8d6bf4f4c9ec103510193808e9ba
|
[
"MIT"
] | 2 |
2019-01-27T14:16:09.000Z
|
2019-05-25T10:05:24.000Z
|
network engineering course practice/medium term practice/front/sort_urls.py
|
MU001999/codeex
|
23e300f9121a8d6bf4f4c9ec103510193808e9ba
|
[
"MIT"
] | null | null | null |
network engineering course practice/medium term practice/front/sort_urls.py
|
MU001999/codeex
|
23e300f9121a8d6bf4f4c9ec103510193808e9ba
|
[
"MIT"
] | 1 |
2020-11-05T05:17:28.000Z
|
2020-11-05T05:17:28.000Z
|
from model import *
from collections import defaultdict
def sort_urls(clientip):
urlcnt = defaultdict(int)
for request in Request.where(clientip=clientip).select():
urlcnt[request.host + request.url] += 1
    # Sort (url, count) pairs together: a zip object is not subscriptable in
    # Python 3, and sorting keys and values separately can mispair on ties.
    return sorted(urlcnt.items(), key=lambda kv: kv[1], reverse=True)
if __name__ == "__main__":
for kv in sort_urls("211.71.149.249")[:10]:
print(kv[0], kv[1])
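# Hedged equivalence note: the same top-10 could be taken from a Counter built
# over the host+url strings, e.g. collections.Counter(hosts).most_common(10).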
| 25.294118 | 99 | 0.653488 |
4db9016a9bafa11c962fbc504f35a58a3909dc06 | 1,019 | py | Python | reindex.py | cyrillk/elastic-reindex | 417e41a0d817c2ff30fa95d5ddbf064093d1c86b | ["MIT"] | null | null | null | reindex.py | cyrillk/elastic-reindex | 417e41a0d817c2ff30fa95d5ddbf064093d1c86b | ["MIT"] | null | null | null | reindex.py | cyrillk/elastic-reindex | 417e41a0d817c2ff30fa95d5ddbf064093d1c86b | ["MIT"] | null | null | null |
#!/usr/bin/env python
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
from elasticsearch.helpers import reindex
host = 'localhost'
source = 'hotels_v4'
target = 'hotels_v5'
def parse_args():
parser = ArgumentParser(description="ES reindex")
parser.add_argument('-a', '--apply', action='store_true',
help="apply reindex")
return parser.parse_args()
def print_count(msg, count):
print('*** ' * 3 + msg + ' ***' * 3)
print(count)
print('')
def main():
args = parse_args()
    should_apply = args.apply
    print("Apply reindex:", should_apply)
es = Elasticsearch([{'host': host}])
print_count("Source [before]", es.count(index=source))
print_count("Target [before]", es.count(index=target))
    if should_apply:
reindex(es, source, target, chunk_size=5000, scroll='30m')
print_count("Source [after]", es.count(index=source))
print_count("Target [after]", es.count(index=target))
if __name__ == "__main__":
main()
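# Hedged usage sketch (host and index names per the constants above):
# python reindex.py          # dry run: only prints document counts
# python reindex.py --apply  # performs the actual reindex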
| 22.644444 | 66 | 0.649657 |
4dfc4be48a08e00d1232b466f9b2a27a83e7aec6 | 5,509 | py | Python | modules/datahelper.py | metxchris/MMM-Explorer | 251b4d7af56241882611bc47e94ec2923e4be8da | ["MIT"] | null | null | null | modules/datahelper.py | metxchris/MMM-Explorer | 251b4d7af56241882611bc47e94ec2923e4be8da | ["MIT"] | null | null | null | modules/datahelper.py | metxchris/MMM-Explorer | 251b4d7af56241882611bc47e94ec2923e4be8da | ["MIT"] | null | null | null |
"""Acts as an interface between the various classes that contain data
This module provides the primary function to load variable data from a CDF and
into Variables class objects. Additionally, it allows for interfacing
between data classes of different types.
Example Usage:
* mmm_vars, cdf_vars, raw_cdf_vars = initialize_variables()
"""
from copy import deepcopy
import modules.variables as variables
import modules.controls as controls
import modules.calculations as calculations
import modules.conversions as conversions
import modules.cdfreader as cdfreader
import modules.utils as utils
from modules.enums import SaveType, ScanType
def initialize_variables(options):
'''
Initializes all input variables needed to run the MMM Driver and plot
variable profiles
Parameters:
* options (Options): Contains user specified options
Returns:
* mmm_vars (InputVariables): All calculated variables
* cdf_vars (InputVariables): All interpolated CDF variables
* raw_cdf_vars (InputVariables): All unedited CDF variables
'''
raw_cdf_vars = cdfreader.extract_data(options)
cdf_vars = conversions.convert_variables(raw_cdf_vars)
mmm_vars = calculations.calculate_new_variables(cdf_vars)
return mmm_vars, cdf_vars, raw_cdf_vars
def deepcopy_data(obj):
'''
Creates a deepcopy of the given object and reference between their
options
A deepcopy is needed to avoid creating a reference between two object
classes. However, we do want to create a reference between the options
stored in each class, so that only one options object exists between all
copied objects.
Parameters:
* obj (InputControls | InputVariables | OutputVariables): The obj to deepcopy
Returns
* new_obj (InputControls | InputVariables | OutputVariables): deepcopy of obj
'''
new_obj = deepcopy(obj)
new_obj.options = obj.options
return new_obj
def get_all_rho_data(options):
'''
Creates dictionaries that map rho values to InputVariables and
OutputVariables objects
Data is loaded from CSVs stored in the rho folder of the runid, scan_num,
and var_to_scan, which are supplied via the options parameter. A list of
rho values for the scan is created from the filenames of the CSVs.
Parameters:
* options (Options): Object containing user options
Returns:
* input_vars_dict (dict): Dictionary mapping rho values (str) to InputVariables input data
* output_vars_dict (dict): Dictionary mapping rho values (str) to OutputVariables data
* input_controls (InputControls or None): InputControls object with np.ndarray for values
'''
input_vars_dict, output_vars_dict = {}, {}
rho_values = utils.get_rho_strings(options, SaveType.OUTPUT)
# Stores InputVariables and OutputVariables data objects for each rho_value
for rho in rho_values:
input_vars = variables.InputVariables(options)
output_vars = variables.OutputVariables(options)
input_vars.load_from_csv(SaveType.INPUT, rho_value=rho)
input_vars.load_from_csv(SaveType.ADDITIONAL, rho_value=rho)
output_vars.load_from_csv(SaveType.OUTPUT, rho_value=rho)
input_vars_dict[rho] = input_vars
output_vars_dict[rho] = output_vars
# Get control_file from rho folder (there's at most one control file, as controls are independent of rho values)
input_controls = controls.InputControls(options)
input_controls.load_from_csv(use_rho=True)
return input_vars_dict, output_vars_dict, input_controls
def get_data_objects(options, scan_factor=None, rho_value=None):
'''
Get InputVariables, OutputVariables, and InputControls data objects
Parameters:
* options (Options): Object containing user options
* scan_factor (float): The scan factor to load (Optional)
* rho_value (str): The rho value to load (Optional)
Returns:
* input_vars (InputVariables): Object containing base input variable data
* output_vars (OutputVariables): Object containing base output variable data
* input_controls (InputControls): Object containing base input control data
'''
input_vars = variables.InputVariables(options)
output_vars = variables.OutputVariables(options)
input_controls = controls.InputControls(options)
input_vars.load_from_csv(SaveType.INPUT, scan_factor, rho_value)
input_vars.load_from_csv(SaveType.ADDITIONAL, scan_factor, rho_value)
output_vars.load_from_csv(SaveType.OUTPUT, scan_factor, rho_value)
    use_rho = rho_value is not None
input_controls.load_from_csv(scan_factor=scan_factor, use_rho=use_rho)
return input_vars, output_vars, input_controls
def get_scan_type(var_to_scan):
'''
Gets the scan type from the variable being scanned
Parameters:
* var_to_scan (str): The variable being scanned
* options (Options): Object containing user options
Raises:
* TypeError: If var_to_scan is not a member of InputVariables or InputControls
'''
scan_type = ScanType.NONE
if var_to_scan is not None:
if hasattr(variables.InputVariables(), var_to_scan):
scan_type = ScanType.VARIABLE if var_to_scan != 'time' else ScanType.TIME
elif hasattr(controls.InputControls(), var_to_scan):
scan_type = ScanType.CONTROL
else:
raise TypeError(f'Variable {var_to_scan} is not defined under InputVariables or InputControls')
return scan_type
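# Hedged usage sketch of the dispatch above (return values per ScanType):
# get_scan_type('time')        -> ScanType.TIME
# get_scan_type(None)          -> ScanType.NONE
# get_scan_type('not_defined') -> raises TypeError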
| 35.089172 | 116 | 0.747323 |
12a42b5108ecb95c37d69a4cba2335f0740469dc | 2,721 | py | Python | official/cv/crnn_seq2seq_ocr/preprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/crnn_seq2seq_ocr/preprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/crnn_seq2seq_ocr/preprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
preprocess.
"""
import os
import numpy as np
from src.dataset import create_ocr_val_dataset
from src.model_utils.config import config
def get_bin():
'''generate bin files.'''
prefix = "fsns.mindrecord"
if config.enable_modelarts:
mindrecord_file = os.path.join(config.data_path, prefix + "0")
else:
mindrecord_file = os.path.join(config.test_data_dir, prefix + "0")
print("mindrecord_file", mindrecord_file)
dataset = create_ocr_val_dataset(mindrecord_file, config.eval_batch_size)
data_loader = dataset.create_dict_iterator(num_epochs=1, output_numpy=True)
print("Dataset creation Done!")
sos_id = config.characters_dictionary.go_id
images_path = os.path.join(config.pre_result_path, "00_images")
decoder_input_path = os.path.join(config.pre_result_path, "01_decoder_input")
decoder_hidden_path = os.path.join(config.pre_result_path, "02_decoder_hidden")
annotation_path = os.path.join(config.pre_result_path, "annotation")
os.makedirs(images_path)
os.makedirs(decoder_input_path)
os.makedirs(decoder_hidden_path)
os.makedirs(annotation_path)
for i, data in enumerate(data_loader):
annotation = data["annotation"]
images = data["image"].astype(np.float32)
decoder_hidden = np.zeros((1, config.eval_batch_size, config.decoder_hidden_size),
dtype=np.float16)
decoder_input = (np.ones((config.eval_batch_size, 1)) * sos_id).astype(np.int32)
file_name = "ocr_bs" + str(config.eval_batch_size) + "_" + str(i) + ".bin"
images.tofile(os.path.join(images_path, file_name))
decoder_input.tofile(os.path.join(decoder_input_path, file_name))
decoder_hidden.tofile(os.path.join(decoder_hidden_path, file_name))
file_name = "ocr_bs" + str(config.eval_batch_size) + "_" + str(i) + ".npy"
np.save(os.path.join(annotation_path, file_name), annotation)
print("=" * 10, "export bin files finished.", "=" * 10)
if __name__ == '__main__':
get_bin()
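# Hedged read-back sketch (file name and batch size are illustrative; dtypes
# follow the writer above):
# import numpy as np
# images = np.fromfile("00_images/ocr_bs32_0.bin", dtype=np.float32)
# decoder_input = np.fromfile("01_decoder_input/ocr_bs32_0.bin", dtype=np.int32)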
| 40.61194 | 90 | 0.693128 |
42320070dbb2e29ff5b3422eced2aa9425c78a23 | 69,514 | py | Python | feedcrawler/web.py | rix1337/RSScrawler | 5a1c067148310c03321329d3fa409a4e4144fce7 | ["MIT"] | 63 | 2016-03-21T21:35:07.000Z | 2021-03-20T22:50:34.000Z | feedcrawler/web.py | rix1337/RSScrawler | 5a1c067148310c03321329d3fa409a4e4144fce7 | ["MIT"] | 200 | 2016-03-15T21:45:26.000Z | 2021-04-03T10:47:45.000Z | feedcrawler/web.py | rix1337/RSScrawler | 5a1c067148310c03321329d3fa409a4e4144fce7 | ["MIT"] | 25 | 2016-03-15T20:44:33.000Z | 2020-10-25T09:43:48.000Z |
# -*- coding: utf-8 -*-
# FeedCrawler
# Projekt von https://github.com/rix1337
import ast
import json
import os
import re
import sys
import time
from functools import wraps
from flask import Flask, request, redirect, send_from_directory, render_template, jsonify, Response
from passlib.hash import pbkdf2_sha256
from requests.packages.urllib3 import disable_warnings as disable_request_warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from waitress import serve
import feedcrawler.myjdapi
import feedcrawler.search.shared.content_all
import feedcrawler.search.shared.content_shows
from feedcrawler import internal
from feedcrawler import version
from feedcrawler.common import Unbuffered
from feedcrawler.common import decode_base64
from feedcrawler.common import get_to_decrypt
from feedcrawler.common import is_device
from feedcrawler.common import remove_decrypt
from feedcrawler.common import rreplace
from feedcrawler.config import CrawlerConfig
from feedcrawler.db import FeedDb
from feedcrawler.db import ListDb
from feedcrawler.myjd import check_device
from feedcrawler.myjd import do_add_decrypted
from feedcrawler.myjd import do_package_replace
from feedcrawler.myjd import download
from feedcrawler.myjd import get_device
from feedcrawler.myjd import get_if_one_device
from feedcrawler.myjd import get_info
from feedcrawler.myjd import get_packages_in_linkgrabber
from feedcrawler.myjd import get_state
from feedcrawler.myjd import jdownloader_pause
from feedcrawler.myjd import jdownloader_start
from feedcrawler.myjd import jdownloader_stop
from feedcrawler.myjd import move_to_downloads
from feedcrawler.myjd import package_merge
from feedcrawler.myjd import remove_from_linkgrabber
from feedcrawler.myjd import retry_decrypt
from feedcrawler.myjd import update_jdownloader
from feedcrawler.notifiers import notify
from feedcrawler.search import search
helper_active = False
already_added = []
def app_container():
global helper_active
global already_added
base_dir = '.'
if getattr(sys, 'frozen', False):
base_dir = os.path.join(sys._MEIPASS)
app = Flask(__name__, template_folder=os.path.join(base_dir, 'web'))
app.config["TEMPLATES_AUTO_RELOAD"] = True
general = CrawlerConfig('FeedCrawler')
if general.get("prefix"):
prefix = '/' + general.get("prefix")
else:
prefix = ""
def check_auth(config, username, password):
auth_hash = config.get("auth_hash")
if auth_hash and "$pbkdf2-sha256" not in auth_hash:
auth_hash = pbkdf2_sha256.hash(auth_hash)
config.save(
"auth_hash", to_str(auth_hash))
return username == config.get("auth_user") and pbkdf2_sha256.verify(password, auth_hash)
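    # Hedged note on the scheme above: a fresh value for auth_hash can be
    # produced with pbkdf2_sha256.hash("secret"); verify() then checks a
    # submitted password against it without ever storing the plaintext.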
def authenticate():
return Response(
'''<html>
<head><title>401 Authorization Required</title></head>
<body bgcolor="white">
<center><h1>401 Authorization Required</h1></center>
<hr><center>FeedCrawler</center>
</body>
</html>
''', 401,
{'WWW-Authenticate': 'Basic realm="FeedCrawler"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
config = CrawlerConfig('FeedCrawler')
if config.get("auth_user") and config.get("auth_hash"):
auth = request.authorization
if not auth or not check_auth(config, auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
def to_int(i):
if isinstance(i, bytes):
i = i.decode()
i = str(i).strip().replace("None", "")
return int(i) if i else ""
def to_float(i):
i = str(i).strip().replace("None", "")
return float(i) if i else ""
def to_str(i):
return '' if i is None else str(i)
def to_bool(i):
        return i == "True"
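    # Hedged examples of the coercion helpers above:
    # to_int(" 5 ") -> 5; to_int(None) -> "" (missing values become "")
    # to_bool("True") -> True; to_bool("true") -> False (exact match only)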
if prefix:
@app.route('/')
@requires_auth
def index_prefix():
return redirect(prefix)
@app.route(prefix + '/<path:path>')
@requires_auth
def send_html(path):
return send_from_directory(os.path.join(base_dir, 'web'), path)
@app.route(prefix + '/')
@requires_auth
def index():
return render_template('index.html')
@app.route(prefix + "/api/log/", methods=['GET', 'DELETE'])
@requires_auth
def get_delete_log():
if request.method == 'GET':
try:
log = []
if os.path.isfile(internal.log_file):
logfile = open(internal.log_file)
i = 0
for line in reversed(logfile.readlines()):
if line and line != "\n":
payload = [i]
line = line.replace("]", "")
line = line.replace("[", "")
line = re.sub(r",\d{3}", "", line)
line = line.split(" - ")
for line_part in line:
payload.append(line_part)
log.append(payload)
i += 1
return jsonify(
{
"log": log,
}
)
except:
return "Failed", 400
elif request.method == 'DELETE':
try:
open(internal.log_file, 'w').close()
return "Success", 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/log_entry/<b64_entry>", methods=['DELETE'])
@requires_auth
def get_delete_log_entry(b64_entry):
if request.method == 'DELETE':
try:
entry = decode_base64(b64_entry)
log = []
if os.path.isfile(internal.log_file):
logfile = open(internal.log_file)
for line in reversed(logfile.readlines()):
if line and line != "\n":
if entry not in line:
log.append(line)
log = "".join(reversed(log))
with open(internal.log_file, 'w') as file:
file.write(log)
return "Success", 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/settings/", methods=['GET', 'POST'])
@requires_auth
def get_post_settings():
if request.method == 'GET':
try:
general_conf = CrawlerConfig('FeedCrawler')
hosters = CrawlerConfig('Hosters')
alerts = CrawlerConfig('Notifications')
ombi = CrawlerConfig('Ombi')
crawljobs = CrawlerConfig('Crawljobs')
mb_conf = CrawlerConfig('ContentAll')
sj_conf = CrawlerConfig('ContentShows')
dj_conf = CrawlerConfig('CustomDJ')
return jsonify(
{
"settings": {
"general": {
"auth_user": general_conf.get("auth_user"),
"auth_hash": general_conf.get("auth_hash"),
"myjd_user": general_conf.get("myjd_user"),
"myjd_pass": general_conf.get("myjd_pass"),
"myjd_device": general_conf.get("myjd_device"),
"port": to_int(general_conf.get("port")),
"prefix": general_conf.get("prefix"),
"interval": to_int(general_conf.get("interval")),
"flaresolverr": general_conf.get("flaresolverr"),
"english": general_conf.get("english"),
"surround": general_conf.get("surround"),
"closed_myjd_tab": general_conf.get("closed_myjd_tab"),
"one_mirror_policy": general_conf.get("one_mirror_policy"),
"packages_per_myjd_page": to_int(general_conf.get("packages_per_myjd_page")),
"prefer_dw_mirror": general_conf.get("prefer_dw_mirror"),
},
"hosters": {
"rapidgator": hosters.get("rapidgator"),
"turbobit": hosters.get("turbobit"),
"uploaded": hosters.get("uploaded"),
"zippyshare": hosters.get("zippyshare"),
"oboom": hosters.get("oboom"),
"ddl": hosters.get("ddl"),
"filefactory": hosters.get("filefactory"),
"uptobox": hosters.get("uptobox"),
"onefichier": hosters.get("1fichier"),
"filer": hosters.get("filer"),
"nitroflare": hosters.get("nitroflare"),
"ironfiles": hosters.get("ironfiles"),
"k2s": hosters.get("k2s"),
},
"alerts": {
"pushbullet": alerts.get("pushbullet"),
"pushover": alerts.get("pushover"),
"homeassistant": alerts.get("homeassistant"),
"telegram": alerts.get("telegram"),
},
"ombi": {
"url": ombi.get("url"),
"api": ombi.get("api"),
},
"crawljobs": {
"autostart": crawljobs.get("autostart"),
"subdir": crawljobs.get("subdir"),
},
"mb": {
"quality": mb_conf.get("quality"),
"search": mb_conf.get("search"),
"ignore": mb_conf.get("ignore"),
"regex": mb_conf.get("regex"),
"imdb_score": to_float(mb_conf.get("imdb")),
"imdb_year": to_int(mb_conf.get("imdbyear")),
"force_dl": mb_conf.get("enforcedl"),
"cutoff": mb_conf.get("cutoff"),
"hevc_retail": mb_conf.get("hevc_retail"),
"retail_only": mb_conf.get("retail_only"),
"hoster_fallback": mb_conf.get("hoster_fallback"),
},
"sj": {
"quality": sj_conf.get("quality"),
"ignore": sj_conf.get("rejectlist"),
"regex": sj_conf.get("regex"),
"hevc_retail": sj_conf.get("hevc_retail"),
"retail_only": sj_conf.get("retail_only"),
"hoster_fallback": sj_conf.get("hoster_fallback"),
},
"mbsj": {
"enabled": mb_conf.get("crawlseasons"),
"quality": mb_conf.get("seasonsquality"),
"packs": mb_conf.get("seasonpacks"),
"source": mb_conf.get("seasonssource"),
},
"dj": {
"quality": dj_conf.get("quality"),
"ignore": dj_conf.get("rejectlist"),
"regex": dj_conf.get("regex"),
"hoster_fallback": dj_conf.get("hoster_fallback"),
}
}
}
)
except:
return "Failed", 400
if request.method == 'POST':
try:
data = request.json
section = CrawlerConfig("FeedCrawler")
section.save(
"auth_user", to_str(data['general']['auth_user']))
auth_hash = data['general']['auth_hash']
if auth_hash and "$pbkdf2-sha256" not in auth_hash:
auth_hash = pbkdf2_sha256.hash(auth_hash)
section.save(
"auth_hash", to_str(auth_hash))
myjd_user = to_str(data['general']['myjd_user'])
myjd_pass = to_str(data['general']['myjd_pass'])
myjd_device = to_str(data['general']['myjd_device'])
if myjd_user and myjd_pass and not myjd_device:
myjd_device = get_if_one_device(myjd_user, myjd_pass)
if myjd_device:
print(u"Gerätename " + myjd_device + " automatisch ermittelt.")
if myjd_user and myjd_pass and myjd_device:
device_check = check_device(myjd_user, myjd_pass, myjd_device)
if not device_check:
myjd_device = get_if_one_device(myjd_user, myjd_pass)
if myjd_device:
print(u"Gerätename " + myjd_device + " automatisch ermittelt.")
else:
print(u"Fehlerhafte My JDownloader Zugangsdaten. Bitte vor dem Speichern prüfen!")
return "Failed", 400
section.save("myjd_user", myjd_user)
section.save("myjd_pass", myjd_pass)
section.save("myjd_device", myjd_device)
section.save("port", to_str(data['general']['port']))
section.save("prefix", to_str(data['general']['prefix']).lower())
interval = to_str(data['general']['interval'])
if to_int(interval) < 5:
interval = '5'
section.save("interval", interval)
section.save("flaresolverr", to_str(data['general']['flaresolverr']))
section.save("english", to_str(data['general']['english']))
section.save("surround", to_str(data['general']['surround']))
section.save("closed_myjd_tab", to_str(data['general']['closed_myjd_tab']))
section.save("one_mirror_policy", to_str(data['general']['one_mirror_policy']))
section.save("packages_per_myjd_page", to_str(data['general']['packages_per_myjd_page']))
section.save("prefer_dw_mirror", to_str(data['general']['prefer_dw_mirror']))
section = CrawlerConfig("Crawljobs")
section.save("autostart", to_str(data['crawljobs']['autostart']))
section.save("subdir", to_str(data['crawljobs']['subdir']))
section = CrawlerConfig("Notifications")
section.save("pushbullet", to_str(data['alerts']['pushbullet']))
section.save("pushover", to_str(data['alerts']['pushover']))
section.save("telegram", to_str(data['alerts']['telegram']))
section.save("homeassistant", to_str(data['alerts']['homeassistant']))
section = CrawlerConfig("Hosters")
section.save("rapidgator", to_str(data['hosters']['rapidgator']))
section.save("turbobit", to_str(data['hosters']['turbobit']))
section.save("uploaded", to_str(data['hosters']['uploaded']))
section.save("zippyshare", to_str(data['hosters']['zippyshare']))
section.save("oboom", to_str(data['hosters']['oboom']))
section.save("ddl", to_str(data['hosters']['ddl']))
section.save("filefactory", to_str(data['hosters']['filefactory']))
section.save("uptobox", to_str(data['hosters']['uptobox']))
section.save("1fichier", to_str(data['hosters']['onefichier']))
section.save("filer", to_str(data['hosters']['filer']))
section.save("nitroflare", to_str(data['hosters']['nitroflare']))
section.save("ironfiles", to_str(data['hosters']['ironfiles']))
section.save("k2s", to_str(data['hosters']['k2s']))
section = CrawlerConfig("Ombi")
section.save("url", to_str(data['ombi']['url']))
section.save("api", to_str(data['ombi']['api']))
section = CrawlerConfig("ContentAll")
section.save("quality", to_str(data['mb']['quality']))
section.save("search", to_str(data['mb']['search']))
section.save("ignore", to_str(data['mb']['ignore']).lower())
section.save("regex", to_str(data['mb']['regex']))
section.save("cutoff", to_str(data['mb']['cutoff']))
section.save("enforcedl", to_str(data['mb']['force_dl']))
section.save("crawlseasons", to_str(data['mbsj']['enabled']))
section.save("seasonsquality", to_str(data['mbsj']['quality']))
section.save("seasonpacks", to_str(data['mbsj']['packs']))
section.save("seasonssource", to_str(data['mbsj']['source']).lower())
section.save("imdbyear", to_str(data['mb']['imdb_year']))
imdb = to_str(data['mb']['imdb_score'])
if re.match('[^0-9]', imdb):
imdb = 0.0
elif imdb == '':
imdb = 0.0
else:
imdb = round(float(to_str(data['mb']['imdb_score']).replace(",", ".")), 1)
if imdb > 10:
imdb = 10.0
section.save("imdb", to_str(imdb))
section.save("hevc_retail", to_str(data['mb']['hevc_retail']))
section.save("retail_only", to_str(data['mb']['retail_only']))
section.save("hoster_fallback", to_str(data['mb']['hoster_fallback']))
section = CrawlerConfig("ContentShows")
section.save("quality", to_str(data['sj']['quality']))
section.save("rejectlist", to_str(data['sj']['ignore']).lower())
section.save("regex", to_str(data['sj']['regex']))
section.save("hevc_retail", to_str(data['sj']['hevc_retail']))
section.save("retail_only", to_str(data['sj']['retail_only']))
section.save("hoster_fallback", to_str(data['sj']['hoster_fallback']))
section = CrawlerConfig("CustomDJ")
section.save("quality", to_str(data['dj']['quality']))
section.save("rejectlist", to_str(data['dj']['ignore']).lower())
section.save("regex", to_str(data['dj']['regex']))
section.save("hoster_fallback", to_str(data['dj']['hoster_fallback']))
return "Success", 201
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/version/", methods=['GET'])
@requires_auth
def get_version():
if request.method == 'GET':
try:
ver = "v." + version.get_version()
if version.update_check()[0]:
updateready = True
updateversion = version.update_check()[1]
print(u'Update steht bereit (' + updateversion +
')! Weitere Informationen unter https://github.com/rix1337/FeedCrawler/releases/latest')
else:
updateready = False
return jsonify(
{
"version": {
"ver": ver,
"update_ready": updateready,
"docker": internal.docker,
"helper_active": helper_active
}
}
)
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/crawltimes/", methods=['GET'])
@requires_auth
def get_crawltimes():
if request.method == 'GET':
try:
crawltimes = FeedDb("crawltimes")
return jsonify(
{
"crawltimes": {
"active": to_bool(crawltimes.retrieve("active")),
"start_time": to_float(crawltimes.retrieve("start_time")),
"end_time": to_float(crawltimes.retrieve("end_time")),
"total_time": crawltimes.retrieve("total_time"),
"next_start": to_float(crawltimes.retrieve("next_start")),
}
}
)
except:
time.sleep(3)
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/hostnames/", methods=['GET'])
@requires_auth
def get_hostnames():
if request.method == 'GET':
try:
hostnames = CrawlerConfig('Hostnames')
dw = hostnames.get('dw')
fx = hostnames.get('fx')
sj = hostnames.get('sj')
dj = hostnames.get('dj')
sf = hostnames.get('sf')
ww = hostnames.get('ww')
nk = hostnames.get('nk')
by = hostnames.get('by')
dw = dw.replace("d", "D", 2).replace("l", "L", 1).replace("w", "W", 1)
fx = fx.replace("f", "F", 1).replace("d", "D", 1).replace("x", "X", 1)
sj = sj.replace("s", "S", 1).replace("j", "J", 1)
dj = dj.replace("d", "D", 1).replace("j", "J", 1)
sf = sf.replace("s", "S", 1).replace("f", "F", 1)
ww = ww.replace("w", "W", 2)
nk = nk.replace("n", "N", 1).replace("k", "K", 1)
by = by.replace("b", "B", 1)
bl = ' / '.join(list(filter(None, [dw, fx, ww, nk, by])))
s = ' / '.join(list(filter(None, [dw, sj, sf])))
sjbl = ' / '.join(list(filter(None, [s, bl])))
if not dw:
dw = "Nicht gesetzt!"
if not fx:
fx = "Nicht gesetzt!"
if not sj:
sj = "Nicht gesetzt!"
if not dj:
dj = "Nicht gesetzt!"
if not sf:
sf = "Nicht gesetzt!"
if not ww:
ww = "Nicht gesetzt!"
if not nk:
nk = "Nicht gesetzt!"
if not by:
by = "Nicht gesetzt!"
if not bl:
bl = "Nicht gesetzt!"
if not s:
s = "Nicht gesetzt!"
if not sjbl:
sjbl = "Nicht gesetzt!"
return jsonify(
{
"hostnames": {
"sj": sj,
"dj": dj,
"sf": sf,
"by": by,
"dw": dw,
"fx": fx,
"nk": nk,
"ww": ww,
"bl": bl,
"s": s,
"sjbl": sjbl
}
}
)
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/blocked_sites/", methods=['GET'])
@requires_auth
def get_blocked_sites():
if request.method == 'GET':
try:
def check(site, db):
return to_bool(str(db.retrieve(site)).replace("Blocked", "True"))
db_status = FeedDb('site_status')
return jsonify(
{
"site_status": {
"SJ": check("SJ", db_status),
"DJ": check("DJ", db_status),
"SF": check("SF", db_status),
"BY": check("BY", db_status),
"DW": check("DW", db_status),
"FX": check("FX", db_status),
"HW": check("HW", db_status),
"NK": check("NK", db_status),
"WW": check("WW", db_status)
}
}
)
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/start_now/", methods=['POST'])
@requires_auth
def start_now():
if request.method == 'POST':
try:
FeedDb('crawltimes').store("startnow", "True")
i = 3
started = False
while i > 0:
if not FeedDb('crawltimes').retrieve("startnow"):
started = True
break
i -= 1
time.sleep(5)
if started:
return "Success", 200
else:
return "Failed", 400
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/search/<title>", methods=['GET'])
@requires_auth
def search_title(title):
if request.method == 'GET':
try:
results = search.get(title)
return jsonify(
{
"results": {
"bl": results[0],
"sj": results[1]
}
}
), 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/download_movie/<title>", methods=['POST'])
@requires_auth
def download_movie(title):
if request.method == 'POST':
try:
payload = feedcrawler.search.shared.content_all.get_best_result(title)
if payload:
matches = feedcrawler.search.shared.content_all.download(payload)
return "Success: " + str(matches), 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/download_show/<title>", methods=['POST'])
@requires_auth
def download_show(title):
if request.method == 'POST':
try:
payload = feedcrawler.search.shared.content_shows.get_best_result(title)
if payload:
matches = feedcrawler.search.shared.content_shows.download(payload)
if matches:
return "Success: " + str(matches), 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/download_bl/<payload>", methods=['POST'])
@requires_auth
def download_bl(payload):
if request.method == 'POST':
try:
if feedcrawler.search.shared.content_all.download(payload):
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/download_sj/<payload>", methods=['POST'])
@requires_auth
def download_sj(payload):
if request.method == 'POST':
try:
if feedcrawler.search.shared.content_shows.download(payload):
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd/", methods=['GET'])
@requires_auth
def myjd_info():
if request.method == 'GET':
try:
myjd = get_info()
packages_to_decrypt = get_to_decrypt()
if myjd:
return jsonify(
{
"downloader_state": myjd[1],
"grabber_collecting": myjd[2],
"update_ready": myjd[3],
"packages": {
"downloader": myjd[4][0],
"linkgrabber_decrypted": myjd[4][1],
"linkgrabber_offline": myjd[4][2],
"linkgrabber_failed": myjd[4][3],
"to_decrypt": packages_to_decrypt
}
}
), 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_state/", methods=['GET'])
@requires_auth
def myjd_state():
if request.method == 'GET':
try:
myjd = get_state()
if myjd:
return jsonify(
{
"downloader_state": myjd[1],
"grabber_collecting": myjd[2]
}
), 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_move/<linkids>&<uuids>", methods=['POST'])
@requires_auth
def myjd_move(linkids, uuids):
if request.method == 'POST':
try:
linkids_raw = ast.literal_eval(linkids)
linkids = []
if isinstance(linkids_raw, (list, tuple)):
for linkid in linkids_raw:
linkids.append(linkid)
else:
linkids.append(linkids_raw)
uuids_raw = ast.literal_eval(uuids)
uuids = []
if isinstance(uuids_raw, (list, tuple)):
for uuid in uuids_raw:
uuids.append(uuid)
else:
uuids.append(uuids_raw)
if move_to_downloads(linkids, uuids):
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_remove/<linkids>&<uuids>", methods=['POST'])
@requires_auth
def myjd_remove(linkids, uuids):
if request.method == 'POST':
try:
linkids_raw = ast.literal_eval(linkids)
linkids = []
if isinstance(linkids_raw, (list, tuple)):
for linkid in linkids_raw:
linkids.append(linkid)
else:
linkids.append(linkids_raw)
uuids_raw = ast.literal_eval(uuids)
uuids = []
if isinstance(uuids_raw, (list, tuple)):
for uuid in uuids_raw:
uuids.append(uuid)
else:
uuids.append(uuids_raw)
if remove_from_linkgrabber(linkids, uuids):
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/internal_remove/<name>", methods=['POST'])
@requires_auth
def internal_remove(name):
if request.method == 'POST':
try:
delete = remove_decrypt(name)
if delete:
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_retry/<linkids>&<uuids>&<b64_links>", methods=['POST'])
@requires_auth
def myjd_retry(linkids, uuids, b64_links):
if request.method == 'POST':
try:
linkids_raw = ast.literal_eval(linkids)
linkids = []
if isinstance(linkids_raw, (list, tuple)):
for linkid in linkids_raw:
linkids.append(linkid)
else:
linkids.append(linkids_raw)
uuids_raw = ast.literal_eval(uuids)
uuids = []
if isinstance(uuids_raw, (list, tuple)):
for uuid in uuids_raw:
uuids.append(uuid)
else:
uuids.append(uuids_raw)
links = decode_base64(b64_links)
links = links.split("\n")
if retry_decrypt(linkids, uuids, links):
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_update/", methods=['POST'])
@requires_auth
def myjd_update():
if request.method == 'POST':
try:
if update_jdownloader():
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_start/", methods=['POST'])
@requires_auth
def myjd_start():
if request.method == 'POST':
try:
if jdownloader_start():
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_pause/<bl>", methods=['POST'])
@requires_auth
def myjd_pause(bl):
if request.method == 'POST':
try:
bl = json.loads(bl)
if jdownloader_pause(bl):
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_stop/", methods=['POST'])
@requires_auth
def myjd_stop():
if request.method == 'POST':
try:
if jdownloader_stop():
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/myjd_cnl/<uuid>", methods=['POST'])
@requires_auth
def myjd_cnl(uuid):
if request.method == 'POST':
try:
failed = get_info()
if failed:
decrypted_packages = failed[4][1]
offline_packages = failed[4][2]
failed_packages = failed[4][3]
else:
failed_packages = False
decrypted_packages = False
if not failed_packages:
return "Failed", 500
title = False
old_package = False
if failed_packages:
for op in failed_packages:
if str(op['uuid']) == str(uuid):
title = op['name']
old_package = op
break
if not old_package or not title:
return "Failed", 500
known_packages = []
if decrypted_packages:
for dp in decrypted_packages:
known_packages.append(dp['uuid'])
if offline_packages:
for op in offline_packages:
known_packages.append(op['uuid'])
cnl_package = False
grabber_was_collecting = False
i = 12
while i > 0:
i -= 1
time.sleep(5)
                    # Refresh the status instead of re-reading the stale
                    # `failed` captured before the loop (matches internal_cnl).
                    failed = get_info()
                    if failed:
                        grabber_collecting = failed[2]
if grabber_was_collecting or grabber_collecting:
grabber_was_collecting = grabber_collecting
i -= 1
time.sleep(5)
else:
if not grabber_collecting:
decrypted_packages = failed[4][1]
offline_packages = failed[4][2]
another_device = package_merge(decrypted_packages, title, known_packages)[0]
if another_device:
info = get_info()
if info:
grabber_collecting = info[2]
decrypted_packages = info[4][1]
offline_packages = info[4][2]
if not grabber_collecting and decrypted_packages:
for dp in decrypted_packages:
if dp['uuid'] not in known_packages:
cnl_package = dp
i = 0
if not grabber_collecting and offline_packages:
for op in offline_packages:
if op['uuid'] not in known_packages:
cnl_package = op
i = 0
if not cnl_package:
return "No Package added through Click'n'Load in time!", 504
replaced = do_package_replace(old_package, cnl_package)
if replaced:
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/internal_cnl/<name>&<password>", methods=['POST'])
@requires_auth
def internal_cnl(name, password):
if request.method == 'POST':
try:
failed = get_info()
if failed:
decrypted_packages = failed[4][1]
offline_packages = failed[4][2]
else:
decrypted_packages = False
known_packages = []
if decrypted_packages:
for dp in decrypted_packages:
known_packages.append(dp['uuid'])
if offline_packages:
for op in offline_packages:
known_packages.append(op['uuid'])
cnl_packages = []
grabber_was_collecting = False
i = 12
while i > 0:
i -= 1
time.sleep(5)
failed = get_info()
if failed:
grabber_collecting = failed[2]
if grabber_was_collecting or grabber_collecting:
grabber_was_collecting = grabber_collecting
i -= 1
time.sleep(5)
else:
if not grabber_collecting:
decrypted_packages = failed[4][1]
offline_packages = failed[4][2]
if not grabber_collecting and decrypted_packages:
for dp in decrypted_packages:
if dp['uuid'] not in known_packages:
cnl_packages.append(dp)
i = 0
if not grabber_collecting and offline_packages:
for op in offline_packages:
if op['uuid'] not in known_packages:
cnl_packages.append(op)
i = 0
if not cnl_packages:
return "No Package added through Click'n'Load in time!", 504
if do_add_decrypted(name, password, cnl_packages):
remove_decrypt(name)
return "Success", 200
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/api/lists/", methods=['GET', 'POST'])
@requires_auth
def get_post_lists():
if request.method == 'GET':
try:
def get_list(liste):
cont = ListDb(liste).retrieve()
return "\n".join(cont) if cont else ""
return jsonify(
{
"lists": {
"mb": {
"filme": get_list('List_ContentAll_Movies'),
"regex": get_list('List_ContentAll_Movies_Regex'),
},
"sj": {
"serien": get_list('List_ContentShows_Shows'),
"regex": get_list('List_ContentShows_Shows_Regex'),
"staffeln_regex": get_list('List_ContentShows_Seasons_Regex'),
},
"dj": {
"dokus": get_list('List_CustomDJ_Documentaries'),
"regex": get_list('List_CustomDJ_Documentaries_Regex'),
},
"mbsj": {
"staffeln": get_list('List_ContentAll_Seasons'),
}
},
}
)
except:
return "Failed", 400
if request.method == 'POST':
try:
data = request.json
ListDb("List_ContentAll_Movies").store_list(
data['mb']['filme'].split('\n'))
ListDb("List_ContentAll_Seasons").store_list(
data['mbsj']['staffeln'].split('\n'))
ListDb("List_ContentAll_Movies_Regex").store_list(
data['mb']['regex'].split('\n'))
ListDb("List_ContentShows_Shows").store_list(
data['sj']['serien'].split('\n'))
ListDb("List_ContentShows_Shows_Regex").store_list(
data['sj']['regex'].split('\n'))
ListDb("List_ContentShows_Seasons_Regex").store_list(
data['sj']['staffeln_regex'].split('\n'))
ListDb("List_CustomDJ_Documentaries").store_list(
data['dj']['dokus'].split('\n'))
ListDb("List_CustomDJ_Documentaries_Regex").store_list(
data['dj']['regex'].split('\n'))
return "Success", 201
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/redirect_user/<target>", methods=['GET'])
@requires_auth
def redirect_user(target):
if request.method == 'GET':
try:
if target == "captcha":
return redirect("http://getcaptchasolution.com/zuoo67f5cq", code=302)
elif target == "multihoster":
return redirect("http://linksnappy.com/?ref=397097", code=302)
except:
pass
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/feedcrawler_helper_sj.user.js", methods=['GET'])
@requires_auth
def feedcrawler_helper_sj():
if request.method == 'GET':
try:
hostnames = CrawlerConfig('Hostnames')
sj = hostnames.get('sj')
dj = hostnames.get('dj')
return """// ==UserScript==
// @name FeedCrawler Helper (SJ/DJ)
// @author rix1337
// @description Forwards decrypted SJ/DJ Download links to FeedCrawler
// @version 0.3.0
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js
// @match https://""" + sj + """/*
// @match https://""" + dj + """/*
// @exclude https://""" + sj + """/serie/search?q=*
// @exclude https://""" + dj + """/serie/search?q=*
// ==/UserScript==
document.body.addEventListener('mousedown', function (e) {
if (e.target.tagName != "A") return;
var anchor = e.target;
if (anchor.href.search(/""" + sj + """\/serie\//i) != -1) {
anchor.href = anchor.href + '#' + anchor.text;
} else if (anchor.href.search(/""" + dj + """\/serie\//i) != -1) {
anchor.href = anchor.href + '#' + anchor.text;
}
});
var tag = window.location.hash.replace("#", "").split('|');
var title = tag[0];
var password = tag[1];
if (title) {
$('.wrapper').prepend('<h3>[FeedCrawler Helper] ' + title + '</h3>');
$(".container").hide();
var checkExist = setInterval(async function () {
if ($("tr:contains('" + title + "')").length) {
$(".container").show();
$("tr:contains('" + title + "')")[0].lastChild.firstChild.click();
clearInterval(checkExist);
}
}, 100);
}
""", 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/feedcrawler_sponsors_helper_dw.user.js", methods=['GET'])
@requires_auth
def feedcrawler_sponsors_helper_dw():
if not helper_active:
return "Forbidden", 403
if request.method == 'GET':
try:
hostnames = CrawlerConfig('Hostnames')
dw = hostnames.get('dw')
return """// ==UserScript==
// @name FeedCrawler Sponsors Helper (DW)
// @author rix1337
// @description Clicks the correct download button on DW sub pages to speed up Click'n'Load
// @version 0.2.0
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js
// @match https://""" + dw + """/*
// @grant window.close
// ==/UserScript==
// The externally reachable address of the FeedCrawler must go here (not, e.g., the Docker-internal one):
var sponsorsURL = '""" + internal.local_address + """';
// A preferred hoster can be entered here (exactly 'ddownload.com' or 'rapidgator.net'):
var sponsorsHoster = '';
document.body.addEventListener('mousedown', function (e) {
if (e.target.tagName != "A") return;
var anchor = e.target;
if (anchor.href.search(/""" + dw + """\/download\//i) != -1) {
anchor.href = anchor.href + '#' + anchor.text;
}
});
var tag = window.location.hash.replace("#", "").split('|');
var title = tag[0];
var password = tag[1];
if (title) {
$('.container').prepend('<h3>[FeedCrawler Sponsors Helper] ' + title + '</h3>');
var checkExist = setInterval(async function() {
if (sponsorsHoster && $("span:contains('Download Mirror')").find('a[data-original-title="Download bei ' + sponsorsHoster + '"]').length) {
$("span:contains('Download Mirror')").find('a[data-original-title="Download bei ' + sponsorsHoster + '"]').click();
} else {
$("span:contains('Download Mirror 1')").click();
}
console.log("[FeedCrawler Sponsors Helper] clicked Download button to trigger reCAPTCHA");
clearInterval(checkExist);
}, 100);
var dlExists = setInterval(async function() {
if ($("tr:contains('Download Part')").length) {
var items = $("tr:contains('Download Part')").find("a");
var links = [];
items.each(function(index){
links.push(items[index].href);
})
console.log("[FeedCrawler Sponsors Helper] found download links: " + links);
clearInterval(dlExists);
window.open(sponsorsURL + '/sponsors_helper/to_download/' + btoa(links + '|' + title + '|' + password));
window.close();
}
}, 100);
}
""", 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/feedcrawler_sponsors_helper_sj.user.js", methods=['GET'])
@requires_auth
def feedcrawler_sponsors_helper_sj():
if not helper_active:
return "Forbidden", 403
if request.method == 'GET':
try:
hostnames = CrawlerConfig('Hostnames')
sj = hostnames.get('sj')
dj = hostnames.get('dj')
return """// ==UserScript==
// @name FeedCrawler Sponsors Helper (SJ/DJ)
// @author rix1337
// @description Clicks the correct download button on SJ/DJ sub pages to speed up Click'n'Load
// @version 0.4.0
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js
// @match https://""" + sj + """/*
// @match https://""" + dj + """/*
// @exclude https://""" + sj + """/serie/search?q=*
// @exclude https://""" + dj + """/serie/search?q=*
// @grant window.close
// ==/UserScript==
// The externally reachable address of the FeedCrawler must go here (not, e.g., the Docker-internal one):
var sponsorsURL = '""" + internal.local_address + """';
// A preferred hoster can be entered here (without www. and .tld):
var sponsorsHoster = '';
$.extend($.expr[':'], {
'containsi': function(elem, i, match, array) {
return (elem.textContent || elem.innerText || '').toLowerCase()
.indexOf((match[3] || "").toLowerCase()) >= 0;
}
});
document.body.addEventListener('mousedown', function (e) {
if (e.target.tagName != "A") return;
var anchor = e.target;
if (anchor.href.search(/""" + sj + """\/serie\//i) != -1) {
anchor.href = anchor.href + '#' + anchor.text;
} else if (anchor.href.search(/""" + dj + """\/serie\//i) != -1) {
anchor.href = anchor.href + '#' + anchor.text;
}
});
function Sleep(milliseconds) {
return new Promise(resolve => setTimeout(resolve, milliseconds));
}
var tag = window.location.hash.replace("#", "").split('|');
var title = tag[0];
var password = tag[1];
if (title && title !== "login") {
$('.wrapper').prepend('<h3>[FeedCrawler Sponsors Helper] ' + title + '</h3>');
$(".container").hide();
var checkExist = setInterval(function() {
async function clickRelease() {
if ($("tr:contains('" + title + "')").length) {
$(".container").show();
$("tr:contains('" + title + "')")[0].lastChild.firstChild.click();
if (sponsorsHelper) {
console.log("[FeedCrawler Sponsors Helper] Clicked Download button of " + title);
await Sleep(500);
var requiresLogin = $(".alert-warning").length;
if (requiresLogin) {
clearInterval(checkExist);
window.open("https://" + $(location).attr('hostname') + "#login|" + btoa(window.location));
window.close()
}
}
clearInterval(checkExist);
} }
clickRelease();
}, 100);
if (sponsorsHelper) {
var dlExists = setInterval(async function() {
if ($("tr:contains('Download Part')").length) {
var items = $("tr:contains('Download Part')").find("a");
var links = [];
items.each(function(index){
links.push(items[index].href);
})
console.log("[FeedCrawler Sponsors Helper] found download links: " + links);
clearInterval(dlExists);
window.open(sponsorsURL + '/sponsors_helper/to_download/' + btoa(links + '|' + title + '|' + password));
window.close();
} else if ( document.body.innerHTML.search("se das Captcha!") && !$('.center-recaptcha').length) {
if ( sponsorsHoster && $("button:containsi('" + sponsorsHoster + "')").length) {
$("button:containsi('" + sponsorsHoster + "')").click();
} else if ( $("button:containsi('1fichier')").length) {
$("button:containsi('1fichier')").click();
} else if ( $("button:containsi('ddownload')").length) {
$("button:containsi('ddownload')").click();
} else if ( $("button:containsi('turbo')").length) {
$("button:containsi('turbo')").click();
} else if ( $("button:containsi('filer')").length) {
$("button:containsi('filer')").click();
} else {
$("div.modal-body").find("button.btn.btn-secondary.btn-block").click();
}
console.log("[FeedCrawler Sponsors Helper] Clicked Download button to trigger reCAPTCHA");
}
}, 100);
}
};
""", 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/feedcrawler_sponsors_helper_fc.user.js", methods=['GET'])
@requires_auth
def feedcrawler_sponsors_helper_fc():
if not helper_active:
return "Forbidden", 403
if request.method == 'GET':
try:
return """// ==UserScript==
// @name FeedCrawler Sponsors Helper (FC)
// @author rix1337
// @description Forwards Click'n'Load to FeedCrawler
// @version 0.5.0
// @match *.filecrypt.cc/*
// @match *.filecrypt.co/*
// @grant window.close
// ==/UserScript==
// The externally reachable address of the FeedCrawler must go here (not, e.g., the Docker-internal one):
var sponsorsURL = '""" + internal.local_address + """';
// A preferred hoster can be entered here (without www. and .tld):
var sponsorsHoster = '';
var tag = window.location.hash.replace("#", "").split('|');
var title = tag[0]
var password = tag[1]
var ids = tag[2]
var urlParams = new URLSearchParams(window.location.search);
function Sleep(milliseconds) {
return new Promise(resolve => setTimeout(resolve, milliseconds));
}
var mirrorsAvailable = false;
try {
mirrorsAvailable = document.querySelector('.mirror').querySelectorAll("a");
} catch {}
var cnlAllowed = false;
if (mirrorsAvailable && sponsorsHoster) {
const currentURL = window.location.href;
var desiredMirror = "";
var i;
for (i = 0; i < mirrorsAvailable.length; i++) {
if (mirrorsAvailable[i].text.includes(sponsorsHoster)) {
var ep = "";
var cur_ep = urlParams.get('episode');
if (cur_ep) {
ep = "&episode=" + cur_ep;
}
desiredMirror = mirrorsAvailable[i].href + ep + window.location.hash;
}
}
if (desiredMirror) {
if (!currentURL.includes(desiredMirror)) {
console.log("[FeedCrawler Sponsors Helper] switching to desired Mirror: " + sponsorsHoster);
window.location = desiredMirror;
} else {
console.log("[FeedCrawler Sponsors Helper] already at the desired Mirror: " + sponsorsHoster);
cnlAllowed = true;
}
} else {
console.log("[FeedCrawler Sponsors Helper] desired Mirror not available: " + sponsorsHoster);
cnlAllowed = true;
}
} else {
cnlAllowed = true;
}
var cnlExists = setInterval(async function() {
if (cnlAllowed && document.getElementsByClassName("cnlform").length) {
clearInterval(cnlExists);
document.getElementById("cnl_btn").click();
console.log("[FeedCrawler Sponsors Helper] attempting Click'n'Load");
await Sleep(4000);
window.close();
}
}, 100);
""", 200
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/", methods=['GET'])
@requires_auth
def to_decrypt():
global helper_active
helper_active = True
if request.method == 'GET':
return render_template('helper.html')
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/api/to_decrypt/", methods=['GET'])
@requires_auth
def to_decrypt_api():
global helper_active
if request.method == 'GET':
try:
helper_active = True
decrypt_name = False
decrypt_url = False
decrypt = get_to_decrypt()
if decrypt:
decrypt = decrypt[0]
decrypt_name = decrypt["name"]
decrypt_url = decrypt["url"].replace("http://", "https://") + "#" + decrypt_name + "|" + decrypt[
"password"]
return jsonify(
{
"to_decrypt": {
"name": decrypt_name,
"url": decrypt_url,
}
}
)
except:
return "Failed", 400
else:
return "Failed", 405
@app.route(prefix + "/sponsors_helper/to_download/<payload>", methods=['GET'])
@requires_auth
def to_download(payload):
if request.method == 'GET':
try:
global already_added
try:
payload = decode_base64(payload.replace("%3D", "=")).split("|")
except:
return "Failed", 400
if payload:
links = payload[0]
package_name = payload[1].replace("%20", "")
name = package_name
try:
password = payload[2]
except:
password = ""
try:
ids = payload[3]
except:
ids = False
FeedDb('crawldog').store(package_name, 'added')
if internal.device:
if ids:
try:
ids = ids.replace("%20", "").split(";")
linkids = ids[0]
uuids = ids[1]
except:
linkids = False
uuids = False
if ids and uuids:
linkids_raw = ast.literal_eval(linkids)
linkids = []
if isinstance(linkids_raw, (list, tuple)):
for linkid in linkids_raw:
linkids.append(linkid)
else:
linkids.append(linkids_raw)
uuids_raw = ast.literal_eval(uuids)
uuids = []
if isinstance(uuids_raw, (list, tuple)):
for uuid in uuids_raw:
uuids.append(uuid)
else:
uuids.append(uuids_raw)
remove_from_linkgrabber(linkids, uuids)
remove_decrypt(package_name)
else:
is_episode = re.findall(r'.*\.(S\d{1,3}E\d{1,3})\..*', package_name)
if not is_episode:
re_name = rreplace(package_name.lower(), "-", ".*", 1)
re_name = re_name.replace(".untouched", ".*").replace("dd+51", "dd.51")
season_string = re.findall(r'.*(s\d{1,3}).*', re_name)
if season_string:
re_name = re_name.replace(season_string[0], season_string[0] + '.*')
codec_tags = [".h264", ".x264"]
for tag in codec_tags:
re_name = re_name.replace(tag, ".*264")
web_tags = [".web-rip", ".webrip", ".webdl", ".web-dl"]
for tag in web_tags:
re_name = re_name.replace(tag, ".web.*")
multigroup = re.findall(r'.*-((.*)\/(.*))', package_name.lower())
if multigroup:
re_name = re_name.replace(multigroup[0][0],
'(' + multigroup[0][1] + '|' + multigroup[0][2] + ')')
else:
re_name = package_name
season_string = re.findall(r'.*(s\d{1,3}).*', re_name.lower())
if season_string:
season_string = season_string[0].replace("s", "S")
else:
season_string = "^unmatchable$"
try:
packages = get_packages_in_linkgrabber()
except feedcrawler.myjdapi.TokenExpiredException:
get_device()
if not internal.device or not is_device(internal.device):
return "Failed", 500
packages = get_packages_in_linkgrabber()
if packages:
failed = packages[0]
offline = packages[1]
try:
if failed:
for package in failed:
if re.match(re.compile(re_name), package['name'].lower()):
episode = re.findall(r'.*\.S\d{1,3}E(\d{1,3})\..*', package['name'])
# ToDo refactor to new code below
if episode:
FeedDb('episode_remover').store(package_name, str(int(episode[0])))
linkids = package['linkids']
uuids = [package['uuid']]
remove_from_linkgrabber(linkids, uuids)
remove_decrypt(package_name)
break
if offline:
for package in offline:
if re.match(re.compile(re_name), package['name'].lower()):
episode = re.findall(r'.*\.S\d{1,3}E(\d{1,3})\..*', package['name'])
# ToDo refactor to new code below
if episode:
FeedDb('episode_remover').store(package_name, str(int(episode[0])))
linkids = package['linkids']
uuids = [package['uuid']]
remove_from_linkgrabber(linkids, uuids)
remove_decrypt(package_name)
break
except:
pass
packages = get_to_decrypt()
if packages:
for package in packages:
if name == package["name"].strip():
name = package["name"]
elif re.match(re.compile(re_name),
package['name'].lower().strip().replace(".untouched", ".*").replace(
"dd+51",
"dd.51")):
episode = re.findall(r'.*\.S\d{1,3}E(\d{1,3})\..*', package['name'])
remove_decrypt(package['name'])
if episode:
episode_to_keep = str(int(episode[0]))
episode = str(episode[0])
if len(episode) == 1:
episode = "0" + episode
name = name.replace(season_string + ".",
season_string + "E" + episode + ".")
episode_in_remover = FeedDb('episode_remover').retrieve(package_name)
if episode_in_remover:
episode_to_keep = episode_in_remover + "|" + episode_to_keep
FeedDb('episode_remover').delete(package_name)
time.sleep(1)
FeedDb('episode_remover').store(package_name, episode_to_keep)
break
time.sleep(1)
remove_decrypt(name)
try:
epoch = int(time.time())
for item in already_added:
if item[0] == package_name:
if int(item[1]) + 30 > epoch:
print(name + u" wurde in den letzten 30 Sekunden bereits hinzugefügt")
return name + u" wurde in den letzten 30 Sekunden bereits hinzugefügt", 400
else:
already_added.remove(item)
download(package_name, "FeedCrawler", links, password)
db = FeedDb('FeedCrawler')
if not db.retrieve(name):
db.store(name, 'added')
try:
notify(["[FeedCrawler Sponsors Helper erfolgreich] - " + name])
except:
print(u"Benachrichtigung konnte nicht versendet werden!")
print(u"[FeedCrawler Sponsors Helper erfolgreich] - " + name)
already_added.append([name, str(epoch)])
return "<script type='text/javascript'>" \
"function closeWindow(){window.close()}window.onload=closeWindow;</script>" \
"[FeedCrawler Sponsors Helper erfolgreich] - " + name, 200
except:
print(name + u" konnte nicht hinzugefügt werden!")
except:
pass
return "Failed", 400
else:
return "Failed", 405
serve(app, host='0.0.0.0', port=internal.port, threads=10, _quiet=True)
def start():
sys.stdout = Unbuffered(sys.stdout)
disable_request_warnings(InsecureRequestWarning)
if version.update_check()[0]:
updateversion = version.update_check()[1]
print(u'Update steht bereit (' + updateversion +
')! Weitere Informationen unter https://github.com/rix1337/FeedCrawler/releases/latest')
app_container()
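# Added sketch (not part of the original module): the
# /sponsors_helper/to_download/<payload> route above expects a base64 blob of
# pipe-separated fields "links|package_name|password|ids". A hypothetical
# client could build one like this (the exact base64 variant is whatever the
# server-side decode_base64 helper expects):
#
#   import base64
#   fields = "https://example.com/f.part1.rar|Some.Release.S01E01|secret"
#   token = base64.b64encode(fields.encode()).decode()
#   # then GET <local_address>/sponsors_helper/to_download/<token>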
| 42.568279 | 146 | 0.455908 |
c4594ef3399076489d0799c62fa24d5cda8bda18
| 1,112 |
py
|
Python
|
Curso_Python/Secao2-Python-Basico-Logica-Programacao/34_lista_em_python/34_lista_em_python.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/34_lista_em_python/34_lista_em_python.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/34_lista_em_python/34_lista_em_python.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
"""
Lists in Python
slicing
append, insert, pop, del, clear, extend, min, max
range
"""
secreto = 'Perfume'
digitadas = []
chances = 3
while True:
if chances <= 0:
print('Você perdeu!!!')
break
letra = input('Digite uma letra: ')
if len(letra) > 1:
print('Ahhh isso não vale, digite apenas uma letra.')
continue
digitadas.append(letra)
if letra in secreto:
print(f'UHUUULLL, a letra "{letra}" existe na palavra secreta.')
else:
print(f'AFFzzzz: a letra "{letra}" NÃO EXISTE na palavra secreta.')
digitadas.pop()
secreto_temporario = ''
for letra_secreta in secreto:
if letra_secreta in digitadas:
secreto_temporario += letra_secreta
else:
secreto_temporario += '*'
if secreto_temporario == secreto:
        print(f'Que legal, você ganhou!!! A palavra era {secreto_temporario}')
break
else:
print(f'A palavra secreta esta assim: {secreto_temporario}')
if letra not in secreto:
chances -= 1
        print(f'Você ainda tem {chances} chances.')
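# Added illustration (not part of the exercise) of the list operations named
# in the module docstring:
#
#   letras = ['a', 'b']
#   letras.append('c')      # ['a', 'b', 'c']
#   letras.insert(0, 'z')   # ['z', 'a', 'b', 'c']
#   letras.pop()            # returns 'c' -> ['z', 'a', 'b']
#   letras[1:3]             # slicing -> ['a', 'b']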
| 24.173913 | 78 | 0.613309 |
674b1259b96846aafa36af8671130db2c55479f4
| 1,177 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE/mail/pic_Attach_mime.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE/mail/pic_Attach_mime.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE/mail/pic_Attach_mime.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# Import smtplib for the actual sending function
import smtplib
# And imghdr to find the types of our images
import imghdr
# Here are the email package modules we'll need
from email.message import EmailMessage
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("", "")
# Create the container email message.
msg = EmailMessage()
msg['Subject'] = 'Test Python png\'s'
me = '[email protected]'
family = ['[email protected]','[email protected]'] #the list of all recipients' email addresses
msg['From'] = me
msg['To'] = ', '.join(family)
msg.preamble = 'Test Python png message test'
# Open the files in binary mode. Use imghdr to figure out the
# MIME subtype for each specific image.
pngfiles = ['python1.png','python.png']
for file in pngfiles:
with open(file, 'rb') as fp:
img_data = fp.read()
msg.add_attachment(img_data, maintype='image',
subtype=imghdr.what(None, img_data))
# Send the email via our own SMTP server.
'''
with smtplib.SMTP('localhost') as s:
s.send_message(msg)
'''
server.send_message(msg)
server.quit()
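# Added note (illustrative): imghdr.what(None, img_data) sniffs the image
# bytes themselves, so for PNG data the attachment above is stored with
# maintype='image' and subtype='png'.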
| 30.973684 | 108 | 0.701784 |
67f362bd2993cba7662da63dfa96527c3a99c1ef
| 88 |
py
|
Python
|
apps/tess/apps.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2 |
2018-05-18T08:38:29.000Z
|
2018-05-22T08:26:09.000Z
|
apps/tess/apps.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118 |
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/tess/apps.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TessConfig(AppConfig):
name = 'apps.tess'
| 14.666667 | 33 | 0.738636 |
db0a1084d9e1d719af07b4dcdf33550df0abd4c7
| 759 |
py
|
Python
|
Problems/Depth-First Search/medium/CountSubsIslands/count_sub_islands.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Depth-First Search/medium/CountSubsIslands/count_sub_islands.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Depth-First Search/medium/CountSubsIslands/count_sub_islands.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import List
def countSubIslands(self, grid1: List[List[int]], grid2: List[List[int]]) -> int:
m, n = len(grid1), len(grid1[0])
sub_islands = 0
def dfs(row: int, col: int) -> bool:
if row < 0 or row >= m or col < 0 or col >= n:
return True
if grid2[row][col] != 1:
return True
grid2[row][col] = 0
top = dfs(row - 1, col)
bot = dfs(row + 1, col)
left = dfs(row, col - 1)
right = dfs(row, col + 1)
if grid1[row][col] != 1:
return False
return top and bot and left and right
for i in range(m):
for j in range(n):
if grid2[i][j] == 1:
sub_islands += dfs(i, j)
return sub_islands
| 23.71875 | 81 | 0.496706 |
e164ed9f3846de9f349d9e2954c800573eb64bf6
| 6,124 |
py
|
Python
|
Bowman/.ipynb_checkpoints/Function1_2_3-checkpoint.py
|
JonathanOnorato/ChemLP
|
a7b0d2f9a3899531aacda9bc85d89d721f6450c4
|
[
"MIT"
] | 4 |
2020-04-13T20:49:01.000Z
|
2021-02-09T01:05:01.000Z
|
Bowman/.ipynb_checkpoints/Function1_2_3-checkpoint.py
|
JonathanOnorato/ChemLP
|
a7b0d2f9a3899531aacda9bc85d89d721f6450c4
|
[
"MIT"
] | 3 |
2020-04-28T23:16:27.000Z
|
2020-04-28T23:26:53.000Z
|
Bowman/Function1_2_3.py
|
JonathanOnorato/ChemLP
|
a7b0d2f9a3899531aacda9bc85d89d721f6450c4
|
[
"MIT"
] | null | null | null |
# %%
#Function 1 of ChemLibre Texts reading program, takes in a url, path, and browser type and returns the html
#Path location should be in the format ex. C:/Users/bowri/Anaconda3/chromedriver
#If using Firefox, or not Chrome, simply enter "" for path location, requires having downloaded chromedriver first
#See formatting below
#Stuff to do:
#1) throw more errors - check, still work on the try/except for selenium being present
#2) getting rid of import functions - check
#3) add docstrings to let user know which types of data are allowed - check
#4) add default settings, eg. output = none; have output in, maybe more
#5) document better
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import pandas as pd
from lxml import html
import selenium
from bs4 import BeautifulSoup as BS
import random
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import StaleElementReferenceException as SERE
def selenium_html_collector(url, browser, path_to_driver, webdriver):
""""This function takes in three strings: 1) a url, 2) a browser,
and 3) a path to your local chromedriver location, which is only
need if you are using Chrome. It takes in 4) a webdriver module
from Selenium as well. It returns an html of the given
url and opens the browser to that site as well"""
if browser == "Firefox":
#try:
drive = webdriver.Firefox()
#except:
# print("Did you import the webdriver module from Selenium?")
elif browser == "Chrome":
#try:
drive = webdriver.Chrome(executable_path= (path_to_driver))
#except:
# print("Did you import the webdriver module from Selenium?")
elif browser != "Chrome" or "Firefox":
print("this is the weird browser:", browser)
raise Exception("Sorry, the function only utilizes Firefox and Chrome currently")
drive.get(url)
return drive
def book_finder(url, driver): #SHOULD GET RID OF INITIALIZED LIST EVERY TIME PROGRAM RUN, ADD AS ARGUMENT
book_urls = []
urls = []
driver.get(url)
driver.implicitly_wait(random.randint(1,10))
#mt-sortable-listing-link mt-edit-section internal is the class name that gets all genres
#can do something recursive, where if <h1 .text contains "Book:" stop
sections = driver.find_elements_by_class_name("mt-sortable-listing-link mt-edit-section internal")
print(type(sections))
print(sections)
#if h1.text does not contain "Book:"
header = str(driver.find_element_by_xpath("//*[@id='title']").text)
print(header)
#for section in sections:
# book_finder(section, driver)
print()
for section in sections:
        urls.append(section.get_attribute("href"))
print(urls)
#else:
#for section in sections:
#book_url.append = href value(link)
#return book_urls
# %%
#Function 3 of ChemLibreTexts reading program, takes in two lists: 1) chapter titles and 2) chapter
#contents and 3) a filename, and exports them to a JSON file with the given filename
#Creates a dictionary with the two lists, and writes and opens a json file
#add additional arguments for default settings, eg output_state boolean, for printing vs writing
def chapter_exporter(chapter_titles, chapter_contents, filename, export = True):
""""This function takes in three variables, and has one default variable. The first two
variables must be lists, which ultimately get combined into a dictionary. The third var
is the string filename of your choice, and the final variable determines whether or not
the program will print or export the dictionary to a json. By default it is set to true"""
    if isinstance(chapter_titles, list) and isinstance(chapter_contents, list):
titles_and_contents = dict(zip(chapter_titles, chapter_contents))
if export == True:
with open(filename, "w") as outfile:
json.dump(titles_and_contents, outfile)
else:
print(titles_and_contents)
else:
raise Exception("Variables passed in must be lists")
# %%
#import json
#titles_list = ["chapter 1", "chapter 2", "chapter 3"]
#chap_list = ["this is chapter 1", "this is chapter 2", "this is chapter 3"]
#title = "chapter 1"
#chapter_exporter(titles_list, chap_list, "test_chapter_writing", False)
# %%
def chapter_text_parser(driver):
driver.implicitly_wait(random.randint(1,100))
chapter_title = driver.find_element(By.XPATH, '//*[@id="title"]').text.strip()
subchap_link_title_container = driver.find_elements(By.CLASS_NAME, 'mt-listing-detailed-title')
subchap_titles = [title.text.strip() for title in subchap_link_title_container ]
subchap_links = [link.find_element_by_tag_name('a').get_attribute('href') for link in subchap_link_title_container]
print('Name of chapter', chapter_title, '\n', 'Number of subchapter is', len(subchap_links))
subchap_overview_container = driver.find_elements(By.CLASS_NAME, 'mt-listing-detailed-overview')
subchap_overviews = [overview.text.strip() for overview in subchap_overview_container]
subchap_contents=[]
data = {}
for chap_link in subchap_links:
driver.get(chap_link)
driver.page_source
chap_text_container = driver.find_elements(By.CLASS_NAME,'mt-content-container')
for subchap in chap_text_container:
subchap_contents.append(subchap.text.strip())
data = {'chap-title':subchap_titles, 'chap-overview': subchap_overviews, 'chap-content':subchap_contents}
return data
# %%
def new_exporter(dictionary, filename, driver, printout = False):
if printout == False:
with open(filename, "w") as outfile:
json.dump(dictionary, outfile)
else:
print(dictionary)
return driver.close()
| 39.006369 | 119 | 0.695624 |
55f2dde3fd2c0543dc44287d45b459f9582e2546
| 136 |
py
|
Python
|
python/gdal_cookbook/cookbook_geometry/create_point.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/gdal_cookbook/cookbook_geometry/create_point.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/gdal_cookbook/cookbook_geometry/create_point.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
from osgeo import ogr
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(1116651.439379124, 637392.6969887456)
print(point.ExportToWkt())
| 34 | 52 | 0.816176 |
36c3cba9cf0d7ee801e42cd0b3576d3af1e8ad0c
| 106 |
py
|
Python
|
relay/relay_output.py
|
ihrigb/stagebuzzer
|
dbce1c5fa59a6f22e74d84ccc96d4d1a28a5b680
|
[
"Apache-2.0"
] | null | null | null |
relay/relay_output.py
|
ihrigb/stagebuzzer
|
dbce1c5fa59a6f22e74d84ccc96d4d1a28a5b680
|
[
"Apache-2.0"
] | null | null | null |
relay/relay_output.py
|
ihrigb/stagebuzzer
|
dbce1c5fa59a6f22e74d84ccc96d4d1a28a5b680
|
[
"Apache-2.0"
] | null | null | null |
class RelayOutput:
def enable_relay(self, name: str):
pass
def reset(self):
pass
| 15.142857 | 38 | 0.584906 |
3d0ca19c0269e822729626ef3095d8a86de2b864
| 579 |
py
|
Python
|
python/coursera_python/WESLEYAN/week4/test/2.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/coursera_python/WESLEYAN/week4/test/2.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/coursera_python/WESLEYAN/week4/test/2.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import random
def make_same_random():
""" Make a list of 10 random integers that are the same each time """
numlis = []
random.seed(17) # set the seed from which random numbers are made
for i in range(0,10):
d = random.random()
print(d)
numlis.append(d)
return numlis
def call_make_random():
""" Uses make_same_random to get a list of random numbers """
random_integers = make_same_random()
#print(random_integers)
#random_integers1 = make_same_random()
#print(random_integers1)
call_make_random()
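# Added note (illustrative): because random.seed(17) is called inside
# make_same_random(), repeated calls produce identical lists:
#
#   assert make_same_random() == make_same_random()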
| 25.173913 | 76 | 0.654577 |
a10801063b1b47b54932dac4080ed43f9c79a6a5
| 4,186 |
py
|
Python
|
test/test_npu/test_network_ops/test_bitwise_not.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_bitwise_not.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_bitwise_not.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class Test_Bitwise_Not(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def generate_bool_data(self, shape):
input1 = np.random.randint(0, 2, shape).astype(np.bool_)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def cpu_op_exec(self, input1):
output = torch.bitwise_not(input1)
if output.dtype not in [torch.int32, torch.int8, torch.bool]:
output = output.to(torch.int32)
output = output.numpy()
return output
def npu_op_exec(self, input1):
input1 = input1.to("npu")
output = torch.bitwise_not(input1)
output = output.to("cpu")
if output.dtype not in [torch.int32, torch.int8, torch.bool]:
output = output.to(torch.int32)
output = output.numpy()
return output
def npu_op_exec_out(self, input1, input2):
input1 = input1.to("npu")
input2 = input2.to("npu")
torch.bitwise_not(input1, out = input2)
output = input2.to("cpu")
if output.dtype not in [torch.int32, torch.int8, torch.bool]:
output = output.to(torch.int32)
output = output.numpy()
return output
def test_bitwise_not_bool(self, device):
npu_input1 = self.generate_bool_data((2, 3))
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_int16(self, device):
npu_input1 = self.generate_data(0, 2342, (2, 3), np.int16)
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_int32(self, device):
npu_input1 = self.generate_data(0, 34222, (2, 3), np.int32)
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_int64(self, device):
npu_input1 = self.generate_data(0, 355553, (2, 3), np.int64)
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_out(self, device):
shape_format = [
[[0, 2342, [2, 3], np.int16], [0, 2342, [10, 20], np.int16]],
[[0, 34222, [2, 3], np.int32], [0, 34222, [10, 20], np.int32]],
[[0, 355553, [2, 3], np.int64], [0, 355553, [1, 1], np.int64]],
]
for item in shape_format:
npu_input1 = self.generate_data(item[0][0], item[0][1], item[0][2], item[0][3])
npu_input2 = self.generate_data(item[1][0], item[1][1], item[1][2], item[1][3])
cpu_output = self.cpu_op_exec(npu_input1)
npu_output1 = self.npu_op_exec_out(npu_input1, npu_input1)
npu_output2 = self.npu_op_exec_out(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output1)
            self.assertRtolEqual(cpu_output, npu_output2)
instantiate_device_type_tests(Test_Bitwise_Not, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
| 39.866667 | 91 | 0.665313 |
62961bf7a39c3c78999724de35586e607fa1cbd0
| 873 |
py
|
Python
|
PINp/2015/TITOV_I_V/task_9_20.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2015/TITOV_I_V/task_9_20.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2015/TITOV_I_V/task_9_20.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 9
# Create a game in which the computer picks a word and the player must
# guess it. The computer tells the player how many letters the word has and
# gives five attempts to find out whether a given letter occurs in the word,
# while the program may answer only "Yes" and "No". After that the player
# must try to guess the word.
# Titov I. V.
# 02.06.2016
import random
spisokWord = ("слон", "аватар", "мяч", "суперудар", "реал", "баскетбол", "яблоко")
zagadka = random.choice(spisokWord)
print ("Длина слова - ", len(zagadka))
for i in range(5):
print("\n")
userLeter = input ("Введите букву - ")
if userLeter in zagadka:
print ("Да")
else:
print ("Нет")
if (input("\nВведите ответ - ") == zagadka):
print ("Неплохо для магла)")
else:
print ("Просто вы из этих (")
print (zagadka)
input ("\n Нажмите ENTER для выхода")
| 25.676471 | 83 | 0.662085 |
62bb16e1b05c02702d62e2f10d8ac2fdbe874ef6
| 562 |
py
|
Python
|
code/selfish_proxy/strategy/__init__.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/selfish_proxy/strategy/__init__.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/selfish_proxy/strategy/__init__.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | 1 |
2019-06-05T09:10:30.000Z
|
2019-06-05T09:10:30.000Z
|
from enum import Enum
class Action(Enum):
adopt = 'a'
override = 'o'
match = 'm'
wait = 'w'
class ForkState(Enum):
irrelevant = 0
relevant = 1
active = 2
class BlockOrigin(Enum):
private = 0
public = 1
def opposite_origin(block_origin):
if block_origin is BlockOrigin.private:
return BlockOrigin.public
else:
return BlockOrigin.private
class ActionException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
| 16.057143 | 43 | 0.637011 |
ad0069b8182e1e872e838f597163cbabe5c2a596
| 64 |
py
|
Python
|
Online-Judges/CodingBat/Python/String-01/04-make_out_word.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/String-01/04-make_out_word.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/String-01/04-make_out_word.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def make_out_word(out, word):
return (out[:2]+word+out[2:])
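# Added example (illustrative): make_out_word('<<>>', 'Yay') returns '<<Yay>>'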
| 21.333333 | 33 | 0.640625 |
4927f80e3cf89bb5f439bfba63d80d64f6c1b8de
| 297 |
py
|
Python
|
python/FizzBuzz.py
|
campbe13/FizzBuzz
|
1ac2b964ef65e792e11947e684f3cc3f8bcaf90d
|
[
"MIT"
] | null | null | null |
python/FizzBuzz.py
|
campbe13/FizzBuzz
|
1ac2b964ef65e792e11947e684f3cc3f8bcaf90d
|
[
"MIT"
] | null | null | null |
python/FizzBuzz.py
|
campbe13/FizzBuzz
|
1ac2b964ef65e792e11947e684f3cc3f8bcaf90d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Fizz Buzz in python 3
P Campbell
February 2018
"""
for i in range(1,101):
    if i % 3 == 0 or i % 5 == 0 :
        msg = ""
        if i % 3 == 0:
            msg = "Fizz"
        if i % 5 == 0:
            msg += "Buzz"
        print (msg)
else:
print (i)
| 15.631579 | 33 | 0.40404 |
494935d54029e425636bb895b354d05dfd9bf75e
| 1,214 |
py
|
Python
|
official/cv/ADNet/src/utils/get_action_history_onehot.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/ADNet/src/utils/get_action_history_onehot.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/ADNet/src/utils/get_action_history_onehot.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# returns action history as one-hot form
# https://github.com/hellbell/ADNet/blob/3a7955587b5d395401ebc94a5ab067759340680d/utils/get_action_history_onehot.m
import mindspore.numpy as nps
def get_action_history_onehot(action_history, opts):
onehot = nps.zeros((opts['num_actions'] * len(action_history),))
for i in range(len(action_history)):
start_idx = i * opts['num_actions']
if action_history[i] >= 0 and action_history[i] < opts['num_actions']:
onehot[start_idx + action_history[i]] = 1.
return onehot
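# Added example (hypothetical opts): with opts['num_actions'] == 3 the result
# concatenates one one-hot block per history entry, e.g.
#
#   get_action_history_onehot([2, 0], {'num_actions': 3})
#   # -> [0., 0., 1., 1., 0., 0.]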
| 46.692308 | 115 | 0.700165 |
49522de2dc5e50522c06dd38764eecf95b0f09d8
| 136 |
py
|
Python
|
kernel/scripts/generate_interrupts_idt.py
|
losfair/FlatMk-v0
|
6e78666e8982e41688c24828093ea6b73b76ea11
|
[
"MIT"
] | 5 |
2020-01-11T22:38:34.000Z
|
2021-06-01T13:40:55.000Z
|
kernel/scripts/generate_interrupts_idt.py
|
losfair/FlatMk-v0
|
6e78666e8982e41688c24828093ea6b73b76ea11
|
[
"MIT"
] | null | null | null |
kernel/scripts/generate_interrupts_idt.py
|
losfair/FlatMk-v0
|
6e78666e8982e41688c24828093ea6b73b76ea11
|
[
"MIT"
] | null | null | null |
print("{")
for i in range(32, 256):
print("IDT[{}].set_handler_fn(core::mem::transmute(intr_{} as usize));".format(i, i))
print("}")
| 34 | 89 | 0.617647 |
499c820479e74acc1b022daad8b71cb4ac591d66
| 834 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/accounts/doctype/cash_flow_mapping/test_cash_flow_mapping.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/accounts/doctype/cash_flow_mapping/test_cash_flow_mapping.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/accounts/doctype/cash_flow_mapping/test_cash_flow_mapping.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCashFlowMapping(unittest.TestCase):
def setUp(self):
if frappe.db.exists("Cash Flow Mapping", "Test Mapping"):
            frappe.delete_doc('Cash Flow Mapping', 'Test Mapping')
def tearDown(self):
frappe.delete_doc('Cash Flow Mapping', 'Test Mapping')
def test_multiple_selections_not_allowed(self):
doc = frappe.new_doc('Cash Flow Mapping')
doc.mapping_name = 'Test Mapping'
doc.label = 'Test label'
doc.append(
'accounts',
{'account': 'Accounts Receivable - _TC'}
)
doc.is_working_capital = 1
doc.is_finance_cost = 1
self.assertRaises(frappe.ValidationError, doc.insert)
doc.is_finance_cost = 0
doc.insert()
| 25.272727 | 68 | 0.730216 |
b8ddb322cc829271072a3b60922437e02d72d250
| 6,043 |
py
|
Python
|
tspdb/tests/test_module.py
|
swipswaps/tspdb
|
9c085cef7164c114bb0952519b9715dcfa072b34
|
[
"Apache-2.0"
] | 43 |
2019-12-10T00:05:51.000Z
|
2022-03-31T21:21:20.000Z
|
tspdb/tests/test_module.py
|
swipswaps/tspdb
|
9c085cef7164c114bb0952519b9715dcfa072b34
|
[
"Apache-2.0"
] | 5 |
2021-05-09T01:12:31.000Z
|
2022-03-29T17:34:15.000Z
|
tspdb/tests/test_module.py
|
swipswaps/tspdb
|
9c085cef7164c114bb0952519b9715dcfa072b34
|
[
"Apache-2.0"
] | 14 |
2020-01-13T21:20:07.000Z
|
2022-03-31T02:11:26.000Z
|
import numpy as np
from tspdb.src.pindex.predict import get_prediction_range, get_prediction
from tspdb.src.pindex.pindex_managment import TSPI
from tspdb.src.pindex.pindex_utils import index_ts_mapper
import time
import timeit
import pandas as pd
from tspdb.src.hdf_util import read_data
from tspdb.src.tsUtils import randomlyHideValues
from scipy.stats import norm
from sklearn.metrics import r2_score
import tspdb
def r2_var(y,y_h,X):
average = np.mean(X**2) - np.mean(X)**2
return 1 - sum((y-y_h)**2)/sum((y-average)**2)
def create_table_data():
obs = np.arange(10**5).astype('float')
means = obs
var = np.zeros(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
print(obs_9)
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/ts_basic_5.csv',index_label = 'time')
timestamps = pd.date_range('2012-10-01 00:00:00', periods = 10**5, freq='5s')
df.index = timestamps
df.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS2.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7 ,'var': var })
df.index_label = 'time'
df.to_csv('testdata/tables/MixtureTS2.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/MixtureTS.csv', index_label = 'time')
    # real time series variance harmonics
data = read_data('testdata/MixtureTS_var.h5')
obs = data['obs'][:]
means = data['means'][:]
var = data['var'][:]
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7, 'var': var })
df.to_csv('testdata/tables/MixtureTS_var.csv', index_label = 'time')
def create_tables(interface):
dir_ = tspdb.__path__[0]+'/tests/'
for table in ['mixturets2','ts_basic_5','ts_basic_ts_5_5','mixturets_var']:
df = pd.read_csv(dir_+'testdata/tables/%s.csv'%table, engine = 'python')
if table == 'ts_basic_ts_5_5': df['time'] = df['time'].astype('datetime64[ns]')
interface.create_table(table, df, 'time', include_index = False)
def update_test(interface, init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_test_pindex'):
df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
interface.create_table('ts_basic_test', df, 'row_id', index_label='row_id')
time_series_table = ['ts_basic_test','ts', 'row_id']
T0 = 1000
gamma = 0.5
k = 2
k_var = 1
agg_interval = 1.
conn = interface.engine.raw_connection()
cur = conn.cursor()
cur.execute('''SELECT create_pindex('%s','%s','%s','%s', "T" => %s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s)'''%('ts_basic_test','row_id','ts', index_name, T, k,k_var, agg_interval, direct_var))
cur.close()
conn.commit()
conn.close()
for points in update_points:
df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')}, index = np.arange(init_points,points+init_points) )
interface.bulk_insert('ts_basic_test', df, index_label='row_id')
init_points += points
print ('successfully updated %s points' %points)
def ts_table_tests(init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_ts_pindex'):
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="AAmit32lids")
df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
timestamps = pd.date_range('2012-10-01 00:00:00', periods = init_points+1, freq='5s')
end = timestamps[-1]
df.index = timestamps[:-1]
interface.create_table('ts_basic_ts', df, 'timestamp', index_label='timestamp')
time_series_table = ['ts_basic_ts','ts', 'timestamp']
T0 = 1000
gamma = 0.5
k = 2
k_var = 1
TSPD = TSPI(_dir = 'C:/Program Files/PostgreSQL/10/data/', agg_interval = 5, T = T,T_var = T, rank = k, rank_var = k_var, col_to_row_ratio = 10, index_name = index_name,gamma = gamma, interface= interface ,time_series_table = time_series_table, direct_var = direct_var )
TSPD.create_index()
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="AAmit32lids")
for points in update_points:
df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')} )
timestamps = pd.date_range(end, periods = points+1, freq='5s')
end = timestamps[-1]
df.index = timestamps[:-1]
interface.bulk_insert('ts_basic_ts', df, index_label='timestamp')
init_points += points
print ('successfully updated %s points' %points)
def create_pindex_test(interface,table_name, T,T_var, k ,k_var, direct_var,value_column= ['ts'], index_name = None , agg_interval = 1., col_to_row_ratio= 10, time_column = 'row_id'):
T0 = 1000
gamma = 0.5
if index_name is None: index_name = 'pindex'
value_column = ','.join(value_column)
interface.engine.execute('''SELECT create_pindex('%s','%s','{%s}','%s', T => %s,t_var =>%s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s, col_to_row_ratio => %s)'''%(table_name,time_column, value_column, index_name, T, T_var, k,k_var, agg_interval, direct_var, col_to_row_ratio))
| 46.484615 | 295 | 0.685256 |
65dbb5423005f814b64568088c321fdf237d1174
| 1,717 |
py
|
Python
|
pluginsinterface/PlugAsyncio.py
|
lonelyion/TweetToBot-Docker
|
ea91a9d93bad2b757c2ba0923ae9f1cd0f5ac278
|
[
"MIT"
] | null | null | null |
pluginsinterface/PlugAsyncio.py
|
lonelyion/TweetToBot-Docker
|
ea91a9d93bad2b757c2ba0923ae9f1cd0f5ac278
|
[
"MIT"
] | 1 |
2020-09-22T02:30:40.000Z
|
2020-09-22T02:30:40.000Z
|
pluginsinterface/PlugAsyncio.py
|
lonelyion/TweetToBot-Docker
|
ea91a9d93bad2b757c2ba0923ae9f1cd0f5ac278
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from pluginsinterface.EventHandling import StandEven
from pluginsinterface.Plugmanagement import async_send_even
import asyncio
import traceback
import threading
from helper import getlogger
logger = getlogger(__name__)
runinfo = {
'run': False,
'threading': None,
'loop': asyncio.new_event_loop(),
'queue': None
}
async def __even_put(runinfo, even: StandEven):
return await runinfo['queue'].put(even)
def even_put(even: StandEven):
global runinfo
if runinfo['run']:
asyncio.run_coroutine_threadsafe(__even_put(runinfo, even),
runinfo['loop'])
return
async def __evendeal(queue):
while True:
even = await queue.get()
try:
await async_send_even(even)
except:
s = traceback.format_exc(limit=10)
logger.error(s)
logger.error('出现这条消息表明模块出现异常')
queue.task_done()
def __runAsyncioTask(runinfo):
    # set up the event loop for this thread
asyncio.set_event_loop(runinfo['loop'])
runinfo['queue'] = asyncio.Queue(128)
runinfo['loop'].run_forever()
def RunLoop():
"""
    Start the plugin event-processing loop
"""
global runinfo
runinfo['threading'] = threading.Thread(group=None,
target=__runAsyncioTask,
args=(runinfo, ),
name='PlugAsyncio_thread',
daemon=True)
runinfo['threading'].start()
logger.info('插件事件处理循环启动...')
asyncio.run_coroutine_threadsafe(__evendeal(runinfo['queue']),
runinfo['loop'])
runinfo['run'] = True
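# Added usage sketch (not part of the original module): start the background
# loop once, then events can be queued safely from any thread:
#
#   RunLoop()
#   even_put(even)  # hands the event to the loop via run_coroutine_threadsafe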
| 26.828125 | 70 | 0.570763 |
b841ae52d0b58fb7e799ef5523e90cda1f322001
| 786 |
py
|
Python
|
flogger/blog/forms.py
|
jcromerohdz/FlaskDev
|
29539259cba3a0e18c205fb439ee916fb12e5318
|
[
"MIT"
] | null | null | null |
flogger/blog/forms.py
|
jcromerohdz/FlaskDev
|
29539259cba3a0e18c205fb439ee916fb12e5318
|
[
"MIT"
] | null | null | null |
flogger/blog/forms.py
|
jcromerohdz/FlaskDev
|
29539259cba3a0e18c205fb439ee916fb12e5318
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import validators, StringField, TextAreaField, SelectField, FileField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from flask_wtf.file import FileAllowed
from blog.models import Category
def categories():
return Category.query
class PostForm(FlaskForm):
image = FileField('Image', validators=[
FileAllowed(['jpg', 'png'], 'We only accept JPG or PNG images')
])
title = StringField('Title', [
validators.InputRequired(),
validators.Length(max=80)
])
body = TextAreaField('Content', validators=[validators.InputRequired()])
category = QuerySelectField('Category', query_factory=categories,
allow_blank=True)
new_category = StringField('New Category')
| 35.727273 | 82 | 0.71883 |
b2122773da4db94c7c6d624876ec676393c38b82
| 442 |
py
|
Python
|
6_kombinationen/loopRandom/StarrySky.py
|
Coding-for-the-Arts/drawbot-samples
|
e37994f3497aca252312431100b53548b4573f15
|
[
"CC0-1.0"
] | null | null | null |
6_kombinationen/loopRandom/StarrySky.py
|
Coding-for-the-Arts/drawbot-samples
|
e37994f3497aca252312431100b53548b4573f15
|
[
"CC0-1.0"
] | null | null | null |
6_kombinationen/loopRandom/StarrySky.py
|
Coding-for-the-Arts/drawbot-samples
|
e37994f3497aca252312431100b53548b4573f15
|
[
"CC0-1.0"
] | null | null | null |
"""
Random Starry Sky
"""
newPage(300, 300)
fill(0)
rect(0, 0, 300, 300)
for i in range (200):
dia = random() * 3
fill(random())
oval(random()*300, random()*300, dia, dia)
"""
Task:
- Place a few randomly colored planets in the night sky
- What happens if you change line 13 to oval(dia, dia, dia, dia)?
- Why do the x and y positions need separate random values?
"""
| 18.416667 | 74 | 0.606335 |
b2a191d2f033116b1e97f05b8fc94f5305e78ddd
| 690 |
py
|
Python
|
blog/posts.py
|
Lanseuo/lucas-blog
|
fd6932952aac3a3055026551700bc86adef279f4
|
[
"MIT"
] | 1 |
2019-01-29T15:57:20.000Z
|
2019-01-29T15:57:20.000Z
|
blog/posts.py
|
Lanseuo/lucas-blog
|
fd6932952aac3a3055026551700bc86adef279f4
|
[
"MIT"
] | 1 |
2018-05-05T19:43:28.000Z
|
2018-05-05T19:43:28.000Z
|
blog/posts.py
|
Lanseuo/lucas-blog
|
fd6932952aac3a3055026551700bc86adef279f4
|
[
"MIT"
] | 1 |
2018-04-24T09:36:57.000Z
|
2018-04-24T09:36:57.000Z
|
import re
from . import top_level_path
from .post import Post
class Posts:
@staticmethod
def get_posts():
posts_path = top_level_path / "posts"
posts = []
for filename in posts_path.iterdir():
permalink = re.sub(
r"[-_\w/]*\d\d\d\d-\d\d-\d\d-([\w\d_-]*).md",
lambda x: x.group(1),
filename.name
)
post = Post(permalink)
if post.is_published():
posts.append(post)
posts.sort()
return posts
@staticmethod
def get_posts_as_json():
posts = Posts.get_posts()
return [post.to_json() for post in posts]
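# Added illustration (hypothetical filename): the regex in get_posts() turns
# "2018-03-01-my-first-post.md" into the permalink "my-first-post".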
| 20.294118 | 61 | 0.515942 |
b2aa3b81ce40646b2b5ced0272d45e40ff05e4ae
| 518 |
py
|
Python
|
Curso-Em-Video-Python/2Exercicios/009_Tabuada.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/009_Tabuada.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/009_Tabuada.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
x = int(input('Qual tabuada multiplicar: '))
print('-' * 15)
print('{} x {:2} = {}'.format(x, 1, (x*1)))
print('{} x {:2} = {}'.format(x, 2, (x*2)))
print('{} x {:2} = {}'.format(x, 3, (x*3)))
print('{} x {:2} = {}'.format(x, 4, (x*4)))
print('{} x {:2} = {}'.format(x, 5, (x*5)))
print('{} x {:2} = {}'.format(x, 6, (x*6)))
print('{} x {:2} = {}'.format(x, 7, (x*7)))
print('{} x {:2} = {}'.format(x, 8, (x*8)))
print('{} x {:2} = {}'.format(x, 9, (x*9)))
print('{} x {:2} = {}'.format(x, 10, (x*10)))
print('-' * 15)
| 39.846154 | 45 | 0.418919 |
ac4cdee27c4d9f2d7fb68bae29c170724f60d9c5
| 442 |
py
|
Python
|
exercises/pt/test_04_04.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/test_04_04.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/test_04_04.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
assert (
'spacy.blank("en")' in __solution__
), "Você inicializou um fluxo de processamento em Inglês vazio?"
assert "DocBin(docs=docs)" in __solution__, "Você criou o DocBin corretamente?"
assert "doc_bin.to_disk(" in __solution__, "Você utilizou o método to_disk?"
assert "train.spacy" in __solution__, "Você criou um arquivo com o nome correto?"
__msg__.good("Muito bem! Tudo certo por aqui.")
| 44.2 | 85 | 0.699095 |
ac866eb6d77f1235a95b61e1134dad10dcf0549f
| 2,745 |
py
|
Python
|
.venv/Lib/site-packages/dexpy/simplex_lattice.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | null | null | null |
.venv/Lib/site-packages/dexpy/simplex_lattice.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | null | null | null |
.venv/Lib/site-packages/dexpy/simplex_lattice.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | null | null | null |
"""Functions for building a simplex lattice design."""
import dexpy.design as design
import pandas as pd
import numpy as np
import itertools
from dexpy.model import ModelOrder
from dexpy.eval import count_n_choose_k as count_nk
def build_simplex_lattice(factor_count, model_order = ModelOrder.quadratic):
"""Builds a Simplex Lattice mixture design.
This design can be used for 2 to 30 components. A simplex-lattice mixture
design of degree m consists of m+1 points of equally spaced values between
0 and 1 for each component. If m = 2 then possible fractions are 0, 1/2, 1.
For m = 3 the possible values are 0, 1/3, 2/3, 1. The points include the
pure components and enough points between them to estimate an equation of
degree m. This design differs from a simplex-centroid design by having
enough points to estimate a full cubic model.
:param factor_count: The number of mixture components to build for.
:type factor_count: int
:param model_order: The order to build for. ModelOrder.linear will choose
                        vertices only (pure blends). ModelOrder.quadratic will
add binary blends, and ModelOrder.cubic will add blends
of three components.
:type model_order: dexpy.model.ModelOrder
"""
run_count = factor_count # pure blends
if model_order == ModelOrder.quadratic:
run_count += count_nk(factor_count, 2) # 1/2 1/2 blends
elif model_order == ModelOrder.cubic:
# 2/3 1/3 blends (and vice versa)
run_count += count_nk(factor_count, 2) * 2
if factor_count > 2:
run_count += count_nk(factor_count, 3) # 1/3 1/3 1/3 blends
factor_names = design.get_factor_names(factor_count)
factor_data = pd.DataFrame(0, columns=factor_names,
index=np.arange(0, run_count))
row = 0
# always do pure blends
for combo in itertools.combinations(factor_names, 1):
factor_data.loc[row, combo] = 1.0
row += 1
if model_order == ModelOrder.quadratic:
# 1/2 1/2 binary blends
for combo in itertools.combinations(factor_names, 2):
factor_data.loc[row, combo] = 0.5
row += 1
elif model_order == ModelOrder.cubic:
# 2/3 1/3 blends
for combo in itertools.combinations(factor_names, 2):
factor_data.loc[row, combo] = [2/3, 1/3]
row += 1
factor_data.loc[row, combo] = [1/3, 2/3]
row += 1
# 1/3 1/3 1/3 triple blend
if factor_count > 2:
for combo in itertools.combinations(factor_names, 3):
factor_data.loc[row, combo] = 1/3
row += 1
return factor_data
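# Added usage sketch (illustrative; the actual column names come from
# dexpy.design.get_factor_names):
#
#   df = build_simplex_lattice(3, ModelOrder.quadratic)
#   # 6 runs for 3 components: 3 pure blends + 3 binary 1/2-1/2 blends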
| 40.367647 | 79 | 0.647723 |
3bb164ccb18d1eee3a856188b51eefd4ddf62b4b
| 394 |
py
|
Python
|
quant/observers/t_example.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 7 |
2017-10-22T15:00:09.000Z
|
2019-09-19T11:45:43.000Z
|
quant/observers/t_example.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 1 |
2018-01-19T16:19:40.000Z
|
2018-01-19T16:19:40.000Z
|
quant/observers/t_example.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 5 |
2017-12-11T15:10:29.000Z
|
2018-12-21T17:40:58.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from quant.observers.basicbot import BasicBot
class T_Example(BasicBot):
"""
python -m quant.cli -mBitfinex_BCH_USD,Liqui_BCC_BTC,Bitfinex_BTC_USD -oT_Example -f=example -d
"""
def __init__(self):
super(T_Example, self).__init__()
def tick(self, depths):
logging.debug("t_test tick invoke")
| 20.736842 | 99 | 0.675127 |
3bc15f298e917a5de0717848eef82a3117a7b210
| 622 |
py
|
Python
|
HackerP/introduction/Write_a_function.py
|
JKChang2015/hackerrank
|
5e5bd6892d2e4754e73f73eecfa8f4b9f266c3bd
|
[
"MIT"
] | null | null | null |
HackerP/introduction/Write_a_function.py
|
JKChang2015/hackerrank
|
5e5bd6892d2e4754e73f73eecfa8f4b9f266c3bd
|
[
"MIT"
] | null | null | null |
HackerP/introduction/Write_a_function.py
|
JKChang2015/hackerrank
|
5e5bd6892d2e4754e73f73eecfa8f4b9f266c3bd
|
[
"MIT"
] | null | null | null |
# Write_a_function
# Created by JKChang
# 14/08/2018, 10:58
# Tag:
# Description: https://www.hackerrank.com/challenges/write-a-function/problem
# In the Gregorian calendar three criteria must be taken into account to identify leap years:
# The year can be evenly divided by 4, is a leap year, unless:
# The year can be evenly divided by 100, it is NOT a leap year, unless:
# The year is also evenly divisible by 400. Then it is a leap year.
def is_leap(year):
leap = False
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
leap = True
return leap
year = int(input())
print(is_leap(year))
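# Added sanity check (illustrative) of the Gregorian rules listed above:
#   is_leap(2004) -> True    (divisible by 4 but not by 100)
#   is_leap(1900) -> False   (divisible by 100 but not by 400)
#   is_leap(2000) -> True    (divisible by 400)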
| 27.043478 | 93 | 0.692926 |
0202e5b0c93e940c7eff2e8366514c0c53fc2871
| 8,668 |
py
|
Python
|
src/visitpy/examples/matexprs.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/visitpy/examples/matexprs.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/visitpy/examples/matexprs.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
###############################################################################
#
# Purpose: Use VisIt CLI to iterate over Curves in a material database and
# compute and plot some common difference curves and output the results
# to either a curve or image file format.
#
# Programmer: Mark C. Miller
# Date: Wed May 27 13:15:07 PDT 2009
#
#
# Modifications:
# Mark C. Miller, Mon Jun 15 17:52:15 PDT 2009
# Removed subclassing used to override behavior of Optparse in presence of
# unrecognized options. By using Argv(), VisIt-specific options never wind
# up getting passed to this script.
###############################################################################
import sys, re, os, glob
from optparse import *
#
# Convert '#FFCC13" strings to color tuple
#
def ColorTupleFromHexString(s):
if s[0] != '#':
return (0, 0, 0, 255)
return (int("0x%s"%s[1:3],16), \
int("0x%s"%s[3:5],16), \
int("0x%s"%s[5:7],16), \
255)
#
# Command-line options
#
def BuildCommandLineOptions():
parser = OptionParser()
parser.add_option("--image-width",
help="Set width of images [%default].",
type="int", dest="image_width", default="500", metavar="INT")
parser.add_option("--image-height",
help="Set height of images [%default].",
type="int", dest="image_height", default="500", metavar="INT")
parser.add_option("--data-min",
type="float", dest="data_min", metavar="FLOAT",
help="Mininum data value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--data-max",
type="float", dest="data_max", metavar="FLOAT",
help="Mininum data value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--log-data",
help="Display data (y) axis in log scaling.",
action="store_true", dest="log_data", default=False)
parser.add_option("--x-min",
type="float", dest="x_min", metavar="FLOAT",
help="Mininum positional (x) value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--x-max",
type="float", dest="x_max", metavar="FLOAT",
help="Maximum positional (x) value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--log-x",
help="Display positional (x) axis in log scaling.",
action="store_true", dest="log_x", default=False)
parser.add_option("--image-format",
help="Set output format for images (e.g. 'tiff', 'png', 'jpeg'). "
"If none specified, no images will be saved.",
dest="image_format", metavar="STRING")
parser.add_option("--curve-format",
help="Set output format for curves (e.g. 'ultra', 'curve'). "
"If none specified, no curve files will be saved.",
dest="curve_format", metavar="STRING")
parser.add_option("--color0",
help="Set color to be used for first curve plot.",
dest="color0", metavar="#RRGGBB")
parser.add_option("--color1",
help="Set color to be used for second curve plot.",
dest="color1", metavar="#RRGGBB")
parser.add_option("--line-width",
help="Set line width for curves.",
type="int", default=0, dest="line_width", metavar="INT")
parser.add_option("--point-density",
help="Plot symbols representing individual points in curves every Nth point. "
"A value of zero turns the display of points off [%default].",
type="int", default=0, dest="point_density", metavar="N")
parser.add_option("--point-size",
help="Size of symbols representing individual points in curve plots.",
type="int", default=5, dest="point_size", metavar="INT")
parser.add_option("--show-legend",
help="Display curve plot legends.",
action="store_true", dest="show_legend", default=False)
parser.add_option("--show-labels",
help="Display curve plot labels.",
action="store_true", dest="show_labels", default=False)
parser.set_usage("matexprs.py [options] dbname")
return parser
#
# Iterate through curves, finding all unique 'dirs' containing curves.
#
def GetVarMap(metadata):
dirMap = {}
for i in range(metadata.GetNumCurves()):
dirinfo = re.search("(.*)/([^/]*)", metadata.GetCurves(i).name)
if dirinfo != None:
dirname = dirinfo.group(1)
varname = dirinfo.group(2)
varMap = {}
if dirname in dirMap:
varMap = dirMap[dirname]
varMap[varname] = 1
dirMap[dirname] = varMap
return dirMap
#
# Begin main program
#
parser = BuildCommandLineOptions()
#
# This bit of logic allows users to get usage/help from
# the command 'python matexprs.py --help'. Without it
# using VisIt's cli the '--help' will get interpreted
# in internallauncher and never make it into this script.
#
if "-h" in sys.argv or \
"--help" in sys.argv or \
"-help" in sys.argv or \
"help" in sys.argv:
parser.print_help()
sys.exit(1)
#
# Argv() is a function defined by VisIt's cli that
# returns ONLY the options after the argument (filename)
# to the '-s' command-line option. In theory, that
# should be only the arguments that this script itself
# should interpret.
#
(clOpts, clArgs) = parser.parse_args(list(Argv()))
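#
# A typical invocation (illustrative; flags before -s belong to VisIt itself)
# would therefore look like:
#   visit -cli -nowin -s matexprs.py --image-format png mydb.ultra
#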
#
# Set the name of the database. It is the only 'positional'
# argument on the command line.
#
dbname = ""
if len(clArgs) > 0:
dbname = clArgs[0]
if not glob.glob(dbname):
if dbname == "":
sys.stderr.write("No database specified.\n")
else:
sys.stderr.write("Invalid database, \"%s\", specified.\n"%dbname)
parser.print_usage()
sys.exit(1)
#
# Open the database, get metadata, get info on curve 'dirs'
#
OpenDatabase(dbname)
metadata = GetMetaData(dbname)
dirMap = GetVarMap(metadata)
#
# Build up base save window attributes
#
swa = SaveWindowAttributes()
swa.family = 0
swa.width = clOpts.image_width
swa.height = clOpts.image_height
#
# Build up base curve attributes
#
ca = CurveAttributes()
ca.lineWidth = clOpts.line_width
if clOpts.color0 is not None:
ca.color = ColorTupleFromHexString(clOpts.color0)
ca.cycleColors = 0
ca.showLabels = clOpts.show_labels
#if clOpts.point_density > 0:
# ca.showPoints = 1
#ca.pointSize = clOpts.point_size
ca.showLegend = clOpts.show_legend
#ca.symbolDensity = clOpts.point_density
SetDefaultPlotOptions(ca)
#
# Iterate through all curve 'dirs', finding instances where
# all essential variables exist. Create expressions and plot 'em
#
for k in list(dirMap.keys()):
if not ("Ec" in dirMap[k] and \
"cEc" in dirMap[k] and \
"cEc_fit" in dirMap[k]):
print("Ignoring %s because not all required vars are present."%k)
#del dirMap[k]
continue
DefineCurveExpression("%s/c0"%k, "<%s/Ec>-<%s/cEc_fit>"%(k,k))
DefineCurveExpression("%s/c1"%k, "<%s/cEc>-<%s/cEc_fit>"%(k,k))
AddPlot("Curve","%s/c0"%k)
AddPlot("Curve","%s/c1"%k)
DrawPlots()
v = GetViewCurve()
    if clOpts.x_min is not None:
        v.domainCoords = (clOpts.x_min, v.domainCoords[1])
    if clOpts.x_max is not None:
        v.domainCoords = (v.domainCoords[0], clOpts.x_max)
    if clOpts.log_x:
        v.domainScale = v.LOG
    if clOpts.data_min is not None:
        v.rangeCoords = (clOpts.data_min, v.rangeCoords[1])
    if clOpts.data_max is not None:
        v.rangeCoords = (v.rangeCoords[0], clOpts.data_max)
    if clOpts.log_data:
        v.rangeScale = v.LOG
SetViewCurve(v)
    if clOpts.color1 is not None:
ca2 = CurveAttributes()
ca2.color = ColorTupleFromHexString(clOpts.color1)
ca2.cycleColors = 0
SetActivePlots((1,))
SetPlotOptions(ca2)
DrawPlots()
    if clOpts.curve_format is not None:
swa.format = getattr(swa,clOpts.curve_format.upper())
swa.fileName = k # .curve is added automatically
SetSaveWindowAttributes(swa)
SaveWindow()
    if clOpts.image_format is not None:
swa.format = getattr(swa,clOpts.image_format.upper())
#swa.fileName = "%s.%s"%(k,clOpts.image_format.lower())
swa.fileName = k
SetSaveWindowAttributes(swa)
SaveWindow()
DeleteAllPlots()
| 33.338462 | 86 | 0.623327 |
5a6bc0ea58a494099c0c7001e41e5d0c095d80c1
| 30,288 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import sys
from httmock import response # noqa
from httmock import urlmatch # noqa
from ansible_collections.community.general.tests.unit.compat import unittest
from gitlab import Gitlab
class FakeAnsibleModule(object):
def __init__(self):
self.check_mode = False
def fail_json(self, **args):
pass
def exit_json(self, **args):
pass
class GitlabModuleTestCase(unittest.TestCase):
def setUp(self):
unitest_python_version_check_requirement(self)
self.mock_module = FakeAnsibleModule()
self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4)
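    def test_fixture_usage_example(self):
        # Added usage sketch (not part of the upstream suite): module-level
        # responders such as resp_get_user below are activated with httmock's
        # HTTMock context manager and exercised through the python-gitlab
        # client built in setUp().
        from httmock import HTTMock
        with HTTMock(resp_get_user):
            user = self.gitlab_instance.users.get(1)
            self.assertEqual(user.username, "john_smith")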
# Python 2.7+ is needed for python-gitlab
GITLAB_MINIMUM_PYTHON_VERSION = (2, 7)
# Verify that the current Python version is at least GITLAB_MINIMUM_PYTHON_VERSION
def python_version_match_requirement():
return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION
# Skip the unittest test case if the Python version doesn't match the requirement
def unitest_python_version_check_requirement(unittest_testcase):
if not python_version_match_requirement():
unittest_testcase.skipTest("Python %s+ is needed for python-gitlab" % ",".join(map(str, GITLAB_MINIMUM_PYTHON_VERSION)))
'''
USER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get")
def resp_find_user(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith"}, {"id": 2,'
'"username": "jack_smith", "name": "Jack Smith", "state": "blocked",'
'"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",'
'"web_url": "http://localhost:3000/jack_smith"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post")
def resp_create_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",'
'"bio": null, "location": null, "public_email": "[email protected]", "skype": "",'
'"linkedin": "", "twitter": "", "website_url": "", "organization": ""}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith",'
'"state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith",'
'"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,'
'"public_email": "[email protected]", "skype": "", "linkedin": "",'
'"twitter": "", "website_url": "", "organization": "", "is_admin": false}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_missing_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_missing_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
'''
USER SSHKEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get")
def resp_get_user_keys(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa'
'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,'
'"title": "Another Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS'
'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2014-08-01T14:47:39.080Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post")
def resp_create_user_keys(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "title": "Private key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z'
'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy'
'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH'
'2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9'
'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",'
'"created_at": "2014-08-01T14:47:39.080Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
'''
GROUP API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get")
def resp_find_group(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
'"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,'
'"full_name": "BarFoo Group", "full_path": "bar-foo",'
'"file_template_project_id": 1, "parent_id": null, "projects": []}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_missing_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_subgroup(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,'
'"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",'
'"file_template_project_id": 1, "parent_id": 1}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
GROUP MEMBER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get")
def resp_get_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get")
def resp_find_member(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{'
'"id": 2, "username": "john_doe", "name": "John Doe","state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 30}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post")
def resp_add_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
'"state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 30}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put")
def resp_update_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
'"state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 10}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
'''
DEPLOY KEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get")
def resp_find_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T11:12:29Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get")
def resp_get_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post")
def resp_create_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete")
def resp_delete_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
PROJECT API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get")
def resp_find_project(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get")
def resp_get_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get")
def resp_get_project_by_name(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects", method="get")
def resp_find_group_project(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get")
def resp_get_group_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post")
def resp_create_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete")
def resp_delete_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
HOOK API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get")
def resp_find_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get")
def resp_get_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post")
def resp_create_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete")
def resp_delete_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
RUNNER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/all", method="get")
def resp_find_runners_all(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"},{"active": true,'
'"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
'"is_shared": false,"name": null,"online": false,"status": "offline"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="get")
def resp_find_runners_list(url, request):
headers = {'content-type': 'application/json',
"X-Page": 1,
"X-Next-Page": 2,
"X-Per-Page": 1,
"X-Total-Pages": 1,
"X-Total": 2}
content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"},{"active": true,'
'"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
'"is_shared": false,"name": null,"online": false,"status": "offline"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="get")
def resp_get_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="post")
def resp_create_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="delete")
def resp_delete_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
| 52.130809 | 137 | 0.634773 |
ce568c1609aad2260df4dae064f07fc3a4240da9
| 9,571 |
py
|
Python
|
official/cv/lenet/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/lenet/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/lenet/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## train lenet example ########################
train lenet and get network model files(.ckpt) :
python train.py --data_path /YourDataPath
"""
import os
import argparse
import glob
import sys
import time
import numpy as np
import moxing as mox
from src.model_utils.moxing_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
from src.dataset import create_dataset
from src.lenet import LeNet5
import mindspore.nn as nn
from mindspore.context import ParallelMode
from mindspore.communication.management import init
from mindspore import context
from mindspore import export
from mindspore import Tensor
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.metrics import Accuracy
from mindspore.common import set_seed
root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__))) # src root dir
cwd = os.getcwd()
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../'))
parser = argparse.ArgumentParser(description='mindspore lenet training')
parser.add_argument("--enable_modelarts", default='True', type=str, help="")
parser.add_argument("--data_url", type=str, default="", help="dataset path for obs")
parser.add_argument("--train_url", type=str, default="", help="train path for obs")
parser.add_argument('--data_path', type=str, default='/cache/data', help='Dataset url for local')
parser.add_argument("--output_path", type=str, default="/cache/train", help="dir of training output for local")
# parser.add_argument("--checkpoint_path", type=str, default="./checkpoint/", help="setting dir of checkpoint output")
parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU', 'CPU'],
help='device where the code will be implemented. (Default: Ascend)')
parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--epoch_size', type=int, default=1, help='epoch size')
parser.add_argument("--learning_rate", type=float, default=0.002, help="")
parser.add_argument("--sink_size", type=int, default=-1, help="")
parser.add_argument("--momentum", type=float, default=0.9, help="")
parser.add_argument("--save_checkpoint_steps", type=int, default=125, help="")
parser.add_argument('--lr', type=float, default=0.01, help='base learning rate')
parser.add_argument("--image_height", type=int, default=32, help="")
parser.add_argument("--image_width", type=int, default=32, help="")
parser.add_argument("--buffer_size", type=int, default=1000, help="")
parser.add_argument("--keep_checkpoint_max", type=int, default=10, help="")
parser.add_argument('--file_format', type=str, default='AIR', choices=['AIR', 'ONNX', 'MINDIR'],
                    help='Format of output model(Default: AIR)')
parser.add_argument('--file_name', type=str, default='lenet', help='output file name')
parser.add_argument("--ckpt_path", type=str, default="/cache/train", help="")
parser.add_argument("--ckpt_file", type=str, default="/cache/train/checkpoint_lenet-10_1875.ckpt", help="")
cfg = parser.parse_args()
set_seed(1)
_global_sync_count = 0
def frozen_to_air(net, args):
param_dict = load_checkpoint(args.get("ckpt_file"))
load_param_into_net(net, param_dict)
input_arr = Tensor(np.zeros([args.get("batch_size"),
1, args.get("image_height"), args.get("image_width")], np.float32))
export(net, input_arr, file_name=args.get("file_name"), file_format=args.get("file_format"))
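# Illustrative call (mirrors the frozen_to_air_args dict assembled in
# train_lenet_model below; file names here are placeholders):
#   frozen_to_air(LeNet5(10), {"ckpt_file": "checkpoint_lenet-10_1875.ckpt",
#                              "batch_size": 32, "image_height": 32,
#                              "image_width": 32, "file_name": "lenet",
#                              "file_format": "AIR"})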
def sync_data(from_path, to_path):
"""
Download data from remote obs to local directory if the first url is remote url and the second one is local path
Upload data from local directory to remote obs in contrast.
"""
global _global_sync_count
sync_lock = "/tmp/copy_sync.lock" + str(_global_sync_count)
_global_sync_count += 1
# Each server contains 8 devices as most.
if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
print("from path: ", from_path)
print("to path: ", to_path)
mox.file.copy_parallel(from_path, to_path)
print("===finish data synchronization===")
try:
os.mknod(sync_lock)
except IOError:
print("Failed to create directory")
print("===save flag===")
while True:
if os.path.exists(sync_lock):
break
time.sleep(1)
print("Finish sync data from {} to {}.".format(from_path, to_path))
def wrapped_func(config_name):
"""
Download data from remote obs to local directory if the first url is remote url and the second one is local path
Upload data from local directory to remote obs in contrast.
"""
if config_name.enable_modelarts:
if config_name.data_url:
if not os.path.isdir(config_name.data_path):
os.makedirs(config_name.data_path)
sync_data(config_name.data_url, config_name.data_path)
print("Dataset downloaded: ", os.listdir(cfg.data_path))
if config_name.train_url:
if not os.path.isdir(config_name.output_path):
os.makedirs(config_name.output_path)
sync_data(config_name.train_url, config_name.output_path)
print("Workspace downloaded: ", os.listdir(config_name.output_path))
def train_lenet_model():
"""
main function to train model in modelArts
"""
print(cfg)
print('device id:', get_device_id())
print('device num:', get_device_num())
print('rank id:', get_rank_id())
print('job id:', get_job_id())
device_target = cfg.device_target
context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
context.set_context(save_graphs=False)
if device_target == "GPU":
context.set_context(enable_graph_kernel=True)
context.set_context(graph_kernel_flags="--enable_cluster_ops=MatMul")
device_num = get_device_num()
if device_num > 1:
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
if device_target == "Ascend":
context.set_context(device_id=get_device_id())
init()
elif device_target == "GPU":
init()
else:
context.set_context(device_id=get_device_id())
# create dataset
ds_train = create_dataset(os.path.join(cfg.data_path, "train"), cfg.batch_size)
if ds_train.get_dataset_size() == 0:
raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
print("dataset size is : " + str(ds_train.get_dataset_size()))
network = LeNet5(cfg.num_classes)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
keep_checkpoint_max=cfg.keep_checkpoint_max)
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", directory=cfg.ckpt_path, config=config_ck)
if cfg.device_target != "Ascend":
if cfg.device_target == "GPU":
context.set_context(enable_graph_kernel=True)
model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
else:
model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2")
print("============== Starting Training ==============")
model.train(cfg.epoch_size, ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()])
print("============== Training finish ==============")
ckpt_list = glob.glob(str(cfg.output_path) + "/*lenet*.ckpt")
print(ckpt_list)
    if not ckpt_list:
        print("ckpt file not generated")
        return
ckpt_list.sort(key=os.path.getmtime)
ckpt_model = ckpt_list[-1]
print(ckpt_model)
frozen_to_air_args = {"ckpt_file": ckpt_model,
"batch_size": cfg.batch_size,
"image_height": cfg.image_height,
"image_width": cfg.image_width,
"file_name": "/cache/train/lenet",
"file_format": "AIR"}
frozen_to_air(network, frozen_to_air_args)
mox.file.copy_parallel(cfg.output_path, cfg.train_url)
if __name__ == "__main__":
wrapped_func(cfg)
train_lenet_model()
| 43.504545 | 120 | 0.680911 |
ced8d9159b0ab3959cb0dcc3a667cfcf949c66af
| 1,718 |
py
|
Python
|
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/structural/flyweight_with_metaclass.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/structural/flyweight_with_metaclass.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/structural/flyweight_with_metaclass.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
import weakref
class FlyweightMeta(type):
def __new__(mcs, name, parents, dct):
"""
Set up object pool
:param name: class name
:param parents: class parents
:param dct: dict: includes class attributes, class methods,
static methods, etc
:return: new class
"""
dct["pool"] = weakref.WeakValueDictionary()
return super().__new__(mcs, name, parents, dct)
@staticmethod
def _serialize_params(cls, *args, **kwargs):
"""
Serialize input parameters to a key.
Simple implementation is just to serialize it as a string
"""
args_list = list(map(str, args))
args_list.extend([str(kwargs), cls.__name__])
key = "".join(args_list)
return key
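    # Example (added note): Card2("10", "h", a=1) serializes to the key
    # "10h{'a': 1}Card2", so equal constructor arguments map to the same pool
    # entry, while Card2("10", "h", a=2) produces a different key/instance.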
def __call__(cls, *args, **kwargs):
key = FlyweightMeta._serialize_params(cls, *args, **kwargs)
pool = getattr(cls, "pool", {})
instance = pool.get(key)
if instance is None:
instance = super().__call__(*args, **kwargs)
pool[key] = instance
return instance
class Card2(metaclass=FlyweightMeta):
def __init__(self, *args, **kwargs):
# print('Init {}: {}'.format(self.__class__, (args, kwargs)))
pass
if __name__ == "__main__":
instances_pool = getattr(Card2, "pool")
cm1 = Card2("10", "h", a=1)
cm2 = Card2("10", "h", a=1)
cm3 = Card2("10", "h", a=2)
assert (cm1 == cm2) and (cm1 != cm3)
assert (cm1 is cm2) and (cm1 is not cm3)
assert len(instances_pool) == 2
del cm1
assert len(instances_pool) == 2
del cm2
assert len(instances_pool) == 1
del cm3
assert len(instances_pool) == 0
| 26.84375 | 69 | 0.582654 |
0ba36f10893dd40cea03ed5ad9665d560d7cf57e
| 938 |
py
|
Python
|
python/PDF/pdfcat.py
|
eucalypto/potato
|
9df3eada95956daf344eb49900d2ed79dc418817
|
[
"Unlicense"
] | null | null | null |
python/PDF/pdfcat.py
|
eucalypto/potato
|
9df3eada95956daf344eb49900d2ed79dc418817
|
[
"Unlicense"
] | null | null | null |
python/PDF/pdfcat.py
|
eucalypto/potato
|
9df3eada95956daf344eb49900d2ed79dc418817
|
[
"Unlicense"
] | null | null | null |
#! /usr/bin/env python
from PyPDF2 import PdfFileReader, PdfFileWriter
import sys
def merge_pdfs(paths, output):
"""take pdf files defined in array files and concatenate them
into one PDF with output name output.
"""
pdf_writer = PdfFileWriter()
for path in paths:
pdf_reader = PdfFileReader(path)
for pagenum in range(pdf_reader.getNumPages()):
pdf_writer.addPage(pdf_reader.getPage(pagenum))
with open(output, "wb") as out:
pdf_writer.write(out)
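# Example call (hypothetical file names):
#   merge_pdfs(["chapter1.pdf", "chapter2.pdf"], "book.pdf")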
if __name__ == '__main__':
"""
Take files from command line input parameters. The last one is the
output destination. All others are input files:
pdfcat.py input1.pdf input2.pdf input3.pdf output.pdf
"""
    inputfiles = sys.argv[1:-1]
outputfile = sys.argv[-1]
# print("infputfiles: ", inputfiles)
# print("outputfile: ", outputfile)
merge_pdfs(inputfiles, outputfile)
| 26.8 | 70 | 0.675906 |
6893255b84fb832056a9719346d50581759b4a63
| 500 |
py
|
Python
|
source/pkgsrc/devel/py-rlp/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/devel/py-rlp/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/devel/py-rlp/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-setup.py,v 1.1 2021/04/11 16:59:36 wiz Exp $
setuptools-markdown is deprecated for functionality included in setuptools.
--- setup.py.orig 2020-11-23 15:09:47.000000000 +0000
+++ setup.py
@@ -52,7 +52,7 @@ setup(
url='https://github.com/ethereum/pyrlp',
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
- setup_requires=['setuptools-markdown'],
+ setup_requires=[],
install_requires=[
"eth-utils>=1.0.2,<2",
],
| 31.25 | 75 | 0.658 |
cc09eddcc69d15e1922c5c267f7af3451c63b8d4
| 839 |
py
|
Python
|
01.DataStructure/Stack&Queue/B1966-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1 |
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
01.DataStructure/Stack&Queue/B1966-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2 |
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
01.DataStructure/Stack&Queue/B9012-M.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
num = int(input())
for i in range(num):
check = [int(x) for x in input().split()]
arr = [int(x) for x in input().split()]
checkingP = check[1]
popNum = 0
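    # Example (BOJ 1966 sample): priorities [1, 1, 9, 1, 1, 1] with target
    # index 0 -> the 9 is printed first, and the tracked document is the
    # 5th one out of the printer.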
while(True):
existPop = False
        # for loop comparing the first document's priority with every one behind it
for j in range(len(arr)-1):
if(arr[0] >= arr[j+1]):
continue
else:
arr.append(arr[0])
arr.pop(0)
checkingP -= 1
if(checkingP < 0):
checkingP = len(arr)-1
existPop = True
break
if(not existPop):
arr.pop(0)
popNum += 1
if(checkingP == 0):
print(popNum)
break
else:
checkingP -= 1
| 23.971429 | 45 | 0.388558 |
f0ba8955ee9f2620bce3f490df0dc50acbc26f0e
| 2,219 |
py
|
Python
|
foundation/www/service_providers.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 59 |
2017-03-15T08:14:52.000Z
|
2021-11-17T14:21:58.000Z
|
foundation/www/service_providers.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 147 |
2017-01-25T10:44:47.000Z
|
2020-11-05T04:24:22.000Z
|
foundation/www/service_providers.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 134 |
2017-03-14T14:04:21.000Z
|
2022-03-18T08:19:47.000Z
|
import frappe
no_cache = 1
def get_context(context):
context.form_dict = frappe.form_dict
context.title = 'Service Providers'
context.gold_members = []
if frappe.form_dict.country:
context.parents = [dict(label='All Service Providers',
route='service-providers', title='All Service Providers')]
filters = dict()
filters['show_in_website'] = 1
if frappe.form_dict.country:
filters['country'] = frappe.form_dict.country
gold_members = [d.name for d in frappe.get_all('Member', dict(membership_type='Gold'))]
if gold_members:
filters['member'] = ('in', gold_members)
context.gold_members = frappe.get_all('Service Provider',
'title, introduction, `image`, route, website_url, country', filters)
if context.gold_members:
context.has_gold_member = 1
else:
context.gold_members.append(dict(
title='Your Company',
introduction='Become a Gold Member today and get your company featured here',
image='/assets/foundation/img/gold.png',
route='/members',
placeholder=True
))
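	# The Silver and Individual sections below repeat the same pattern:
	# restrict the Service Provider query to members of that tier, and fall
	# back to a placeholder card when the tier has no entries yet.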
context.silver_members = []
silver_members = [d.name for d in frappe.get_all('Member', dict(membership_type='Silver'))]
if silver_members:
filters['member'] = ('in', silver_members)
context.silver_members = frappe.get_all('Service Provider',
'title, introduction, `image`, route, website_url, country', filters)
if context.silver_members:
context.has_silver_member = 1
else:
context.silver_members.append(dict(
title='Your Company',
			introduction='Become a Silver Member today and get your company featured here',
image='/assets/foundation/img/silver.png',
route='/members',
placeholder=True
))
context.individual_members = []
individual_members = [d.name for d in frappe.get_all('Member',
dict(membership_type='Individual'))]
if individual_members:
filters['member'] = ('in', individual_members)
context.individual_members = frappe.get_all('Service Provider',
'title, introduction, `image`, route, website_url, country', filters)
if context.individual_members:
context.has_individual_member = 1
else:
context.individual_members.append(dict(
title='Your Company',
			introduction='Become an Individual Member to be listed here',
route='/members'
))
| 32.15942 | 92 | 0.735016 |
9bd6bc27393e5b225a1876a4cde52b9fb2dda43d
| 7,569 |
py
|
Python
|
create_auto_mount.py
|
Hofei90/create_automount
|
6975b1e7b477ac650fd33592bf771e8484357f92
|
[
"MIT"
] | 3 |
2020-07-19T12:05:13.000Z
|
2021-04-19T12:38:28.000Z
|
create_auto_mount.py
|
Hofei90/create_automount
|
6975b1e7b477ac650fd33592bf771e8484357f92
|
[
"MIT"
] | 2 |
2019-08-20T19:28:26.000Z
|
2021-09-30T10:03:51.000Z
|
create_auto_mount.py
|
Hofei90/create_automount
|
6975b1e7b477ac650fd33592bf771e8484357f92
|
[
"MIT"
] | 1 |
2019-05-24T05:13:30.000Z
|
2019-05-24T05:13:30.000Z
|
#!/usr/bin/python3
import getpass
import os
import platform
import shlex
import shutil
import subprocess
import sys
import toml
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
SPEICHERORT_ZUGANGSDATEN = "/etc/smbcredentials"
PFAD_PING_SERVER_SERVICE = "/etc/systemd/system/ping_server.service"
PFAD_PING_SERVER = "/usr/local/sbin/ping_server.py"
PFAD_SYSTEMD_SERVICE_UNIT = "/etc/systemd/system"
def pfadeingabe():
ordner = input("Name für neuen Mountordner: ")
pfad = input("Verzeichnis für den Mountordner, wenn leer: -> /media ")
if pfad == "":
pfad = "/media"
return os.path.join(pfad, ordner)
def zugangsdaten_eingeben():
print("Zugangsdaten für das einzuhängende Gerät - Zugang muss am anderen Gerät freigeben/erstellt werden.")
username = input("Benutzername: ")
pw = getpass.getpass("Passwort: ")
return {"username": username, "pw": pw}
def adresse_eingeben():
return input("Externe Adresse eingeben: ")
def optionen_eingeben():
uid = "uid={}".format(input("uid: Bsp. '1000': "))
gid = "gid={}".format(input("gid: Bsp. '1000': "))
eingabe_liste = [uid, gid]
eingabe = True
while eingabe:
eingabe = input("Weitere Optionen eingeben - Bsp: vers=1.0, weiter mit leerer Eingabe: ")
if eingabe:
eingabe_liste.append(eingabe)
optionen = ",".join(eingabe_liste)
return optionen
def zugangsdaten_erstellen(zugangsdaten):
with open(SPEICHERORT_ZUGANGSDATEN, "w") as file:
file.write("username={username}\npassword={pw}".format(username=zugangsdaten["username"],
pw=zugangsdaten["pw"]))
shutil.chown(SPEICHERORT_ZUGANGSDATEN, "root", "root")
os.chmod(SPEICHERORT_ZUGANGSDATEN, 0o600)
print("Zugangsdaten erstellt - Pfad: {}".format(SPEICHERORT_ZUGANGSDATEN))
def ordner_erstellen(pfad):
if os.path.exists(pfad):
print("Pfad existiert schon!")
else:
os.mkdir(pfad)
if os.path.exists(pfad):
print("Ordner {} erstellt".format(pfad))
else:
raise BaseException("Ordner konnte nicht erstellt werden")
def inhalt_systemd_service_mount_unit_generieren(mount_pfad, adresse, optionen, type_="cifs"):
mount_unit = """[Unit]
Description=Mount von {mount_pfad}
Requires=ping_server.service
After=ping_server.service
Conflicts=shutdown.target
ConditionPathExists={mount_pfad}
[Mount]
What={adresse}
Where={mount_pfad}
Options=credentials={zugangsdaten},{optionen}
Type={type}
[Install]
WantedBy=multi-user.target
""".format(mount_pfad=mount_pfad, adresse=adresse, zugangsdaten=SPEICHERORT_ZUGANGSDATEN, optionen=optionen, type=type_)
return mount_unit
def name_mount_unit_ermitteln(mount_pfad):
cmd = shlex.split("systemd-escape --suffix=mount --path {}".format(mount_pfad))
instanz = subprocess.Popen(cmd, stdout=subprocess.PIPE)
filename = instanz.stdout.read().decode("utf-8").strip()
return filename
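# Example (illustrative): for mount_pfad "/media/nas" the command
#   systemd-escape --suffix=mount --path /media/nas
# prints "media-nas.mount", the unit file name systemd requires for mounts.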
def mount_unit_erstellen(inhalt, mount_pfad):
filename = name_mount_unit_ermitteln(mount_pfad)
pfad = os.path.join(PFAD_SYSTEMD_SERVICE_UNIT, filename)
with open(pfad, "w") as file:
file.write(inhalt)
shutil.chown(pfad, "root", "root")
os.chmod(pfad, 0o644)
print("Datei {} erstellt".format(pfad))
return filename
def ping_server_kopieren():
src = os.path.join(SKRIPTPFAD, "ping_server.py")
shutil.copy(src, PFAD_PING_SERVER)
shutil.chown(PFAD_PING_SERVER, "root", "root")
os.chmod(PFAD_PING_SERVER, 0o755)
print("Datei {} erstellt".format(PFAD_PING_SERVER))
def ip_pingziel_eingeben():
ip_pingziel = input("IP Pingziel zur Überprüfung der Netwerkverfügbarkeit eingeben: ")
return ip_pingziel
def ping_server_service_erstellen(ip_pingziel):
inhalt = """[Unit]
Description=ping_server.service: Waiting for Network or Server to be up
After=network.target
[Service]
Type=oneshot
TimeoutStartSec=95
ExecStart=/usr/local/sbin/ping_server.py {}
[Install]
WantedBy=multi-user.target""".format(ip_pingziel)
with open(PFAD_PING_SERVER_SERVICE, "w") as file:
file.write(inhalt)
shutil.chown(PFAD_PING_SERVER_SERVICE, "root", "root")
os.chmod(PFAD_PING_SERVER_SERVICE, 0o644)
print("Datei {} erstellt".format(PFAD_PING_SERVER_SERVICE))
def mount_unit_aktivieren(mount_unit):
cmd = shlex.split("systemctl start {}".format(mount_unit))
start = subprocess.Popen(cmd, stdout=subprocess.PIPE)
print(start.stdout.read())
befehl = input("Unit aktivieren? (j|n)")
if befehl == "j":
cmd = shlex.split("systemctl enable {}".format(mount_unit))
start = subprocess.Popen(cmd, stdout=subprocess.PIPE)
print(start.stdout.read())
else:
print("Hinweis, wird eine Service Unit verändert muss anschließend 'systemctl daemon-reload' ausgeführt werden")
def eingabe_sichern(pfad_mountpunkt, zugangsdaten, adresse, optionen, ip_pingziel):
ausgabe = {"pfad_mountpunkt": pfad_mountpunkt,
"zugangsdaten": zugangsdaten,
"adresse": adresse,
"optionen": optionen,
"ip_pingziel": ip_pingziel}
ausgabe_toml = toml.dumps(ausgabe)
name = input("Configname eingeben: ")
filename = "{}_cfg.toml".format(name)
pfad = os.path.join(SKRIPTPFAD, filename)
with open(pfad, "w") as file:
file.write(ausgabe_toml)
shutil.chown(pfad, "root", "root")
os.chmod(pfad, 0o600)
print("Datei {} erstellt".format(pfad))
def lade_daten(cfg):
if "cfg.toml" in cfg:
datei = os.path.join(SKRIPTPFAD, cfg)
with open(datei) as file:
config = toml.loads(file.read())
return config
else:
raise ValueError("Dateiformat falsch")
def willkommen():
text = """Dieses Skript soll die Einrichtung zum Einhängen von Netzwerkfreigaben beschleunigen.
Es kann nicht das notwendige Wissen zu den einzelnen Punkten während der Erstellung ersetzen.
Verwendung und Benutzung auf eigene Gefahr!"""
print(text)
def main():
willkommen()
if platform.system() == "Linux":
if len(sys.argv) > 1:
daten = lade_daten(sys.argv[1])
pfad_mountpunkt = daten["pfad_mountpunkt"]
zugangsdaten = daten["zugangsdaten"]
adresse = daten["adresse"]
optionen = daten["optionen"]
ip_pingziel = daten["ip_pingziel"]
else:
pfad_mountpunkt = pfadeingabe()
zugangsdaten = zugangsdaten_eingeben()
adresse = adresse_eingeben()
optionen = optionen_eingeben()
ip_pingziel = ip_pingziel_eingeben()
print("Die Konfigruationsdatei enthält wenn sie gespeichert wird alle Eingaben einschließlich Passwörter "
"in Klartext!")
eingabe = input("Eingaben sichern? (j|n)")
if eingabe == "j":
eingabe_sichern(pfad_mountpunkt, zugangsdaten, adresse, optionen, ip_pingziel)
ordner_erstellen(pfad_mountpunkt)
zugangsdaten_erstellen(zugangsdaten)
mount_unit = mount_unit_erstellen(inhalt_systemd_service_mount_unit_generieren(pfad_mountpunkt, adresse,
optionen),
pfad_mountpunkt)
ping_server_kopieren()
ping_server_service_erstellen(ip_pingziel)
mount_unit_aktivieren(mount_unit)
else:
print("Falsches Betriebssystem")
if __name__ == "__main__":
main()
| 33.64 | 120 | 0.672744 |
accc0c969f3ac52318b9f4e6d020139f04634baf
| 5,771 |
py
|
Python
|
pypi_installer/sbtab/sbtab2html.py
|
derHahn/SBtab
|
da998eacc49f7f29d8168be366eb0c211c3adb5f
|
[
"MIT"
] | 4 |
2015-02-20T09:20:10.000Z
|
2018-02-05T10:54:10.000Z
|
pypi_installer/sbtab/sbtab2html.py
|
tlubitz/SBtab
|
da998eacc49f7f29d8168be366eb0c211c3adb5f
|
[
"MIT"
] | 80 |
2018-04-13T13:46:24.000Z
|
2022-02-16T16:01:46.000Z
|
pypi_installer/sbtab/sbtab2html.py
|
tlubitz/SBtab
|
da998eacc49f7f29d8168be366eb0c211c3adb5f
|
[
"MIT"
] | 6 |
2018-06-06T19:55:32.000Z
|
2021-09-30T15:16:40.000Z
|
"""
SBtab2HTML
==========
Python script that converts SBtab file/s to HTML.
"""
#!/usr/bin/env python
import re
import sys
from . import misc
urns = ["obo.chebi","kegg.compound","kegg.reaction","obo.go","obo.sgd","biomodels.sbo","ec-code","kegg.orthology","uniprot"]
def csv2html(sbtab_file,file_name,definition_file=None,sbtype=None):
'''
Generates html view out of csv file.
Parameters
----------
sbtab_file : str
SBtab file as string representation.
file_name : str
SBtab file name.
definition_file : str
SBtab definition file as string representation.
sbtype : str
SBtab attribute TableType.
'''
#extract information from the definition file
if not definition_file:
try:
def_file_open = open('definitions.tsv','r')
def_file = def_file_open.read()
def_delimiter = '\t'
col2description = findDescriptions(def_file,def_delimiter,sbtype)
def_file_open.close()
except:
print('You have not provided the definition file and it cannot be found in this directory. Please provide it.')
sys.exit(1)
else:
def_delimiter = '\t'
col2description = findDescriptions(definition_file,def_delimiter,sbtype)
#now start building the HTML file from the SBtab file
delimiter = misc.getDelimiter(sbtab_file) #checkseparator(sbtab_file)
ugly_sbtab = sbtab_file.split('\n')
nice_sbtab = '<html>\n<body>\n'
nice_sbtab += '<p>\n<h2><b>'+file_name+'</b></h2>\n</p>\n'
nice_sbtab += '<a style="background-color:#00BFFF">'+ugly_sbtab[0]+'</a>\n<br>\n'
nice_sbtab += '<table>\n'
ident_url = False
ident_cols = []
for row in ugly_sbtab[1:]:
if row.startswith('!'):
nice_sbtab += '<tr bgcolor="#87CEFA">\n'
splitrow = row.split(delimiter)
for i,element in enumerate(splitrow):
if 'Identifiers:' in element:
try:
searcher = re.search('Identifiers:(.*)',element)
ident_url = 'http://identifiers.org/'+searcher.group(1)+'/'
ident_cols.append(i)
except: pass
else: nice_sbtab += '<tr>\n'
for i,thing in enumerate(row.split(delimiter)):
try: title = col2description[thing[1:]]
except: title = ''
if not ident_url:
new_row = '<td title="'+str(title)+'">'+str(thing)+'</td>'
nice_sbtab += new_row+'\n'
else:
if i in ident_cols and not thing.startswith('!'):
ref_string = ident_url+thing
new_row = '<td><a href="'+ref_string+'" target="_blank">'+str(thing)+'</a></td>'
else:
new_row = '<td title="'+title+'">'+str(thing)+'</td>'
nice_sbtab += new_row+'\n'
nice_sbtab += '</tr>\n'
nice_sbtab += '</table>\n'
nice_sbtab += '</body>\n</html>\n'
    html_file = open(file_name[:-4]+'.html','w')
    html_file.write(nice_sbtab)
    html_file.close()
return nice_sbtab
def findDescriptions(def_file,def_delimiter,sbtype):
'''
Preprocesses the definition file in order to enable some nice mouseover effects for the known column names.
Parameters
----------
def_file : str
SBtab definition file as string representation.
def_delimiter : str
Delimiter used for the columns; usually comma, tab, or semicolon.
sbtype : str
SBtab attribute TableType.
'''
col2description = {}
col_dsc = False
columnrow = def_file.split('\n')[1]
columnrowspl = columnrow.split(def_delimiter)
for row in def_file.split('\n'):
splitrow = row.split(def_delimiter)
if len(splitrow) != len(columnrowspl): continue
if row.startswith('!!'): continue
if row.startswith('!'):
for i,elem in enumerate(splitrow):
if elem == "!Description":
col_dsc = i
        if not splitrow[2].capitalize() == sbtype.capitalize(): continue
if col_dsc and not splitrow[2].startswith('!'): col2description[splitrow[0]] = splitrow[col_dsc]
return col2description
def checkseparator(sbtabfile):
'''
Finds the separator of the SBtab file.
Parameters
----------
sbtabfile : str
SBtab file as string representation.
'''
sep = False
for row in sbtabfile.split('\n'):
if row.startswith('!!'): continue
if row.startswith('!'):
s = re.search('(.)(!)',row[1:])
sep = s.group(1)
return sep
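# Illustrative note (not part of the original tool): checkseparator infers the
# delimiter from the first column-header row. For an assumed header row like
# "!Compound\t!Name", row[1:] is "Compound\t!Name", the regex '(.)(!)' captures
# the tab that precedes the second '!', and the function returns '\t'.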
if __name__ == '__main__':
try: sys.argv[1]
except:
print('You have not provided input arguments. Please start the script by also providing an SBtab file, the definition file, and an optional HTML output filename: >python sbtab2html.py SBtabfile.csv definitions.tsv Output')
sys.exit()
file_name = sys.argv[1]
try:
default_def = sys.argv[2]
def_file = open(default_def,'r')
def_tab = def_file.read()
def_file.close()
except:
def_tab = None
try: output_name = sys.argv[3]+'.html'
except: output_name = file_name[:-4]+'.html'
sbtab_file = open(file_name,'r')
sbtab = sbtab_file.read()
html = csv2html(sbtab,file_name,def_tab,output_name)
#html_name = output_name
html_file = open(output_name,'w')
html_file.write(html)
html_file.close()
print('The HTML file has been successfully written to your working directory or chosen output path.')
| 32.789773 | 230 | 0.587073 |
768a31ddde23314da728b346aa3c4e4f92544ad6
| 385 |
py
|
Python
|
src/main/python/client/PythonClient.py
|
mfentler-tgm/sew5-simple-user-database-mfentler-tgm
|
98fba2cdca4243c3b2f25c45ceb043c258a5db53
|
[
"MIT"
] | null | null | null |
src/main/python/client/PythonClient.py
|
mfentler-tgm/sew5-simple-user-database-mfentler-tgm
|
98fba2cdca4243c3b2f25c45ceb043c258a5db53
|
[
"MIT"
] | null | null | null |
src/main/python/client/PythonClient.py
|
mfentler-tgm/sew5-simple-user-database-mfentler-tgm
|
98fba2cdca4243c3b2f25c45ceb043c258a5db53
|
[
"MIT"
] | null | null | null |
#from clientController import ClientController
from client.clientController import ClientController
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
def main():
try:
app = QtWidgets.QApplication([])
controller = ClientController()
controller.show()
app.exec_()
except Exception as e:
print(e)
if __name__ == "__main__":
main()
| 24.0625 | 52 | 0.680519 |
507dc9b6f7e264d08e2122152493a59a20b17eae
| 631 |
py
|
Python
|
source/pkgsrc/benchmarks/glmark2/patches/patch-waflib_Tools_c__config.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/benchmarks/glmark2/patches/patch-waflib_Tools_c__config.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/benchmarks/glmark2/patches/patch-waflib_Tools_c__config.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-waflib_Tools_c__config.py,v 1.1 2019/12/22 22:21:58 joerg Exp $
When detecting the C++ compiler, force C++ mode for stdin as the wrappers
add -std=c++11 and that breaks for C input.
--- waflib/Tools/c_config.py.orig 2019-12-21 22:11:24.000906920 +0000
+++ waflib/Tools/c_config.py
@@ -632,7 +632,7 @@ def cxx_load_tools(conf):
conf.load('cxx')
@conf
def get_cc_version(conf,cc,gcc=False,icc=False,clang=False):
- cmd=cc+['-dM','-E','-']
+ cmd=cc+(['-x','c++']if cc[0].endswith('+')else[])+['-dM','-E','-']
env=conf.env.env or None
try:
out,err=conf.cmd_and_log(cmd,output=0,input='\n'.encode(),env=env)
| 37.117647 | 78 | 0.664025 |
5088d15e9b45d5e4cb16b4bcbc95c013d661734e
| 4,792 |
py
|
Python
|
Challenge_1/members/martin_game.py
|
joeherold/weekly_coding_challenge_fwkwkw_python
|
5c2795fdf38970a387540141ad95408c527a7779
|
[
"MIT"
] | 1 |
2020-05-15T13:43:16.000Z
|
2020-05-15T13:43:16.000Z
|
Challenge_1/members/martin_game.py
|
joeherold/weekly_coding_challenge_fwkwkw_python
|
5c2795fdf38970a387540141ad95408c527a7779
|
[
"MIT"
] | 4 |
2020-05-15T17:46:52.000Z
|
2020-07-06T11:48:37.000Z
|
Challenge_1/members/martin_game.py
|
joeherold/weekly_coding_challenge_fwkwkw_python
|
5c2795fdf38970a387540141ad95408c527a7779
|
[
"MIT"
] | null | null | null |
# Creating a game with pygame
# pygame is a library that makes it easy to build games.
# Below, the creation of a simple game with pygame is demonstrated.
# What do FH students gain from this? Boring business lectures can be passed more quickly.
# To use pygame, it has to be installed from the shell / terminal with the following command:
# pip install pygame
# The installation can be tested in the shell / terminal with the following command. If a mini-game appears, it worked.
# python3 -m pygame.examples.aliens
import pygame  # import the library
import random  # enables generating random values
# This imports the controls for the game
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
# Define the screen for the game. The window is opened by the operating system.
fensterBreite = 800
fensterHöhe = 600
heldFarbe = (50, 50, 50)
# Definition of the player:
class Held(pygame.sprite.Sprite):
def __init__(self):
super(Held, self).__init__()
self.surf = pygame.Surface((80, 40))
self.surf.fill(heldFarbe)
self.rect = self.surf.get_rect()
    def update(self, pressed_keys):  # defines the movement of the player
if pressed_keys[K_UP]:
self.rect.move_ip(0, -6)
if pressed_keys[K_DOWN]:
self.rect.move_ip(0, 6)
if pressed_keys[K_LEFT]:
self.rect.move_ip(-6, 0)
if pressed_keys[K_RIGHT]:
self.rect.move_ip(6, 0)
        if self.rect.left < 0:  # keep the player on the screen
self.rect.left = 0
if self.rect.right > fensterBreite:
self.rect.right = fensterBreite
if self.rect.top <= 0:
self.rect.top = 0
if self.rect.bottom >= fensterHöhe:
self.rect.bottom = fensterHöhe
# Create enemies on the playing field
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super(Enemy, self).__init__()
self.surf = pygame.Surface((40, 20))
self.surf.fill((200, 200, 200))
self.rect = self.surf.get_rect(
center=(
random.randint(fensterBreite + 20, fensterBreite + 100),
random.randint(0, fensterHöhe),
)
)
self.speed = random.randint(2, 10)
    # Movement of the obstacles
def update(self):
self.rect.move_ip(-self.speed, 0)
if self.rect.right < 0:
self.kill()
pygame.init()  # Initialize pygame so it can be used. The advantage: pygame runs on Windows, Linux and Mac.
screen = pygame.display.set_mode((fensterBreite, fensterHöhe))  # Creates the window for the game, using the settings from above.
# Add enemies every 250 milliseconds, i.e. 4 per second. This applies to the whole game.
ADDENEMY = pygame.USEREVENT + 1
pygame.time.set_timer(ADDENEMY, 250)
held = Held()  # create the player
feinde = pygame.sprite.Group()  # sprite group of the enemies, used for collision checks
all_sprites = pygame.sprite.Group()  # group of all elements to render
all_sprites.add(held)
clock = pygame.time.Clock()  # clock that controls the game speed
# Run the game until it is ended by closing the window.
running = True
while running:
    for event in pygame.event.get():  # captures the user's input in the game
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:  # the game is ended with the Escape key. DOES NOT WORK YET
                running = False
        elif event.type == QUIT:  # the game is ended by closing the window
            running = False
        elif event.type == ADDENEMY:  # add enemies to the game and assign them to all elements
            new_enemy = Enemy()
            feinde.add(new_enemy)
            all_sprites.add(new_enemy)
    pressed_keys = pygame.key.get_pressed()  # capture the user input
    held.update(pressed_keys)  # enables steering of the player element
    feinde.update()  # the enemies are updated
    screen.fill((255, 204, 153))  # the background colour
    #pygame.display.flip()
    # Display all game elements
    for entity in all_sprites:
        screen.blit(entity.surf, entity.rect)
    if pygame.sprite.spritecollideany(held, feinde):  # check whether the player collides with the enemies
        held.kill()
        running = False
    pygame.display.flip()  # update the screen
    clock.tick(100)  # frames per second
| 36.030075 | 160 | 0.680509 |
e8d77a779d54fa02e46590fa4d1f3240b9b5976d
| 3,510 |
py
|
Python
|
SSH-Honeypot-master/SSH.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
SSH-Honeypot-master/SSH.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
SSH-Honeypot-master/SSH.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#Author:D4Vinci
#Squnity Developers
import socket
our_log=open("Attackers_Data.txt","w") #Our text file to save attackers data in it
def ssh(msg="",listeners=2):
welcome="""Welcome to BackBox Linux 4.5 (GNU/Linux 4.2.0-30-generic i686)\n
* Documentation: http://www.backbox.org/\n\n
The programs included with the BackBox/Ubuntu system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.\n
BackBox/Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.\n
"""
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 22))#binding for the ssh port
print "\nSSH Honeypot ready(Waiting For Attackers)..\n"
s.listen(int(listeners))
    stat=0
    n=0
    ips=[]  #list of seen attacker IPs (missing in the original, which caused a NameError)
    rqs=["http","HTTP/1.0","GET","bind","version","OPTIONS"]
while True:
n+=1
c,attacker= s.accept()
port=attacker[1]
ip=attacker[0]
c.send("login as: ")
login=c.recv(1024)
c.send(login+"@host's password: ")
a=c.recv(1024)
PROMPT = login+"@host:~$"
c.send(welcome)
ips.append(ip)
our_log.write("\n ["+str(n)+"] IP: "+str(ip)+"\tPort: "+str(port)+"\n")
print "\n ["+str(n)+"] IP: "+str(ip)+"\tPort: "+str(port)+"\n"
c.send(PROMPT)
data = c.recv(1024)
for rq in rqs:
if rq in data.split(" ") or data.split(" ")=="" or data==" " :
our_log.write(" ["+str(ip)+"] is Scanning us With nmap looking for service info.!"+"\n")
print " ["+str(ip)+"] is Scanning us With nmap looking for service info.!"+"\n"
if ip in ips:c.close()
stat=1
break
if data.split(" ")[0] == "id":
our_log.write(" ["+str(ip)+"][!]Command: "+str(data)+"\n")
print " ["+str(ip)+"][!]Command: "+str(data)+"\n"
c.send("\nuid=0(root) gid=0(root) groups=0(root)")
our_log.write(" ["+str(ip)+"]>Output: uid=0(root) gid=0(root) groups=0(root)\n")
print " ["+str(ip)+"]>Output: uid=0(root) gid=0(root) groups=0(root)\n"
c.send(str(msg)+'\n')
stat=1
c.close()
elif data.split(" ")[0] == "uname":
our_log.write(" ["+str(ip)+"]!]Command: "+str(data)+"\n")
print " ["+str(ip)+"][!]Command: "+str(data)+"\n"
c.send("\nLinux f001 3.13.3-7-high-octane-fueled #3000-LPG SMPx4 Fri Jun 31 25:24:23 UTC 2200 x86_64 x64_86 x13_37 GNU/Linux")
our_log.write(" ["+str(ip)+"]>Output: Linux f001 3.13.3-7-high-octane-fueled #3000-LPG SMPx4 Fri Jun 31 25:24:23 UTC 2200 x86_64 x64_86 x13_37 GNU/Linux\n")
print " ["+str(ip)+"]>Output: Linux f001 3.13.3-7-high-octane-fueled #3000-LPG SMPx4 Fri Jun 31 25:24:23 UTC 2200 x86_64 x64_86 x13_37 GNU/Linux\n"
c.send(str(msg)+'\n')
stat=1
c.close()
elif stat==0:
our_log.write("\t[!]Command: "+str(data)+"\n")
print " ["+str(ip)+"][!]Command: "+str(data)+"\n"
c.send("\n"+str(data.split(" ")[0]) + ": command not found")
our_log.write(" ["+str(ip)+"]>Output: "+ data.split(" ")[0] + ": command not found\n")
print " ["+str(ip)+"]>Output: "+ data.split(" ")[0] + ": command not found\n"
c.send(str(msg)+'\n')
c.close()
our_log.write("="*10)
print "="*10
our_log.close()
ssh()
| 43.875 | 169 | 0.54359 |
fa0daac23a28688052634f2fc791ac428b3239ac
| 445 |
py
|
Python
|
Python/gregorian_leap_year.py
|
paurav11/HackerRank
|
80c91c5cc55dd56671a5906be7a106ad4f1db95e
|
[
"MIT"
] | 1 |
2021-05-19T06:44:03.000Z
|
2021-05-19T06:44:03.000Z
|
Python/gregorian_leap_year.py
|
paurav11/HackerRank
|
80c91c5cc55dd56671a5906be7a106ad4f1db95e
|
[
"MIT"
] | null | null | null |
Python/gregorian_leap_year.py
|
paurav11/HackerRank
|
80c91c5cc55dd56671a5906be7a106ad4f1db95e
|
[
"MIT"
] | null | null | null |
def is_leap(year):
if year >= 1900 and year <= pow(10,5):
leap = False
if year%4 == 0:
if year%100 == 0:
if year%400 == 0:
leap = True
else:
leap = False
else:
leap = True
return leap
else:
return 'Enter valid year!'
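# A few examples of the Gregorian rule implemented above (century years must
# additionally be divisible by 400):
#   is_leap(2000) -> True   (divisible by 400)
#   is_leap(1900) -> False  (century year not divisible by 400)
#   is_leap(2012) -> True   (divisible by 4, not a century year)
#   is_leap(2019) -> False  (not divisible by 4)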
if __name__ == '__main__':
year = int(input())
print(is_leap(year))
| 24.722222 | 42 | 0.420225 |
d71d04d38fab7aa45aaa600f680fa91be52afc7b
| 20,616 |
py
|
Python
|
Vargi_Bots/ros_packages/pkg_task5/scripts/node_t5_ur5_1_package_pick.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | 1 |
2021-07-13T07:05:29.000Z
|
2021-07-13T07:05:29.000Z
|
Vargi_Bots/ros_packages/pkg_task5/scripts/node_t5_ur5_1_package_pick.py
|
TejasPhutane/Eyantra-2021-Vargi-Bots
|
ab84a1304101850be8c0f69cfe6de70d53c33189
|
[
"MIT"
] | 1 |
2021-06-05T07:58:03.000Z
|
2021-06-05T07:58:03.000Z
|
Vargi_Bots/ros_packages/pkg_task5/scripts/node_t5_ur5_1_package_pick.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
''' This node is used for controlling the ur5_1 arm and conveyor belt. '''
import sys
import math
import datetime
from datetime import datetime
import yaml
import rospy
import rospkg
import moveit_commander
import moveit_msgs.msg
import actionlib
from std_srvs.srv import Empty
from std_msgs.msg import String
from pkg_vb_sim.srv import vacuumGripper
from pkg_task5.srv import camera_packages
from pkg_ros_iot_bridge.msg import msgRosIotAction
# Message Class that is used by ROS Actions internally
from pkg_ros_iot_bridge.msg import msgRosIotGoal
# Message Class that is used for Action Goal Messages
from pkg_ros_iot_bridge.msg import msgRosIotResult
# Message Class that is used for Action Result Messages
from pkg_ros_iot_bridge.msg import msgRosIotFeedback
# # Message Class that is used for Action Feedback Messages
from pkg_ros_iot_bridge.msg import msgMqttSub
# Message Class for MQTT Subscription Messages
''' Class to initiate the pick & place process. '''
class Ur5PickPlace:
# Constructor
def __init__(self):
self._original_orders = []
self.HigherPriorityOrder = []
self.ur5_1_home_pose = [math.radians(-90), math.radians(-90), math.radians(0),
math.radians(-90), math.radians(-90), math.radians(90)]
self.ur5_1_conveyor_pose = [math.radians(7.8), math.radians(-139.4), math.radians(-57.6),
math.radians(-72.8), math.radians(89.9), math.radians(7.8)]
ur5_1_pkg00 = [math.radians(-55.8), math.radians(-67.0), math.radians(1.2),
math.radians(-114.1), math.radians(-121.3), math.radians(90)]
ur5_1_pkg01 = [math.radians(-118.9), math.radians(-85.6), math.radians(18.7),
math.radians(-113.1), math.radians(-61.0), math.radians(90.0)]
ur5_1_pkg02 = [math.radians(55.7), math.radians(-117.0), math.radians(5.4),
math.radians(-68.4), math.radians(124.2), math.radians(90)]
ur5_1_pkg10 = [math.radians(-55.1), math.radians(-96.9), math.radians(82.6),
math.radians(-165.7), math.radians(-124.8), math.radians(90)]
ur5_1_pkg11 = [math.radians(-122.7), math.radians(-116.5), math.radians(95.9),
math.radians(-159.3), math.radians(-57.2), math.radians(90.0)]
ur5_1_pkg12 = [math.radians(54.4), math.radians(-84.5), math.radians(-83.6),
math.radians(-9.3), math.radians(126.7), math.radians(90)]
ur5_1_pkg20 = [math.radians(-55.09), math.radians(-96.44), math.radians(87.31),
math.radians(9.035), math.radians(125.49), math.radians(90)]
ur5_1_pkg21 = [math.radians(116.01), math.radians(-61.96), math.radians(-129.27),
math.radians(10.33), math.radians(62.64), math.radians(90)]
ur5_1_pkg22 = [math.radians(55.5), math.radians(-85.8), math.radians(-114.2),
math.radians(20.8), math.radians(124.7), math.radians(90.0)]
ur5_1_pkg30 = [math.radians(-55.08), math.radians(-91.64), math.radians(117.76),
math.radians(-26.22), math.radians(125.48), math.radians(90.0)]
ur5_1_pkg31 = [math.radians(-121.6), math.radians(-115.9), math.radians(135.1),
math.radians(-19.2), math.radians(58.3), math.radians(90)]
ur5_1_pkg32 = [math.radians(-160.73), math.radians(-92.61), math.radians(118.27),
math.radians(-25.89), math.radians(19.84), math.radians(90)]
# Names of packages and their respective bins in gazebo
self.packages_name_position = {"packagen00":ur5_1_pkg00, "packagen01":ur5_1_pkg01,
"packagen02":ur5_1_pkg02, "packagen10":ur5_1_pkg10,
"packagen11":ur5_1_pkg11, "packagen12":ur5_1_pkg12,
"packagen20":ur5_1_pkg20, "packagen21":ur5_1_pkg21,
"packagen22":ur5_1_pkg22, "packagen30":ur5_1_pkg30,
"packagen31":ur5_1_pkg31, "packagen32":ur5_1_pkg32}
# Initialize ROS Node
rospy.init_node('node_t5_ur5_1_package_pick', anonymous=True)
rospy.sleep(15)
self.publish_orders = rospy.Publisher('/Orders_to_ship', String, queue_size=10)
# Wait for service
rospy.wait_for_service('/2Dcamera_packages_type')
# Load variables for moveit!
self._robot_ns = '/' + "ur5_1"
self._planning_group = "manipulator"
self._commander = moveit_commander.roscpp_initialize(sys.argv)
self._robot = moveit_commander.RobotCommander(robot_description=self._robot_ns + "/robot_description",
ns=self._robot_ns)
self._scene = moveit_commander.PlanningSceneInterface(ns=self._robot_ns)
self._group = moveit_commander.MoveGroupCommander(self._planning_group, robot_description=self._robot_ns + "/robot_description",
ns=self._robot_ns)
self._display_trajectory_publisher = rospy.Publisher(self._robot_ns + '/move_group/display_planned_path',
moveit_msgs.msg.DisplayTrajectory, queue_size=2)
self._exectute_trajectory_client = actionlib.SimpleActionClient(self._robot_ns + '/execute_trajectory',
moveit_msgs.msg.ExecuteTrajectoryAction)
self._exectute_trajectory_client.wait_for_server()
rospy.set_param('/ur5_1_vacuum_gripper_service', False)
self._planning_frame = self._group.get_planning_frame()
self._eef_link = self._group.get_end_effector_link()
self._group_names = self._robot.get_group_names()
self._computed_plan = ''
self._curr_state = self._robot.get_current_state()
self._group.set_planning_time(99)
rp = rospkg.RosPack()
self._pkg_path = rp.get_path('pkg_task5')
self._file_path = self._pkg_path + '/config/saved_trajectories/'
rospy.loginfo("Package Path: {}".format(self._file_path))
rospy.loginfo('\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
rospy.loginfo('\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
rospy.loginfo('\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
rospy.loginfo('\033[94m' + " >>> Ur5Moveit init done." + '\033[0m')
## MQTT Client
# Initialize Action Client
self._ac = actionlib.ActionClient('/action_ros_iot',
msgRosIotAction)
param_config_iot = rospy.get_param('config_pyiot')
# Store the ROS Topic to get the start message from bridge action server
self._param_order_topic = param_config_iot['mqtt']['sub_cb_ros_topic']
self._config_mqtt_pub_topic = param_config_iot['mqtt']['topic_pub']
# Subscribe to the desired topic and attach a Callback Funtion to it.
rospy.Subscriber(self._param_order_topic, msgMqttSub, self.func_callback_orders)
# Dictionary to Store all the goal handels
self._goal_handles = {}
self._orders = []
self._package_colours = None
# Wait for Action Server that will use the action - '/action_iot_ros' to start
self._ac.wait_for_server()
rospy.loginfo("Action server up, we can send goals.")
''' Get all data from incoming orders. '''
def func_callback_orders(self, msg):
rospy.loginfo('***Order received:'+ msg.message)
order_msg = eval(msg.message)
self._order_type = {'Medicine':['HP', 'Red', '450'],
'Clothes':['LP', 'Green', '150'],
'Food':['MP', 'Yellow', '250']}
order_id = order_msg['order_id']
order_time = order_msg['order_time']
order_item = order_msg['item']
order_priority = self._order_type[order_item][0]
order_city = order_msg['city']
order_lon = order_msg['lon']
order_lat = order_msg['lat']
order_cost = self._order_type[order_item][2]
info = {'id':'IncomingOrders', 'Team Id':'VB#693', 'Unique Id':'RRCneYRC',
'Order ID':order_id, 'Order Date and Time': order_time,
'Item':order_item, 'Priority':order_priority,
'Order Quantity':'1', 'City':order_city, 'Longitude':order_lon,
"Latitude":order_lat, 'Cost':order_cost}
message = str(info)
goal_handle = self.send_goal_to_mqtt_client("spreadsheet", "pub",
self._config_mqtt_pub_topic, message)
self._goal_handles['Order'] = goal_handle
if info["Priority"] == 'HP':
info["color"] = 'red'
info["package_name"] = self.assignName('red')
info["location_on_shelf"] = self.assignLoc(info["package_name"])
elif info["Priority"] == 'MP':
info["color"] = 'yellow'
info["package_name"] = self.assignName('yellow')
info["location_on_shelf"] = self.assignLoc(info["package_name"])
else:
info["color"] = 'green'
info["package_name"] = self.assignName('green')
info["location_on_shelf"] = self.assignLoc(info["package_name"])
self._orders.append(info)
rospy.loginfo('******Orders Received******* :')
self._original_orders.append(info)
''' Assigns package names to the prioritized order. '''
def assignName(self, curr_color):
for k in sorted(self._package_colours):
if self._package_colours[k] == curr_color:
val = k
self._package_colours.pop(k)
return val
''' Assigns package names to the prioritized order. '''
def assignLoc(self, pkgName):
val = self.packages_name_position[pkgName]
self.packages_name_position.pop(pkgName)
return val
''' Function to prioritize incoming order using insertion sort algorithm. '''
def func_prioritize_orders(self):
orders = self._orders
l = len(orders)
priority_to_value = {"LP":1, "MP":2, "HP":3}
for i in range(l):
pos = i
while pos > 0 and priority_to_value[orders[pos]['Priority']] > priority_to_value[orders[pos-1]['Priority']]:
orders[pos-1], orders[pos] = orders[pos], orders[pos-1]
pos -= 1
return orders
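    # Illustrative example (not part of the original node): if the pending
    # orders arrive with priorities ['LP', 'HP', 'MP'], the insertion sort
    # above reorders them to ['HP', 'MP', 'LP'], so main() always pops the
    # highest-priority order first.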
''' Function to get detected packages. '''
def camera1_callback(self):
get_packages_type = rospy.ServiceProxy('/2Dcamera_packages_type', camera_packages)
try:
self.get_packages = get_packages_type(True)
self._package_colours = eval(self.get_packages.pack_type)
except rospy.ServiceException as exc:
print "Service did not process request: " + str(exc)
'''This function will be called when there is a change of state
in the Action Client State Machine. '''
def on_transition(self, goal_handle):
# from on_goal() to on_transition(). goal_handle generated by send_goal() is used here.
result = msgRosIotResult()
index = 0
for i in self._goal_handles:
if self._goal_handles[i] == goal_handle:
index = i
break
rospy.loginfo("Transition Callback. Client Goal Handle #: " + str(index))
rospy.loginfo("Comm. State: " + str(goal_handle.get_comm_state()))
rospy.loginfo("Goal Status: " + str(goal_handle.get_goal_status()))
# Comm State - Monitors the State Machine of the Client which is different from Server's
# Comm State = 2 -> Active
# Comm State = 3 -> Wating for Result
# Comm State = 7 -> Done
# if (Comm State == ACTIVE)
if goal_handle.get_comm_state() == 2:
rospy.loginfo(str(index) + ": Goal just went active.")
# if (Comm State == DONE)
if goal_handle.get_comm_state() == 7:
rospy.loginfo(str(index) + ": Goal is DONE")
rospy.loginfo(goal_handle.get_terminal_state())
# get_result() gets the result produced by the Action Server
result = goal_handle.get_result()
rospy.loginfo(result.flag_success)
if result.flag_success == True:
rospy.loginfo("Goal successfully completed. Client Goal Handle #: " + str(index))
else:
rospy.loginfo("Goal failed. Client Goal Handle #: " + str(index))
''' This function is used to send Goals to MQtt client. '''
def send_goal_to_mqtt_client(self, arg_protocol, arg_mode, arg_topic, arg_message):
# Create a Goal Message object
goal = msgRosIotGoal()
goal.protocol = arg_protocol
goal.mode = arg_mode
goal.topic = arg_topic
goal.message = arg_message
rospy.loginfo("Sending to mqtt client")
# self.on_transition - It is a function pointer to a function which will be called when
# there is a change of state in the Action Client State Machine
goal_handle = self._ac.send_goal(goal,
self.on_transition,
None)
return goal_handle
''' Function to attach box to UR5_1 vacuum gripper. '''
def attach_box(self, current_package, timeout=4):
touch_links = self._robot.get_link_names(self._planning_group)
self._scene.attach_box(self._eef_link, current_package, touch_links=touch_links)
if rospy.get_param('/ur5_2_vacuum_gripper_service') == True or rospy.get_param('/conveyor_belt_service') == True:
rospy.loginfo_once("Waiting for Service")
rospy.sleep(1)
rospy.set_param('/ur5_1_vacuum_gripper_service', True)
try:
rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
vacuumGripper)
self.attach(True)
rospy.set_param('/ur5_1_vacuum_gripper_service', False)
except rospy.ServiceException, e:
print "Service call failed: %s" % e
print "Trying to reconnect service"
rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
rospy.set_param('/ur5_1_vacuum_gripper_service', True)
self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
vacuumGripper)
self.attach(True)
rospy.set_param('/ur5_1_vacuum_gripper_service', False)
''' Function to detach box from UR5_1 vacuum gripper. '''
def detach_remove_box(self, current_package, timeout=4):
if rospy.get_param('/ur5_2_vacuum_gripper_service') == True or rospy.get_param('/conveyor_belt_service') == True:
rospy.loginfo_once("Waiting for Service")
rospy.sleep(1)
rospy.set_param('/ur5_1_vacuum_gripper_service', True)
self._scene.remove_attached_object(self._eef_link, name=current_package)
try:
rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
vacuumGripper)
self.attach(False)
rospy.set_param('/ur5_1_vacuum_gripper_service', False)
except rospy.ServiceException, e:
print "Service call failed: %s" % e
print "Trying again to connect service"
rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
rospy.set_param('/ur5_1_vacuum_gripper_service', True)
self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
vacuumGripper)
self.attach(False)
rospy.set_param('/ur5_1_vacuum_gripper_service', False)
self._scene.remove_world_object(current_package)
''' Set UR5_1 joint angles. '''
def set_joint_angles(self, arg_list_joint_angles):
self._group.set_joint_value_target(arg_list_joint_angles)
self._computed_plan = self._group.plan()
self._group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Current Joint Values:" + '\033[0m')
flag_plan = self._group.go(wait=True)
self._group.stop()
if flag_plan == True:
rospy.loginfo(
'\033[94m' + ">>> set_joint_angles() Success" + '\033[0m')
else:
rospy.logerr(
'\033[94m' + ">>> set_joint_angles() Failed." + '\033[0m')
return flag_plan
''' Function to set the angles until it reaches max attempt. '''
def hard_set_joint_angles(self, arg_list_joint_angles, arg_max_attempts):
number_attempts = 0
flag_success = False
while ((number_attempts <= arg_max_attempts) and (flag_success is False)):
number_attempts += 1
flag_success = self.set_joint_angles(arg_list_joint_angles)
rospy.logwarn("attempts: {}".format(number_attempts))
''' Function to play saved trajectories. '''
def moveit_play_planned_path_from_file(self, arg_file_path, arg_file_name):
file_path = arg_file_path + arg_file_name
list_joint_values = self._group.get_current_joint_values()
with open(file_path, 'r') as file_open:
loaded_plan = yaml.load(file_open)
ret = self._group.execute(loaded_plan)
return ret
''' Function to play saved trajectories until it reaches max attempt. '''
def moveit_hard_play_planned_path_from_file(self, arg_file_path, arg_file_name, arg_max_attempts):
number_attempts = 0
flag_success = False
while ((number_attempts <= arg_max_attempts) and (flag_success is False)):
number_attempts += 1
flag_success = self.moveit_play_planned_path_from_file(arg_file_path, arg_file_name)
rospy.logwarn("attempts: {}".format(number_attempts))
return True
''' UR5_1 robot arm pick and place routine. '''
def robot_pick_place(self, _current_package, package_location, conveyor_location, ordersheet):
self.hard_set_joint_angles(package_location, 7)
self.attach_box(_current_package)
rospy.sleep(0.5)
self.moveit_hard_play_planned_path_from_file(self._file_path,
'conveyor_'+_current_package+'.yaml', 5)
self.detach_remove_box(_current_package)
rospy.sleep(0.5)
dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
info = {'id':'OrdersDispatched', 'Team Id':'VB#693', 'Unique Id':'RRCneYRC',
'Order ID':ordersheet['Order ID'],
'City':ordersheet['City'], 'Item':ordersheet['Item'],
'Priority':ordersheet['Priority'],
'Dispatch Quantity':'1', 'Cost':ordersheet['Cost'],
'Dispatch Status':'YES', 'Dispatch Date and Time': dt_string}
message = str(info)
goal_handle = self.send_goal_to_mqtt_client("spreadsheet", "pub",
self._config_mqtt_pub_topic, message)
self._goal_handles['Order Dispatched'] = goal_handle
self.publish_orders.publish(message)
# Destructor
def __del__(self):
moveit_commander.roscpp_shutdown()
rospy.loginfo(
'\033[94m' + "Object of class Ur5Moveit Deleted." + '\033[0m')
''' Main Function. '''
def main():
# Wait for Initializing Gazebo and Rviz
# Create UR5 object
ur5_1 = Ur5PickPlace()
    ur5_1.camera1_callback()  # populates ur5_1._package_colours with the detected package colours
# Initially move the robot to home position
ur5_1.set_joint_angles(ur5_1.ur5_1_conveyor_pose)
while not rospy.is_shutdown():
if len(ur5_1._orders) != 0:
ur5_1.func_prioritize_orders()
curr_order = ur5_1._orders.pop(0)
ur5_1.robot_pick_place(curr_order['package_name'], curr_order['location_on_shelf'],
ur5_1.ur5_1_conveyor_pose, curr_order)
else:
pass
if __name__ == '__main__':
main()
| 46.537246 | 136 | 0.61622 |
d101cf6bb24a02017604ba63c91029a4ec28843a
| 2,818 |
py
|
Python
|
reasoner/json2f2.py
|
shiv-io/blawx
|
2fef1fbc26f7c4479714e3fa291660964672b612
|
[
"MIT"
] | 1 |
2020-06-21T02:19:54.000Z
|
2020-06-21T02:19:54.000Z
|
reasoner/json2f2.py
|
shiv-io/blawx
|
2fef1fbc26f7c4479714e3fa291660964672b612
|
[
"MIT"
] | null | null | null |
reasoner/json2f2.py
|
shiv-io/blawx
|
2fef1fbc26f7c4479714e3fa291660964672b612
|
[
"MIT"
] | null | null | null |
# Script to Take a JSON object, convert it into a Python structure, and convert the Python structure into Flora-2 code.
# Jason Morris
import sys, json, types
def json2flora(key,value,parentname="data",root=False):
retstr = ""
# If this is not a leaf:
if isinstance(value, (list,dict,tuple)):
# for each of the subvariables, json2flora it
if isinstance(value, list):
if root:
retstr += parentname + "[" + key + "->\\#[list->{"
else:
retstr += "{"
if len(value):
for i, v in enumerate(value):
if isinstance(v, (list,dict,tuple)):
retstr += json2flora(key,v,parentname)
else:
retstr += jsonvalue2flora(v)
retstr += ", "
retstr = retstr[ :-2 ]
if root:
retstr += "}]]"
else:
retstr += "}"
elif isinstance(value, dict):
#retstr += "The elements of the dict are: \n\n"
if root:
retstr += parentname + "[" + key + "->\\#["
else:
retstr += "\\#["
if len(value):
for k, v in value.items():
#retstr += str(k) + ": " + str(v) + "\n\n"
retstr += k + "->"
if isinstance(v, (list,dict,tuple)):
retstr += json2flora(k,v,"")
else:
retstr += jsonvalue2flora(v)
retstr += ", "
retstr = retstr[ :-2 ]
retstr += "]"
if root:
retstr += "]"
elif isinstance(value, tuple):
# Convert tuple to a list, and try again
# I'm not sure if this is correct... need to test.
newvalue = list(value)
#retstr += "Converting " + str(value) + " to " + str(newvalue) + " and retrying.\n\n"
retstr += json2flora(key,newvalue,parentname)
else:
if root:
retstr += parentname + "["
retstr += str(key) + "->" + jsonvalue2flora(value)
if root:
retstr += "]"
return retstr
def jsonvalue2flora(value):
if isinstance(value,str):
if not (value[0] == "'" and value[-1] == "'"):
return "'" + str(value) + "'"
else:
return str(value)
elif isinstance(value,type(None)):
return "\@?"
elif isinstance(value,bool):
if value:
return "true"
else:
return "false"
elif isinstance(value,(int,float)):
return str(value)
else:
return str(value) + ":" + type(value).__name__
# Get the data from the command line
filename = sys.argv[1]
file = open(filename, "r")
# Convert from JSON to Python structure
dictionary = json.load(file)
#print(dictionary)
#print(dictionary['test'])
# Convert all lists to dictionaries, maybe?
# Convert the resulting Python dictionary to a list of Flora-2 Entries.
output = []
for k,v in dictionary.items():
output.append(json2flora(k,v,root=True))
# Output the Flora-2 Code
for o in output:
print(o + ".\n")
| 28.18 | 119 | 0.562811 |
d15a656c606653fb0e9991d671e77ed444fc45d5
| 3,464 |
py
|
Python
|
andinopy/nextion_util.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/nextion_util.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/nextion_util.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
# _ _ _
# / \ _ __ __| (_)_ __ ___ _ __ _ _
# / _ \ | '_ \ / _` | | '_ \ / _ \| '_ \| | | |
# / ___ \| | | | (_| | | | | | (_) | |_) | |_| |
# /_/ \_\_| |_|\__,_|_|_| |_|\___/| .__/ \__, |
# |_| |___/
# by Jakob Groß
import time
import serial
import sys
import os
e = bytearray([0xFF, 0xFF, 0xFF])
def get_baud_rate(dev_port: serial.Serial, diagnostics: bool = False):
def diag_print(text: str):
if diagnostics:
print(text)
for baud_rate in (2400, 4800, 9600, 19200, 38400, 57600, 115200, 921600, 512000, 256000, 250000, 230400):
dev_port.baudrate = baud_rate
dev_port.timeout = 3000 / baud_rate + 0.2
diag_print(f"trying with {baud_rate} baud")
dev_port.write(e)
dev_port.write("connect".encode('ascii'))
dev_port.write(e)
r = dev_port.read(128)[:-3]
if 'comok' in str(r):
diag_print(f"Connected with {baud_rate} baud")
status, unknown1, model, firmware, mcucode, nextion_serial, nextion_flash_size = str(r).strip("\xff").split(
',')
if status.split(' ')[1] == "1":
diag_print('Touchscreen: enabled')
else:
diag_print('Touchscreen: disabled')
diag_print(
f"Model:{model}\nFirmware:{firmware}\nMCU-Code:{mcucode}\nSerial:{nextion_serial}\nFlashSize:{nextion_flash_size}")
return baud_rate
return False
def force_max_baud(dev_port, filesize, diagnostics=False):
def diag_print(text: str):
if diagnostics:
print(text)
for baud in [921600, 512000, 256000, 250000, 230400, 115200, 57600, 38400, 31250, 19200, 9600]:
diag_print(f"Trying {baud} baud")
diag_print(f"SENDING: whmi-wri {filesize},{baud},0")
dev_port.write(f"whmi-wri {filesize},{baud},0".encode("ascii"))
dev_port.write(e)
time.sleep(0.4)
dev_port.baudrate = baud
dev_port.timeout = 0.5
time.sleep(.1)
r = dev_port.read(1)
if 0x05 in r:
return True
return False
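# Illustrative note (not part of the original utility): the upload request
# sent above has the form "whmi-wri <filesize>,<baud>,0", so a 1048576-byte
# TFT file tried at 115200 baud yields "whmi-wri 1048576,115200,0"; the
# display is then expected to acknowledge with a 0x05 byte before
# upload_image() starts transferring 4096-byte chunks.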
def upload_image(dev_port, filename, filesize):
with open(filename, 'rb') as image:
data_count = 0
while 1:
data = image.read(4096)
if len(data) < 1:
break
data_count += len(data)
dev_port.timeout = 5
dev_port.write(data)
sys.stdout.write('\rUpload, %3.1f%%...' % (data_count / float(filesize) * 100.0))
sys.stdout.flush()
time.sleep(.5)
r = dev_port.read(1)
if 0x05 not in r:
return False
return True
def flash(port: str, tft_file: str):
port = serial.Serial(port, 9600, timeout=None)
if not port.isOpen():
port.open()
if not get_baud_rate(port, diagnostics=True):
print("Baud Rate could not be specified")
exit(1)
file_size = os.path.getsize(tft_file)
if not force_max_baud(port, file_size, diagnostics=True):
print("Could not force baud rate")
exit(1)
if not upload_image(port, tft_file, file_size):
print("could not upload tft File")
exit(1)
if __name__ == "__main__":
if len(sys.argv) != 2:
print('usage:\npython3 nextion_util.py file_to_upload.tft')
file = sys.argv[1]
flash("/dev/ttyAMA0", file)
exit(0)
| 33.307692 | 131 | 0.555139 |
3872f64391460d5939a9be87f7c946d907584969
| 966 |
py
|
Python
|
showcase1/com/aaron/sysexample.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
showcase1/com/aaron/sysexample.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | 2 |
2021-03-25T22:00:07.000Z
|
2022-01-20T15:51:48.000Z
|
showcase1/com/aaron/sysexample.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#from sys import argv,path,modules,exec_prefix
from sys import *
def printCommandParam():
"""打印命令行参数"""
print(type(argv))
for commandParam in argv:
print(commandParam)
def printModuleSearchPath():
"""打印模块搜索路径"""
print(type(path))
for subpath in path:
print(subpath)
def printModuleDictionary():
"""打印模块dictionary"""
for module in modules:
print(module)
def printStaticObjectInfo():
"""sys模块静态对象信息"""
#print(copyright)
print(exec_prefix)
print(executable)
print(hash_info)
print(implementation)
print(platform)
print(prefix)
print(thread_info)
print(version)
print(version_info)
# exit(2)
displayhook('a')
def print_hello():
print("export")
if __name__=="__main__" :
printCommandParam()
printModuleSearchPath()
printModuleDictionary()
#print(printCommandParam.__doc__)
printStaticObjectInfo()
| 16.655172 | 46 | 0.653209 |
2a1f2249846d8c80a2fb58e9208835cc3b60a427
| 1,908 |
py
|
Python
|
reports/srp/code/symbol_classifier_test.py
|
klawr/deepmech
|
61de238f1d4b1b867ec1d5f4e4af2a3b25a5abff
|
[
"MIT"
] | 1 |
2020-04-17T12:27:06.000Z
|
2020-04-17T12:27:06.000Z
|
reports/srp/code/symbol_classifier_test.py
|
klawr/deepmech
|
61de238f1d4b1b867ec1d5f4e4af2a3b25a5abff
|
[
"MIT"
] | 1 |
2022-02-27T13:13:17.000Z
|
2022-02-27T13:13:17.000Z
|
reports/srp/code/symbol_classifier_test.py
|
klawr/deepmech
|
61de238f1d4b1b867ec1d5f4e4af2a3b25a5abff
|
[
"MIT"
] | null | null | null |
from os.path import join
import cv2
import numpy as np
from numpy.random import uniform
from sys import exit
import tensorflow as tf
model_path = join('models', 'symbol_classifier', 'model.h5')
model = tf.keras.models.load_model(model_path)
path = join('data', 'raw', 'n', '1.jpeg')
image_name = "data"
drawing = False
pt1_x , pt1_y = None , None
img = None
color = None
thickness = None
def draw(event, x, y, flags, param):  # matches the cv2 mouse-callback signature
global pt1_x, pt1_y, drawing, img, color
if event==cv2.EVENT_LBUTTONDOWN:
drawing=True
pt1_x,pt1_y=x,y
elif event==cv2.EVENT_LBUTTONUP:
drawing=False
cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
elif event==cv2.EVENT_MOUSEMOVE:
if drawing==True:
cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
pt1_x,pt1_y=x,y
elif event==cv2.EVENT_RBUTTONUP:
image = tf.convert_to_tensor(np.asarray(img, np.uint8), np.uint8)
tensor = tf.io.encode_jpeg(image)
print(predict(tensor))
new_image()
elif event==cv2.EVENT_MBUTTONUP:
new_image()
def new_image():
global img, color, thickness
w_on_b = round(uniform())
thickness = 5 + round(uniform(0, 255))
img = np.ones((512,512,3), np.uint8)
img *= round(uniform(0, 255))
color = (255,255,255) if w_on_b else (0,0,0)
def predict(image):
label = ['n', 'o', 'x']
blob = tf.io.decode_jpeg(image, channels=1)
blob = tf.image.convert_image_dtype(blob, tf.float32)
blob = tf.image.resize(blob, (32, 32))
blob = tf.reshape(blob, (1, 32, 32, 1))
pred = list(model.predict(blob, steps = 1)[0])
index = pred.index(max(pred))
return label[index]
new_image()
cv2.namedWindow(image_name)
cv2.setMouseCallback(image_name, draw)
while(1):
cv2.imshow(image_name, img)
if cv2.waitKey(1)&0xFF==27:
break
cv2.destroyAllWindows()
| 27.652174 | 77 | 0.651992 |
2d8207f1fd16138b6a8da26069278c95a052f21c
| 382 |
py
|
Python
|
aoc2020/day_02/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_02/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_02/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from .part_1 import Solution as Part1Solution
class Solution(Part1Solution):
expected = 1
@classmethod
def check_pw(cls, line: str):
        policy, pw = line.split(': ', 1)
pos, letter = policy.split(' ')
pos1, pos2 = pos.split('-')
return sum([
pw[int(pos1)-1] == letter,
pw[int(pos2)-1] == letter,
]) == 1
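# Worked example for this policy (Advent of Code 2020, day 2, part 2): for the
# line "1-3 a: abcde", positions 1 and 3 hold 'a' and 'c'; exactly one of them
# equals the letter 'a', so check_pw returns True.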
| 21.222222 | 45 | 0.528796 |
2dcf9585563c0f6c24fe2c6cf9dcadd24e3a9a58
| 2,722 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/accounts/doctype/c_form/c_form.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/accounts/doctype/c_form/c_form.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/accounts/doctype/c_form/c_form.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from frappe.model.document import Document
class CForm(Document):
def validate(self):
"""Validate invoice that c-form is applicable
and no other c-form is received for that"""
for d in self.get('invoices'):
if d.invoice_no:
inv = frappe.db.sql("""select c_form_applicable, c_form_no from
`tabSales Invoice` where name = %s and docstatus = 1""", d.invoice_no)
if inv and inv[0][0] != 'Yes':
frappe.throw(_("C-form is not applicable for Invoice: {0}".format(d.invoice_no)))
elif inv and inv[0][1] and inv[0][1] != self.name:
frappe.throw(_("""Invoice {0} is tagged in another C-form: {1}.
If you want to change C-form no for this invoice,
please remove invoice no from the previous c-form and then try again"""\
.format(d.invoice_no, inv[0][1])))
elif not inv:
frappe.throw(_("Row {0}: Invoice {1} is invalid, it might be cancelled / does not exist. \
Please enter a valid Invoice".format(d.idx, d.invoice_no)))
def on_update(self):
""" Update C-Form No on invoices"""
self.set_total_invoiced_amount()
def on_submit(self):
self.set_cform_in_sales_invoices()
def before_cancel(self):
# remove cform reference
frappe.db.sql("""update `tabSales Invoice` set c_form_no=null where c_form_no=%s""", self.name)
def set_cform_in_sales_invoices(self):
inv = [d.invoice_no for d in self.get('invoices')]
if inv:
frappe.db.sql("""update `tabSales Invoice` set c_form_no=%s, modified=%s where name in (%s)""" %
('%s', '%s', ', '.join(['%s'] * len(inv))), tuple([self.name, self.modified] + inv))
frappe.db.sql("""update `tabSales Invoice` set c_form_no = null, modified = %s
where name not in (%s) and ifnull(c_form_no, '') = %s""" %
('%s', ', '.join(['%s']*len(inv)), '%s'), tuple([self.modified] + inv + [self.name]))
else:
frappe.throw(_("Please enter atleast 1 invoice in the table"))
def set_total_invoiced_amount(self):
total = sum([flt(d.grand_total) for d in self.get('invoices')])
frappe.db.set(self, 'total_invoiced_amount', total)
def get_invoice_details(self, invoice_no):
""" Pull details from invoices for referrence """
if invoice_no:
inv = frappe.db.get_value("Sales Invoice", invoice_no,
["posting_date", "territory", "base_net_total", "base_grand_total"], as_dict=True)
return {
'invoice_date' : inv.posting_date,
'territory' : inv.territory,
'net_total' : inv.base_net_total,
'grand_total' : inv.base_grand_total
}
| 38.338028 | 99 | 0.678913 |
10a8fceb69749d737ac321e18341213acab74cff
| 757 |
py
|
Python
|
haas_lib_bundles/python/docs/examples/home_intrusion_alarm/esp32/code/buzzer.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/home_intrusion_alarm/esp32/code/buzzer.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/home_intrusion_alarm/esp32/code/buzzer.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
from driver import PWM
class BUZZER(object):
def __init__(self, pwmObj,data=None):
self.pwmObj = None
if not isinstance(pwmObj, PWM):
raise ValueError("parameter is not an PWM object")
self.pwmObj = pwmObj
if data is not None:
self.setOptionDuty(data)
def setOptionDuty(self,data):
if self.pwmObj is None:
raise ValueError("invalid PWM object")
self.pwmObj.setOption(data)
def start(self,data):
if self.pwmObj is None:
raise ValueError("invalid PWM object")
self.setOptionDuty(data)
def close(self,data):
if self.pwmObj is None:
raise ValueError("invalid PWM object")
self.setOptionDuty(data)
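# Illustrative usage sketch (not part of the original driver): it assumes a
# HaaS-style PWM object `pwm` that has already been opened elsewhere, and the
# option-dict keys below are assumptions rather than a documented API:
#
#   buzzer = BUZZER(pwm)
#   buzzer.start({'freq': 2000, 'duty': 60})   # hypothetical: buzzer on
#   buzzer.close({'freq': 2000, 'duty': 0})    # hypothetical: buzzer off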
| 25.233333 | 62 | 0.611625 |
d8325ac080b537056e230ce0a40a19f3ffd5ccc7
| 2,196 |
py
|
Python
|
opencv_tutorial/opencv_python_tutorials/Image_Processing/hough_line_transfom.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
opencv_tutorial/opencv_python_tutorials/Image_Processing/hough_line_transfom.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
opencv_tutorial/opencv_python_tutorials/Image_Processing/hough_line_transfom.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 14:52:34 2019
@author: jone
"""
#%%
import cv2
import numpy as np
def nothing(x):
pass
img = cv2.imread('img/chessboard2.jpg')
img = cv2.resize(img, (800, 800))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.namedWindow('image')
cv2.createTrackbar('threshold', 'image', 200, 400, nothing)
cv2.namedWindow('canny')
cv2.createTrackbar('canny', 'canny', 50, 255, nothing)
while(1):
if cv2.waitKey(1) & 0xFF == 27:
break
img_copy = img.copy()
threshold = cv2.getTrackbarPos('threshold', 'image')
c = cv2.getTrackbarPos('canny', 'canny')
if threshold < 50:
threshold = 50
edges = cv2.Canny(gray, c, 3*c, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, np.pi/180, threshold)
    if lines is None:
        lines = []
    for line in lines:
for rho, theta in line:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('canny', edges)
cv2.imshow('image', img_copy)
cv2.destroyAllWindows()
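# Geometry note for the endpoint computation above: (x0, y0) =
# (rho*cos(theta), rho*sin(theta)) is the foot of the perpendicular from the
# origin onto the detected line, and (-sin(theta), cos(theta)) is the line's
# direction vector; stepping +/-1000 px along that direction gives two
# endpoints far enough apart to span the 800x800 image.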
#%% Probabilistic Hough Transform
import cv2
import numpy as np
def nothing(x):
pass
img = cv2.imread('img/building.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(img, 50, 150, apertureSize=3)
cv2.namedWindow('image')
cv2.createTrackbar('threshold', 'image', 100, 255, nothing)
cv2.createTrackbar('min_length', 'image', 100, 500, nothing)
cv2.createTrackbar('max_gap', 'image', 0, 100, nothing)
while(1):
if cv2.waitKey(1) & 0xFF == 27:
break
img_copy = img.copy()
threshold = cv2.getTrackbarPos('threshold', 'image')
min_length = cv2.getTrackbarPos('min_length', 'image')
max_gap = cv2.getTrackbarPos('max_gap', 'image')
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold, minLineLength=min_length, maxLineGap=max_gap)
    if lines is None:
        lines = []
    for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img_copy, (x1,y1), (x2,y2), (0,255,0), 2)
cv2.imshow('image', img_copy)
cv2.destroyAllWindows()
| 25.835294 | 80 | 0.598816 |
dc6a5600b667723f71c4c04ae1ce374f18cebb40
| 506 |
py
|
Python
|
Uebung3/Uebung3_Aufgabe8_2_3.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | 1 |
2018-04-18T19:10:06.000Z
|
2018-04-18T19:10:06.000Z
|
Uebung3/Uebung3_Aufgabe8_2_3.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | null | null | null |
Uebung3/Uebung3_Aufgabe8_2_3.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | 1 |
2018-04-29T08:48:00.000Z
|
2018-04-29T08:48:00.000Z
|
n = int(2367363789863971985761)
# check what length the sequence for n = 2367363789863971985761 has
#print(n)
i = 1
while n != 1:  # as long as n does not yet equal 1
    if n%2 == 0:  # if n is evenly divisible by 2
        n = n//2
        #print(n)
    else:  # otherwise
        n = n*3+1
        #print(n)
    i = i+1
print("Die Länge der Folge ist:",i)
"""
Hier muss noch in die Kommentare wie man die Zahl n findet welches die möglichst
längste Folge hat für n < 10^6. Mit Worten erklärt.
"""
| 18.071429 | 81 | 0.610672 |
dc7dbf70550ac654806a84fb48a51e926b2303be
| 974 |
py
|
Python
|
backend/apps/mapview/utils.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/mapview/utils.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/mapview/utils.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | null | null | null |
import json
from math import asin, cos, radians, sin, sqrt
from os.path import abspath, dirname, join
current_location = dirname(abspath(__file__))
with open(join(current_location, "files/plzs_merged.json")) as f:
plzs = json.loads(f.read())
def haversine(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
return 2 * 6371 * asin(sqrt(a))
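# Illustrative check (not part of the original module): note the lon-first
# argument order. haversine(13.405, 52.52, 9.993, 53.551), roughly Berlin to
# Hamburg, comes out near 255 km, and any point compared with itself
# returns 0.0.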
def get_plzs_close_to(countrycode, plz, distance_in_km):
lon1, lat1, _ = plzs[countrycode][plz]
close = []
for other_plz, (lon2, lat2, ort) in plzs[countrycode].items():
dist = haversine(lon1, lat1, lon2, lat2)
if dist < distance_in_km:
close.append(other_plz)
return close
def get_plz_data(countrycode, plz):
lat, lon, ort = plzs[countrycode][plz]
return {"latitude": lat, "longitude": lon, "city": ort}
| 28.647059 | 71 | 0.646817 |
7618678767cd18eead7a1f2453584206b1fceedb
| 3,639 |
py
|
Python
|
tests/ingestion/transformers/monosi/test_monitors.py
|
monosidev/monosi
|
a88b689fc74010b10dbabb32f4b2bdeae865f4d5
|
[
"Apache-2.0"
] | 156 |
2021-11-19T18:50:14.000Z
|
2022-03-31T19:48:59.000Z
|
tests/ingestion/transformers/monosi/test_monitors.py
|
monosidev/monosi
|
a88b689fc74010b10dbabb32f4b2bdeae865f4d5
|
[
"Apache-2.0"
] | 30 |
2021-12-27T19:30:56.000Z
|
2022-03-30T17:49:00.000Z
|
tests/ingestion/transformers/monosi/test_monitors.py
|
monosidev/monosi
|
a88b689fc74010b10dbabb32f4b2bdeae865f4d5
|
[
"Apache-2.0"
] | 14 |
2022-01-17T23:24:34.000Z
|
2022-03-29T09:27:47.000Z
|
import pytest
import ingestion.transformers.monosi.monitors as monitors
@pytest.fixture
def schema():
return {
'columns': ['NAME', 'COL_NAME', 'COL_TYPE', 'COL_DESCRIPTION', 'COL_SORT_ORDER', 'DATABASE', 'SCHEMA', 'DESCRIPTION', 'IS_VIEW'],
'rows': [
{
'NAME': 'name_of_table',
'COL_NAME': 'name_of_col',
'COL_TYPE': 'timestamp_tz',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
{
'NAME': 'name_of_table',
'COL_NAME': 'name_of_col_2',
'COL_TYPE': 'text',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
{
'NAME': 'name_of_table_2',
'COL_NAME': 'name_of_col_3',
'COL_TYPE': 'int',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
]
}
def test__transform_empty():
input_arr = {'rows': []}
output_arr = monitors.MonitorTransformer._transform(input_arr)
assert len(output_arr) == 0
def test__transform(schema):
output_arr = monitors.MonitorTransformer._transform(schema)
expected_num_monitors = 2
assert len(output_arr) == expected_num_monitors
@pytest.fixture
def monitor():
return {}
@pytest.fixture
def normalized_schema():
return monitors.MonitorTransformer._normalized_schema()
def test__normalized_schema_correct(normalized_schema, monitor):
input_arr = [monitor]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_correct_multiple(normalized_schema, monitor):
input_arr = [monitor, monitor]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_incorrect_to_have_none(normalized_schema):
input_arr = []
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect(normalized_schema):
input_arr = [{"anything": "goeshere"}]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect_multiple(normalized_schema):
input_arr = [{}, {"anything": "goeshere"}]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
@pytest.fixture
def original_schema():
return monitors.MonitorTransformer._original_schema()
def test__original_schema_correct(original_schema, schema):
is_correct = monitors.MonitorTransformer.match(schema, original_schema)
assert is_correct == True
def test__original_schema_incorrect_to_have_none(original_schema):
is_correct = monitors.MonitorTransformer.match({}, original_schema)
assert is_correct == False
def test__original_schema_incorrect(original_schema):
input_arr = {'anything': 'goeshere'}
is_correct = monitors.MonitorTransformer.match(input_arr, original_schema)
assert is_correct == False
| 30.579832 | 137 | 0.638637 |
0d74b4ba6a0f2b3e6686afdb8d25a5cfb67fbe31
| 82 |
py
|
Python
|
Python/Loops/loops.py
|
boneskewer69/ifis
|
926323f60bf8eac55ccc67b6f6e536fe19c6b4c2
|
[
"MIT"
] | null | null | null |
Python/Loops/loops.py
|
boneskewer69/ifis
|
926323f60bf8eac55ccc67b6f6e536fe19c6b4c2
|
[
"MIT"
] | null | null | null |
Python/Loops/loops.py
|
boneskewer69/ifis
|
926323f60bf8eac55ccc67b6f6e536fe19c6b4c2
|
[
"MIT"
] | null | null | null |
x = 1
while True:
x = input("Number:\n> ")
if int(x) == 0:
break
| 11.714286 | 28 | 0.439024 |