| Column | Type | Range / values |
| --- | --- | --- |
| hexsha | string | length 40 |
| size | int64 | 6 to 782k |
| ext | string | 7 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 237 |
| max_stars_repo_name | string | length 6 to 72 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64, nullable | 1 to 53k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 184 |
| max_issues_repo_name | string | length 6 to 72 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64, nullable | 1 to 27.1k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 184 |
| max_forks_repo_name | string | length 6 to 72 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64, nullable | 1 to 12.2k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 6 to 782k |
| avg_line_length | float64 | 2.75 to 664k |
| max_line_length | int64 | 5 to 782k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 17b328f0e82744b91ceb1e856b4c428328eebbbd | size: 569 | ext: py | lang: Python
path: leetcode/665-Non-decreasing-Array/NonDecreasingArr.py | repo: cc13ny/all-in | head: bc0b01e44e121ea68724da16f25f7e24386c53de | licenses: ["MIT"]
stars: 1 (2015-12-16T04:01:03.000Z to 2015-12-16T04:01:03.000Z) | issues: 1 (2016-02-09T06:00:07.000Z to 2016-02-09T07:20:13.000Z) | forks: 2 (2019-06-27T09:07:26.000Z to 2019-07-01T04:40:13.000Z)
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
is_modified = False
        for i in range(len(nums) - 1):
if nums[i] > nums[i + 1]:
if is_modified:
return False
else:
if i == 0 or nums[i - 1] <= nums[i + 1]:
nums[i] = nums[i + 1]
else:
nums[i + 1] = nums[i]
is_modified = True
return True
avg_line_length: 28.45 | max_line_length: 60 | alphanum_fraction: 0.379613
hexsha: 35019a8cdcdc4bd46aae8b294cf7d088d7c2a1b4 | size: 6,727 | ext: py | lang: Python
path: pyScript_NodeManager/NodeInput.py | repo: Shirazbello/Pyscriptining | head: 0f2c80a9bb10477d65966faeccc7783f20385c1b | licenses: ["MIT"]
stars: null | issues: null | forks: null
from PySide2.QtWidgets import QWidget, QPlainTextEdit, QRadioButton, QGridLayout, QPushButton, QComboBox, QLineEdit, QMessageBox, QGroupBox, QVBoxLayout
class NodeInput(QWidget):
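    # One editor row for a node input: move up/down buttons, an exec/data
    # type selector, a label field, widget options, and a delete button.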
def __init__(self, content_widget):
super(NodeInput, self).__init__()
self.content_widget = content_widget
        self.widget_type = ''  # set automatically when the UI below is created (see self.widget_type_combo_box_changed)
# create UI
# create all layouts
self.grid_layout = QGridLayout(self)
# move buttons
self.up_button = QPushButton(' < ')
self.down_button = QPushButton(' > ')
# type and label
self.type_combo_box = QComboBox(self)
self.type_combo_box.addItem('exec')
self.type_combo_box.addItem('data')
self.type_combo_box.currentTextChanged.connect(self.type_combo_box_changed)
self.label_text_edit = QPlainTextEdit(self)
self.label_text_edit.setPlaceholderText('Label')
self.label_text_edit.setFixedWidth(self.type_combo_box.width())
# self.label_text_edit.setMinimumHeight(20)
self.label_text_edit.setMaximumHeight(56)
# widget
self.widget_grid_layout = QGridLayout()
self.widget_yes_no_group_box = QGroupBox(self)
self.widget_yes_no_group_box.setLayout(QVBoxLayout())
self.widget_yes_radio_button = QRadioButton('Yes', self)
self.widget_yes_radio_button.setChecked(True)
self.widget_yes_radio_button.toggled.connect(self.widget_yes_set)
self.widget_no_radio_button = QRadioButton('No', self)
self.widget_yes_no_group_box.layout().addWidget(self.widget_yes_radio_button)
self.widget_yes_no_group_box.layout().addWidget(self.widget_no_radio_button)
self.widget_grid_layout.addWidget(self.widget_yes_no_group_box, 0, 0, 4, 1)
self.widget_group_box = QGroupBox(self)
self.widget_group_box.setLayout(QVBoxLayout())
self.widget_type_combo_box = QComboBox(self)
self.widget_type_combo_box.addItem('std line edit')
self.widget_type_combo_box.addItem('std spin box')
self.widget_type_combo_box.addItem('custom widget')
self.widget_type_combo_box.currentTextChanged.connect(self.widget_type_combo_box_changed)
self.custom_widget_line_edit = QLineEdit()
self.custom_widget_line_edit.setPlaceholderText('input widget name')
self.custom_widget_line_edit.editingFinished.connect(self.widget_name_line_edit_edited)
self.custom_widget_line_edit.setEnabled(False)
self.widget_under_label_radio_button = QRadioButton('widget under label')
self.widget_under_label_radio_button.setChecked(True)
self.widget_besides_label_radio_button = QRadioButton('widget besides label')
self.widget_group_box.layout().addWidget(self.widget_type_combo_box)
self.widget_group_box.layout().addWidget(self.custom_widget_line_edit)
self.widget_group_box.layout().addWidget(self.widget_under_label_radio_button)
self.widget_group_box.layout().addWidget(self.widget_besides_label_radio_button)
self.widget_grid_layout.addWidget(self.widget_group_box, 0, 3, 4, 1)
# del button
self.del_button = QPushButton(self)
self.del_button.setText(' Del ')
self.del_button.clicked.connect(self.delete_clicked)
# create layout
self.grid_layout.addWidget(self.up_button, 0, 0, 1, 1)
self.grid_layout.addWidget(self.down_button, 3, 0, 1, 1)
self.grid_layout.addWidget(self.type_combo_box, 0, 1)
self.grid_layout.addWidget(self.label_text_edit, 1, 1, 3, 1)
self.grid_layout.addLayout(self.widget_grid_layout, 0, 2, 4, 1)
self.grid_layout.addWidget(self.del_button, 0, 4, 4, 1)
def get_type(self):
return self.type_combo_box.currentText()
def get_label(self):
return self.label_text_edit.toPlainText()
def has_widget(self):
return self.widget_yes_radio_button.isChecked()
def set_has_widget(self, has_widget):
if has_widget:
self.widget_yes_radio_button.setChecked(True)
self.widget_no_radio_button.setChecked(False)
else:
self.widget_yes_radio_button.setChecked(False)
self.widget_no_radio_button.setChecked(True)
def get_widget_type(self):
return self.widget_type_combo_box.currentText()
def set_widget_type(self, new_widget_type):
self.widget_type_combo_box.setCurrentText(new_widget_type)
def get_widget_name(self):
return self.content_widget.prepare_class_name(self.custom_widget_line_edit.text())
def set_widget_name(self, name):
self.custom_widget_line_edit.setText(name)
def get_widget_pos(self):
under = self.widget_under_label_radio_button
# besides = self.widget_besides_label_radio_button
return 'under' if under.isChecked() else 'besides'
def set_widget_pos(self, pos):
if pos == 'under':
self.widget_under_label_radio_button.setChecked(True)
self.widget_besides_label_radio_button.setChecked(False)
elif pos == 'besides':
self.widget_under_label_radio_button.setChecked(False)
self.widget_besides_label_radio_button.setChecked(True)
def widget_yes_set(self):
if self.widget_yes_radio_button.isChecked():
self.widget_group_box.setEnabled(True)
else:
self.widget_group_box.setEnabled(False)
def widget_name_line_edit_edited(self):
self.custom_widget_line_edit.setText(self.content_widget.prepare_class_name(self.custom_widget_line_edit.text()))
def widget_type_combo_box_changed(self, new_text):
self.widget_type = new_text
if new_text == 'custom widget':
self.custom_widget_line_edit.setEnabled(True)
else:
self.custom_widget_line_edit.setEnabled(False)
def set_type(self, new_type):
self.type_combo_box.setCurrentText(new_type)
def type_combo_box_changed(self, new_type):
if new_type == 'data':
self.widget_grid_layout.setEnabled(True)
elif new_type == 'exec':
self.widget_grid_layout.setEnabled(False)
def set_label(self, new_label):
self.label_text_edit.setPlainText(new_label)
def delete_clicked(self):
        ret = QMessageBox.warning(self, 'Input', 'Do you really want to delete this input? All changes '
                                                 'will be lost.',
QMessageBox.Yes, QMessageBox.No)
if ret == QMessageBox.Yes:
self.content_widget.delete_input(self)
avg_line_length: 42.04375 | max_line_length: 152 | alphanum_fraction: 0.704326
hexsha: dcb7dde46743662af9ff8f47290bed7f12d2002c | size: 3,199 | ext: py | lang: Python
path: exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/plugins/lookup/test_avi.py | repo: tr3ck3r/linklight | head: 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
# (c) 2019, Sandeep Bandi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
import json
from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
from ansible.errors import AnsibleError
from ansible.plugins.loader import lookup_loader
from ansible_collections.community.general.plugins.lookup import avi
try:
import builtins as __builtin__
except ImportError:
import __builtin__
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
with open(fixture_path + '/avi.json') as json_file:
data = json.load(json_file)
@pytest.fixture
def dummy_credentials():
dummy_credentials = {}
dummy_credentials['controller'] = "192.0.2.13"
dummy_credentials['username'] = "admin"
dummy_credentials['password'] = "password"
dummy_credentials['api_version'] = "17.2.14"
dummy_credentials['tenant'] = 'admin'
return dummy_credentials
@pytest.fixture(scope="function", autouse=True)
def super_switcher():
# Mocking the inbuilt super as it is used in ApiSession initialization
original_super = __builtin__.super
__builtin__.super = MagicMock()
yield
# Revert the super to default state
__builtin__.super = original_super
def test_lookup_multiple_obj(dummy_credentials):
avi_lookup = lookup_loader.get('avi')
avi_mock = MagicMock()
avi_mock.return_value.get.return_value.json.return_value = data["mock_multiple_obj"]
with patch.object(avi, 'ApiSession', avi_mock):
retval = avi_lookup.run([], {}, avi_credentials=dummy_credentials,
obj_type="network")
assert retval == data["mock_multiple_obj"]["results"]
def test_lookup_single_obj(dummy_credentials):
avi_lookup = lookup_loader.get('avi')
avi_mock = MagicMock()
avi_mock.return_value.get_object_by_name.return_value = data["mock_single_obj"]
with patch.object(avi, 'ApiSession', avi_mock):
retval = avi_lookup.run([], {}, avi_credentials=dummy_credentials,
obj_type="network", obj_name='PG-123')
assert retval[0] == data["mock_single_obj"]
def test_invalid_lookup(dummy_credentials):
avi_lookup = lookup_loader.get('avi')
avi_mock = MagicMock()
with pytest.raises(AnsibleError):
with patch.object(avi, 'ApiSession', avi_mock):
avi_lookup.run([], {}, avi_credentials=dummy_credentials)
avg_line_length: 34.397849 | max_line_length: 89 | alphanum_fraction: 0.727727
hexsha: 87721dc837cfcc46d6cb41435ab729954799560c | size: 541 | ext: py | lang: Python
path: exercises/es/test_02_05_02.py | repo: Jette16/spacy-course | head: 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | licenses: ["MIT"]
stars: 2,085 (2019-04-17T13:10:40.000Z to 2022-03-30T21:51:46.000Z) | issues: 79 (2019-04-18T14:42:55.000Z to 2022-03-07T08:15:43.000Z) | forks: 361 (2019-04-17T13:34:32.000Z to 2022-03-28T04:42:45.000Z)
def test():
assert (
"from spacy.tokens import Doc" in __solution__
), "¿Estás importando la clase Doc correctamente?"
assert (
len(spaces) == 5
), "Parece que el número de espacios no concuerda con el número de palabras."
assert all(isinstance(s, bool) for s in spaces), "Los espacios tienen que ser booleanos."
assert [int(s) for s in spaces] == [0, 0, 1, 0, 0], "¿Están correctos los espacios?"
assert doc.text == "¡Vamos, empieza!", "¿Creaste el Doc correctamente?"
__msg__.good("¡Bien!")
avg_line_length: 45.083333 | max_line_length: 93 | alphanum_fraction: 0.645102
hexsha: df185f518b1ffb96918cba7daa6cf65f80a3bf83 | size: 598 | ext: py | lang: Python
path: serve.py | repo: lurch/Pinout2 | head: bd3b39de2c053607b9415dd9eb0297a401ba7162 | licenses: ["CC-BY-4.0"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
from flask import Flask, send_from_directory
import sys
app = Flask(__name__)
lang = 'en'
@app.route('/')
def show_index():
return send_from_directory(basedir,'index.html')
@app.route("/resources/<path:filename>")
def custom_static(filename):
return send_from_directory(basedir + 'resources/', filename)
@app.route("/<path:page>")
def show_page(page):
return send_from_directory(basedir,'{}.html'.format(page))
if __name__ == "__main__":
if len(sys.argv) > 1:
lang = sys.argv[1]
basedir = 'output/{lang}/'.format(lang=lang)
app.run(host='0.0.0.0', debug=True)
avg_line_length: 21.357143 | max_line_length: 61 | alphanum_fraction: 0.70903
hexsha: df812269ac879c53fb5ed863c8c091f330f4846f | size: 228 | ext: py | lang: Python
path: personal-files/python-test/test.py | repo: lyf0327/PycharmProjects | head: ce6507f04a33156a0d6c3f732856dcb655bc2351 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
#!/usr/bin/python
# -*- coding:UTF-8 -*-
from sys import argv

script, filename = argv
txt = open(filename)
print(txt.read())
file_again = input("> ")
txg_again = open(file_again)
print(txg_again.read())
txt.close()
txg_again.close()
avg_line_length: 20.727273 | max_line_length: 26 | alphanum_fraction: 0.72807
hexsha: 63733bb1acddaa24e368933c4cfe58bd56e53809 | size: 212 | ext: py | lang: Python
path: Theories/Algorithms/Recursion1/ReverseString/reverse_string.py | repo: dolong2110/Algorithm-By-Problems-Python | head: 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | licenses: ["MIT"]
stars: 1 (2021-08-16T14:52:05.000Z to 2021-08-16T14:52:05.000Z) | issues: null | forks: null
from typing import List
def reverseString(s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
for i in range(len(s) // 2):
s[i], s[-i - 1] = s[-i - 1], s[i]
avg_line_length: 23.555556 | max_line_length: 54 | alphanum_fraction: 0.537736
hexsha: 8967e17c2b719b250094aa362ace32622a48bc56 | size: 1,067 | ext: py | lang: Python
path: 00 Calculus Operations in SymPy.py | repo: Verma314/Experiments-in-Symbolic-Computation | head: baf800c5ff69cc125df8c84ab93c4f40a7aba725 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from sympy import *

# define sympy symbols for the variables
x, t, z, nu = symbols('x t z nu')

# for pretty printing:
init_printing(use_unicode=True)

# take a derivative of [ sin(x) e^x ]
print("Differentiating sin(x) e^x")
print(diff(sin(x) * exp(x), x))

# integration:
print("\n Integrating the result:")
print(integrate(exp(x) * sin(x) + exp(x) * cos(x), x))

print("\n Integrating with limits -oo to +oo")
print(integrate(sin(x**2), (x, -oo, oo)))

print("\n Finding the limit as x -> 0")
print(limit(sin(x) / x, x, 0))

print("\n Solving an equation:")
print(solve(x**2 - 2, x))

print("\n Solving a differential equation:")
y = Function('y')
print(dsolve(Eq(y(t).diff(t, t) - y(t), exp(t)), y(t)))

print("\n Finding eigenvalues:")
print(Matrix([[1, 2], [2, 2]]).eigenvals())

# to check mathematical equality between two expressions a and b:
# if simplify(a - b) evaluates to 0 => a == b
x = symbols('x')
a = (x + 1)**2
b = x**2 + 2*x + 1
print(simplify(a - b))
avg_line_length: 27.358974 | max_line_length: 69 | alphanum_fraction: 0.56701
hexsha: 98730936a520d6e0bb3dfcfd99ec7be3b5175c16 | size: 2,796 | ext: py | lang: Python
path: kts/core/backend/signal.py | repo: konodyuk/kts | head: 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | licenses: ["MIT"]
stars: 18 (2019-02-14T13:10:07.000Z to 2021-11-26T07:10:13.000Z) | issues: 2 (2019-02-17T14:06:42.000Z to 2019-09-15T18:05:54.000Z) | forks: 2 (2019-09-15T13:12:42.000Z to 2020-04-15T14:05:54.000Z)
import os
from collections import defaultdict
from typing import Union, Tuple, List, Any
import ray
from kts.core.run_id import RunID
try:
from ray.experimental import get_actor # ray<=0.8.1
except ImportError:
from ray.util import get_actor # ray>=0.8.2
def _get_task_id(source):
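    # Map an actor handle, TaskID, or object ID to the task/actor ID that
    # is used as a signal key.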
if type(source) is ray.actor.ActorHandle:
return source._actor_id
else:
if type(source) is ray.TaskID:
return source
else:
return ray._raylet.compute_task_id(source)
@ray.remote
class SignalManager:
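    # Buffers signals per source key; get() returns only entries that have
    # not been seen yet, then marks everything for that key as seen.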
def __init__(self):
self.data = defaultdict(list)
def put(self, key, value):
self.data[key].append([value, 0])
def get(self, key):
result = [value for value, seen in self.data[key] if not seen]
for item in self.data[key]:
item[1] = 1
return result
def clear(self):
self.data.clear()
pid = os.getpid()
def get_signal_manager():
return get_actor(f"SignalManager{pid}")
def create_signal_manager():
return SignalManager.options(name=f"SignalManager{pid}").remote()
def send(signal):
if ray.worker.global_worker.actor_id.is_nil():
source_key = ray.worker.global_worker.current_task_id.hex()
else:
source_key = ray.worker.global_worker.actor_id.hex()
signal_manager = get_signal_manager()
signal_manager.put.remote(source_key, signal)
def receive(sources, timeout=None):
signal_manager = get_signal_manager()
task_id_to_sources = dict()
for s in sources:
task_id_to_sources[_get_task_id(s).hex()] = s
results = []
for task_id in task_id_to_sources:
signals = ray.get(signal_manager.get.remote(task_id))
for signal in signals:
results.append((task_id_to_sources[task_id], signal))
return results
class Signal:
pass
class ErrorSignal(Signal):
def __init__(self, error):
self.error = error
class Sync(Signal):
def __init__(self, run_id, res_df, res_state, stats):
self.run_id = run_id
self.res_df = res_df
self.res_state = res_state
self.stats = stats
def get_contents(self):
return {
'run_id': self.run_id,
'res_df': self.res_df,
'res_state': self.res_state,
'stats': self.stats,
}
class ResourceRequest(Signal):
def __init__(self, key: Union[RunID, Tuple[str, str], str]):
self.key = key
def get_contents(self):
return self.key
class RunPID(Signal):
def __init__(self, pid):
self.pid = pid
def get_contents(self):
return self.pid
def filter_signals(signals: List[Tuple[Any, Signal]], signal_type: type):
return [i[1] for i in signals if isinstance(i[1], signal_type)]
avg_line_length: 24.103448 | max_line_length: 73 | alphanum_fraction: 0.64628
hexsha: 7f83f70ca09f602baaa5bde57979658c4e48db88 | size: 583 | ext: py | lang: Python
path: products/migrations/0002_amazoncategory_sales_rank.py | repo: silver-whale-enterprises-llc/amzproduzer | head: 25e63f64b0ef09241475c72af9a710dcb7d9e926 | licenses: ["Apache-2.0"]
stars: 1 (2019-07-22T14:03:11.000Z to 2019-07-22T14:03:11.000Z) | issues: null | forks: null
# Generated by Django 2.1.7 on 2019-03-16 14:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('configurations', '0001_initial'),
('products', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='amazoncategory',
name='sales_rank',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='amazon_ids', to='configurations.CategorySalesRank'),
),
]
avg_line_length: 27.761905 | max_line_length: 170 | alphanum_fraction: 0.658662
hexsha: 63e277177ce2004787ea9567977b39099b96ebe7 | size: 1,066 | ext: bzl | lang: Python
path: tools/platforms/toolchain_config_suite_spec.bzl | repo: wcalandro/kythe | head: 64969a853711c228b4e3cfc3ce91b84b5bb853d7 | licenses: ["Apache-2.0"]
stars: 703 (2018-11-05T22:51:58.000Z to 2022-03-31T21:54:22.000Z) | issues: 720 (2018-11-05T21:29:49.000Z to 2022-03-29T19:43:50.000Z) | forks: 99 (2018-11-06T22:20:08.000Z to 2022-03-16T12:50:35.000Z)
# Copyright 2020 The Kythe Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RBE toolchain specification."""
load("//tools/platforms/configs:versions.bzl", "TOOLCHAIN_CONFIG_AUTOGEN_SPEC")
DEFAULT_TOOLCHAIN_CONFIG_SUITE_SPEC = {
"repo_name": "io_kythe",
"output_base": "tools/platforms/configs",
"container_repo": "kythe-repo/kythe-builder",
"container_registry": "gcr.io",
"default_java_home": "/usr/lib/jvm/11.29.3-ca-jdk11.0.2/reduced",
"toolchain_config_suite_autogen_spec": TOOLCHAIN_CONFIG_AUTOGEN_SPEC,
}
avg_line_length: 39.481481 | max_line_length: 79 | alphanum_fraction: 0.754221
hexsha: 89e684f23e912f35b1d93b1cf51a643243c6a207 | size: 10,538 | ext: py | lang: Python
path: examples/model_interpretation/rationale_extraction/newp_text_generate.py | repo: mukaiu/PaddleNLP | head: 0315365dbafa6e3b1c7147121ba85e05884125a5 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import math
import numpy as np
import argparse
import os
import sys
def get_args():
parser = argparse.ArgumentParser('generate data')
parser.add_argument('--pred_path', required=True)
parser.add_argument('--save_path', required=True)
parser.add_argument('--language', required=True)
parser.add_argument('--task', required=True)
parser.add_argument('--ratio', type=str, required=True)
args = parser.parse_args()
return args
def evids_load(path):
evids = []
with open(path, 'r') as f:
for line in f.readlines():
dic = json.loads(line)
evids.append(dic)
return evids
def generate_for_senti(args, evid_dict, ratio):
r = {}
ex_r = {}
raw_text = evid_dict['context']
label = evid_dict['pred_label']
char_attri = list(evid_dict['char_attri'].keys())
length = len(char_attri)
rationale_ratio = ratio[0]
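    # char_attri keys are token indices, presumably ordered by attribution
    # score: the first ceil(length * ratio) tokens form the rationale, the
    # remainder its complement.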
toprationale_text, toprationale_exclusive_text = [], []
keys = [int(x) for x in char_attri[:math.ceil(length * rationale_ratio)]]
keys.sort()
for key in keys:
toprationale_text.append(evid_dict['char_attri'][str(key)][0].strip())
keys = [int(x) for x in char_attri[math.ceil(length * rationale_ratio):]]
keys.sort()
for key in keys:
toprationale_exclusive_text.append(
evid_dict['char_attri'][str(key)][0].strip())
if args.language == 'en':
toprationale_text = ' '.join(toprationale_text)
toprationale_exclusive_text = ' '.join(toprationale_exclusive_text)
else:
toprationale_text = ''.join(toprationale_text)
toprationale_exclusive_text = ''.join(toprationale_exclusive_text)
if len(toprationale_text) == 0:
toprationale_text = "['UNK']"
if len(toprationale_exclusive_text) == 0:
toprationale_exclusive_text = "['UNK']"
r['id'] = evid_dict['id']
r['context'] = toprationale_text
r['context_idx'] = [[
int(x) for x in char_attri[:math.ceil(length * rationale_ratio)]
]]
r['context_token'] = [[
evid_dict['char_attri'][x][0]
for x in char_attri[:math.ceil(length * rationale_ratio)]
]]
r['label'] = label
ex_r['id'] = evid_dict['id']
ex_r['context'] = toprationale_exclusive_text
ex_r['context_idx'] = [[
int(x) for x in char_attri[math.ceil(length * rationale_ratio):]
]]
ex_r['context_token'] = [[
evid_dict['char_attri'][x][0]
for x in char_attri[math.ceil(length * rationale_ratio):]
]]
ex_r['label'] = label
return r, ex_r
def generate_for_similarity(args, evid_dict, ratio):
r = {}
ex_r = {}
q_rationale_ratio = ratio[0]
t_rationale_ratio = ratio[1]
label = evid_dict['pred_label']
# query
q_text = evid_dict['query']
q_char_attri = list(evid_dict['query_char_attri'].keys())
q_length = len(q_char_attri)
q_topR_Rtext, q_topR_noRtext = [], []
keys = [
int(x) for x in q_char_attri[:math.ceil(q_length * q_rationale_ratio)]
]
keys.sort()
for key in keys:
q_topR_Rtext.append(evid_dict['query_char_attri'][str(key)][0].strip())
keys = [
int(x) for x in q_char_attri[math.ceil(q_length * q_rationale_ratio):]
]
keys.sort()
for key in keys:
q_topR_noRtext.append(
evid_dict['query_char_attri'][str(key)][0].strip())
if args.language == 'ch':
q_topR_Rtext = ''.join(q_topR_Rtext)
q_topR_noRtext = ''.join(q_topR_noRtext)
else:
q_topR_Rtext = ' '.join(q_topR_Rtext)
q_topR_noRtext = ' '.join(q_topR_noRtext)
if len(q_topR_Rtext) == 0:
q_topR_Rtext = "['UNK']"
if len(q_topR_noRtext) == 0:
q_topR_noRtext = "['UNK']"
# title
t_text = evid_dict['title']
t_char_attri = list(evid_dict['title_char_attri'].keys())
t_length = len(t_char_attri)
t_topR_Rtext, t_topR_noRtext = [], []
keys = [
int(x) for x in t_char_attri[:math.ceil(t_length * t_rationale_ratio)]
]
keys.sort()
for key in keys:
t_topR_Rtext.append(evid_dict['title_char_attri'][str(key)][0])
keys = [
int(x) for x in t_char_attri[math.ceil(t_length * t_rationale_ratio):]
]
keys.sort()
for key in keys:
t_topR_noRtext.append(evid_dict['title_char_attri'][str(key)][0])
if args.language == 'ch':
t_topR_Rtext = ''.join(t_topR_Rtext)
t_topR_noRtext = ''.join(t_topR_noRtext)
else:
t_topR_Rtext = ' '.join(t_topR_Rtext)
t_topR_noRtext = ' '.join(t_topR_noRtext)
if len(t_topR_Rtext) == 0:
t_topR_Rtext = "['UNK']"
if len(t_topR_noRtext) == 0:
t_topR_noRtext = "['UNK']"
r['id'] = evid_dict['id']
r['context'] = [q_topR_Rtext, t_topR_Rtext]
r['context_idx'] = [[
int(x) for x in q_char_attri[:math.ceil(q_length * q_rationale_ratio)]
], [int(x) for x in t_char_attri[:math.ceil(t_length * t_rationale_ratio)]]]
r['context_token'] = [
[
evid_dict['query_char_attri'][x][0]
for x in q_char_attri[:math.ceil(q_length * q_rationale_ratio)]
],
[
evid_dict['title_char_attri'][x][0]
for x in t_char_attri[:math.ceil(t_length * t_rationale_ratio)]
]
]
r['label'] = label
ex_r['id'] = evid_dict['id']
ex_r['context'] = [q_topR_noRtext, t_topR_noRtext]
ex_r['context_idx'] = [[
int(x) for x in q_char_attri[math.ceil(q_length * q_rationale_ratio):]
], [int(x) for x in t_char_attri[math.ceil(t_length * t_rationale_ratio):]]]
ex_r['context_token'] = [
[
evid_dict['query_char_attri'][x][0]
for x in q_char_attri[math.ceil(q_length * q_rationale_ratio):]
],
[
evid_dict['title_char_attri'][x][0]
for x in t_char_attri[math.ceil(t_length * t_rationale_ratio):]
]
]
ex_r['label'] = label
return r, ex_r
def generate_for_MRC(args, evid_dict, ratio):
id = evid_dict['id']
raw_text = evid_dict['context'] + evid_dict['title']
question = evid_dict['question']
char_attri = list(evid_dict['char_attri'].keys())
length = len(char_attri)
rationale_ratio = ratio[0]
toprationale_text, toprationale_exclusive_text = [], []
keys = [int(x) for x in char_attri[:math.ceil(length * rationale_ratio)]]
keys.sort()
for key in keys:
toprationale_text.append(evid_dict['char_attri'][str(key)][0].strip())
keys = [int(x) for x in char_attri[math.ceil(length * rationale_ratio):]]
keys.sort()
for key in keys:
toprationale_exclusive_text.append(
evid_dict['char_attri'][str(key)][0].strip())
if args.language == 'en':
toprationale_text = ' '.join(toprationale_text)
toprationale_exclusive_text = ' '.join(toprationale_exclusive_text)
else:
toprationale_text = ''.join(toprationale_text)
toprationale_exclusive_text = ''.join(toprationale_exclusive_text)
if len(toprationale_text) == 0:
toprationale_text = "['UNK']"
if len(toprationale_exclusive_text) == 0:
toprationale_exclusive_text = "['UNK']"
data_R_dict, Rdata_noR_dict = {}, {}
data_R_dict['id'] = id
data_R_dict['title'] = ""
data_R_dict['context'] = toprationale_text
data_R_dict['question'] = question
data_R_dict['answers'] = ['']
data_R_dict['answer_starts'] = [-1]
data_R_dict['is_impossible'] = False
data_R_dict['context_idx'] = [[
int(x) for x in char_attri[:math.ceil(length * rationale_ratio)]
]]
data_R_dict['context_token'] = [[
evid_dict['char_attri'][x][0]
for x in char_attri[:math.ceil(length * rationale_ratio)]
]]
Rdata_noR_dict['id'] = id
Rdata_noR_dict['title'] = ""
Rdata_noR_dict['context'] = toprationale_exclusive_text
Rdata_noR_dict['question'] = question
Rdata_noR_dict['answers'] = ['']
Rdata_noR_dict['answer_starts'] = [-1]
Rdata_noR_dict['is_impossible'] = False
Rdata_noR_dict['context_idx'] = [[
int(x) for x in char_attri[math.ceil(length * rationale_ratio):]
]]
Rdata_noR_dict['context_token'] = [[
evid_dict['char_attri'][x][0]
for x in char_attri[math.ceil(length * rationale_ratio):]
]]
return data_R_dict, Rdata_noR_dict
def r_text_generation(evids, args):
print('num: {}'.format(len(evids)))
f_rationale_path = os.path.join(args.save_path, 'rationale_text/dev')
f_rationale_exclusive_path = os.path.join(args.save_path,
'rationale_exclusive_text/dev')
if not os.path.exists(f_rationale_path):
os.makedirs(f_rationale_path)
if not os.path.exists(f_rationale_exclusive_path):
os.makedirs(f_rationale_exclusive_path)
f_rationale = open(os.path.join(f_rationale_path, 'dev'), 'w')
f_rationale_exclusive = open(
os.path.join(f_rationale_exclusive_path, 'dev'), 'w')
rationale_ratio = json.loads(args.ratio)
for id, evid_dict in enumerate(evids):
if args.task == 'senti':
data_R_dict, Rdata_noR_dict = generate_for_senti(
args, evid_dict, rationale_ratio)
elif args.task == 'similarity':
data_R_dict, Rdata_noR_dict = generate_for_similarity(
args, evid_dict, rationale_ratio)
elif args.task == 'mrc':
data_R_dict, Rdata_noR_dict = generate_for_MRC(
args, evid_dict, rationale_ratio)
f_rationale.write(json.dumps(data_R_dict, ensure_ascii=False) + '\n')
f_rationale_exclusive.write(
json.dumps(Rdata_noR_dict, ensure_ascii=False) + '\n')
f_rationale.close()
f_rationale_exclusive.close()
if __name__ == '__main__':
args = get_args()
evids = evids_load(args.pred_path)
r_text_generation(evids, args)
avg_line_length: 33.138365 | max_line_length: 80 | alphanum_fraction: 0.638546
hexsha: 89fe1e6c6b51baaffbd930436027c6f14a2b82ee | size: 210 | ext: py | lang: Python
path: etc/ipcdemo/B.py | repo: huberthoegl/tsgrain | head: 405d0ba8b98c2afa950d27294e55cd1e07506db4 | licenses: ["Apache-2.0"]
stars: 1 (2021-06-15T08:59:02.000Z to 2021-06-15T08:59:02.000Z) | issues: null | forks: null
import time
import ipc
ipc.open()
ab = ipc.AtoB()
ba = ipc.BtoA()
n = 0
while True:
msg = ab.getb() # blocking get
ba.put("bla")
time.sleep(1)
n = n + 1
if n % 50 == 0:
print(n)
avg_line_length: 12.352941 | max_line_length: 35 | alphanum_fraction: 0.514286
hexsha: d61ce7ba203e31ce62c99595e2c5ae79bee8369c | size: 814 | ext: py | lang: Python
path: pythonProj/FZPython/datas/datas.py | repo: iHamburg/FZQuant | head: 86b750ec33d01badfd3f324d6f1599118b9bf8ff | licenses: ["MIT"]
stars: null | issues: null | forks: 2 (2019-04-10T10:05:00.000Z to 2021-11-24T17:17:23.000Z)
#!/usr/bin/env python
# coding: utf8
import tushare as ts
import backtrader as bt
import pandas as pd
# Download 20 years of SSE Composite Index data
codeList = []
df = ts.get_k_data('000002',index=False,start='1990-01-01')
# df = ts.get_k_data('600435',index=True,start='2016-01-01')
df = df.set_index('date')
# df.to_csv('index/399006.csv', columns=['open','high','close','low','volume'])
df.to_csv('stock/000002.csv', columns=['open','high','close','low','volume'])
#
# # Save directly
# df.to_csv('000001-2016-2017_2.csv')
# df = pd.read_csv('000001.csv', parse_dates=True,index_col=0)
#
# data = bt.feeds.PandasData(dataname=df,
# # datetime='Date',
#
# nocase=True,
# )
# cerebro = bt.Cerebro()
# cerebro.adddata(data)
# cerebro.run()
# cerebro.plot()
avg_line_length: 24.666667 | max_line_length: 79 | alphanum_fraction: 0.59828
hexsha: 61a6a97a3a0e8481d6750c4a618a7570c7281e48 | size: 849 | ext: py | lang: Python
path: system/withdraw.py | repo: billalxcode/ATM-cli | head: 68e295b1e3c152ee5c35913af84d72468cab06e0 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from .database import Database
class Withdraw:
def __init__(self) -> None:
self.connection = Database("database/database.db")
self.connection.connect()
def get(self, card, nominal=0):
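        # Reject withdrawals of 10,000 or less; otherwise deduct the amount
        # when the card balance covers it.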
if int(nominal) <= 10000:
print ("[INFO]: Minimal penarikan 10 ribu")
return False
else:
data = self.connection.get("balance", "card", f"card_number={card}")
balance = data[0][0]
if int(nominal) < balance:
newBalance = balance - int(nominal)
self.connection.update("card", f"balance={newBalance}", where=f"card_number={card}")
print ("[INFO]: Penarikan berhasil")
return True
else:
print ("[INFO]: Maaf saldo anda kurang, silahkan cek saldo")
return False
avg_line_length: 38.590909 | max_line_length: 100 | alphanum_fraction: 0.552415
hexsha: 4eeaf58a17ac8c23669f9ce4e5b6c9559977c847 | size: 516 | ext: py | lang: Python
repo: newnius/YAO-Dockerfiles | head: eb0e494f20dcc0959ef550e4bf55e2015baf45ec | licenses: ["Apache-2.0"]
stars path: tensorflow/2.1-gpu/save.py | issues/forks path: tensorflow/1.14-gpu/save.py
stars: 2 (2020-07-29T08:57:40.000Z to 2020-07-31T06:38:25.000Z) | issues: null | forks: 2 (2020-07-29T09:01:59.000Z to 2020-12-17T13:07:34.000Z)
from hdfs import *
import os
import time
if __name__ == '__main__':
os.environ["TZ"] = 'Asia/Shanghai'
if hasattr(time, 'tzset'):
time.tzset()
try:
hdfs_address = os.environ['hdfs_address']
hdfs_dir = os.environ['hdfs_dir']
output_dir = os.environ['output_dir']
client = Client(hdfs_address)
client.upload(hdfs_dir, output_dir)
        print('Save ' + output_dir + ' to ' + hdfs_address + ' ' + hdfs_dir)
except Exception as e:
print('Unable to persist data to HDFS,', str(e))
avg_line_length: 24.571429 | max_line_length: 71 | alphanum_fraction: 0.653101
hexsha: f624bb6227580bc61e0e6a8eb43dfd9747bc731f | size: 230 | ext: py | lang: Python
path: Licence 2/I33/TP 2/ex_2.py | repo: axelcoezard/licence | head: 1ed409c4572dea080169171beb7e8571159ba071 | licenses: ["MIT"]
stars: 8 (2020-11-26T20:45:12.000Z to 2021-11-29T15:46:22.000Z) | issues: null | forks: 6 (2020-10-23T15:29:24.000Z to 2021-05-05T19:10:45.000Z)
def euclide_e(a, n):
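    # Extended Euclid: returns [u, v, g] with u*a + v*n == g == gcd(a, n).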
u, v = 1, 0
u1, v1 = 0, 1
while n > 0:
u1_t = u - a // n * u1
v1_t = v - a // n * v1
u, v = u1, v1
u1, v1 = u1_t, v1_t
        a, n = n, a - a // n * n
return [u, v, a]
print(euclide_e(39, 5))
avg_line_length: 16.428571 | max_line_length: 27 | alphanum_fraction: 0.434783
hexsha: 9c95e15bffddf566c840fe27327f41749dc01a86 | size: 541 | ext: py | lang: Python
path: Coursera/Interfacing with the Raspberry Pi/Week-2/Excercise/Server.py | repo: rishav3101/Online-Courses-Learning | head: 1e9356af331b27b6ee33d376d8d7104edaeac2fa | licenses: ["MIT"]
stars: 331 (2019-10-22T09:06:28.000Z to 2022-03-27T13:36:03.000Z) | issues: 8 (2020-04-10T07:59:06.000Z to 2022-02-06T11:36:47.000Z) | forks: 572 (2019-07-28T23:43:35.000Z to 2022-03-27T22:40:08.000Z)
import socket
import sys
#b
if __name__ == '__main__':
ip = 'x.x.x.x'
port = 1234
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
        mysocket.bind((ip, port))
except socket.error:
print("Failed to bind")
sys.exit()
mysocket.listen(5)
while True:
connection, addr = mysocket.accept()
data = connection.recv(1000)
if not data:
break
print(data)
#connection.sendall(data)
connection.close()
mysocket.close()
avg_line_length: 19.321429 | max_line_length: 64 | alphanum_fraction: 0.57671
hexsha: 14a80c4e214db0e123a0dab4bd743d4715183091 | size: 979 | ext: py | lang: Python
path: DataStructures/BalancedTrees/Median.py | repo: baby5/HackerRank | head: 1e68a85f40499adb9b52a4da16936f85ac231233 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# coding: utf-8
import bisect

N = int(input())
array = []
for _ in range(N):
    operation, x = input().split()
    if operation == 'a':
        # bisect.insort keeps the list sorted without a full re-sort
        bisect.insort(array, int(x))
        length = len(array)
        if length % 2 == 0:
            index = length // 2
            L = array[index - 1]
            R = array[index]
            print((L + R) / 2.0 if (L + R) % 2 != 0 else (L + R) // 2)
        else:
            print(array[length // 2])
    elif operation == 'r':
        if array == [] or int(x) not in array:
            print('Wrong!')
            continue
        array.remove(int(x))
        length = len(array)
        if length == 0:
            print('Wrong!')
            continue
        if length % 2 == 0:
            index = length // 2
            L = array[index - 1]
            R = array[index]
            print((L + R) / 2.0 if (L + R) % 2 != 0 else (L + R) // 2)
        else:
            print(array[length // 2])
avg_line_length: 26.459459 | max_line_length: 61 | alphanum_fraction: 0.443309
hexsha: 2ecb9a5df6a2384cb617dc9d5d10bdedab19dae6 | size: 2,547 | ext: py | lang: Python
path: demos/process-results.py | repo: allprojects/thesis-consyst-operation-types | head: fbed2cabadd96958e11f6f60f531081cee44ad91 | licenses: ["Apache-2.0"]
stars: 1 (2021-03-30T19:36:13.000Z to 2021-03-30T19:36:13.000Z) | issues: null | forks: null
import csv
import sys
import argparse
import os
# import plotly.graph_objects as go
# import plotly.express as px
import pandas as pd
import numpy as np, scipy.stats as st
arg_parser = argparse.ArgumentParser(description='Process benchmark outputs.')
arg_parser.add_argument('output', metavar='Output',
help='Name of the output file.')
arg_parser.add_argument('inputs', metavar='Inputs', nargs='*',
help='Directories that contains only csv files that are parsed. Every directory is assumed to be a different data point.' +
'Every direction has to have a transaction number associated to it. Example: results/benchmark1/:100')
args = arg_parser.parse_args()
# creates a list of lists with all the paths for a single project
data = pd.DataFrame([], columns = ['file', 'mean', 'median' ,'len', 'std', 'conf_1_low', 'conf_1_high', 'conf_2_low', 'conf_2_high'])
print("Start processing...", end = '')
for input in args.inputs :
splitted_input = input.split(":", 1)
path = splitted_input[0]
num_of_transactions = int(splitted_input[1])
csv_paths = [path + '/' + filepath for filepath in os.listdir(path) if filepath.startswith('proc')]
times = []
for csv_path in csv_paths:
print(f"\rProcessing {csv_path}...", end ='')
dataframe = pd.read_csv(open(csv_path), delimiter=',')
for row in dataframe.iterrows():
times.append(row[1]['ns'])
times_ms = [(time / 1000000) / num_of_transactions for time in times]
#Compute interesting data points
arr = np.array(times_ms)
print(arr)
count = len(arr)
mean = np.mean(arr)
median = np.median(arr)
standard_dev = np.std(arr)
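    # Two-sided Student-t intervals at 1-sigma (68.27%) and 2-sigma (95.45%) coverage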
confidence_1sig = st.t.interval(0.6827, count - 1, loc=mean, scale=st.sem(arr))
confidence_2sig = st.t.interval(0.9545, count - 1, loc=mean, scale=st.sem(arr))
print()
print("***")
print(f'Results for {path}')
print(f'Total number of entries: {count}')
print(f'Mean: {mean}ms')
print(f'Median: {median}ms')
print(f'Standard deviation: {standard_dev}ms')
print(f'68% confidence interval: {confidence_1sig}')
print(f'95% confidence interval: {confidence_2sig}')
# Add data that should be plotted
new_data = pd.DataFrame(
[[path, mean, median, count, standard_dev, confidence_1sig[0], confidence_1sig[1], confidence_2sig[0], confidence_2sig[1]]],
columns = ['file', 'mean', 'median' ,'len', 'std', 'conf_1_low', 'conf_1_high', 'conf_2_low', 'conf_2_high']
)
data = data.append(new_data)
print("***")
data = data.set_index('file')
print(data)
data.to_csv(args.output, sep = ';')
# fig = px.bar(data, x = 'file', y = 'mean')
# fig.show()
avg_line_length: 31.060976 | max_line_length: 133 | alphanum_fraction: 0.704751
hexsha: d33cd300bb9b9d781ad3a18a1eca6b9212de6c31 | size: 1,324 | ext: py | lang: Python
path: Source/09_Chat_Industrial/publisher.py | repo: rbiotblbk/WBS_T9_2022 | head: 533156db88ff2fe676564b0e5d6e84e888ab0916 | licenses: ["MIT"]
stars: 1 (2022-02-28T09:49:35.000Z to 2022-02-28T09:49:35.000Z) | issues: null | forks: null
# pip install pubnub
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
import os
from pathlib import Path
import json
os.chdir(Path(__file__).parent)
# Read Config
with open("./pubnub.json", mode= "r") as file:
content = file.read()
json_dict = json.loads(content)
pnconfig = PNConfiguration()
pnconfig.publish_key = json_dict["publish_key"]
pnconfig.subscribe_key = json_dict["subscribe_key"]
pnconfig.uuid = json_dict["app_uuid"]
pnconfig.ssl = True
pubnub = PubNub(pnconfig)
class MySubscribeCallback(SubscribeCallback):
def presence(self, pubnub, presence):
pass
def status(self, pubnub, status):
pass
def message(self, pubnub, message):
print("from device 1: " + message.message)
def my_publish_callback(envelope, status):
# Check whether request successfully completed or not
if not status.is_error():
pass
pubnub.add_listener(MySubscribeCallback())
pubnub.subscribe().channels("chan-1").execute()
## publish a message
while True:
msg = input("Input a message to publish: ")
if msg == 'exit':
os._exit(1)
pubnub.publish().channel("chan-1").message(str(msg)).pn_async(my_publish_callback)
avg_line_length: 22.440678 | max_line_length: 86 | alphanum_fraction: 0.719789
hexsha: d36912d73bfaa1f71d5eda5b2b7cafb0ba626d2c | size: 322 | ext: py | lang: Python
path: Licence 1/I22/TP 5/tp_5_3.py | repo: axelcoezard/licence | head: 1ed409c4572dea080169171beb7e8571159ba071 | licenses: ["MIT"]
stars: 8 (2020-11-26T20:45:12.000Z to 2021-11-29T15:46:22.000Z) | issues: null | forks: 6 (2020-10-23T15:29:24.000Z to 2021-05-05T19:10:45.000Z)
def majuscule(chaine):
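    # Uppercase ASCII letters by shifting their code points down by 32.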
whitelist = "abcdefghijklmnopqrstuvwxyz"
chrs = [chr(ord(c) - 32) if c in whitelist else c for c in chaine]
return "".join(chrs)
print(majuscule("axel coezard"))
def val2ascii(entier):
ch = ""
for i in range(entier):
ch = chr(ord(ch) + 1)
print(ch)
val2ascii(355)
avg_line_length: 21.466667 | max_line_length: 70 | alphanum_fraction: 0.630435
hexsha: d3b049472c8a20a967c5e9b77793036d8612972a | size: 419 | ext: py | lang: Python
path: source/blog/migrations/0003_blogpost_author.py | repo: JakubGutowski/PersonalBlog | head: 96122b36486f7e874c013e50d939732a43db309f | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
# Generated by Django 2.0.5 on 2018-06-23 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20180616_1236'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='author',
field=models.CharField(default='Gutech', max_length=20),
),
]
avg_line_length: 22.052632 | max_line_length: 69 | alphanum_fraction: 0.575179
hexsha: 9f2a1cbf98036ecfa33762b217555321b413ef81 | size: 1,215 | ext: py | lang: Python
path: src/logging/test/tango_test.py | head: 8c6cbda04a83b0e16987019406ed6ec7e1058a31 | licenses: ["BSD-3-Clause"]
stars/issues repo: ska-telescope/sdp-prototype | forks repo: ska-telescope/sdp-configuration-prototype
stars: 2 (2019-07-15T09:49:34.000Z to 2019-10-14T16:04:17.000Z) | issues: 17 (2019-07-15T14:51:50.000Z to 2021-06-02T00:29:43.000Z) | forks: 1 (2019-10-10T08:16:48.000Z to 2019-10-10T08:16:48.000Z)
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
# pylint: disable-msg=E0611
# pylint: disable-msg=E0401
# pylint: disable-msg=R0201
# pylint: disable-msg=E1120
# pylint: disable-msg=C0111
# pylint: disable-msg=W0621
# pylint: disable-msg=W0613
# pylint: disable=fixme
import logging
import pytest
from ska_sdp_logging import core_logging, tango_logging
import core_test
class FakeDevice:
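    # Minimal stand-in for a Tango device: exposes the two hooks that the
    # logging shim replaces (info_stream and get_logger).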
def info_stream(self, _: str, *args) -> None:
print("info stream should not be called")
def get_logger(self) -> logging.Logger:
return None
@pytest.fixture
def init():
log = tango_logging.init(device_name="tango test", device_class=FakeDevice)
hnd = core_test.ListHandler()
log.addHandler(hnd)
FakeDevice.get_logger = lambda self: log
return log, hnd
def test_tango_logging(init):
log, hnd = init
dev = FakeDevice()
assert dev.get_logger() is log
dev.info_stream("Running tango test")
dev.info_stream("Running %s test", "tango")
assert len(hnd.list) == 2
for log_entry in hnd.list:
record = core_logging.SkaLogRecord.from_string(log_entry)
assert record.msg == "Running tango test"
| 25.851064 | 79 | 0.704527 |
e2940c70e0b67269ae269c3ff5a70c9c8fb20cf9
| 12,191 |
py
|
Python
|
Packs/NetscoutArborSightline/Integrations/NetscoutArborSightline/NetscoutArborSightline_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/NetscoutArborSightline/Integrations/NetscoutArborSightline/NetscoutArborSightline_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/NetscoutArborSightline/Integrations/NetscoutArborSightline/NetscoutArborSightline_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import io
from copy import deepcopy
import pytest
from NetscoutArborSightline import NetscoutClient, \
fetch_incidents_command, list_alerts_command, alert_annotation_list_command, mitigation_list_command, \
mitigation_template_list_command, router_list_command, tms_group_list_command, managed_object_list_command, \
mitigation_create_command, clean_links, validate_json_arg, build_human_readable
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
# from Packs
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
client = NetscoutClient(base_url='dummy_url', verify=False, proxy=False, first_fetch='3 days', max_fetch=10)
http_responses = util_load_json('test_data/http_responses.json')
command_results = util_load_json('test_data/command_results.json')
@pytest.fixture(autouse=True)
def setup(mocker):
mocker.patch.object(demisto, 'debug')
def test_fetch_incidents_command(mocker):
"""
Given:
- NetscoutClient client.
When:
- Fetching incidents.
Then:
- Ensure that the incidents returned are as expected.
"""
alerts_http_response = http_responses['incidents']
alerts_command_results = command_results['fetched_incidents']
mocker.patch.object(client, "list_alerts", return_value=alerts_http_response)
mocker.patch.object(client, "calculate_amount_of_incidents", return_value=40)
mocker.patch.object(demisto, 'incidents')
fetch_incidents_command(client)
demisto.incidents.assert_called_with(alerts_command_results)
@pytest.mark.parametrize(
'function_to_mock, function_to_test, args, http_response_key, expected_command_results_key', [
('list_alerts', list_alerts_command, {}, 'incidents', 'get_incidents'),
('get_alert', list_alerts_command, {'alert_id': 1}, 'incident', 'get_incident'),
('get_annotations', alert_annotation_list_command, {'alert_id': '2009'}, 'annotations', 'list_annotations'),
('list_mitigations', mitigation_list_command, {'limit': '3'}, 'mitigations', 'list_mitigations'),
('create_mitigation', mitigation_create_command,
{"description": "just desc", "ip_version": "IPv4", "name": "test_mit", "ongoing": "true",
"sub_object": "{\"protection_prefixes\": [\"192.0.2.0/24\"]}", "sub_type": "flowspec"}, 'mitigation',
'create_mitigation'),
('mitigation_template_list', mitigation_template_list_command, {}, 'mitigation_templates',
'list_mitigation_templates'),
('router_list', router_list_command, {}, 'routers', 'list_routers'),
('managed_object_list', managed_object_list_command, {}, 'managed_objects', 'list_managed_objects'),
('tms_group_list', tms_group_list_command, {}, 'tms_groups', 'list_tms_group'),
])
def test_commands(mocker, function_to_mock, function_to_test, args, http_response_key,
expected_command_results_key):
"""
Given:
- NetscoutClient client.
When:
- Case A: Calling the list_alerts_command function.
- Case B: Calling the list_alerts_command function with a specific alert.
- Case C: Calling the alert_annotation_list_command function.
- Case D: Calling the mitigation_list_command function with a specific alert.
- Case E: Calling the mitigation_create_command function with mitigation details.
- Case F: Calling the mitigation_template_list_command function.
- Case G: Calling the router_list_command function.
- Case H: Calling the managed_object_list_command function.
- Case I: Calling the tms_group_list_command function.
Then:
- Case A: Assert that the command results has the relevant alerts with the relevant extracted fields.
- Case B: Assert that the command results has only one alert and that it has the relevant extracted fields.
- Case C: Assert that the command results has the relevant annotations with the relevant extracted fields.
- Case D: Assert that the command results contains the alert ID and has the relevant mitigations with the relevant
extracted fields.
- Case E: Assert that the command results has the newly create mitigation with its relevant extracted fields.
- Case F: Assert that the command results has the relevant mitigation template list with the relevant extracted
fields.
- Case G: Assert that the command results has the relevant router list with the relevant extracted fields.
- Case H: Assert that the command results has the relevant list of manged groups with the relevant extracted
fields.
- Case I: Assert that the command results has the relevant list of tms groups with the relevant extracted fields.
"""
mocked_http_response = http_responses[http_response_key]
expected_command_results = command_results[expected_command_results_key]
mocker.patch.object(client, function_to_mock, return_value=mocked_http_response)
command_result: CommandResults = function_to_test(client, args)
assert command_result.outputs == expected_command_results
@pytest.mark.parametrize('http_response_key, expected_number_of_pages', [
('amount_of_incidents_vanilla_case', 25),
('amount_of_incidents_one_result', 1),
('amount_of_incidents_no_results', 0)
])
def test_calculate_amount_of_incidents(mocker, http_response_key, expected_number_of_pages):
"""
Given:
- Case A: A "regular" query that returns response with 25 pages.
- Case B: A query that returns response with only one page.
        - Case C: A query that returns a response with no pages and no data.
When:
Calculating the amount of relevant incidents by counting the amount of pages
Then:
        - Case A: Assert that the amount of incidents calculated is 25
        - Case B: Assert that the amount of incidents calculated is 1
        - Case C: Assert that the amount of incidents calculated is 0
"""
mocked_http_response = http_responses[http_response_key]
mocker.patch.object(client, 'list_alerts', return_value=mocked_http_response)
number_of_pages = client.calculate_amount_of_incidents('', {})
assert number_of_pages == expected_number_of_pages
def test_calculate_amount_of_incidents_raise_error(mocker):
mocked_http_response = http_responses['amount_of_incidents_broken_last_page']
mocker.patch.object(client, 'list_alerts', return_value=mocked_http_response)
with pytest.raises(DemistoException,
match='Could not calculate page size, last page number was not found:\n'
'https://content.demisto.works:57585/api/sp/v7/alerts/?'):
client.calculate_amount_of_incidents('', {})
@pytest.mark.parametrize('object_to_clean', [
({}),
({'some_key': 'some_value'}),
({'some_key': 'some_value', 'links': {'self': 'some_link'}}),
([{'some_key': 'some_value', 'links': {'self': 'some_link'}}]),
({'some_key': {'links': {'self': 'some_link'}}}),
({'some_key': [{'links': {'self': 'some_link'}}]}),
({'some_key': [{'links': {'self': 'some_link'}}, {'links': {'self': 'some_other_link'}}]}),
([{'some_key': [{'links': {'self': 'some_link'}}, {'links': {'self': 'some_other_link'}}]}]),
])
def test_clean_links(object_to_clean):
"""
Given:
- Case A: An empty dict.
- Case B: A dict with no 'links' key in it.
- Case C: A dict with a 'links' key in it.
- Case D: A list containing a dict with a 'links' key in it.
- Case E: A dict containing another dict with a 'links' key in it.
- Case F: A dict containing a list containing another dict with a 'links' key in it.
        - Case G: A dict containing a list containing additional dicts with a 'links' key in them.
        - Case H: A list containing a dict containing another list containing additional dicts with a 'links' key in them.
When:
Running the clean_links function
Then:
No links key appear in transformed dict (checking by parsing the dict into a string)
"""
copy_of_object = deepcopy(object_to_clean)
clean_links(copy_of_object)
str_result = json.dumps(copy_of_object)
assert str_result.find('link') == -1
def test_validate_json_arg():
"""
Given:
- A string representing a json object.
When:
- Validating a string has a dict structure
Then:
- Ensure no parsing error was returned.
"""
validate_json_arg('{"some_key": "some_value"}', '')
def test_validate_json_arg_raise_error():
"""
Given:
- A string that has no json format.
When:
- Validating a string has a json structure.
Then:
- Ensure a parsing error was raised
"""
with pytest.raises(DemistoException, match='The value given in the argument is not a valid JSON format:\n'
'{"some_key" "some_value"}'):
validate_json_arg('{"some_key" "some_value"}', '')
@pytest.mark.parametrize('object_to_build, expected_result', [
({}, {}),
({'attributes': {'key_1': 'val_1'}, 'key_2': 'val_2'},
{'key_1': 'val_1', 'key_2': 'val_2'}),
({'attributes': {'key_1': 'val_1'}, 'key_2': 'val_2', 'relationships': [{'key_3': 'val_3'}],
'subobject': {'key_4': 'val_4'}}, {'key_1': 'val_1', 'key_2': 'val_2'})
])
def test_build_human_readable(object_to_build, expected_result):
"""
    Given:
        - Case A: An empty dict.
        - Case B: A dict with two keys: 'attributes' and 'key_2'.
        - Case C: A dict with four keys: 'attributes', 'relationships', 'subobject' and 'key_2'.
    When:
        - Building the human readable from a response dict.
    Then:
        - Case A: Assert the result is an empty dict.
        - Case B: Ensure that:
            1. Keys under the 'attributes' key are extracted to the root level.
            2. The second key - 'key_2' - still appears in the object.
        - Case C: Ensure that:
            1. Keys under the 'attributes' key are extracted to the root level.
            2. The second key - 'key_2' - still appears in the object.
            3. The 'relationships' and 'subobject' keys are missing from the object.
"""
result = build_human_readable(object_to_build)
assert result == expected_result
@pytest.mark.parametrize('args_dict, expected_json_str', [
(
{
"limit": "10",
"page": "2",
"alert_id": "123",
"alert_class": "bgp",
"alert_type": "bgp_hijack",
"classification": "Flash Crowd",
"importance": "1",
"ongoing": "true",
"start_time": "2021-01-11T13:15:00",
"stop_time": "2021-01-12T13:15:00",
},
'/data/attributes/limit=10 AND /data/attributes/page=2 AND /data/attributes/alert_id=123 AND '
'/data/attributes/alert_class=bgp AND /data/attributes/alert_type=bgp_hijack AND '
'/data/attributes/classification=Flash Crowd AND /data/attributes/importance=1 AND '
'/data/attributes/ongoing=true AND /data/attributes/start_time=2021-01-11T13:15:00 AND '
'/data/attributes/stop_time=2021-01-12T13:15:00'
),
(
{
"importance": "1",
"importance_operator": "=",
"start_time": "2021-01-11T13:15:00",
"start_time_operator": ">",
"stop_time": "2021-01-12T13:15:00",
"stop_time_operator": "<"
},
'/data/attributes/importance=1 AND /data/attributes/start_time>2021-01-11T13:15:00 AND '
'/data/attributes/stop_time<2021-01-12T13:15:00'
)
])
def test_build_relationships(args_dict, expected_json_str):
"""
Given:
        - Case A: A dict of possible relationship filters.
        - Case B: A dict of possible relationship filters in addition to special allowed operators.
    When:
        - Building a data attribute filter string to be sent in the url query.
    Then:
        - Case A: Assert that all filters use the `=` operator and are chained using the `AND` operator.
        - Case B: Assert that start_time uses the '>' operator, stop_time uses the '<' operator and importance uses
          the '=' operator.
"""
result = client.build_data_attribute_filter(args_dict)
assert result == expected_json_str
| 41.75 | 119 | 0.684357 |
2c33352508590e61164c232379f14d63d7be6d86
| 62 |
py
|
Python
|
test Text implementieren.py
|
abbashan03/Kinduino
|
292be35053763ee593b5721bf7ce450ec4dc1603
|
[
"MIT"
] | null | null | null |
test Text implementieren.py
|
abbashan03/Kinduino
|
292be35053763ee593b5721bf7ce450ec4dc1603
|
[
"MIT"
] | null | null | null |
test Text implementieren.py
|
abbashan03/Kinduino
|
292be35053763ee593b5721bf7ce450ec4dc1603
|
[
"MIT"
] | null | null | null |
# Read the test file and print its contents; the context manager closes the file.
with open("Testabc.txt", "r") as f:
    daten = f.read()
print(daten)
| 15.5 | 29 | 0.580645 |
2ccda491b969e54a5ab9eb3be3e79624f53f25af
| 404 |
py
|
Python
|
listings/chapter04/triangle.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | 2 |
2021-09-20T06:16:41.000Z
|
2022-01-17T14:24:43.000Z
|
listings/chapter04/triangle.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | null | null | null |
listings/chapter04/triangle.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
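# Plot a right triangle with vertices A(-3,-3), B(3,-3), C(3,3) and label its sides a, b, c.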
plt.xlabel('x')
plt.ylabel('y')
plt.scatter([-3, 3, 3], [-3, -3, 3])
plt.annotate('A', (-3, -3))
plt.annotate('B', (3, -3))
plt.annotate('C', (3, 3))
plt.plot([-3, 3], [-3, -3])
plt.plot([3, 3], [-3, 3])
plt.plot([3, -3], [3, -3])
plt.text(0, -3.2, 'c')
plt.text(3.1, 0, 'a')
plt.text(-0.2, 0, 'b')
plt.grid()
plt.axhline(linewidth=2)
plt.axvline(linewidth=2)
plt.show()
| 21.263158 | 36 | 0.556931 |
b3c43eacedb82f18f1352631cc2d6cb195700eea
| 287 |
py
|
Python
|
kong/test-server.py
|
IBBD/dockerfiles
|
d4c7f5b95ed80a400e494d458f1bd53cd4710c23
|
[
"MIT"
] | null | null | null |
kong/test-server.py
|
IBBD/dockerfiles
|
d4c7f5b95ed80a400e494d458f1bd53cd4710c23
|
[
"MIT"
] | null | null | null |
kong/test-server.py
|
IBBD/dockerfiles
|
d4c7f5b95ed80a400e494d458f1bd53cd4710c23
|
[
"MIT"
] | 2 |
2018-09-19T08:28:50.000Z
|
2020-04-27T13:13:24.000Z
|
# -*- coding: utf-8 -*-
#
# Test server
# Author: alex
# Created Time: 2018-12-29 (Saturday) 20:30:34
def hello(name='world'):
return 'Hello {name} in func!'.format(name=name)
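# fireRest appears to expose plain functions as REST endpoints: API(hello) registers
# the function and app.run() starts the underlying Flask app (inferred from usage).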
if __name__ == '__main__':
from fireRest import API, app
API(hello)
app.run(debug=True, port=5000)
| 17.9375 | 52 | 0.651568 |
b3fc655226107b2eef0972e63b4d1bf8b28420c6
| 4,626 |
py
|
Python
|
Packs/Viper/Integrations/Viper/Viper.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Viper/Integrations/Viper/Viper.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Viper/Integrations/Viper/Viper.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
class ViperClient(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def sample_information(self, file_hash):
'''Get Sample instance information from Viper'''
return self._http_request(
method='GET',
url_suffix=f'/malware/{file_hash}/'
)
def test_module(self):
return self._http_request(
method='GET',
url_suffix='/'
)
def test_module(client):
"""
    Returning 'ok' indicates that the integration works as it is supposed to and the connection to the service is successful.
Args:
client: Viper client
Returns:
'ok' if test passed, anything else will fail the test
"""
client.test_module()
return 'ok'
def sample_download_helper(file_hash):
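    '''Download the raw sample file from Viper by its SHA256 hash and return the HTTP response.'''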
api_key = demisto.params().get('apikey')
viper_project = demisto.params().get('viper_project')
base_url = urljoin(demisto.params()['url'], f'/api/v3/project/{viper_project}')
verify_certificate = not demisto.params().get('insecure', False)
url = f'{base_url}/malware/{file_hash}/download/'
authorization = f'Token {api_key}'
try:
sample = requests.get(
url, verify=verify_certificate, headers={
'Authorization': authorization,
'Accept': 'application/json'
}
)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
return sample
def viper_download(client, args):
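    '''Look up the sample's metadata, then download it and return it as a war-room file entry.'''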
file_hash = args.get('file_hash')
if len(file_hash) == 64:
filename = viper_search(client, args)
sample = sample_download_helper(file_hash)
if sample.status_code == 200:
return_results(fileResult(filename, sample.content))
else:
raise DemistoException('No valid sample found')
else:
return_error('Hash length is invalid.')
def viper_search(client, args):
file_hash = args.get('file_hash')
if len(file_hash) == 64:
sample_info = client.sample_information(file_hash)
if sample_info['data']:
filename = sample_info['data']['name']
viper_id = sample_info['data']['id']
mime = sample_info['data']['mime']
file_type = sample_info['data']['type']
size = sample_info['data']['size']
viper_search_results = CommandResults(
outputs_prefix='Viper',
outputs_key_field='ViperID',
outputs={
'Name': filename,
'SHA256': file_hash,
'ViperID': viper_id,
'MIME': mime,
'Type': file_type,
'Size': size
}
)
return_results(viper_search_results)
return filename
else:
return_error('No valid sample found')
else:
raise DemistoException('Hash length is invalid.')
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Parse parameters
api_key = demisto.params().get('apikey')
viper_project = demisto.params().get('viper_project')
base_url = urljoin(demisto.params()['url'], f'/api/v3/project/{viper_project}')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Token {api_key}',
'Accept': 'application/json'
}
client = ViperClient(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client))
elif demisto.command() == 'viper-download':
viper_download(client, demisto.args())
elif demisto.command() == 'viper-search':
viper_search(client, demisto.args())
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 30.434211 | 116 | 0.597925 |
205c1c2880331000e57db020ce9cec7228d83c07
| 6,393 |
py
|
Python
|
agents/ac_dqn.py
|
yerfor/Soft-DRGN
|
0c96d1ea295077b949229261c37d8dde25001a03
|
[
"MIT"
] | 2 |
2022-02-24T08:21:49.000Z
|
2022-03-10T08:57:35.000Z
|
agents/ac_dqn.py
|
yerfor/Soft-DRGN
|
0c96d1ea295077b949229261c37d8dde25001a03
|
[
"MIT"
] | 1 |
2022-02-24T08:40:21.000Z
|
2022-02-24T12:01:58.000Z
|
agents/ac_dqn.py
|
yerfor/Soft-DRGN
|
0c96d1ea295077b949229261c37d8dde25001a03
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.ac_dqn import ActorDQNNetwork, CriticDQNNetwork
from agents.base_agent import BaseActorCriticAgent
from utils.hparams import hparams
from utils.numba_utils import *
from utils.torch_utils import *
class ActorCriticDQNAgent(BaseActorCriticAgent):
def __init__(self, in_dim, act_dim):
nn.Module.__init__(self)
super(ActorCriticDQNAgent, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hparams['hidden_dim']
self.act_dim = act_dim
self.actor_learned_model = ActorDQNNetwork(in_dim, hparams['hidden_dim'], act_dim)
self.actor_target_model = ActorDQNNetwork(in_dim, hparams['hidden_dim'], act_dim)
self.actor_target_model.load_state_dict(self.actor_learned_model.state_dict())
self.critic_learned_model = CriticDQNNetwork(in_dim, hparams['hidden_dim'], act_dim)
self.critic_target_model = CriticDQNNetwork(in_dim, hparams['hidden_dim'], act_dim)
self.critic_target_model.load_state_dict(self.critic_learned_model.state_dict())
def action(self, obs, adj, epsilon=0.3, action_mode='epsilon-greedy'):
"""
:param obs: ndarray with [n_agent, hidden], or Tensor with [batch, n_agent, hidden]
:param adj: not used by DQN
:param epsilon: float
:param action_mode: str
:return:
"""
is_batched_input = obs.ndim == 3
if isinstance(obs, np.ndarray):
# from environment
assert obs.ndim == 2
obs = torch.tensor(obs, dtype=torch.float32).unsqueeze(0).cuda()
adj = torch.tensor(adj, dtype=torch.float32).unsqueeze(0).cuda()
elif isinstance(obs, torch.Tensor):
# from replay buffer
assert obs.ndim == 3
obs, adj = to_cuda(obs), to_cuda(adj)
else:
raise TypeError
with torch.no_grad():
if is_batched_input:
batch_size = obs.shape[0]
                p = self.actor_learned_model(obs).squeeze().cpu().numpy()  # [batch, n_agent, n_action]
action = []
for b_i in range(batch_size):
p_i = p[b_i]
action_i = self._sample_action_from_p(p_i, epsilon, action_mode) # [n_agent, ]
action.append(action_i)
action = np.stack(action, axis=0)
else:
                p = self.actor_learned_model(obs).squeeze().cpu().numpy()  # [n_agent, n_action]
action = self._sample_action_from_p(p, epsilon, action_mode) # [n_agent, ]
return action
def _sample_action_from_p(self, p, epsilon, action_mode):
"""
:param p: np.ndarray [n_agent, n_action]
:param epsilon: float
:param action_mode: str
:return: action, np.ndarray [n_agent, ]
"""
action = []
assert p.ndim == 2
n_agent = p.shape[0]
if action_mode == 'epsilon-greedy':
for i in range(n_agent): # agent-wise epsilon-greedy
if np.random.rand() < epsilon:
a = np.random.randint(self.act_dim)
else:
a = p[i].argmax().item()
action.append(a)
elif action_mode == 'categorical':
action = numba_categorical_sample(p)
elif action_mode == 'epsilon-categorical':
action = numba_epsilon_categorical_sample(p, epsilon)
elif action_mode == 'greedy':
for i in range(n_agent):
a = p[i].argmax().item()
action.append(a)
else:
raise ValueError
action = np.array(action, dtype=np.float32).reshape([n_agent, ])
return action
def cal_q_loss(self, sample, losses, log_vars=None, global_steps=None):
obs = sample['obs']
action = sample['action']
reward = sample['reward']
next_obs = sample['next_obs']
done = sample['done']
batch_size, n_ant, _ = obs.shape
# q_values : [b,n_agent,n_action]
q_values = self.critic_learned_model(obs)
# target_q_values: [b,n_agent,]
with torch.no_grad():
# when calculating, use the fixed graph (in paper) or true dynamic graph (experimentally better)
next_probs = self.actor_learned_model(next_obs, return_log_pi=False)
target_q_values = self.critic_target_model(next_obs)
target_q_values = (target_q_values * next_probs).sum(dim=-1) # [batch, n_agent]
target_q_values = target_q_values.cpu().numpy()
numpy_q_values = q_values.detach().cpu().numpy()
expected_q = numba_get_expected_q(numpy_q_values, action.cpu().numpy(), reward.cpu().numpy(),
done.cpu().numpy(), hparams['gamma'], target_q_values,
batch_size, n_ant)
# q_loss: MSE calculated on the sampled action index!
q_loss = (q_values - torch.tensor(expected_q).cuda()).pow(2).mean()
losses['q_loss'] = q_loss
def cal_p_loss(self, sample, losses, log_vars=None, global_steps=None):
obs = sample['obs']
batch_size, n_ant, _ = obs.shape
probs, log_probs = self.actor_learned_model(obs, return_log_pi=True)
log_probs = torch.log(probs + 1e-15) # [batch, agent, action]
with torch.no_grad():
# q_values: Q(s,a), [b, n_agent, n_action]
q_values = self.critic_learned_model(obs)
# baseline, V(s)=E_a[Q(s,a)]=\Sigma \pi(a|s)*Q(s,a)
v_values = (probs * q_values).sum(dim=-1, keepdim=True)
# advantage, A(s,a)=Q(s,a)-V(s)
advantages = q_values - v_values
# loss_p: \Sigma log\pi(a|s)*A(s,a)
# p_loss = (-masked_log_probs * advantages).mean()
p_loss = (-log_probs * advantages).mean()
losses['p_loss'] = p_loss
def update_target(self):
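        # Soft update blends the target networks toward the learned networks by a
        # factor tau each call; hard update copies the learned weights directly.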
if hparams['soft_update_target_network']:
soft_update(self.actor_learned_model, self.actor_target_model, hparams['tau'])
soft_update(self.critic_learned_model, self.critic_target_model, hparams['tau'])
else:
hard_update(self.actor_learned_model, self.actor_target_model)
hard_update(self.critic_learned_model, self.critic_target_model)
| 43.787671 | 108 | 0.60488 |
2065124aae29cd6bad906b99db7fdcb54dbb8442
| 3,107 |
py
|
Python
|
5_DeepLearning-Visualization/3-Keract_CNN_HeatMap.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
5_DeepLearning-Visualization/3-Keract_CNN_HeatMap.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
5_DeepLearning-Visualization/3-Keract_CNN_HeatMap.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
'''
Complete visualization of the network's intermediate steps with Keract -> pretty cool
'''
import os
import keract
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *
from DogsCats_Dataset_class import DOGSCATS
data = DOGSCATS()
data.data_augmentation(augment_size=15000)
x_train, y_train = data.x_train, data.y_train
x_test, y_test = data.x_test, data.y_test
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2)
batch_size = 128
epochs = 15
train_size, width, height, depth = x_train.shape
test_size, num_classes = y_test.shape
width, height, depth = x_train.shape[1:]
# Define the CNN
def create_model():
input_img = Input(shape=(width, height, depth))
x = Conv2D(filters=32, kernel_size=3, padding="same", name="heatmap1")(input_img)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters=32, kernel_size=3, padding="same", name="heatmap2")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
x = Dropout(rate=0.1)(x)
x = Conv2D(filters=64, kernel_size=3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters=64, kernel_size=3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
x = Dropout(rate=0.2)(x)
x = Conv2D(filters=96, kernel_size=3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters=96, kernel_size=3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
x = Dropout(rate=0.3)(x)
x = Conv2D(filters=128, kernel_size=3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters=128, kernel_size=3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
x = Dropout(rate=0.3)(x)
x = GlobalAveragePooling2D()(x)
x = Dense(num_classes, name="features")(x)
output_pred = Activation("softmax")(x)
optimizer = Adam()
model = Model(
inputs=input_img,
outputs=output_pred)
model.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
model.summary()
return model
model = create_model()
model.load_weights("../DeepLearning/models/heat_cnn.h5")
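# Gradients of the loss w.r.t. the activations of layer "heatmap1" for a single
# test image, displayed as heatmaps overlaid on the (rescaled) input image.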
grads = keract.get_gradients_of_activations(
model,
x_test[[20]],
y_test[[20]],
layer_name='heatmap1')
keract.display_heatmaps(
grads,
x_test[20]*255.0)
activations = keract.get_activations(
model,
x_test[[20]])
keract.display_activations(
activations)
| 27.990991 | 86 | 0.674284 |
20d16156bfffe09f0c26fd3c8ff0353e54619a75
| 2,589 |
py
|
Python
|
Entnahmesimulation.py
|
ThoEngel/rentenplanung
|
879c9a678ba1ff951a1f92b0c42673a7943a18e6
|
[
"MIT"
] | 3 |
2022-01-01T18:24:46.000Z
|
2022-01-08T15:28:46.000Z
|
Entnahmesimulation.py
|
ThoEngel/Finanzen-Simuliert
|
879c9a678ba1ff951a1f92b0c42673a7943a18e6
|
[
"MIT"
] | null | null | null |
Entnahmesimulation.py
|
ThoEngel/Finanzen-Simuliert
|
879c9a678ba1ff951a1f92b0c42673a7943a18e6
|
[
"MIT"
] | null | null | null |
'''
Beware of the 4% rule ("Vorsicht vor der 4%-Regel")
https://www.finanzen-erklaert.de/vorsicht-vor-der-4-regel/
'''
import pandas as pd
import time
from SEsimulation.mDate import mDate
from SEsimulation import SEsimulation
import plotly.express as px
import numpy as np
print('Start')
starttime = time.time()
# Read monthly S&P 500 data
RETURN_FILE = 'real_return_df.pickle'
real_return_df = pd.read_pickle(RETURN_FILE)
# Configuration of the withdrawal simulation
config = {
    'date': {'start': mDate(1, 2022),  # start date
             'start_retirement': mDate(1, 2022)},  # start of the withdrawal phase
    'assets': {'depot': 500000,  # portfolio value at the start
               'fees': 0.00},  # annual portfolio fees in %
    'simulation': {'returns_df': real_return_df,  # S&P 500 data
                   'n_ret_years': 30},  # simulation duration in years
    'withdrawal': {'fixed_pct': 4.0},  # withdrawal rate per year in % of the initial portfolio
    'pension': {'point': np.array([0]),  # number of acquired pension points
                'point_add': np.array([0.0]),  # pension point growth per year
                'start_date': [mDate(1, 3000)],  # start of the statutory pension
                'name': {'John Doe'},  # name of the pension recipient
                'point_value': 0.0,  # current pension point value
                'point_value_inc': 0.0},  # increase of the pension point value in %
    'visualization': {'textoutput': True}  # textual intermediate output as debug info
}
years = range(1, 101, 1)  # duration of the withdrawal phase in years
df = pd.DataFrame(columns=[1], index=years)
row_indexer = 0
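# For each withdrawal duration, run the historical simulation and take the failure
# rate as the share of trials in which the portfolio was exhausted before the end.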
for year in years:
config['simulation']['n_ret_years'] = year
s = SEsimulation.SEsimulation(config)
s.simulate()
survival = [trial_dict['exhaustion'] for trial_dict in s.latest_simulation]
curProb = 100 * (len(survival) - survival.count(year * 12)) / len(survival)
print(year, ' Jahre, ', config['withdrawal']['fixed_pct'], '% Entnahme, Ausfallwahrscheinlichkeit: ', curProb, '%')
df.iloc[row_indexer, 0] = curProb
row_indexer += 1
fig = px.line(df)
fig.update_layout(
title="Fehlerquote der 4% Regel nach Laufzeit / mit Inflationsanpassung",
xaxis_title="Entnahmedauer [Jahre]",
yaxis_title="Fehlerrate [%]",
font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
)
)
fig.show()
endTime = time.time()
print('\nSimulationsdauer: %5.2f sec.' % (endTime - starttime))
| 34.065789 | 119 | 0.625338 |
ff7cbed85adf70a5a3eaa842a99f8ca081372761
| 40 |
py
|
Python
|
haferml/__init__.py
|
emptymalei/haferml
|
ba193ce1c022c89fb4e88924b7bb7a05b676929a
|
[
"MIT"
] | 11 |
2021-04-17T18:51:45.000Z
|
2021-06-25T19:42:25.000Z
|
haferml/__init__.py
|
emptymalei/haferml
|
ba193ce1c022c89fb4e88924b7bb7a05b676929a
|
[
"MIT"
] | 3 |
2021-04-29T19:24:15.000Z
|
2021-05-21T04:30:54.000Z
|
haferml/__init__.py
|
emptymalei/haferml
|
ba193ce1c022c89fb4e88924b7bb7a05b676929a
|
[
"MIT"
] | 2 |
2021-06-10T00:55:43.000Z
|
2021-12-30T07:37:07.000Z
|
from haferml.version import __version__
| 20 | 39 | 0.875 |
440799a851de9679778ae473f11b0954fc63a35d
| 2,304 |
py
|
Python
|
plotter.py
|
lastleon/EvolutionSimulator
|
3dace3bd07044c0af27c4805ee79b9dd11c45826
|
[
"MIT"
] | 2 |
2017-12-14T20:54:21.000Z
|
2018-03-05T18:31:43.000Z
|
plotter.py
|
lastleon/EvolutionSimulator
|
3dace3bd07044c0af27c4805ee79b9dd11c45826
|
[
"MIT"
] | 1 |
2018-03-11T20:40:24.000Z
|
2018-05-06T16:26:30.000Z
|
plotter.py
|
lastleon/EvolutionSimulator
|
3dace3bd07044c0af27c4805ee79b9dd11c45826
|
[
"MIT"
] | 1 |
2017-12-06T20:29:18.000Z
|
2017-12-06T20:29:18.000Z
|
import os
import matplotlib.pyplot as plt
from ast import literal_eval as make_tuple
myIndex = int(input("Plotindex: "))
x = []
y = []
currDir = os.path.dirname(__file__)
path = currDir + "/data/ältestesLw/ältestesLw%s.txt"% myIndex
try:
with open(path) as data:
data = data.read().split(";")
data.pop(-1)
        for i in data:
            xCoordinate, yCoordinate = make_tuple(i)
            x.append(xCoordinate)
            y.append(yCoordinate)
plt.plot(x,y)
plt.xlabel("Jahre")
plt.ylabel("Alter in Jahren")
plt.show()
except:
pass
x = []
y = []
path = currDir + "/data/durchschnittsLw/durchschnittsLw%s.txt"% myIndex
try:
with open(path) as data:
data = data.read().split(";")
data.pop(-1)
for i in data:
x.append(make_tuple(i)[0])
y.append(make_tuple(i)[1])
plt.plot(x,y)
plt.xlabel("Jahre")
plt.ylabel("Durchschnittsalter in Jahren")
plt.show()
except:
pass
x = []
y = []
path = currDir + "/data/durchschnittsFitnessLw/durchschnittsFitnessLw%s.txt"% myIndex
try:
with open(path) as data:
data = data.read().split(";")
data.pop(-1)
for i in data:
x.append(make_tuple(i)[0])
y.append(make_tuple(i)[1])
plt.plot(x,y)
plt.xlabel("Jahre")
plt.ylabel("Durchschnittsfitness")
plt.show()
except:
pass
x = []
y1 = []
y2 =[]
path = currDir + "/data/todeUndGeburtenLw/todeUndGeburtenLw%s.txt"% myIndex
try:
with open(path) as data:
data = data.read().split(";")
data.pop(-1)
for i in data:
x.append(make_tuple(i)[0])
y1.append(make_tuple(i)[1])
y2.append(make_tuple(i)[2])
plt.plot(x,y1,"r-")
plt.plot(x,y2,"g-")
plt.xlabel("Jahre")
plt.ylabel("Tode und Geburten")
plt.show()
except:
pass
def plotIDs(dataTuple, currentPlot):
print("calleds")
currentPlot.plot(dataTuple[0], dataTuple[1], "bx", markersize=4)
| 22.588235 | 86 | 0.516493 |
441e96f18b9bdd5933a138f55f2d8708ce2cc1f9
| 9,022 |
py
|
Python
|
research/cv/squeezenet1_1/modelArts/train_on_modelarts.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/squeezenet1_1/modelArts/train_on_modelarts.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/squeezenet1_1/modelArts/train_on_modelarts.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train squeezenet."""
import ast
import os
import argparse
import glob
import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore import export
from mindspore.nn.optim.momentum import Momentum
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore.nn.metrics import Accuracy
from mindspore.communication.management import init
from src.lr_generator import get_lr
from src.CrossEntropySmooth import CrossEntropySmooth
from src.squeezenet import SqueezeNet as squeezenet
parser = argparse.ArgumentParser(description='SqueezeNet1_1')
parser.add_argument('--net', type=str, default='squeezenet', help='Model.')
parser.add_argument('--dataset', type=str, default='imagenet', help='Dataset.')
parser.add_argument('--run_cloudbrain', type=ast.literal_eval, default=False,
help='Whether it is running on CloudBrain platform.')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False,
                    help='Run distribute')  # type=bool would treat any non-empty string as True
parser.add_argument('--device_num', type=int, default=1, help='Device num.')
parser.add_argument('--dataset_path', type=str, default='', help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
parser.add_argument('--pre_trained', type=str, default="None", help='Pretrained checkpoint path')
parser.add_argument('--data_url', type=str, default="None", help='Datapath')
parser.add_argument('--train_url', type=str, default="None", help='Train output path')
parser.add_argument('--num_classes', type=int, default="1000", help="classes")
parser.add_argument('--epoch_size', type=int, default="200", help="epoch_size")
parser.add_argument('--batch_size', type=int, default="32", help="batch_size")
args_opt = parser.parse_args()
local_data_url = '/cache/data'
local_train_url = '/cache/ckpt'
local_pretrain_url = '/cache/preckpt.ckpt'
set_seed(1)
def filter_checkpoint_parameter_by_list(origin_dict, param_filter):
"""remove useless parameters according to filter_list"""
for key in list(origin_dict.keys()):
for name in param_filter:
if name in key:
print("Delete parameter from checkpoint: ", key)
del origin_dict[key]
break
def frozen_to_air(network, args):
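    """Load the given checkpoint into the network and export it in the requested format (e.g. AIR)."""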
paramdict = load_checkpoint(args.get("ckpt_file"))
load_param_into_net(network, paramdict)
input_arr = Tensor(np.zeros([args.get("batch_size"), 3, args.get("height"), args.get("width")], np.float32))
export(network, input_arr, file_name=args.get("file_name"), file_format=args.get("file_format"))
if __name__ == '__main__':
target = args_opt.device_target
if args_opt.device_target != "Ascend":
raise ValueError("Unsupported device target.")
# init context
if args_opt.run_distribute:
device_num = int(os.getenv("RANK_SIZE"))
device_id = int(os.getenv("DEVICE_ID"))
context.set_context(mode=context.GRAPH_MODE,
device_target=target)
context.set_context(device_id=device_id,
enable_auto_mixed_precision=True)
context.set_auto_parallel_context(
device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
init()
local_data_url = os.path.join(local_data_url, str(device_id))
else:
device_id = 0
context.set_context(mode=context.GRAPH_MODE,
device_target=target)
# create dataset
if args_opt.dataset == "cifar10":
from src.config import config_cifar as config
from src.dataset import create_dataset_cifar as create_dataset
else:
from src.config import config_imagenet as config
from src.dataset import create_dataset_imagenet as create_dataset
if args_opt.run_cloudbrain:
import moxing as mox
mox.file.copy_parallel(args_opt.data_url, local_data_url)
dataset = create_dataset(dataset_path=local_data_url,
do_train=True,
repeat_num=1,
batch_size=args_opt.batch_size,
target=target,
run_distribute=args_opt.run_distribute)
step_size = dataset.get_dataset_size()
# define net
net = squeezenet(num_classes=args_opt.num_classes)
# load checkpoint
if args_opt.pre_trained != "None":
if args_opt.run_cloudbrain:
dir_path = os.path.dirname(os.path.abspath(__file__))
ckpt_name = args_opt.pre_trained[2:]
ckpt_path = os.path.join(dir_path, ckpt_name)
print(ckpt_path)
param_dict = load_checkpoint(ckpt_path)
filter_list = [x.name for x in net.final_conv.get_parameters()]
filter_checkpoint_parameter_by_list(param_dict, filter_list)
load_param_into_net(net, param_dict)
# init lr
lr = get_lr(lr_init=config.lr_init,
lr_end=config.lr_end,
lr_max=config.lr_max,
total_epochs=args_opt.epoch_size,
warmup_epochs=config.warmup_epochs,
pretrain_epochs=config.pretrain_epoch_size,
steps_per_epoch=step_size,
lr_decay_mode=config.lr_decay_mode)
lr = Tensor(lr)
# define loss
if args_opt.dataset == "imagenet":
if not config.use_label_smooth:
config.label_smooth_factor = 0.0
loss = CrossEntropySmooth(sparse=True,
reduction='mean',
smooth_factor=config.label_smooth_factor,
num_classes=args_opt.num_classes)
else:
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
# define opt, model
loss_scale = FixedLossScaleManager(config.loss_scale,
drop_overflow_update=False)
opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
lr,
config.momentum,
config.weight_decay,
config.loss_scale,
use_nesterov=True)
model = Model(net,
loss_fn=loss,
optimizer=opt,
loss_scale_manager=loss_scale,
metrics={'acc': Accuracy()},
amp_level="O2",
keep_batchnorm_fp32=False)
# define callbacks
time_cb = TimeMonitor(data_size=step_size)
loss_cb = LossMonitor()
cb = [time_cb, loss_cb]
if config.save_checkpoint and device_id == 0:
config_ck = CheckpointConfig(
save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix=args_opt.net,
directory=local_train_url,
config=config_ck)
cb += [ckpt_cb]
# train model
model.train(args_opt.epoch_size - config.pretrain_epoch_size,
dataset,
callbacks=cb)
if device_id == 0:
ckpt_list = glob.glob("/cache/ckpt/squeezenet*.ckpt")
        if not ckpt_list:
            # Fail fast instead of crashing on an empty list below.
            raise RuntimeError("ckpt file not generated.")
ckpt_list.sort(key=os.path.getmtime)
ckpt_model = ckpt_list[-1]
print("checkpoint path", ckpt_model)
net = squeezenet(args_opt.num_classes)
frozen_to_air_args = {'ckpt_file': ckpt_model,
'batch_size': 1,
'height': 227,
'width': 227,
'file_name': '/cache/ckpt/squeezenet',
'file_format': 'AIR'}
frozen_to_air(net, frozen_to_air_args)
if args_opt.run_cloudbrain:
mox.file.copy_parallel(local_train_url, args_opt.train_url)
| 41.196347 | 112 | 0.647639 |
92298960309d93040545a8b12eb854f6416b4976
| 17,228 |
py
|
Python
|
indl/model/beta_vae.py
|
SachsLab/indl
|
531d2e0c2ee765004aedc553af40e258262f86cb
|
[
"Apache-2.0"
] | 1 |
2021-02-22T01:39:50.000Z
|
2021-02-22T01:39:50.000Z
|
indl/model/beta_vae.py
|
SachsLab/indl
|
531d2e0c2ee765004aedc553af40e258262f86cb
|
[
"Apache-2.0"
] | null | null | null |
indl/model/beta_vae.py
|
SachsLab/indl
|
531d2e0c2ee765004aedc553af40e258262f86cb
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['generate_default_args', 'generate_default_params', 'prepare_inputs',
'create_f_encoder', 'make_f_variational', 'create_z_encoder', 'make_z_variational',
'get_z_prior']
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as tfkl
from tensorflow.keras import backend as K
import tensorflow_probability as tfp
from .lfads.utils import CoordinatedDropout
from .lfads.utils import GRUClip
# from indl.model.tfp import LearnableMultivariateNormalDiag
from .tfp import make_mvn_prior, make_mvn_dist_fn
from .tfp import LearnableMultivariateNormalDiagCell
from .lfads.utils import LearnableAutoRegressive1Prior
tfd = tfp.distributions
tfpl = tfp.layers
def generate_default_args():
# non tunable parameters
return type('TestArgs', (object,), dict(
random_seed=1337,
batch_size=16,
n_epochs=100,
resample_X=10, # spike count bin size
f_rnn_type="BidirectionalGRUClip", # Encoder RNN cell type: ('Bidirectional' or '') + ('GRU', 'LSTM', 'SimpleRNN', 'GRUClip')
f_latent_off_diag=False, # If latent dist may have non-zero off diagonals
q_f_init_std=0.1, # ic_prior_var
q_f_samples=1,
f_prior_off_diag=False, # If latent prior may have non-zero off diagonals
f_prior_kappa=0.0, # In LFADS this is a tuned hyperparameter, ~0.1
f_prior_train_mean=True, # True in LFADS
f_prior_train_var=True, # False in LFADS
f_enc_input_samps=0, # Set to > 0 to restrict f_enc to only see this many samples, to prevent acausal
z1_rnn_type="BidirectionalGRUClip", # Encoder RNN cell type
z1_lag=0, # Time lag on the z-encoder output.
# Same as LFADS' `controller_input_lag`
q_z_init_std=0.1, #
z_prior_var=0.1, # co_prior_var
z_prior_process="RNN", # RNN or AR1
latent_samples=4, # Currently unused
gen_cell_type="GRUClip", # Decoder generative RNN cell type. "Complex" is for LFADS.
gen_tile_input=False,
))
def generate_default_params():
# tunable parameters
return {
"dropout_rate": 1e-2, # (1e-2)
"coordinated_dropout_rate": 0.1, #
"input_factors": 0, # Extra Dense layer applied to inputs. Good for multi-session.
"gru_clip_value": 5.0, # Max value recurrent cell can take before being clipped (5.0)
"f_units": [128], # Number of units in f-encoder RNN. Increase list length to add more RNN layers. (128)
# Same as LFADS' `ic_enc_dim`
"f_latent_size": 10, # Size of latent vector f (10)
# Same as LFADS' `ic_dim`
"z1_units": 16, # Number of units in z-encoder RNN.
# Same as LFADS `ci_enc_dim`
"z2_units": 16, # Number of units in z2 RNN, in DHSAE Full or LFADS controller.
# Same as LFADS `con_dim`
"z_latent_size": 4, # Dimensionality of q_zt posterior.
# Same as LFADS' `co_dim`
"gen_n_hidden": 256, # Number of RNN cells in generator (256)
# Same as LFADS `gen_dim`
"n_factors": 10, # Number of latent factors (24)
# Same as LFADS' `factors_dim`
"gen_l2_reg": 1e-4, # (1e-4)
"learning_rate": 2e-3, # (2e-3)
# "max_grad_norm": 200.0
}
# Anecdotally
# -larger f_units (~128) is important to get latents that discriminate task
# -larger gen_n_hidden (~256) is important to get good reconstruction
_args = generate_default_args()
_params = generate_default_params()
def prepare_inputs(params, _inputs):
_inputs = tfkl.Dropout(params['dropout_rate'])(_inputs)
# The f-encoder takes the entire sequence and outputs a single-timestamp vector,
# this vector is used as the decoder's initial condition. This has the potential
# to create acausal modeling because the decoder will have knowledge of the entire
# sequence from its first timestep.
# We can optionally split the input to _f_enc_inputs and remaining _inputs
# RNN will only see _f_enc_inputs to help prevent acausal modeling.
_f_enc_inputs = _inputs[:, :params['f_enc_input_samps'], :]
_inputs = _inputs[:, params['f_enc_input_samps']:, :]
# Coordinated dropout on _inputs only.
# Why not _f_enc_inputs? Is it because it is likely too short to matter?
_masked_inputs, cd_kept_mask = CoordinatedDropout(params['coordinated_dropout_rate'])(_inputs)
# cd_kept_mask is part of the return so it can be used during decoding.
# The z-encoder inputs will always be full length.
_z_enc_inputs = tf.concat([_f_enc_inputs, _masked_inputs], axis=-2)
if params['f_enc_input_samps'] == 0:
# With no f_enc_input_samps specification, the f_enc inputs are the full input.
# Note this has coordinated dropout, whereas it wouldn't if f_enc_input_samps was specified.
_f_enc_inputs = _masked_inputs
# Note: Skipping over CV Mask
if params['input_factors'] > 0:
_f_enc_inputs = tfkl.Dense(params['input_factors'])(_f_enc_inputs)
_z_enc_inputs = tfkl.Dense(params['input_factors'])(_z_enc_inputs)
return _f_enc_inputs, _z_enc_inputs, cd_kept_mask
def test_prepare_inputs(n_times=None, n_sensors=36):
K.clear_session()
inputs = tf.keras.Input(shape=(n_times, n_sensors))
f_enc_inputs, z_enc_inputs, cd_mask = prepare_inputs(
{**_params, **_args.__dict__}, inputs)
test_model = tf.keras.Model(inputs=inputs, outputs=[f_enc_inputs, z_enc_inputs, cd_mask])
test_model.summary()
test_output = test_model(tf.random.uniform((_args.batch_size, n_times or 2, n_sensors)))
print([(_.shape, _.dtype) for _ in test_output])
def create_f_encoder(params, _inputs,
kernel_initializer='lecun_normal',
bias_initializer='zeros',
recurrent_regularizer='l2'):
"""
The f-encoder.
Also called "Static Encoder", or in LFADS the "initial condition encoder".
"""
_latents = _inputs
rnn_kwargs = dict(
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
recurrent_regularizer=recurrent_regularizer,
dropout=0, # Dropout on inputs not needed.
return_sequences=False)
if params['f_rnn_type'].endswith('GRU'):
rnn_layer_cls = tfkl.GRU
elif params['f_rnn_type'].endswith('LSTM'):
rnn_layer_cls = tfkl.LSTM
elif params['f_rnn_type'].endswith('SimpleRNN'):
rnn_layer_cls = tfkl.SimpleRNN
    elif params['f_rnn_type'].endswith('GRUClip'):
        rnn_layer_cls = GRUClip
        rnn_kwargs['clip_value'] = params['gru_clip_value']
    else:
        raise ValueError(f"Unsupported f_rnn_type: {params['f_rnn_type']}")
for ix, rnn_units in enumerate(params['f_units']):
if params['f_rnn_type'].startswith('Bidirectional'):
_latents = tfkl.Bidirectional(rnn_layer_cls(rnn_units, **rnn_kwargs),
merge_mode="concat",
name="f_rnn_" + str(ix))(_latents)
else:
_latents = rnn_layer_cls(rnn_units, **rnn_kwargs)(_latents)
return _latents
def make_f_variational(params, _enc_f):
_enc_f = tfkl.Dropout(params['dropout_rate'])(_enc_f)
# Use a helper function to get a MVN distribution from _latents.
make_dist_fn, dist_params = make_mvn_dist_fn(_enc_f, params['f_latent_size'],
shift_std=params['q_f_init_std'],
offdiag=params['f_latent_off_diag'],
loc_name="f_loc", scale_name="f_scale",
use_mvn_diag=True)
_q_f = tfpl.DistributionLambda(make_distribution_fn=make_dist_fn,
# convert_to_tensor_fn=lambda s: s.sample(N_SAMPLES),
name="q_f")(dist_params)
# Also return a matching prior. This will be used in test_step to measure KL.
if params['f_prior_kappa'] > 0:
raise NotImplementedError
prior = make_mvn_prior(params['f_latent_size'],
init_std=params['q_f_init_std'],
trainable_mean=params['f_prior_train_mean'],
trainable_var=params['f_prior_train_var'],
offdiag=params['f_prior_off_diag'])
# prior_factory = lambda: tfd.MultivariateNormalDiag(loc=0, scale_diag=params['f_prior_kappa'])
# prior_factory = LearnableMultivariateNormalDiag(params['f_latent_size'])
# prior_factory.build(input_shape=(0,))
return _q_f, prior
def test_create_f_encoder(n_times=None, n_sensors=36):
K.clear_session()
input_samps = _args.f_enc_input_samps or n_times
input_dim = _params['input_factors'] or n_sensors
f_enc_inputs = tf.keras.Input(shape=(input_samps, input_dim))
latents = create_f_encoder({**_params, **_args.__dict__}, f_enc_inputs)
q_f, f_prior = make_f_variational({**_params, **_args.__dict__}, latents)
f_encoder = tf.keras.Model(inputs=f_enc_inputs, outputs=q_f, name="f_encoder_model")
f_encoder.summary()
dummy_q_f = f_encoder(tf.random.uniform((_args.batch_size, input_samps or 2, input_dim)))
print("q_f: ", dummy_q_f)
print("q_f.sample(2).shape (samples, batch_size, f_dim): ", dummy_q_f.sample(2).shape)
# f_prior = f_prior_factory()
f_kl = tfd.kl_divergence(dummy_q_f, f_prior)
print("f KL: ", f_kl)
def create_z_encoder(params, _inputs,
_f_inputs=None,
f_inputs_pre_z1=True,
kernel_initializer='lecun_normal',
bias_initializer='zeros',
recurrent_regularizer='l2'):
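    """
    The z-encoder ("dynamic encoder"); in LFADS terms, the controller input encoder.
    Optionally tiles a sampled f over time and concatenates it onto the inputs.
    """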
# For LFADS, set _f_inputs=None and params['gen_cell_type']="Complex"
if _f_inputs is not None:
# Expand along time dimension by broadcast-add to zeros.
n_times = tf.shape(_inputs)[-2]
exp_zeros = tf.zeros(tf.stack((n_times, 1)))
_f_inputs = tf.expand_dims(_f_inputs, -2) + exp_zeros
# Add optional f_input that we tile and concatenate onto _inputs.
if _f_inputs is not None and f_inputs_pre_z1:
if params['q_f_samples'] > 1:
# _inputs needs to be repeated NUM_SAMPLES on a new samples axis at axis=0.
_inputs = _inputs[tf.newaxis, ...] + tf.zeros([params['q_f_samples'], 1, 1, 1])
# Concatenate _x2 (features) and _static_sample
_inputs = tfkl.Concatenate()([_inputs, _f_inputs]) # (optional-samples, batch, timesteps, feat_dim+latent_static)
# Collapse samples + batch dims -- required by LSTM
new_d1 = tf.reshape(tf.reduce_prod(tf.shape(_inputs)[:-2]), (1,))
new_shape = tf.concat((new_d1, tf.shape(_inputs)[-2:]), 0)
_inputs = tf.reshape(_inputs, new_shape)
# _inputs shape now (samples*batch, T, feat+lat_stat)
is_rnn = params['z1_rnn_type'].startswith('Bidirectional') \
or (params['z1_rnn_type'] in ['GRU', 'LSTM', 'SimpleRNN', 'GRUClip'])
if is_rnn:
rnn_kwargs = dict(
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
recurrent_regularizer=recurrent_regularizer,
dropout=0, # Dropout on inputs not needed.
return_sequences=True)
if params['z1_rnn_type'].endswith('GRU'):
rnn_layer_cls = tfkl.GRU
elif params['z1_rnn_type'].endswith('LSTM'):
rnn_layer_cls = tfkl.LSTM
elif params['z1_rnn_type'].endswith('SimpleRNN'):
rnn_layer_cls = tfkl.SimpleRNN
elif params['z1_rnn_type'].endswith('GRUClip'):
rnn_layer_cls = GRUClip
rnn_kwargs['clip_value'] = params['gru_clip_value']
if params['z1_rnn_type'].startswith('Bidirectional'):
_enc_z = tfkl.Bidirectional(rnn_layer_cls(params['z1_units'], **rnn_kwargs),
merge_mode="concat", name="z_rnn_1")(_inputs)
else:
_enc_z = rnn_layer_cls(params['z1_units'], **rnn_kwargs)(_inputs)
else:
# Not RNN, just MLP
_enc_z = tfkl.Dense(params['z1_units'])(_inputs)
if params['z1_lag'] > 0:
raise NotImplementedError
# TODO: Split output to forward and backward,
# For forward, trim off ending `toffset` and prepend zeros
# if rnn and bidirectional, for backward, trim off beginning `toffset` and append zeros
if is_rnn and params['z1_rnn_type'].startswith('Bidirectional'):
# TODO: Recombine forward and backward with equivalent to merge_mode="sum"
pass
if params['z2_units'] > 0 and params['gen_cell_type'] != "Complex":
if _f_inputs is not None and not f_inputs_pre_z1:
if params['q_f_samples'] > 1:
# _inputs needs to be repeated NUM_SAMPLES on a new samples axis at axis=0.
_enc_z = _enc_z[tf.newaxis, ...] + tf.zeros([params['q_f_samples'], 1, 1, 1])
# Concatenate _x2 (features) and _static_sample
_enc_z = tfkl.Concatenate()(
[_enc_z, _f_inputs]) # (optional-samples, batch, timesteps, feat_dim+latent_static)
# Collapse samples + batch dims -- required by LSTM
new_d1 = tf.reshape(tf.reduce_prod(tf.shape(_enc_z)[:-2]), (1,))
new_shape = tf.concat((new_d1, tf.shape(_enc_z)[-2:]), 0)
_enc_z = tf.reshape(_enc_z, new_shape)
# _enc_z shape now (samples*batch, T, feat+lat_stat)
# z2 vanilla RNN used in DHSAE Full. LFADS' z2 used elsewhere.
_ = rnn_kwargs.pop('clip_value', None)
_enc_z = tfkl.SimpleRNN(params['z2_units'], **rnn_kwargs)(_enc_z)
return _enc_z
def make_z_variational(params, _enc_z):
"""
Take the encoded latent sequence z (output of z1 and optionally z2)
and convert it to a distribution.
This isn't necessary for LFADS models because z isn't in its final encoded
form until inside the Complex cell, so it's up to the complex cell to
handle the formation of the distribution.
"""
if params['gen_cell_type'] == "Complex":
# LFADS - variational part taken care of in complex cell.
return _enc_z
# Get a multivariate normal diag over each timestep.
make_dist_fn, dist_params = make_mvn_dist_fn(
_enc_z, params['z_latent_size'], shift_std=params['q_z_init_std'],
offdiag=False, loc_name="z_loc", scale_name="z_scale", use_mvn_diag=True)
_q_z = tfpl.DistributionLambda(make_distribution_fn=make_dist_fn,
# convert_to_tensor_fn=lambda s: s.sample(N_SAMPLES),
name="q_z")(dist_params)
return _q_z
def get_z_prior(params):
    # TODO: Also return appropriate prior.
    # NOTE: graph_batch_size, hps, autocorrelation_taus, noise_variances and `self`
    # below are placeholders carried over from the LFADS code and are not defined here.
if params['z_prior_process'] == 'AR1':
prior = LearnableAutoRegressive1Prior(graph_batch_size, hps.co_dim,
autocorrelation_taus,
noise_variances,
hps.do_train_prior_ar_atau,
hps.do_train_prior_ar_nvar,
"u_prior_ar1")
else:
# RNN
prior = LearnableMultivariateNormalDiagCell(self.hidden_size, self.latent_size,
cell_type='gru')
return prior
def test_create_z_encoder(n_times=None, n_sensors=36):
# by skipping over prepare_inputs we are ignoring any read-in layers and coordinated dropout
K.clear_session()
input_dim = _params['input_factors'] or n_sensors
inputs = tf.keras.Input(shape=(n_times, input_dim))
f_sample = tf.keras.Input(shape=(_params['f_latent_size']))
z_enc = create_z_encoder({**_params, **_args.__dict__}, inputs, _f_inputs=f_sample)
q_z = make_z_variational({**_params, **_args.__dict__}, z_enc)
z_encoder = tf.keras.Model(inputs=(inputs, f_sample), outputs=q_z, name="z_encoder_model")
z_encoder.summary()
    dummy_q_z = z_encoder((tf.random.uniform((_args.batch_size, n_times or 2, input_dim)),
                           tf.random.uniform((_args.batch_size, _params['f_latent_size']))))
print(type(dummy_q_z), dummy_q_z.shape)
# TODO: Test KL divergence
z_prior = get_z_prior({**_params, **_args.__dict__})
z_kl = tfd.kl_divergence(dummy_q_z, z_prior)
print("z KL: ", z_kl)
def main():
# test_prepare_inputs()
# test_create_f_encoder()
test_create_z_encoder()
if __name__ == "__main__":
main()
| 45.819149 | 135 | 0.62085 |
a65069c1b3cfb81acde4553301457d0034898566
| 775 |
py
|
Python
|
04.DFS_BFS/SY/b1260_SY.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1 |
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
04.DFS_BFS/SY/b1260_SY.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2 |
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
04.DFS_BFS/SY/b1260_SY.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
# b1260
import sys
from collections import deque
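# DFS is recursive; raise the recursion limit since n can be up to 1000 (BOJ 1260).
sys.setrecursionlimit(10 ** 4)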
def DFS(v):
visit_D[v] = 1
print(v, end = " ")
for i in range(1, n + 1):
if visit_D[i] == 0 and graph[v][i] == 1 :
DFS(i)
def BFS(v):
q = deque()
q.append(v)
visit_B[v] = 1
while q:
v = q.popleft()
print(v, end = " ")
for i in range(1, n + 1):
if visit_B[i] == 0 and graph[v][i] == 1 :
q.append(i)
visit_B[i] = 1
n, m, v = map(int, sys.stdin.readline().split())
graph = []
visit_D = [0] * (n + 1)
visit_B = [0] * (n + 1)
for _ in range(n + 1) :
graph.append([0] * (n + 1))
for _ in range(m) :
a, b = map(int, sys.stdin.readline().split())
graph[a][b] = graph[b][a] = 1
DFS(v)
print()
BFS(v)
| 19.871795 | 53 | 0.468387 |
5bd398812029a6c03bb794ddaf5be383498f5fa8
| 11,611 |
py
|
Python
|
packages/watchmen-utilities/src/watchmen_utilities/datetime_helper.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-utilities/src/watchmen_utilities/datetime_helper.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-utilities/src/watchmen_utilities/datetime_helper.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from json import JSONEncoder
from re import sub
from typing import Any, List, Optional, Tuple, Union
from .array_helper import ArrayHelper
class DateTimeEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, (datetime, date)):
return o.isoformat()
if isinstance(o, Decimal):
return float(o)
return super().default(o)
def get_current_time_in_seconds() -> datetime:
return datetime.now().replace(tzinfo=None, microsecond=0)
def is_date_or_time_instance(value: Any) -> bool:
return value is not None and (isinstance(value, date) or isinstance(value, time) or isinstance(value, datetime))
def truncate_time(value: Union[date, datetime]) -> datetime:
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
else:
return datetime(year=value.year, month=value.month, day=value.day) \
.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
def last_day_of_month(a_date: date) -> int:
return ((a_date.replace(day=1) + timedelta(days=31)).replace(day=1) - timedelta(days=1)).day
def year_diff(end_date: date, start_date: date) -> int:
end_year = end_date.year
end_month = end_date.month
end_day = end_date.day
start_year = start_date.year
start_month = start_date.month
start_day = start_date.day
if end_year == start_year:
# same year, always return 0
return 0
elif end_year > start_year:
if end_month == start_month:
if end_day >= start_day:
return end_year - start_year
elif end_month == 2:
last_day_of_end_month = last_day_of_month(end_date)
if end_day == last_day_of_end_month and start_day >= end_day:
return end_year - start_year
else:
return end_year - start_year - 1
else:
return end_year - start_year - 1
elif end_month > start_month:
return end_year - start_year
else:
return end_year - start_year - 1
else:
if end_month == start_month:
if end_day > start_day:
if end_month == 2:
last_day_of_start_month = last_day_of_month(start_date)
if start_day == last_day_of_start_month:
return end_year - start_year
else:
return end_year - start_year + 1
else:
return end_year - start_year + 1
else:
return end_year - start_year
elif end_month > start_month:
return end_year - start_year + 1
else:
return end_year - start_year
def month_diff(end_date: date, start_date: date) -> int:
end_year = end_date.year
end_month = end_date.month
end_day = end_date.day
start_year = start_date.year
start_month = start_date.month
start_day = start_date.day
if end_year == start_year:
if end_month == start_month:
# same year, same month, always return 0
return 0
if end_month > start_month:
if end_day >= start_day:
return end_month - start_month
else:
last_day_of_end_month = last_day_of_month(end_date)
if last_day_of_end_month == end_day and start_day >= end_day:
# it is last day of end month
return end_month - start_month
else:
return end_month - start_month - 1
else:
# end date is before start date
if end_day > start_day:
last_day_of_start_month = last_day_of_month(start_date)
if last_day_of_start_month == start_day and end_day >= start_day:
# it is last day of start month
return end_month - start_month
else:
return end_month - start_month + 1
else:
return end_month - start_month
elif end_year > start_year:
if end_day >= start_day:
return (end_year - start_year) * 12 + end_month - start_month
else:
last_day_of_end_month = last_day_of_month(end_date)
if last_day_of_end_month == end_day and start_day >= end_day:
return (end_year - start_year) * 12 + end_month - start_month
else:
return (end_year - start_year) * 12 + end_month - start_month + 1
else:
# end year is before start year
if end_day > start_day:
last_day_of_start_month = last_day_of_month(start_date)
if last_day_of_start_month == start_day and end_day >= start_day:
# it is last day of start month
return (end_year - start_year + 1) * 12 + 12 - end_month + start_month
else:
return (end_year - start_year + 1) * 12 + 12 - end_month + start_month - 1
else:
return (end_year - start_year + 1) * 12 + 12 - end_month + start_month
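# Examples (following the conventions above): month_diff(date(2021, 3, 31), date(2021, 1, 31)) == 2,
# while month_diff(date(2021, 3, 30), date(2021, 1, 31)) == 1, because the end day has not yet
# reached the start day and is not the last day of its month.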
def try_to_format_time(might_be_time: str, time_format: str) -> Tuple[bool, Optional[time]]:
"""
    Return (True, time) if parsing succeeds, otherwise (False, None).
"""
try:
d = datetime.strptime(might_be_time, time_format)
return True, d.time()
except ValueError:
return False, None
def is_time(value: Optional[str], formats: List[str]) -> Tuple[bool, Optional[time]]:
"""
none is not a time value, otherwise remove non-number characters and try to parse by given formats.
digits after removing must match digits of format
"""
if value is None:
return False, None
tidy_value = sub(r'[^0-9+]', '', value)
count = len(tidy_value)
suitable_formats = ArrayHelper(formats).filter(lambda x: len(x) == count).to_list()
for suitable_format in suitable_formats:
parsed, date_value = try_to_format_time(tidy_value, suitable_format)
if parsed:
return parsed, date_value
return False, None
def try_to_time(value: Any, formats: List[str]) -> Optional[time]:
"""
try to parse given value to date, or returns none when cannot be parsed.
formats can be datetime and date format
"""
if value is None:
return None
elif isinstance(value, time):
return value
elif isinstance(value, str):
parsed, date_value = is_time(value, formats)
if parsed:
return date_value
return None
def try_to_format_date(might_be_date: str, date_format: str) -> Tuple[bool, Optional[date]]:
"""
    Return (True, datetime) if parsing succeeds, otherwise (False, None).
"""
try:
d = datetime.strptime(might_be_date, date_format)
return True, d
except ValueError:
return False, None
def is_suitable_format(value_length: int, a_format: str) -> bool:
plus_year = '%Y' in a_format
plus_timezone = '%z' in a_format
plus_digits = (2 if plus_year else 0) + (3 if plus_timezone else 0)
if value_length > 14 and not plus_timezone:
return '%f' in a_format
elif value_length > 14 and plus_timezone:
if value_length > 19:
return '%f' in a_format and plus_timezone
else:
return value_length == len(a_format) + plus_digits
else:
return value_length == len(a_format) + plus_digits
def is_date_plus_format(value: Optional[str], formats: List[str]) -> Tuple[bool, Optional[date], Optional[str]]:
"""
none is not a date value, otherwise remove non-number characters and try to parse by given formats.
digits after removing must match digits of format.
return format itself when parsed
"""
if value is None:
return False, None, None
tidy_value = sub(r'[^0-9+]', '', value)
count = len(tidy_value)
# format cannot use length to match
suitable_formats = ArrayHelper(formats).filter(lambda x: is_suitable_format(count, x)).to_list()
for suitable_format in suitable_formats:
parsed, date_value = try_to_format_date(tidy_value, suitable_format)
if parsed:
return parsed, date_value, suitable_format
return False, None, None
def is_date(value: Optional[str], formats: List[str]) -> Tuple[bool, Optional[date]]:
parsed, date_value, _ = is_date_plus_format(value, formats)
return parsed, date_value
def is_datetime(value: Optional[str], formats: List[str]) -> Tuple[bool, Optional[datetime]]:
parsed, date_value = is_date(value, formats)
if not parsed:
return False, None
elif isinstance(date_value, datetime):
return True, date_value
else:
return True, datetime(
year=date_value.year, month=date_value.month, day=date_value.day,
hour=0, minute=0, second=0, microsecond=0)
def try_to_date(value: Any, formats: List[str], allow_timestamp: bool = False) -> Optional[date]:
"""
try to parse given value to date, or returns none when cannot be parsed.
formats can be datetime and date format
"""
if value is None:
return None
elif isinstance(value, date):
return value
elif allow_timestamp and (isinstance(value, int) or isinstance(value, float)):
# timestamp
return datetime.fromtimestamp(value, tz=None)
elif allow_timestamp and isinstance(value, Decimal):
return datetime.fromtimestamp(float(value), tz=None)
elif isinstance(value, str):
parsed, date_value = is_date(value, formats)
if parsed:
return date_value
return None
DATE_FORMAT_MAPPING = {
'Y': '%Y', # 4 digits year
'y': '%y', # 2 digits year
'M': '%m', # 2 digits month
'D': '%d', # 2 digits day of month
'h': '%H', # 2 digits hour, 00 - 23
'H': '%I', # 2 digits hour, 01 - 12
'm': '%M', # 2 digits minute
's': '%S', # 2 digits second
'W': '%A', # Monday - Sunday
'w': '%a', # Mon - Sun
'B': '%B', # January - December
'b': '%b', # Jan - Dec
'p': '%p' # AM/PM
}
def translate_date_format_to_memory(date_format: str) -> str:
    # translate in a single regex pass: sequential replaces over the mapping
    # would corrupt earlier results, e.g. 'M' -> '%m' would then be hit by 'm' -> '%M'
    return sub(
        '|'.join(DATE_FORMAT_MAPPING),
        lambda match: DATE_FORMAT_MAPPING[match.group(0)],
        date_format)
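# Illustrative sketch (not part of the original module):
# translate_date_format_to_memory('Y/M/D h:m:s') -> '%Y/%m/%d %H:%M:%S'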
class DateTimeConstants(int, Enum):
"""
Week starts from Sunday
"""
HALF_YEAR_FIRST = 1
HALF_YEAR_SECOND = 2
QUARTER_FIRST = 1
QUARTER_SECOND = 2
QUARTER_THIRD = 3
QUARTER_FOURTH = 4
JANUARY = 1
FEBRUARY = 2
MARCH = 3
APRIL = 4
MAY = 5
JUNE = 6
JULY = 7
AUGUST = 8
SEPTEMBER = 9
OCTOBER = 10
NOVEMBER = 11
DECEMBER = 12
HALF_MONTH_FIRST = 1
HALF_MONTH_SECOND = 2
TEN_DAYS_FIRST = 1
TEN_DAYS_SECOND = 2
TEN_DAYS_THIRD = 3
WEEK_OF_YEAR_FIRST_SHORT = 0 # first week less than 7 days, otherwise week of year starts from 1
WEEK_OF_YEAR_FIRST = 1
WEEK_OF_YEAR_LAST = 53
WEEK_OF_MONTH_FIRST_SHORT = 0 # first week less than 7 days, otherwise week of month starts from 1
WEEK_OF_MONTH_FIRST = 1
WEEK_OF_MONTH_LAST = 5
HALF_WEEK_FIRST = 1
HALF_WEEK_SECOND = 2
DAY_OF_MONTH_MIN = 1
DAY_OF_MONTH_MAX = 31
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
DAY_KIND_WORKDAY = 1
DAY_KIND_WEEKEND = 2
DAY_KIND_HOLIDAY = 3
HOUR_KIND_WORKTIME = 1
HOUR_KIND_OFF_HOURS = 2
HOUR_KIND_SLEEPING_TIME = 3
AM = 1
PM = 2
def get_year(dt: date) -> int:
return dt.year
def get_month(dt: date) -> int:
return dt.month
def get_half_year(dt: date) -> int:
return DateTimeConstants.HALF_YEAR_FIRST.value if get_month(dt) <= 6 else DateTimeConstants.HALF_YEAR_SECOND.value
def get_quarter(dt: date) -> int:
month = get_month(dt)
if month <= 3:
return DateTimeConstants.QUARTER_FIRST.value
elif month <= 6:
return DateTimeConstants.QUARTER_SECOND.value
elif month <= 9:
return DateTimeConstants.QUARTER_THIRD.value
else:
return DateTimeConstants.QUARTER_FOURTH.value
def get_week_of_year(dt: date) -> int:
return int(dt.strftime('%U'))
def get_week_of_month(dt: date) -> int:
first_day = dt.replace(day=1)
first_day_week = get_week_of_year(first_day)
week_of_year = get_week_of_year(dt)
if first_day_week == week_of_year:
if get_day_of_week(first_day) == DateTimeConstants.SUNDAY.value:
# first week is full week
            return DateTimeConstants.WEEK_OF_MONTH_FIRST.value
else:
# first week is short
            return DateTimeConstants.WEEK_OF_MONTH_FIRST_SHORT.value
else:
if get_day_of_week(first_day) == DateTimeConstants.SUNDAY.value:
# first week is full week, must add 1
return week_of_year - first_day_week + 1
else:
# first week is short
return week_of_year - first_day_week
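# Illustrative sketch (not part of the original module): 2019-01-01 fell on a
# Tuesday, so the short first week yields WEEK_OF_MONTH_FIRST_SHORT (0), while
# 2019-01-06, the first Sunday, already belongs to week 1.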
def get_day_of_month(dt: date) -> int:
return dt.day
def get_day_of_week(dt: date) -> int:
    # iso weekday: Monday is 1 and Sunday is 7; shift so that Sunday is 1 and
    # Saturday is 7, matching the SUNDAY..SATURDAY constants above
    return dt.isoweekday() % 7 + 1
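# Quick sanity check (not part of the original module): 2019-01-06 was a Sunday,
# so get_day_of_week(date(2019, 1, 6)) == DateTimeConstants.SUNDAY.value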
| 28.319512 | 115 | 0.720351 |
1b4652dffd6c7f8f74860a065e98ab012675f6ba
| 5,200 |
py
|
Python
|
tests/onegov/fsi/test_models.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/fsi/test_models.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/fsi/test_models.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import datetime
from sedate import utcnow
from onegov.fsi.models.course_attendee import CourseAttendee
from onegov.fsi.models.course_event import CourseEvent
from onegov.fsi.models.course_notification_template import get_template_default
from onegov.fsi.models.course_subscription import CourseSubscription
def test_attendee(
session, attendee, future_course_event, member, course_event):
# past_event = course_event(session)
course_event = future_course_event(session)
attendee, data = attendee(session)
member = member(session)
assert attendee.subscriptions.count() == 0
assert attendee.possible_course_events().count() == 1
assert attendee.user == member
assert member.attendee == attendee
# Add a subscription
subscription = CourseSubscription(
course_event_id=course_event[0].id, attendee_id=attendee.id)
session.add(subscription)
session.flush()
assert attendee.subscriptions.count() == 1
assert course_event[0].start > utcnow()
assert attendee.course_events.first() == course_event[0]
assert attendee.possible_course_events().count() == 0
# Test subscription backref
assert subscription.attendee == attendee
    # Check the event of the subscription
assert attendee.subscriptions[0].course_event == course_event[0]
# delete the subscription
attendee.subscriptions.remove(subscription)
# and add it differently
attendee.subscriptions.append(subscription)
assert attendee.subscriptions.count() == 1
def test_course_event(scenario):
scenario.add_attendee()
scenario.add_course(name='Course')
scenario.add_course_event(scenario.latest_course, max_attendees=20)
delta = datetime.timedelta(days=366)
# Add a participant via a subscription
event = scenario.latest_event
scenario.add_subscription(event, None, dummy_desc='Placeholder')
scenario.add_subscription(event, scenario.latest_attendee)
# Add inactive attendee
scenario.add_attendee(active=False)
scenario.add_subscription(event, scenario.latest_attendee)
scenario.commit()
scenario.refresh()
event = scenario.latest_event
assert event.subscriptions.count() == 3
assert event.attendees.count() == 2
assert event.available_seats == 20 - 3
assert event.possible_subscribers().first() is None
# Test possible and excluded subscribers
scenario.add_attendee(username='[email protected]')
attendee_2 = scenario.latest_attendee
# event = scenario.latest_event
assert event.course
assert event.possible_subscribers(year=event.end.year).all() == [
attendee_2
]
scenario.add_course_event(
scenario.latest_course,
start=event.start + delta, end=event.end + delta,
max_attendees=20
)
event2 = scenario.latest_event
# Event for a year later, exclude the one who has a subscription to this
# course
assert event.possible_subscribers(year=event.end.year + 1).count() == 1
assert event2.possible_subscribers(year=event.end.year).count() == 1
assert event2.possible_subscribers(year=event.end.year + 1).count() == 2
assert event.possible_subscribers(external_only=True).count() == 0
assert event.excluded_subscribers().count() == 2
assert event2.possible_subscribers().first() == attendee_2
assert scenario.latest_course.future_events.count() == 2
def test_subscription(session, attendee, course_event):
attendee = attendee(session)
course_event = course_event(session)
res = CourseSubscription(
course_event_id=course_event[0].id,
attendee_id=attendee[0].id
)
session.add(res)
session.flush()
# Test backrefs
assert res.course_event == course_event[0]
assert res.attendee == attendee[0]
assert str(res) == 'L, F'
def test_cascading_event_deletion(session, db_mock_session):
# If a course event is deleted, all the subscriptions should be deleted
session = db_mock_session(session)
event = session.query(CourseEvent).first()
assert event.subscriptions.count() == 2
session.delete(event)
assert session.query(CourseSubscription).count() == 0
assert event.subscriptions.count() == 0
def test_cascading_attendee_deletion(session, db_mock_session):
# If an attendee is deleted, his reservations should be deleted
session = db_mock_session(session)
attendee = session.query(CourseAttendee).first()
assert session.query(CourseSubscription).count() == 2
session.delete(attendee)
assert session.query(CourseSubscription).count() == 1
def test_notification_templates(session, course_event):
event, data = course_event(session)
assert len(event.notification_templates) == 4
assert event.info_template
assert event.reservation_template
assert event.reminder_template
assert event.cancellation_template
func = get_template_default
assert event.info_template.subject == func(None, 'info')
assert event.reservation_template.subject == func(None, 'reservation')
assert event.reminder_template.subject == func(None, 'reminder')
assert event.cancellation_template.subject == func(None, 'cancellation')
| 35.135135 | 79 | 0.735962 |
04793fb03b0e4a3d04cf47dc8afa445e2b0a4477
| 1,603 |
py
|
Python
|
pizza/tests/test_module_pizza.py
|
gray-adeyi/pizza
|
659db6e85492903374416295cc3ca3a78584eccb
|
[
"MIT"
] | null | null | null |
pizza/tests/test_module_pizza.py
|
gray-adeyi/pizza
|
659db6e85492903374416295cc3ca3a78584eccb
|
[
"MIT"
] | null | null | null |
pizza/tests/test_module_pizza.py
|
gray-adeyi/pizza
|
659db6e85492903374416295cc3ca3a78584eccb
|
[
"MIT"
] | 1 |
2022-03-17T00:54:27.000Z
|
2022-03-17T00:54:27.000Z
|
import unittest
from ..pizza import (
Gender,
MailTemplate,
User,
AddressBook,
Setting,
MailTemplateBook,
)
class PizzaTest(unittest.TestCase):
def test_user_model(self):
user = User('gbenga', 'adeyi', '[email protected]', Gender.male)
self.assertEqual(user.gender, Gender.male)
self.assertEqual(user.fullname(), 'adeyi gbenga')
self.assertEqual(user.fullname(order='fl'), 'gbenga adeyi')
def test_address_book(self):
book = AddressBook()
self.assertIsInstance(book, AddressBook)
self.assertEqual(book.contact_list, [])
book.add_contact('gbenga', 'adeyi',
'[email protected]', Gender.male)
self.assertListEqual(book.contact_list, [
            User('gbenga', 'adeyi', '[email protected]', Gender.male)])  # TODO: find a way to compare the two lists so the test passes
def test_settings(self):
settings = Setting()
self.assertEqual(settings.settings, {})
settings.update_setting('PORT', 587)
self.assertDictEqual(settings.settings, {'PORT': 587})
def test_mail_template_book(self):
mail_template_book = MailTemplateBook()
self.assertListEqual(mail_template_book.mail_template_list, [])
mail_template_book.add_template(
'Newsletter', 'Dear {{}}, How have you been?')
self.assertListEqual(mail_template_book.mail_template_list, [
MailTemplate('Newsletter', 'Dear {{}}, How have you been?')])
if __name__ == '__main__':
unittest.main()
| 34.847826 | 139 | 0.644417 |
047bc3f27311aade1b6443a981a8ece6acc26357
| 6,552 |
py
|
Python
|
SBTK_League_Helper/update_tournaments.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
SBTK_League_Helper/update_tournaments.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
SBTK_League_Helper/update_tournaments.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
from src.interfacing.ogs.connect import Authentication
import codecs
import sys
import os
from time import sleep
def loadList(pNameFile):
iList = []
with codecs.open(pNameFile, "r", "utf-8") as f:
for line in f:
iList.append(line)
return iList
if __name__ == "__main__":
a = Authentication("Kuksu League", "", testing=False);
iGroupNames = loadList("E:/Project/OGS/OGS-League/group_names.txt");
iGroupIDs = loadList("E:/Project/OGS/OGS-League/group_ids.txt");
nGroups = len(iGroupNames);
for i in range(nGroups):
iGroupNames[i] = iGroupNames[i].replace("\r\n", "");
iGroupNames[i] = iGroupNames[i].replace("\n", "");
iGroupIDs[i] = iGroupIDs[i].replace("\r\n", "");
iGroupIDs[i] = iGroupIDs[i].replace("\n", "");
iGroupIDs[i] = int(iGroupIDs[i]);
iDescription = """
Kuksu Main Title Tournament 9th Cycle Group %s
Title Holder: <a href='https://online-go.com/user/view/35184/vitality'>vitality (5d)</a>
Previous cycles:
<table style="text-align:center;" border='2'>
<tr><th rowspan=2>Cycle</th><td colspan=3><b>Title Match</b></td><td colspan=3><b>Title Tournament</b></td></tr>
<tr>
<th>Winner</th><th>Score</th><th>Runner-up</th>
<th>Winner<img src='https://a00ce0086bda2213e89f-570db0116da8eb5fdc3ce95006e46d28.ssl.cf1.rackcdn.com/4.2/img/trophies/gold_title_19.png' alt='Gold'></img></th>
<th>Runner-up<img src='https://a00ce0086bda2213e89f-570db0116da8eb5fdc3ce95006e46d28.ssl.cf1.rackcdn.com/4.2/img/trophies/silver_title_19.png' alt='Silver'></img></th>
<th>3rd Place<img src='https://a00ce0086bda2213e89f-570db0116da8eb5fdc3ce95006e46d28.ssl.cf1.rackcdn.com/4.2/img/trophies/bronze_title_19.png' alt='Bronze'></img></th>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2375'>1</a></td>
<td><b>luke</b></td><td></td><td></td>
<td><b>luke (2d)</b></td><td>davos</td><td>gomad361</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2384'>2</a></td>
<td><b>gomad361</b></td><td>3-2</td><td>luke</td>
<td><b>luke (2d)</b></td><td>gomad361</td><td>hotspur</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2391'>3</a></td>
<td><b>Uberdude</b></td><td>∗</td><td>gomad361</td>
<td><b>Uberdude (6d)</b></td><td>KyuT</td><td>marigo</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2406'>4</a></td>
<td><b>Uberdude</b></td><td>5-0</td><td>KyuT</td>
<td><b>KyuT (4d)</b></td><td>quiller</td><td>morituri</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2424'>5</a></td>
<td><b>Uberdude</b></td><td>5-0</td><td>gomad361</td>
<td><b>gomad361 (2d)</b></td><td>morituri</td><td>betterlife</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2439'>6</a></td>
<td><b>Uberdude</b></td><td>5-0</td><td>Elin</td>
<td><b>Elin (3d)</b></td><td>gomad361</td><td>morituri</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2460'>7</a></td>
<td><b>Uberdude</b></td><td>3-2</td><td>vitality</td>
<td><b>vitality (5d)</b></td><td>Elin</td><td>gomad361</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2475'>8</a></td>
<td><b>vitality</b></td><td>∗</td><td>Uberdude</td>
<td><b>vitality (5d)</b></td><td>nrx</td><td>gojohn</td>
</tr>
<tr>
<td rowspan=5><a href='#'>9</a></td>
<td rowspan=5 colspan=3></td>
<td colspan=3>
<a href='https://online-go.com/tournament/12653'>[A]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12654'>[B1]</a>
<a href='https://online-go.com/tournament/12655'>[B2]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12656'>[C1]</a>
<a href='https://online-go.com/tournament/12657'>[C2]</a>
<a href='https://online-go.com/tournament/12658'>[C3]</a>
<a href='https://online-go.com/tournament/12659'>[C4]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12660'>[D1]</a>
<a href='https://online-go.com/tournament/12661'>[D2]</a>
<a href='https://online-go.com/tournament/12662'>[D3]</a>
<a href='https://online-go.com/tournament/12663'>[D4]</a>
<a href='https://online-go.com/tournament/12664'>[D5]</a>
<a href='https://online-go.com/tournament/12665'>[D6]</a>
<a href='https://online-go.com/tournament/12666'>[D7]</a>
<a href='https://online-go.com/tournament/12667'>[D8]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12668'>[E1]</a>
<a href='https://online-go.com/tournament/12669'>[E2]</a>
<a href='https://online-go.com/tournament/12670'>[E3]</a>
<a href='https://online-go.com/tournament/12671'>[E4]</a>
<a href='https://online-go.com/tournament/12672'>[E5]</a>
<a href='https://online-go.com/tournament/12673'>[E6]</a>
</td>
</tr>
</table>
∗ means the games were finished by timeout or retiring.
Rules could be found <a href='https://forums.online-go.com/t/league-format-kuksu-title-tournament-rules-and-discussion/5191'>here</a>.
""" % iGroupNames[i];
a.put(['tournaments', iGroupIDs[i]], {"description": iDescription
});
print("Tournament %s with id %d updated.\n" % (iGroupNames[i], iGroupIDs[i]));
sleep(2);
# tourney id 7370
"""
iTournament = a.post(['tournaments'],{
"id":12650,
"name":"Test Tournament 2",
"group":515,
"tournament_type":"roundrobin",
"description":"<b>Test 3</b>",
"board_size":19,
"handicap":0, #default -1 for auto
"time_start": "2015-12-01T00:00:00Z",
"time_control_parameters":{
"time_control":"fischer",
"initial_time":604800,
"max_time":604800,
"time_increment":86400
},
"rules": "korean",
"exclusivity": "invite", # open, group. default
"exclude_provisional": False, # default
"auto_start_on_max": True, # default
"analysis_enabled": True, #default
"settings":{
"maximum_players":10,
},
"players_start": 6, #default
"first_pairing_method": "slide", #slaughter, random, slide, strength . default
"subsequent_pairing_method": "slide", # default
"min_ranking":0,
"max_ranking":36
});
#print("Hello");
print(iTournament["id"]);
"""
#print "Tournament %s is created." % iTournament["id"];
# r= a.post (['tournaments', 12642, 'players'], app_param= {"player_id":40318} )
# print (r)
| 36 | 171 | 0.608364 |
b6fda47987073139c9ff2a0bdbc46017f6efc8d8
| 721 |
py
|
Python
|
config.py
|
persquare/ZK2
|
5c00d0a5a59123931848259286bb13bb86c5396b
|
[
"MIT"
] | 2 |
2020-03-05T16:01:17.000Z
|
2020-09-25T11:28:27.000Z
|
config.py
|
persquare/ZK2
|
5c00d0a5a59123931848259286bb13bb86c5396b
|
[
"MIT"
] | null | null | null |
config.py
|
persquare/ZK2
|
5c00d0a5a59123931848259286bb13bb86c5396b
|
[
"MIT"
] | null | null | null |
#
# Location of ZK-notes
# Defaults to "~/.zk"
# Can be overridden on commandline when starting server)
#
# notesdir = "~/Library/Mobile Documents/com~apple~CloudDocs/zk"
# notesdir = "~/mynotes"
#
# Editor
# Defaults to "/usr/bin/nano"
#
# editor = "/usr/local/bin/mate"
# editor = "/usr/local/bin/bbedit"
#
# Markdown rendering
# If no md_cmd given, all note text is rendered in a <pre></pre> environment
#
# md_cmd = "/usr/local/bin/markdown++"
md_cmd = "/usr/local/bin/markdown"
# md_cmd = "/usr/local/bin/pandoc -f markdown -t html"
# md_cmd = "/Library/Frameworks/Python.framework/Versions/3.7/bin/markdown_py"
if __name__ == '__main__':
import mdproc
print(mdproc.render("## Hello\nåäö world!"))
| 24.862069 | 78 | 0.682386 |
8e27be64dcad3409d7806f927914b835288f276f
| 475 |
py
|
Python
|
FlaskOccupations/util/minify.py
|
kkysen/Soft-Dev
|
b19881b1fcc9c7daefc817e6b975ff6bce545d81
|
[
"Apache-2.0"
] | null | null | null |
FlaskOccupations/util/minify.py
|
kkysen/Soft-Dev
|
b19881b1fcc9c7daefc817e6b975ff6bce545d81
|
[
"Apache-2.0"
] | null | null | null |
FlaskOccupations/util/minify.py
|
kkysen/Soft-Dev
|
b19881b1fcc9c7daefc817e6b975ff6bce545d81
|
[
"Apache-2.0"
] | null | null | null |
import bs4
import htmlmin
import flask
def prettify(html):
# type: (str) -> str
soup = bs4.BeautifulSoup(html, 'html.parser')
return soup.prettify()
def minify(app):
# type: (flask.Flask) -> None
wrapped = prettify if app.debug else htmlmin.minify
@app.after_request
def minifying_filter(response):
# type: (flask.Response) -> flask.Response
response.set_data(wrapped(response.get_data(as_text=True)))
return response
| 22.619048 | 67 | 0.671579 |
edd63906a404ad8c346076aa9f39ad9f1e8891c7
| 168 |
py
|
Python
|
aoc2020/day_15/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_15/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_15/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from .part_1 import Solution as Part1
class Solution(Part1):
expected = 436
def solve(self) -> any:
# /shrug
return self.spoken_at(30000000)
| 16.8 | 39 | 0.636905 |
949082f6dc27e1799babb6fb321a50e4212c03cf
| 6,739 |
py
|
Python
|
tests/onegov/core/test_cronjobs.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_cronjobs.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_cronjobs.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import requests
from datetime import datetime
from freezegun import freeze_time
from onegov.core import Framework
from onegov.core.cronjobs import parse_cron, Job
from onegov.core.utils import scan_morepath_modules
from pytest_localserver.http import WSGIServer
from sedate import replace_timezone
from sqlalchemy.ext.declarative import declarative_base
from time import sleep
from webtest import TestApp as Client
def test_run_cronjob(postgres_dsn, redis_url):
result = 0
class App(Framework):
pass
@App.path(path='')
class Root(object):
pass
@App.json(model=Root)
def view_root(self, request):
return {}
@App.cronjob(hour='*', minute='*', timezone='UTC', once=True)
def run_test_cronjob(request):
nonlocal result
result += 1
scan_morepath_modules(App)
app = App()
app.configure_application(
dsn=postgres_dsn,
base=declarative_base(),
redis_url=redis_url
)
app.namespace = 'municipalities'
app.set_application_id('municipalities/new-york')
# to test we need an actual webserver, webtest doesn't cut it here because
# we are making requests from the application itself
server = WSGIServer(application=app)
try:
server.start()
with freeze_time(replace_timezone(datetime(2016, 1, 1, 8, 0), 'UTC')):
requests.get(server.url)
for i in range(0, 600):
if result == 0:
sleep(0.1)
else:
break
sleep(0.1)
assert result == 1
finally:
server.stop()
def test_disable_cronjobs(redis_url):
class App(Framework):
pass
@App.path(path='')
class Root(object):
pass
@App.json(model=Root)
def view_root(self, request):
return {}
@App.cronjob(hour=8, minute=0, timezone='UTC')
def run_test_cronjob(request):
pass
@App.setting(section='cronjobs', name='enabled')
def cronjobs_enabled():
return False
scan_morepath_modules(App)
app = App()
app.configure_application(redis_url=redis_url)
app.namespace = 'municipalities'
app.set_application_id('municipalities/new-york')
client = Client(app)
client.get('/')
assert not app.config.cronjob_registry.cronjob_threads
def test_parse_cron():
assert tuple(parse_cron('*', 'hour')) == (
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)
assert tuple(parse_cron('*/2', 'hour')) == (
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22)
assert tuple(parse_cron('5', 'hour')) == (5, )
assert tuple(parse_cron('*/20', 'minute')) == (0, 20, 40)
def test_job_offset():
job = Job(test_job_offset, 8, 15, 'Europe/Zurich')
assert job.offset != 0
second_job = Job(test_job_offset, 8, 15, 'Europe/Zurich')
assert second_job.offset == job.offset
third_job = Job(lambda x: x, 8, 15, 'Europe/Zurich')
assert third_job.offset != job.offset
def test_next_runtime():
def in_timezone(*args, timezone='Europe/Zurich'):
return replace_timezone(datetime(*args), timezone)
def next_runtime(hour, minute, timezone='Europe/Zurich'):
job = Job(lambda x: x, hour=hour, minute=minute, timezone=timezone)
# disable offset for tests
job.offset = 0
return job.next_runtime()
# fixed hour, minute
with freeze_time(in_timezone(2019, 1, 1, 8, 14, 59)):
assert next_runtime(hour=8, minute=15) \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 0)):
assert next_runtime(hour=8, minute=15) \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 1)):
assert next_runtime(hour=8, minute=15) \
== in_timezone(2019, 1, 2, 8, 15)
# any hour, fixed minute
with freeze_time(in_timezone(2019, 1, 1, 8, 14, 59)):
assert next_runtime(hour='*', minute=15) \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 0)):
assert next_runtime(hour='*', minute=15) \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 1)):
assert next_runtime(hour='*', minute=15) \
== in_timezone(2019, 1, 1, 9, 15)
# fixed hour, any minute
with freeze_time(in_timezone(2019, 1, 1, 8, 14, 59)):
assert next_runtime(hour=8, minute='*') \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 0)):
assert next_runtime(hour=8, minute='*') \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 1)):
assert next_runtime(hour=8, minute='*') \
== in_timezone(2019, 1, 1, 8, 16)
# any hour, every 15 minutes
with freeze_time(in_timezone(2019, 1, 1, 8, 14, 59)):
assert next_runtime(hour='*', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 0)):
assert next_runtime(hour='*', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 1)):
assert next_runtime(hour='*', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 30)
with freeze_time(in_timezone(2019, 1, 1, 8, 45, 0)):
assert next_runtime(hour='*', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 45)
with freeze_time(in_timezone(2019, 1, 1, 8, 45, 1)):
assert next_runtime(hour='*', minute='*/15') \
== in_timezone(2019, 1, 1, 9, 0)
# every 2 hours, every 15 minutes
with freeze_time(in_timezone(2019, 1, 1, 8, 14, 59)):
assert next_runtime(hour='*/2', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 0)):
assert next_runtime(hour='*/2', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 15)
with freeze_time(in_timezone(2019, 1, 1, 8, 15, 1)):
assert next_runtime(hour='*/2', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 30)
with freeze_time(in_timezone(2019, 1, 1, 8, 45, 0)):
assert next_runtime(hour='*/2', minute='*/15') \
== in_timezone(2019, 1, 1, 8, 45)
with freeze_time(in_timezone(2019, 1, 1, 8, 45, 1)):
assert next_runtime(hour='*/2', minute='*/15') \
== in_timezone(2019, 1, 1, 10, 0)
with freeze_time(in_timezone(2019, 1, 1, 23, 59, 59)):
assert next_runtime(hour='*/2', minute='*/15') \
== in_timezone(2019, 1, 2, 0, 0)
| 30.355856 | 78 | 0.593411 |
bfa8e961e83f3d24fc629fcaaf1cca1650b03496
| 368 |
py
|
Python
|
src/dfs/dfs.py
|
jwvg0425/DSAndAlgo3
|
173ccf717b84610def861854736c3b16a57ac727
|
[
"MIT"
] | null | null | null |
src/dfs/dfs.py
|
jwvg0425/DSAndAlgo3
|
173ccf717b84610def861854736c3b16a57ac727
|
[
"MIT"
] | null | null | null |
src/dfs/dfs.py
|
jwvg0425/DSAndAlgo3
|
173ccf717b84610def861854736c3b16a57ac727
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, value, adj):
self.value = value
self.adj = adj
self.visited = False
def dfs(graph):
    # visit every connected component; skip sources already reached from an
    # earlier component so that no node is visited twice
    for source in graph:
        if not source.visited:
            source.visited = True
            dfs_node(source)
def dfs_node(node):
print(node.value)
for e in node.adj:
if not e.visited:
e.visited = True
dfs_node(e)
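# Minimal usage sketch (assumed graph shape, not from the original file):
# a, b, c = Node('a', []), Node('b', []), Node('c', [])
# a.adj = [b]; b.adj = [c]
# dfs([a, b, c])  # prints a, b, c once each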
| 21.647059 | 35 | 0.559783 |
44cd4fa2ede3b7e3cd97ae2ef6df746e454a2041
| 4,386 |
py
|
Python
|
Scripts/syncWiki.py
|
HuttonICS/API
|
22d61759a77f2ab3c4e610bb251684c78f71d2e6
|
[
"MIT"
] | 40 |
2016-09-13T12:54:11.000Z
|
2021-09-05T18:45:38.000Z
|
Scripts/syncWiki.py
|
HuttonICS/API
|
22d61759a77f2ab3c4e610bb251684c78f71d2e6
|
[
"MIT"
] | 351 |
2016-08-10T17:58:38.000Z
|
2021-09-15T12:19:48.000Z
|
Scripts/syncWiki.py
|
HuttonICS/API
|
22d61759a77f2ab3c4e610bb251684c78f71d2e6
|
[
"MIT"
] | 32 |
2016-09-09T16:04:01.000Z
|
2021-01-21T13:27:40.000Z
|
#! /usr/bin/env python
import glob
import sys
import os
import requests
from time import sleep
def login(session, username, password):
# get login token
r1 = session.get(api_url, params={
'format': 'json',
'action': 'query',
'meta': 'tokens',
'type': 'login',
}, verify=verifySSL)
r1.raise_for_status()
# log in
r2 = session.post(api_url, data={
'format': 'json',
'action': 'login',
'lgname': username,
'lgpassword': password,
'lgtoken': r1.json()['query']['tokens']['logintoken'],
}, verify=verifySSL)
if r2.json()['login']['result'] != 'Success':
raise RuntimeError(r2.json()['login']['reason'])
# get edit token
r3 = session.get(api_url, params={
'format': 'json',
'action': 'query',
'meta': 'tokens',
}, verify=verifySSL)
return r3.json()['query']['tokens']['csrftoken']
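# Usage sketch (credentials are placeholders, not from this repository):
# session = requests.Session()
# csrf_token = login(session, 'BotUser', 'bot-password')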
def downloadAllPages(session, dir):
allPagesResponse = session.get(api_url, params={
'format': 'json',
'action': 'query',
'list': 'allpages',
'aplimit': 'max',
}, verify=verifySSL)
allpages = allPagesResponse.json()['query']['allpages']
for page in allpages:
pageContentResponse = session.get(api_url, params={
'format': 'json',
'action': 'parse',
'prop': 'wikitext',
'formatversion': '2',
'pageid': page['pageid']
}, verify=verifySSL)
outFilePath = dir + page['title'].replace('/', '-') + '.wiki'
fullText = pageContentResponse.json()['parse']['wikitext']
with open(outFilePath, "w") as outFile:
outFile.write(fullText)
print(outFilePath)
def downloadAllImages(session, dir):
allImagesResponse = session.get(api_url, params={
'format': 'json',
'action': 'query',
'list': 'allimages',
'ailimit': 'max',
}, verify=verifySSL)
allImages = allImagesResponse.json()['query']['allimages']
for image in allImages:
pageContentResponse = session.get(image['url'], verify=verifySSL, stream=True)
outFilePath = dir + "images/" + image['name'].replace('/', '-')
with open(outFilePath, "wb") as outFile:
for chunk in pageContentResponse.iter_content(1024):
outFile.write(chunk)
print(outFilePath)
def restoreAllImages(session, outputDir, wikiToken):
filePaths = glob.glob(outputDir + 'images/**/*', recursive=True)
for filePath in filePaths:
params = {
"action": "upload",
"filename": os.path.basename(filePath),
"format": "json",
"token": wikiToken,
"ignorewarnings": 1
}
file = {'file':(os.path.basename(filePath), open(filePath, 'rb'), 'multipart/form-data')}
r5 = session.post(api_url, files=file, data=params)
print()
print(r5.text)
print()
def restoreAllPages(session, outputDir, wikiToken):
filenames = glob.glob(outputDir + '/**/*.wiki', recursive=True)
for file in filenames:
pageTitle = os.path.basename(file)[:-5]
print(pageTitle)
pageContent = ''
with open(file, "r") as inFile:
pageContent += inFile.read()
pushPage(session, pageTitle, pageContent, wikiToken)
def pushPage(session, pageTitle, pageContent, wikiToken):
r4 = session.post(api_url, data={
'format': 'json',
'action': 'edit',
'assert': 'user',
'text': pageContent,
'summary': pageTitle,
'title': pageTitle,
'token': wikiToken,
})
print (r4.text)
sleep(1)
if 'error' in r4.json() :
if r4.json()['error']['code'] == 'ratelimited':
print('too many uploads, sleeping for 30 sec')
sleep(30)
pushPage(session, pageTitle, pageContent, wikiToken)
elif r4.json()['error']['code'] == 'badtoken':
print(wikiToken)
def main():
outputDir = '../Wiki/'
userName = ''
passw = ''
if '-out' in sys.argv:
i = sys.argv.index('-out')
outputDir = sys.argv[i + 1]
if outputDir[-1] != '/':
outputDir = outputDir + '/'
if '-un' in sys.argv:
i = sys.argv.index('-un')
userName = sys.argv[i + 1]
if '-pw' in sys.argv:
i = sys.argv.index('-pw')
passw = sys.argv[i + 1]
session = requests.Session()
if '-restore' in sys.argv:
wikiToken = login(session, userName, passw)
##restoreAllPages(session, outputDir, wikiToken)
restoreAllImages(session, outputDir, wikiToken)
else:
downloadAllPages(session, outputDir)
downloadAllImages(session, outputDir)
verifySSL=True
api_url = 'https://wiki.brapi.org/api.php'
##api_url = 'https://132.236.81.198/api.php'
main()
| 25.062857 | 91 | 0.637255 |
78610a3d99c5b8f59465d14df9217985b3fdbdb5
| 366 |
py
|
Python
|
restraunt/django_project/dashboard/views.py
|
rakesh-gopal/restraunt_dashboard
|
8eba72dabd9de27500b184b18e02b43dfdc7da89
|
[
"MIT"
] | null | null | null |
restraunt/django_project/dashboard/views.py
|
rakesh-gopal/restraunt_dashboard
|
8eba72dabd9de27500b184b18e02b43dfdc7da89
|
[
"MIT"
] | null | null | null |
restraunt/django_project/dashboard/views.py
|
rakesh-gopal/restraunt_dashboard
|
8eba72dabd9de27500b184b18e02b43dfdc7da89
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from libs.decorators import json_view
from dashboard import documents
@json_view
def index(request):
docs_count = documents.Actions.objects().count()
return {
'status': 'ok',
'results': 'hello from json',
'count': docs_count
}
| 20.333333 | 52 | 0.693989 |
78815d9fe318aa9c6bc1d3741d354564147b0c2a
| 1,475 |
py
|
Python
|
Problems/LinkedList/Medium/LRUCache/lru_cache.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/LinkedList/Medium/LRUCache/lru_cache.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/LinkedList/Medium/LRUCache/lru_cache.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import Optional
class Node:
def __init__(self, key: int, val: int) -> None:
self.key = key
self.val = val
self.prev = None
self.next = None
class LRUCache:
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self.dic = dict()
self.head = Node(-1, -1)
self.tail = Node(-1, -1)
self.head.next, self.tail.prev = self.tail, self.head
def get(self, key: int) -> int:
if key in self.dic:
cur_node = self.dic[key]
self._remove(cur_node)
self._add(cur_node)
return cur_node.val
return -1
def put(self, key: int, val: int) -> None:
if key in self.dic:
self._remove(self.dic[key])
node = Node(key, val)
self._add(node)
self.dic[key] = node
if len(self.dic) > self.capacity:
cur_node = self.head.next
self._remove(cur_node)
del self.dic[cur_node.key]
def _remove(self, node: Optional[Node]) -> None:
prev, nxt = node.prev, node.next
prev.next, nxt.prev = nxt, prev
def _add(self, node: Optional[Node]) -> None:
cur_node = self.tail.prev
cur_node.next, node.prev = node, cur_node
self.tail.prev, node.next = node, self.tail
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| 27.830189 | 63 | 0.568814 |
155e11485aac914ffeff5271972b2326b37e3bfb
| 489 |
py
|
Python
|
monitor/monitorfunctions.py
|
magns12/python-hwmonitor
|
12bceb82303ebcd22a2ef1be1c5afee0c29da269
|
[
"MIT"
] | null | null | null |
monitor/monitorfunctions.py
|
magns12/python-hwmonitor
|
12bceb82303ebcd22a2ef1be1c5afee0c29da269
|
[
"MIT"
] | null | null | null |
monitor/monitorfunctions.py
|
magns12/python-hwmonitor
|
12bceb82303ebcd22a2ef1be1c5afee0c29da269
|
[
"MIT"
] | 2 |
2021-03-22T09:32:14.000Z
|
2021-03-22T09:43:14.000Z
|
# Execute SQL code; parameters: the database (connection) and the SQL statement
def executeSQL(db, sql, dictionary=True):
    # Create the cursor and return rows as dictionaries, unless the flag was set to False
if dictionary == True:
cursor = db.cursor(dictionary=True)
else:
cursor = db.cursor()
    # Execute the SQL statement
cursor.execute(sql)
    # Fetch the data
data = cursor.fetchone()
return data
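# Usage sketch (assumes a MySQL connection from mysql-connector-python, which
# provides the dictionary=True cursor used above; all names are placeholders):
# import mysql.connector
# db = mysql.connector.connect(host="localhost", user="monitor",
#                              password="...", database="hardware")
# row = executeSQL(db, "SELECT * FROM sensors LIMIT 1")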
| 34.928571 | 103 | 0.627812 |
eca0c26cac4c3e497cb207bc77bebfca04c5e6f2
| 660 |
py
|
Python
|
Intro-Python-II/src/test_adv.py
|
tobias-fyi/01_intro_python
|
c56645291b8bce94d8511c3277fefba1fe8add89
|
[
"MIT"
] | null | null | null |
Intro-Python-II/src/test_adv.py
|
tobias-fyi/01_intro_python
|
c56645291b8bce94d8511c3277fefba1fe8add89
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/01_intro_python/Intro-Python-II/src/test_adv.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
"""
BeWilder - text adventure game :: Game tests
All tests implemented in Pytest.
"""
# Third-party imports
import pytest
# Local imports
from .room import Room
from .player import Player
narrow_name = "Narrow Passage"
narrow_description = """The narrow passage bends here from west
to north. The smell of gold permeates the air."""
def test_room_instance():
# Instantiate the room
room = Room(narrow_name, narrow_description)
assert isinstance(room, Room)
def test_player_instance():
# Instantiate the player
room = Room(narrow_name, narrow_description)
player = Player("Tobias", room)
assert isinstance(player, Player)
| 20 | 63 | 0.731818 |
1709fabc685eb78da9432070ff451e971eef5cc4
| 25,589 |
py
|
Python
|
isp/config.py
|
MePyDo/pygqa
|
61cde42ee815968fdd029cc5056ede3badea3d91
|
[
"MIT"
] | null | null | null |
isp/config.py
|
MePyDo/pygqa
|
61cde42ee815968fdd029cc5056ede3badea3d91
|
[
"MIT"
] | null | null | null |
isp/config.py
|
MePyDo/pygqa
|
61cde42ee815968fdd029cc5056ede3badea3d91
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
config
======
logging
-------
using logging::
    import logging
    logger = logging.getLogger( "MQTT" )
emit debug messages::
    logger.setLevel( logging.DEBUG )
logging levels:
* CRITICAL - 50
* ERROR - 40
* WARNING - 30
* INFO - 20
* DEBUG - 10
* NOTSET - 0
CHANGELOG
=========
0.1.2 / 2022-05-16
------------------
- add scheme parameter to server.webserver
- remove webserver from use_as_variables
0.1.1 / 2022-03-28
------------------
- add jinja Filter: fromisoformat, datetimeformat and jsondumps
- use secrets.token_hex() instead of os.urandom(16) for SECRET_KEY
0.1.0 / 2021-01-16
------------------
- First Release
'''
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R. Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.1"
__status__ = "Prototype"
import sys
import json
import os.path as osp
from dotmap import DotMap
from jinja2 import Environment, FileSystemLoader
from datetime import datetime
import glob
import re
import threading
import secrets
import logging
from isp.mqtt import MQTTclass
default_config = {
"server" : {
"webserver" : {
"scheme": "http",
"host": "127.0.0.1",
"port": 8085,
"name": "webapp",
"title": "webapp",
"resources" : "{{BASE_DIR}}/resources/",
"globals" : "{{BASE_DIR}}/resources/",
"ui" : "{{BASE_DIR}}/ui/",
"debug": True,
"reloader": True,
"TESTING": False,
"resources_test" : "{{BASE_DIR}}/tests/",
"checkNetarea": True,
"SECRET_KEY": secrets.token_hex()
},
"api": {
"prefix" : "/api",
"models" : [ ],
"DBADMIN": False,
"COVERAGE" : False
}
},
"use_as_variables":{
# "webserver" : "server.webserver",
"api" : "server.api",
"mqtt" : "server.mqtt",
"title" : "server.webserver.title",
"resources" : "server.webserver.resources"
}
}
class ispConfig( object ):
"""Konfiguaration aus config/config.json einlesen und bereitstellen.
Die config ist immer im development Mode außer im Pfad kommt production vor
dann wird production Mode gesetzt
Aufbau der config.json zugriff über ._config::
{
"config": {
<haupt konfig bereich : zugriff über .config>
}
}
Attributes
----------
_config: Dot
Die aktuelle Konfiguration
_configs: list
Eingebundene Konfigurationen (filename oder angegeben bei der intialisierung )
_lastOverlay: str
Gibt an bis zu welcher config Datei eingelesen wurde
_rootlevel:int
Fehlerlevel für das root logging (console). Default logging.WARNING
_mqttlevel:int
Fehlerlevel für das MQTT logging. Default logging.ERROR
_basedir: str
Verzeichniss des aufgerufenen Programms
_name : str
name des aufgerufenen Programms
_development : bool
Entwicklungszweig verwenden (True) oder nicht (False)
_loadErrors: list
listet die Dateien auf bei denen es zu einem Fehler beim einlesen kam
_mqtthdlr: None|cls
logger für mqtt zugriff über self._mqtthdlr
"""
def __init__( self, lastOverlay:int=None, development:bool=True,
rootlevel:int=logging.ERROR,
mqttlevel:int=logging.NOTSET,
cleanup:bool=False,
config:dict=None
):
"""Konfiguration initialisieren und laden.
Zuerst wird die Konfiguration config.json eingelesen
und anschließend sortiert von allen passenden config-*.json Dateien überlagert
Parameters
----------
lastOverlay : int
Gibt an bis zu welcher config Datei eingelesen wird.Default = 99999999 (config-99999999.json).
development : bool
Entwicklungszweig verwenden oder nicht. Default is True.
Wird die App in einem Unterverzeichnis mit dem Namen production/ oder development/ abgelegt,
so wird development autom. je nach Name gesetzt.
rootlevel: int - logging.ERROR
NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50. Default: ERROR
mqttlevel: int - logging.NOTSET
NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50. Default: NOTSET
cleanup: bool
MQTT Cleanup vor dem initialisieren durchführen. Default = False
config: dict
mit dieser Angabe wird keine Konfiguration geladen, sondern die angegebenen Daten verwendet
"""
        # set _basedir via __file__ so that a test call from here works
        self._basedir = osp.abspath( osp.join( osp.dirname( osp.abspath( __file__ ) ) , "../" ) )
        # name of the starting program
        self._name = osp.basename( sys.argv[0] )
        # check for a development environment
        self._development = development
        if self._basedir.find( '/production/' ) > -1: # pragma: no cover
            self._development = False
        elif self._basedir.find( '/development/' ) > -1:
            self._development = True
        # lastOverlay defaults to the current date
        if lastOverlay == None:
            # without lastOverlay, start with the numeric value of the current date
            lastOverlay = datetime.now().strftime("%Y%m%d")
        # lists the files for which an error occurred while reading
        self._loadErrors = []
        # set default values
        self._config = DotMap( default_config )
        self._configs = ["default"]
        if config:
            # store config in self._config
            self.update( config )
            self._configs.append( "init" )
        else:
            # read the configuration and store it in self._config
            self._configLoad( int(lastOverlay) )
        self._lastOverlay = lastOverlay
        # extend the configuration with BASE_DIR
        self._config[ "BASE_DIR" ] = self._basedir
        # default logger
        self.rootInitLogger( rootlevel )
        # logger for mqtt, accessed via self._mqtthdlr
        self._mqtthdlr = None
        # provide or initialize the MQTT logger
        self.mqttInitLogger( mqttlevel, cleanup )
        # pre-populate variables
        self.setVariables()
        # provide the Jinja environment
        self._env = self.jinjaEnv()
def update(self, config:dict={} ):
"""Führt ein update wie bei dict.update aber mit dict_merge aus.
Parameters
----------
config : dict
            Dict to merge into the config.
Returns
-------
self
"""
self._config = dict_merge(self._config, DotMap( config ) )
return self
def merge(self, name:str=None, config:dict={}):
"""Führt ein update in einem angegebenen config Zweig aus.
Gibt es name nicht wird er angelegt
Parameters
----------
name : str
            Identifier whose content is read out; dot operator for depth.
config : dict
            Dict to merge into the config branch.
Returns
-------
self
"""
branch = self.get(name, {} )
self.set( name, dict_merge(branch, DotMap( config ) ) )
return self
def _configLoad( self, lastOverlay:int=99999999 ):
"""Konfiguration aus config.json einlesen.
Die Datei muss sich ab _basedir im Verzeichniss config befinden
Alle config Dateien bis zu der durch _overlayLast gebildeten einlesen
Parameters
----------
lastOverlay : int
Default is 99999999
"""
def readConfig( filename:str ):
if osp.isfile( filename ):
                # read the normal config file first
with open( filename, 'r') as f:
try:
config = json.load( f )
self._config = dict_merge(self._config, DotMap( config ) )
self._configs.append( osp.basename( filename ) )
except:
                        # also show the error here, since no logger is available yet
self._loadErrors.append( filename )
self._configs.append( osp.basename( filename ) + " - ERROR" )
print( "CONFIG: Fehler bei json.load", filename )
pass
        # determine the path to the configuration
configPath = osp.join( self._basedir, "config")
        # read the normal config file first
readConfig( osp.join( configPath, "config.json") )
        # now read and overlay all other overlay files in sorted order
configs = glob.glob(osp.join( configPath, 'config-*.json') )
if len(configs) > 0:
configs.sort()
            # collect all config files with numbers after the -
for name in configs:
res = re.search('config-([0-9]*)\.json', name )
                # convert year and month to a number; a year alone is padded with 00
ym = 99999999
if res:
ym = int( res.group(1) )
if ym <= lastOverlay:
readConfig( name )
def setVariables( self ):
"""Setzt Defaults und Angaben aus use_as_variables in variables.
setzt immer::
- BASE_DIR
- version
- serverHost
            - everything from use_as_variables
Returns
-------
variables : dict
            variables section of the config
"""
variables = self._config.get("variables", DotMap() ).toDict()
use_as_variables = self._config.get("use_as_variables", DotMap() ).toDict()
variables["BASE_DIR"] = self._basedir
variables["version"] = self.get( "version", __version__)
variables["serverHost"] = "{}://{}:{}".format(
self.get("server.webserver.scheme", ""),
self.get("server.webserver.host", ""),
self.get("server.webserver.port", "")
)
for config_name, config_key in use_as_variables.items():
value = self.get( config_key )
if isinstance( value, DotMap ):
variables[ config_name ] = self.get( config_key ).toDict()
else:
variables[ config_name ] = self.get( config_key )
self._config["variables"] = variables
return variables
def __setitem__(self, k, v):
"""Defines behavior for when an item is assigned to.
        using the notation self[key] = value.
This is part of the mutable container protocol.
Again, you should raise KeyError and TypeError where appropriate.
Parameters
----------
k : str
            Name of the attribute in the object or in _config.
v :
            Content to set.
"""
if k[0] == "_":
super().__setattr__(k, v)
else:
self._config[k] = v
def __getitem__(self, k):
"""Zugriff auf die Klassenattribute mit _.
sonst wird aus self._config geholt
Defines behavior for when an item is accessed, using the notation self[key].
This is also part of both the mutable and immutable container protocols.
It should also raise appropriate exceptions::
TypeError if the type of the key is wrong and KeyError if there is no corresponding value for the key.
Parameters
----------
k : str
            Name of the requested attribute from the object's dict or from _config.
Returns
-------
        Value of the attribute
"""
if k[0] == "_":
return self.__dict__[k]
else:
return self._config[k]
def __setattr__(self, k, v):
"""Zugriff auf die Klassenattribute mit _.
sonst wird in self._config gesetzt
Unlike __getattr__, __setattr__ is an encapsulation solution.
It allows you to define behavior for assignment to an attribute regardless
of whether or not that attribute exists,
meaning you can define custom rules for any changes in the values of attributes.
However, you have to be careful with how you use __setattr__.
Parameters
----------
k : str
            Name of the attribute in the object or in _config.
v :
            Content to set.
"""
if k[0] == "_":
self.__dict__[k] = v
else:
self._config[k] = v
def __getattr__(self, k):
"""Access nonexistent attribute.
        Returns None for _, otherwise determined from the config.
        * Not present in the object for _ : None
        * Not present in the config: DotMap, or a DotMap with content
self.name # name doesn't exist
Parameters
----------
k : str
            Name of the requested attribute from the object or from _config.
Returns
-------
        Value of the attribute, or None.
"""
if k[0] == "_":
return None
else:
return self._config[k]
def __repr__(self):
"""Define behavior for when repr() is called on an instance of your class.
The major difference between str() and repr() is intended audience.
repr() is intended to produce output that is mostly machine-readable (in many cases, it could be valid Python code even),
whereas str() is intended to be human-readable.
Returns
-------
str
            Content of the config.
"""
return str(self._config)
def get(self, name:str=None, default=None, replaceVariables:bool=False):
"""Read from configuration.
        when no name is given, the complete config is returned
Parameters
----------
name : str|list
Identifier whose content is read out. Dot operator for depth
default :
Return if name not found
replaceVariables: bool
Replace variable information in strings. Default is False
"""
        # when no name is given, the complete config is returned
if not name:
return self._config.toDict()
keys = []
if isinstance(name, str):
keys = name.split(".")
elif isinstance(name, list):
keys = name
val = None
for key in keys:
if val == None:
# try first level
val = self._config.get( key )
# undefined : always use DotMap
if not val:
self._config[ key ] = DotMap()
else:
if isinstance( val, DotMap):
try:
val = val.get(key, default)
except Exception as e: # pragma: no cover
# occurs when a non-existent sub key is searched for, a.b = 12 but search for a.b.c
print("CONFIG: config.get error on get", keys, key, type(val), e )
val = default
pass
if val == None:
val = default
# replace variables if desired
if isinstance(val, str) and replaceVariables==True:
val = self.render_template( val )
return val
def set(self, setkeys:str=None, value=None):
"""set a value in the configuration.
Parameters
----------
setkeys : str|list
            Identifier whose content is set; use the dot operator for depth.
value :
Content to set
"""
# starting point is the config itself
here = self._config
# convert setkeys to list
keys = []
if isinstance(setkeys, str):
keys = setkeys.split(".")
elif isinstance(setkeys, list):
keys = setkeys
# For every key *before* the last one, we concentrate on navigating through the dictionary.
for key in keys[:-1]:
# Try to find here[key]. If it doesn't exist, create it with an empty DotMap.
# Then, update our `here` pointer to refer to the thing we just found (or created).
here = here.setdefault(key, DotMap() )
# Finally, set the final key to the given value
here[keys[-1]] = value
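    # Illustrative (assumed) use of the dot notation, not part of the original
    # class:
    #   cfg = ispConfig()
    #   cfg.set("server.webserver.port", 8085)
    #   cfg.get("server.webserver.port")  # -> 8085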
def rootInitLogger( self, level:int=None ):
"""Initializes the root logger
Parameters
----------
level : int, optional
Logging Level. The default is None.
Returns
-------
None.
"""
baselogger = logging.getLogger( )
# set level if specified
if level:
baselogger.setLevel( level )
# ---- Jinja Environment
#
def jinjaEnv(self):
"""Create Jinja Environment.
to add more extensions read:
- https://github.com/jpsca/jinja-markdown
Returns
-------
env: Environment
"""
#
# since the template system is not yet ready, simply replace BASE_DIR
#
tpl_dir = self.server.webserver.get("resources", ".").replace("{{BASE_DIR}}", self.BASE_DIR )
from jinja2 import select_autoescape
env = Environment(
extensions=[ 'jinja_markdown.MarkdownExtension'],
loader=FileSystemLoader(tpl_dir),
autoescape=select_autoescape(
disabled_extensions=('tmpl',),
default_for_string=False,
default=True,
)
)
def fromisoformat(value):
try:
value = datetime.fromisoformat( value )
except Exception:
pass
return value
def datetimeformat(value, format="%Y-%m-%d"):
try:
value = value.strftime(format)
except Exception:
pass
return value
def jsondumps(value):
try:
value = json.dumps(value, indent=2)
except Exception:
pass
return value
env.filters["fromisoformat"] = fromisoformat
env.filters["datetimeformat"] = datetimeformat
env.filters["jsondumps"] = jsondumps
return env
def render_template( self, tpl:str="", variables:dict=None, deep_replace:bool=False ):
"""Replaces all variables from variables in tpl.
If variables are not specified, _config["variables"] is used
Parameters
----------
tpl : str, optional
Jinja template string. The default is "".
variables : dict, optional
Variable information to be replaced. The default is _config["variables"].
deep_replace: bool, optional
Executes render twice to also replace statements in variables. The default is False
Returns
-------
tpl: str
rendered template
"""
if not variables:
variables = self._config["variables"]
        # always provide 'now' with the current time
variables["now"] = datetime.now()
# depending on deep_replace single or multiple runs
n = range(1)
if deep_replace:
n = range(3)
for i in n:
try:
_tpl = self._env.from_string( tpl )
tpl = _tpl.render( **variables )
except Exception as e: # pragma: no cover
print("CONFIG: config.render_template error on _tpl.render", e)
return tpl
# ---- MQTT Logging
#
def mqttInitLogger( self, level:int=None, cleanup:bool=False ):
"""Turn on logging via MQTT.
Parameters
----------
level : int, optional
NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50. Default: NOTSET
cleanup : bool, optional
Perform MQTT cleanup before initializing. Default = False
Returns
-------
None.
"""
# root logger first
self.logger_name = "root"
# set up a new handler if desired
if cleanup:
self.mqttCleanup()
if self._config.server.mqtt:
# Set MQTT logger
logger = logging.getLogger( "MQTT" )
# Handler for MQTT
mqtthdlr = self.mqttGetHandler( )
if not mqtthdlr:
#
# if something is changed here, the kernel must be restarted or mqttCleanup called
#
mqtt_init_ready = threading.Event()
self._thread_mqtthdlr = None
def signalStartup( msg ):
mqtt_init_ready.set()
def startMQTTclass():
"""Start MQTTclass via threading and wait for signalStartup.
Returns
-------
None.
"""
self._thread_mqtthdlr = MQTTclass( self._config.server.mqtt.toDict() )
# wait for signal
self._thread_mqtthdlr.signalStartup.connect( signalStartup )
# Call as a thread,via mq.get() to get the return of _retrieve
thread = threading.Thread( target=startMQTTclass )
thread.start()
# wait for 2 seconds or mqtt_init_ready signalStartup
while not mqtt_init_ready.wait( timeout=2 ):
mqtt_init_ready.set()
# if mqtt handler has been initialized set logging and _mqtthdlr
if self._thread_mqtthdlr and self._thread_mqtthdlr._mqttc:
_mqtthdlr = self._thread_mqtthdlr
# Initialize the logging handler with the MQTTclass class
logging.Handler.__init__( _mqtthdlr )
logger.addHandler( _mqtthdlr )
# put _mqtthdlr reference and send to logger
logger._mqtthdlr = _mqtthdlr
logger.send = _mqtthdlr.send
# provide progress
logger.progressStart = _mqtthdlr.progress_start
logger.progress = _mqtthdlr.progress_run
logger.progressReady = _mqtthdlr.progress_ready
# when everything is ready put reference to _mqtthdlr
self._mqtthdlr = _mqtthdlr
# remember logger name
self.logger_name = logger.name
else:
# logger is available put reference to _mqtthdlr
self._mqtthdlr = mqtthdlr
# remember logger name
self.logger_name = logger.name
# set level if specified
if level:
logger.setLevel( level )
def mqttGetHandler(self):
"""Specifies the mqtt handler when initialized.
Returns
-------
mqtthdlr.
"""
mqtthdlr = None
# If there is no logger in self._mqtthdlr, use logging to determine it
if self._mqtthdlr:
mqtthdlr = self._mqtthdlr
else:
logger = logging.getLogger( "MQTT" )
if hasattr(logger, '_mqtthdlr'):
mqtthdlr = logger._mqtthdlr
return mqtthdlr
def mqttCleanup( self ):
"""shutdown mqtt and remove the logger.
"""
if self._mqtthdlr:
# shutdown mqtt
self._mqtthdlr.shutdown()
logger = logging.getLogger( "MQTT" )
# remove connection to _mqtthdlr in logger
del( logger._mqtthdlr )
for h in logger.handlers:
logger.removeHandler(h)
self._mqtthdlr = None
# ----
import collections.abc
def dict_merge(dct, merge_dct, add_keys=True):
"""Recursive dict merge.
Inspired by ``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
This version will return a copy of the dictionary and leave the original
arguments untouched.
The optional argument ``add_keys``, determines whether keys which are
present in ``merge_dict`` but not ``dct`` should be included in the
new dict.
https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Args:
dct (dict): onto which the merge is executed
merge_dct (dict): dct merged into dct
add_keys (bool): whether to add new keys
Returns:
dict: updated dict
"""
dct = dct.copy()
if not add_keys:
merge_dct = {
k: merge_dct[k]
for k in set(dct).intersection(set(merge_dct))
}
for k, v in merge_dct.items():
        if isinstance(dct.get(k), dict) and isinstance(v, collections.abc.Mapping):
dct[k] = dict_merge(dct[k], v, add_keys=add_keys)
else:
dct[k] = v
return dct
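# Illustrative sketch (not part of the original module):
# dict_merge({"a": {"b": 1}}, {"a": {"c": 2}}) -> {"a": {"b": 1, "c": 2}}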
| 30.140165 | 209 | 0.559576 |
e531d824abbcc5d33c9e24616b212ac2e1f9e390
| 208 |
py
|
Python
|
tests/model/test_direction.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 3 |
2021-01-17T23:32:07.000Z
|
2022-01-30T14:49:16.000Z
|
tests/model/test_direction.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2 |
2021-01-17T13:37:56.000Z
|
2021-04-14T12:28:49.000Z
|
tests/model/test_direction.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2 |
2021-04-02T14:53:38.000Z
|
2021-04-20T11:10:17.000Z
|
import unittest
from chillow.model.direction import Direction
class DirectionTest(unittest.TestCase):
def test_should_have_four_different_directions(self):
self.assertEqual(len(Direction), 4)
| 20.8 | 57 | 0.793269 |
00d95248d16f3bce6d6d04804049d3a4454d28bf
| 1,611 |
py
|
Python
|
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/behavioral/visitor.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/behavioral/visitor.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/behavioral/visitor.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
"""
http://peter-hoffmann.com/2010/extrinsic-visitor-pattern-python-inheritance.html
*TL;DR
Separates an algorithm from an object structure on which it operates.
An interesting recipe could be found in
Brian Jones, David Beazley "Python Cookbook" (2013):
- "8.21. Implementing the Visitor Pattern"
- "8.22. Implementing the Visitor Pattern Without Recursion"
*Examples in Python ecosystem:
- Python's ast.NodeVisitor: https://github.com/python/cpython/blob/master/Lib/ast.py#L250
which is then being used e.g. in tools like `pyflakes`.
- `Black` formatter tool implements its own: https://github.com/ambv/black/blob/master/black.py#L718
"""
class Node:
pass
class A(Node):
pass
class B(Node):
pass
class C(A, B):
pass
class Visitor:
def visit(self, node, *args, **kwargs):
meth = None
for cls in node.__class__.__mro__:
meth_name = "visit_" + cls.__name__
meth = getattr(self, meth_name, None)
if meth:
break
if not meth:
meth = self.generic_visit
return meth(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
print("generic_visit " + node.__class__.__name__)
def visit_B(self, node, *args, **kwargs):
print("visit_B " + node.__class__.__name__)
def main():
"""
>>> a, b, c = A(), B(), C()
>>> visitor = Visitor()
>>> visitor.visit(a)
generic_visit A
>>> visitor.visit(b)
visit_B B
>>> visitor.visit(c)
visit_B C
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21.48 | 101 | 0.631906 |
daba2095a29b38a51fc77657afae60823560bf9c
| 33,415 |
py
|
Python
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/rackmanager/legacy/response_handler.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/rackmanager/legacy/response_handler.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/rackmanager/legacy/response_handler.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
from xml.etree import ElementTree
from controls.utils import completion_code, check_success, set_failure_dict, set_success_dict
NO_STATUS = False
def add_element (parent, tag, value, write_fail = False, nil = False):
"""
Add a new element to the XML document.
:param parent: The parent element for the element that will be created.
:param tag: The name of the new element.
:param value: The string value to assign to the element.
:param write_fail: Flag to indicate if failed values should be saved as the element data.
:param nil: Flag to indicate if a nil attribute should be added if the tag has no value.
:return The new element instance.
"""
element = ElementTree.SubElement (parent, tag)
if (value and (write_fail or (value.lower () != "failure"))):
element.text = value
elif (nil):
element.set ("i:nil", "true")
return element
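# Editor's illustrative sketch (not part of the original module): a minimal
# use of add_element. The tag names below are hypothetical examples.
def _example_add_element_usage ():
    root = ElementTree.Element ("ExampleInfo")
    add_element (root, "serialNumber", "ABC123")
    # With nil = True, a missing value renders as <assetTag i:nil="true"/>.
    add_element (root, "assetTag", None, nil = True)
    return ElementTree.tostring (root)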
class response_category:
"""
Response object that contains status information and other arbitrary information.
"""
def __init__ (self, response_type, status = None, results = []):
"""
Initialize the response container.
:param response_type: The type of response container that is being generated.
:param status: A request_status object representing the status for this container.
:param results: A list of objects that contain information for this container.
"""
self.root = response_type
self.completion = status
self.results = results
def format (self, parent = None):
"""
Format the response as an XML object representing the information contained by the object.
:param parent: The parent element that contains the response data. If this is not
specified, a new element will be created.
:return An XML element that contains the formatted response data and a flag indicating if
            the category is reporting success.
"""
if (parent is None):
parent = ElementTree.Element (tag = self.root, attrib = {
"xmlns:i" : "http://www.w3.org/2001/XMLSchema-instance",
"xmlns" : "http://schemas.datacontract.org/2004/07/Microsoft.GFS.WCS.Contracts"})
else:
parent = ElementTree.SubElement (parent, self.root)
success = NO_STATUS
if (self.completion):
success = self.completion.format (parent = parent)[1]
for result in self.results:
if (result):
success = result.format (parent = parent)[1] or success
if (self.completion):
self.completion.update_status (success)
return (parent, success)
class request_status:
"""
Status information for the object being requested.
"""
code_map = {
completion_code.success : "Success",
completion_code.failure : "Failure",
completion_code.deviceoff : "DevicePoweredOff",
completion_code.fwdecompress : "FirmwareDecompressing",
completion_code.notpresent : "Timeout"
}
@staticmethod
def get_placeholder_status ():
"""
Get a status object to act as a place holder for response objects that only have status
based on their children.
:return The status object.
"""
return request_status (set_failure_dict (""))
def __init__ (self, status):
"""
Initialize status information from a function result structure.
:param status: The function result information.
"""
code = status.get (completion_code.cc_key, None)
self.code = code if (code) else "Unknown"
self.description = status.get (completion_code.desc, "")
self.code_element = None
self.code = request_status.code_map.get (self.code, self.code)
def format (self, parent):
"""
Format the status information in the XML document.
:param parent: The parent element that will contain the status information.
:return The parent element and a flag indicating if the status indicates success.
"""
self.code_element = add_element (parent, "completionCode", self.code, write_fail = True)
add_element (parent, "apiVersion", "1")
add_element (parent, "statusDescription", self.description)
return (parent, (self.code == completion_code.success))
def update_status (self, success):
"""
Update the completion code that was formatted in the response to represent the overall code.
:param success: A flag indicating the success state of the request.
"""
if (success and (self.code != completion_code.success) and (self.code_element is not None)):
self.code_element.text = "Success"
class service_version:
"""
Response object for the service version.
"""
def __init__ (self, version):
"""
Initialize the service version response.
:param version: The result of the system query for the version information.
"""
self.version = version.get ("Package", None)
def format (self, parent):
"""
Format the server version in the XML document.
:param parent: The parent element that will contain the version information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "serviceVersion", self.version)
return (parent, NO_STATUS)
class led_state:
"""
Response object for the attention LED state.
"""
def __init__ (self, state):
"""
Initialize the attention LED state response.
:param state: The result of the system query for the LED state.
"""
self.state = state.get ("Manager LED Status", "NA")
if (self.state == "Unknown"):
self.state = "NA"
def format (self, parent):
"""
Format the attention LED state in the XML document.
:param parent: The parent element that will contain the LED state.
:return The parent element and a status flag for the response.
"""
add_element (parent, "ledState", self.state)
return (parent, NO_STATUS)
class blade_number:
"""
Response object to report the blade number.
"""
def __init__ (self, blade):
"""
Initialize the blade number response.
:param blade: The blade ID number to report.
"""
if (blade >= 0):
self.blade = str (blade)
else:
self.blade = str (-blade)
def format (self, parent):
"""
Format the blade number in the XML document.
:param parent: The parent element that will contain the blade number.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeNumber", self.blade)
return (parent, NO_STATUS)
class blade_type:
"""
    Response object to report the blade type.
"""
def __init__ (self, info):
"""
Initialize the blade type response.
        :param info: The information to determine the type of blade.
"""
self.blade = "Server"
def format (self, parent):
"""
Format the blade type in the XML document.
:param parent: The parent element that will contain the blade type.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeType", self.blade)
return (parent, NO_STATUS)
class blade_info:
"""
Response object for blade information.
"""
def __init__ (self, info):
"""
Initialize the blade info response.
:param info: The result of the system query for blade info.
"""
server = info.get ("Server", {})
if (check_success (info)):
self.type = "Server"
else:
self.type = None
self.serial = info.get ("SerialNumber", None)
self.asset_tag = info.get ("AssetTag", None)
self.fw = server.get ("BMCVersion", None)
self.hw = server.get ("HWVersion", None)
def format (self, parent):
"""
Format the blade information in the XML document.
:param parent: The parent element that will contain the blade information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeType", self.type)
add_element (parent, "serialNumber", self.serial)
add_element (parent, "assetTag", self.asset_tag)
add_element (parent, "firmwareVersion", self.fw)
add_element (parent, "hardwareVersion", self.hw)
return (parent, NO_STATUS)
class blade_info_versions:
"""
Response object for blade version information..
"""
def __init__ (self, info):
"""
Initialize the blade version info response.
:param info: The result of the system query for blade info.
"""
server = info.get ("Server", {})
self.bios = server.get ("BiosVersion", None)
self.cpld = server.get ("CpldVersion", None)
def format (self, parent):
"""
Format the blade version information in the XML document.
:param parent: The parent element that will contain the blade version information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "biosVersion", self.bios)
add_element (parent, "cpldVersion", self.cpld)
return (parent, NO_STATUS)
class blade_nic_info:
"""
Response object for NIC information on the blade.
"""
@staticmethod
def get_nic_list (mac):
"""
Get the list of NIC information objects for the system.
:param mac: The result of the system query for blade NIC information.
:return A list contaning response objects for the NIC information.
"""
nics = []
if (mac):
status = request_status (mac)
nic1 = blade_nic_info (mac)
nics.append (response_category ("NicInfo", status = status, results = [nic1]))
if (check_success (mac)):
status = request_status (set_failure_dict ("Not Present", "Success"))
nic2 = blade_nic_info ()
nics.append (response_category ("NicInfo", status = status, results = [nic2]))
return nics
def __init__ (self, nic = None):
"""
Initialize the blade NIC information.
:param nic: The result of the system query for blade NIC information.
"""
if (nic):
self.id = "1" if check_success (nic) else "0"
self.mac = nic.get ("MAC1", None)
else:
self.id = "2"
self.mac = None
def format (self, parent):
"""
Format the blade NIC information in the XML document.
:param parent: The parent element that will contain the NIC information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "deviceId", self.id)
add_element (parent, "macAddress", self.mac)
return (parent, NO_STATUS)
class blade_default_power_state:
"""
Response object for the blade default power state.
"""
def __init__ (self, state):
"""
Initialize the blade default power state response.
:param state: The result of the system query for the blade default power state.
"""
self.state = state.get ("Default Power State", "NA")
def format (self, parent):
"""
Format the blade default power state in the XML document.
:param parent: The parent element that will contain the default power state.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeState", self.state)
return (parent, NO_STATUS)
class chassis_controller_info:
"""
    Response object for the chassis controller information.
"""
def __init__ (self, info):
"""
Initialize the controller information.
:param info: The result of the system query for controller information.
"""
self.serial = info.get ("Board Serial", None)
self.asset_tag = info.get ("Product Assettag", None)
self.fw = "NA" if (check_success (info)) else None
self.hw = info.get ("Board Version", None)
self.sw = info.get ("Package", None)
self.uptime = info.get ("Up Time", None)
def format (self, parent):
"""
Format the chassis controller information in the XML document.
:param parent: The parent element that will contain the controller information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "serialNumber", self.serial)
add_element (parent, "assetTag", self.asset_tag)
add_element (parent, "firmwareVersion", self.fw)
add_element (parent, "hardwareVersion", self.hw)
add_element (parent, "softwareVersion", self.sw)
add_element (parent, "systemUptime", self.uptime)
return (parent, NO_STATUS)
class chassis_network_info:
"""
Response object for a chassis network interface.
"""
@staticmethod
def build_network_property (nic):
"""
Create a response object for a single chassis network interface.
:param nic: The result of the system query for the NIC.
:return The NIC response object.
"""
status = request_status (nic)
eth = chassis_network_info (nic)
return response_category ("ChassisNetworkProperty", status = status, results = [eth])
@staticmethod
def build_network_property_collection (*nics):
"""
Create a response object for a collection of network interfaces.
:param nics: The results of the system queries for the NICs that should be in the
collection.
:return The network property collection response object.
"""
collection = []
for nic in nics:
collection.append (chassis_network_info.build_network_property (nic))
return response_category ("chassisNetworkPropertyCollection", results = collection)
@staticmethod
def get_network_properties (*nics):
"""
Create a response object for the chassis network properties.
:param nics: The list of results from the system queries for the NICs that will be reported.
:return The network properties response object.
"""
network = chassis_network_info.build_network_property_collection (*nics)
return response_category ("networkProperties",
status = request_status.get_placeholder_status (), results = [network])
def __init__ (self, info):
"""
Initialize the network interface information.
:param info: The result of the system query for network information.
"""
ip4 = info.get ("IPv4Addresses", {})
ip6 = info.get ("IPv6Addresses", {})
self.mac = info.get ("MACAddress", None)
self.ip4 = ip4.get ("Address", None)
self.subnet = ip4.get ("SubnetMask", None)
self.gateway = ip4.get ("Gateway", None)
self.ip6 = ip6.get ("Address", None)
self.prefix = ip6.get ("PrefixLength", None)
self.hostname = info.get ("Hostname", None)
self.dhcp = ip4.get ("AddressOrigin", None)
if (self.dhcp):
self.dhcp = "true" if (self.dhcp == "DHCP") else "false"
if (self.ip4 and self.ip6):
self.ip = "{0}, {1}".format (self.ip4, self.ip6)
elif (self.ip4):
self.ip = self.ip4
elif (self.ip6):
self.ip = self.ip6
else:
self.ip = None
if (self.subnet and self.prefix):
self.mask = "{0}, {1}".format (self.subnet, self.prefix)
elif (self.subnet):
self.mask = self.subnet
elif (self.prefix):
self.mask = self.prefix
else:
self.mask = None
def format (self, parent):
"""
Format the chassis network interface information in the XML document.
:param parent: The parent element that will contain the network interface information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "macAddress", self.mac)
add_element (parent, "ipAddress", self.ip)
add_element (parent, "subnetMask", self.mask)
add_element (parent, "gatewayAddress", self.gateway)
ElementTree.SubElement (parent, "dnsAddress")
ElementTree.SubElement (parent, "dhcpServer", {"i:nil" : "true"})
ElementTree.SubElement (parent, "dnsDomain", {"i:nil" : "true"})
add_element (parent, "dnsHostName", self.hostname, nil = True)
add_element (parent, "dhcpEnabled", self.dhcp)
return (parent, NO_STATUS)
class chassis_psu_info:
"""
Response object for a chassis power supply.
"""
@staticmethod
def get_psu_list (psu):
"""
        Get the list of PSU information objects for the response.
:param psu: The result of the system query for power supply information.
:return The list of PSU information objects.
"""
psu_list = []
for i in range (1, 7):
status = request_status (psu)
info = chassis_psu_info (i, psu)
psu_list.append (response_category ("PsuInfo", status = status, results = [info]))
return psu_list
def __init__ (self, psu_id, psu):
"""
Initialize the PSU information.
:param psu_id: The ID of the PSU.
:param psu: The result of the system query for power supply information.
"""
self.id = str (psu_id)
self.serial = psu.get ("Board Serial", None)
self.state = "ON" if (check_success (psu)) else "NA"
self.type = None
if (psu_id == 1):
self.power = psu.get ("Feed1Phase1PowerInWatts", -1)
elif (psu_id == 2):
self.power = psu.get ("Feed1Phase2PowerInWatts", -1)
elif (psu_id == 3):
self.power = psu.get ("Feed1Phase3PowerInWatts", -1)
elif (psu_id == 4):
self.power = psu.get ("Feed2Phase1PowerInWatts", -1)
elif (psu_id == 5):
self.power = psu.get ("Feed2Phase2PowerInWatts", -1)
elif (psu_id == 6):
self.power = psu.get ("Feed2Phase3PowerInWatts", -1)
else:
self.power = psu.get ("PowerDrawnInWatts", -1)
self.power = str (int (self.power))
def format (self, parent):
"""
Format the PSU information in the XML document.
:param parent: The parent element that will contain the PSU information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "id", self.id)
add_element (parent, "serialNumber", self.serial)
add_element (parent, "state", self.state)
add_element (parent, "powerOut", self.power)
add_element (parent, "deviceType", self.type)
return (parent, NO_STATUS)
class chassis_blade_info:
"""
Response object for information about a blade in the chassis.
"""
@staticmethod
def get_blade_info (blade_id, info):
"""
Get the complete response object for chassis blade information.
:param blade_id: The slot ID for the blade.
:param info: The result of the system query for the blade information.
:return The chassis blade information response object.
"""
status = request_status (info)
blade = chassis_blade_info (blade_id, info)
mac = response_category ("bladeMacAddress", results = blade_nic_info.get_nic_list (info))
return response_category ("BladeInfo", status = status, results = [blade, mac])
@staticmethod
def get_blade_list (blades):
"""
Get the list of chassis blade information.
:param blades: The list of system query results for all blades.
:return A list of blade information response objects.
"""
blade_list = []
for blade, info in enumerate (blades, 1):
blade_list.append (chassis_blade_info.get_blade_info (blade, info))
return blade_list
def __init__ (self, blade_id, info):
"""
Initialize the chassis blade information.
:param blade_id: The slot ID for the blade.
:param info: The result of the system query for the blade information.
"""
self.id = info.get ("Slot Id", str (blade_id))
self.guid = info.get ("GUID", "00000000-0000-0000-0000-000000000000")
self.name = "BLADE{0}".format (self.id)
self.state = info.get ("Port State", "NA").upper ()
def format (self, parent):
"""
Format the chassis blade information in the XML document.
:param parent: The parent element that will contain the blade information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeNumber", self.id)
add_element (parent, "bladeGuid", self.guid)
add_element (parent, "bladeName", self.name)
add_element (parent, "powerState", self.state)
return (parent, NO_STATUS)
class chassis_battery_info:
"""
Response object for chassis battery information.
"""
@staticmethod
def get_battery_list ():
"""
Get the list of chassis battery information.
:return A list of battery information response objects.
"""
battery_list = []
for i in range (1, 7):
status = request_status (set_success_dict ())
info = chassis_battery_info (i)
battery_list.append (response_category ("BatteryInfo", status = status,
results = [info]))
return battery_list
def __init__ (self, battery_id):
"""
Initialize the chassis battery information.
:param battery_id: The ID for the battery.
"""
self.id = str (battery_id)
self.presence = "0"
self.power = "0"
self.charge = "0"
self.fault = "0"
def format (self, parent):
"""
Format the chassis battery information in the XML document.
:param parent: The parent element that will contain the battery information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "id", self.id)
add_element (parent, "presence", self.presence)
add_element (parent, "batteryPowerOutput", self.power)
add_element (parent, "batteryChargeLevel", self.power)
add_element (parent, "faultDetected", self.power)
return (parent, NO_STATUS)
class chassis_fan_info:
"""
Response object for chassis fan information.
"""
@staticmethod
def get_fan_list ():
"""
Get the list of chassis fan information.
:return A list of fan information response objects.
"""
fan_list = []
for i in range (1, 7):
status = request_status (set_success_dict ())
info = chassis_fan_info (i)
fan_list.append (response_category ("FanInfo", status = status, results = [info]))
return fan_list
def __init__ (self, fan_id):
"""
Initialize the chassis fan information.
        :param fan_id: The ID for the fan.
"""
self.id = str (fan_id)
self.healthy = "true"
self.speed = "0"
self.type = None
def format (self, parent):
"""
Format the chassis fan information in the XML document.
:param parent: The parent element that will contain the fan information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "fanId", self.id)
add_element (parent, "isFanHealthy", self.healthy)
add_element (parent, "fanSpeed", self.speed)
add_element (parent, "fanType", self.type)
return (parent, NO_STATUS)
class chassis_temp:
"""
Response object for the inlet temperature.
"""
def __init__ (self, temp):
"""
Initialize the inlet temperature response.
        :param temp: The result of the system query for the inlet temperature.
"""
self.temp = temp.get ("Temperature", None)
if (self.temp):
self.temp = str (self.temp)
def format (self, parent):
"""
Format the inlet temperature in the XML document.
:param parent: The parent element that will contain the inlet temperature.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeInletTemp", self.temp)
return (parent, NO_STATUS)
class power_state:
"""
Response object for the power port state.
"""
def __init__ (self, state):
"""
        Initialize the power port state response.
:param state: The result of the system query for the power state.
"""
self.decompress = state.get ("Decompress", "0")
self.state = state.get ("Port State", "NA")
def format (self, parent):
"""
Format the power port state in the XML document.
:param parent: The parent element that will contain the power port state.
:return The parent element and a status flag for the response.
"""
add_element (parent, "Decompression", self.decompress)
add_element (parent, "powerState", self.state)
return (parent, NO_STATUS)
class blade_state:
"""
Response object for the blade power state.
"""
def __init__ (self, state):
"""
        Initialize the blade power state response.
:param state: The result of the system query for the blade power state.
"""
self.state = state.get ("State", "NA")
def format (self, parent):
"""
Format the blade power state in the XML document.
:param parent: The parent element that will contain the blade power state.
:return The parent element and a status flag for the response.
"""
add_element (parent, "bladeState", self.state)
return (parent, NO_STATUS)
class ac_port_state:
"""
Response object to report the AC port state.
"""
def __init__ (self, port, state):
"""
Initialize the AC port state response.
:param port: The port ID for the response.
:param state: The result of the system query for the AC port state.
"""
self.port = str (port)
self.state = state.get ("Relay", "NA").upper ()
def format (self, parent):
"""
Format the AC port state in the XML document.
:param parent: The parent element that will contain the AC port state.
:return The parent element and a status flag for the response.
"""
add_element (parent, "portNo", self.port)
add_element (parent, "powerState", self.state)
return (parent, NO_STATUS)
class blade_power_limit:
"""
Response object for the blade power limit.
"""
def __init__ (self, limit):
"""
Initialize the blade power limit response.
:param limit: The result of the system query for the power limit.
"""
self.state = limit.get ("StaticState", "false")
self.limit = limit.get ("StaticLimit", "-1")
if (self.state.upper () == "POWER LIMIT ACTIVE"):
self.state = "true"
elif (self.state.upper () == "NO ACTIVE POWER LIMIT"):
self.state = "false"
self.limit = self.limit.split (" ")[0]
def format (self, parent):
"""
Format the blade power limit in the XML document.
:param parent: The parent element that will contain the blade power limit.
:return The parent element and a status flag for the response.
"""
add_element (parent, "isPowerLimitActive", self.state)
add_element (parent, "powerLimit", self.limit)
return (parent, NO_STATUS)
class blade_power_reading:
"""
    Response object for the blade power reading.
"""
def __init__ (self, power):
"""
Initialize the blade power reading response.
:param power: The result of the system query for the power reading.
"""
self.power = power.get ("PowerReading", "-1")
def format (self, parent):
"""
        Format the blade power reading in the XML document.
:param parent: The parent element that will contain the blade power reading.
:return The parent element and a status flag for the response.
"""
add_element (parent, "powerReading", self.power)
return (parent, NO_STATUS)
class next_boot:
"""
Response object for blade next boot information.
"""
def __init__ (self, boot):
"""
Initialize the blade next boot response.
:param boot: The result of the system query for blade next boot information.
"""
self.boot = boot.get ("BootSourceOverrideTarget", "Unknown")
self.persist = boot.get ("BootSourceOverrideEnabled", "false")
self.uefi = boot.get ("BootSourceOverrideMode", "false")
self.instance = "0"
if ("No override" in self.boot):
self.boot = "NoOverride"
elif ("PXE" in self.boot):
self.boot = "ForcePxe"
elif ("Hard-Drive" in self.boot):
self.boot = "ForceDefaultHdd"
elif ("BIOS" in self.boot):
self.boot = "ForceIntoBiosSetup"
elif ("Floppy" in self.boot):
self.boot = "ForceFloppyOrRemovable"
else:
self.boot = "Unknown"
if (self.persist == "Persistent"):
self.persist = "true"
else:
self.persist = "false"
if (self.uefi == "UEFI"):
self.uefi = "true"
else:
self.uefi = "false"
def format (self, parent):
"""
Format the blade next boot information in the XML document.
:param parent: The parent element that will contain the next boot information.
:return The parent element and a status flag for the response.
"""
add_element (parent, "nextBoot", self.boot)
add_element (parent, "persistence", self.persist)
add_element (parent, "uefi", self.uefi)
add_element (parent, "bootInstance", self.instance)
return (parent, NO_STATUS)
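# Editor's illustrative sketch (not part of the original module): composing a
# formatted response from the classes above. The response tag and the status
# dict are hypothetical stand-ins for a real system query result.
def _example_format_blade_state ():
    status = request_status (set_success_dict ())
    state = blade_state ({"State": "ON"})
    response = response_category ("BladeStateResponse", status = status,
        results = [state])
    element, success = response.format ()
    return ElementTree.tostring (element)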
| 32.347531 | 100 | 0.572408 |
daf9c059842880d542a5a00badf6026bbe5bed0e
| 901 |
py
|
Python
|
src/onegov/activity/matching/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/activity/matching/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/activity/matching/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.activity.matching.core import deferred_acceptance
from onegov.activity.matching.core import deferred_acceptance_from_database
from onegov.activity.matching.interfaces import MatchableBooking
from onegov.activity.matching.interfaces import MatchableOccasion
from onegov.activity.matching.score import PreferAdminChildren
from onegov.activity.matching.score import PreferGroups
from onegov.activity.matching.score import PreferInAgeBracket
from onegov.activity.matching.score import PreferMotivated
from onegov.activity.matching.score import PreferOrganiserChildren
from onegov.activity.matching.score import Scoring
__all__ = [
'deferred_acceptance',
'deferred_acceptance_from_database',
'MatchableBooking',
'MatchableOccasion',
'Scoring',
'PreferGroups',
'PreferMotivated',
'PreferInAgeBracket',
'PreferOrganiserChildren',
'PreferAdminChildren',
]
| 37.541667 | 75 | 0.824639 |
c183780fb39ab5e0b76a41aa4fca9ee4e5451faf
| 782 |
py
|
Python
|
py/jpy/src/test/python/jpy_java_embeddable_test.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 55 |
2021-05-11T16:01:59.000Z
|
2022-03-30T14:30:33.000Z
|
py/jpy/src/test/python/jpy_java_embeddable_test.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 943 |
2021-05-10T14:00:02.000Z
|
2022-03-31T21:28:15.000Z
|
py/jpy/src/test/python/jpy_java_embeddable_test.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 29 |
2021-05-10T11:33:16.000Z
|
2022-03-30T21:01:54.000Z
|
import unittest
import jpyutil
jpyutil.init_jvm(jvm_maxmem='512M', jvm_classpath=['target/test-classes', 'target/classes'])
import jpy
class TestJavaTests(unittest.TestCase):
def testStartingAndStoppingIfAvailable(self):
PyLibTest = jpy.get_type('org.jpy.EmbeddableTest')
PyLibTest.testStartingAndStoppingIfAvailable()
def testPassStatement(self):
PyLibTest = jpy.get_type('org.jpy.EmbeddableTest')
PyLibTest.testPassStatement()
def testPrintStatement(self):
PyLibTest = jpy.get_type('org.jpy.EmbeddableTest')
PyLibTest.testPrintStatement()
def testIncrementByOne(self):
PyLibTest = jpy.get_type('org.jpy.EmbeddableTest')
PyLibTest.testIncrementByOne()
if __name__ == '__main__':
print('\nRunning ' + __file__)
unittest.main()
| 27.928571 | 92 | 0.755754 |
8220c49e6caf26f7771a2a0550aadc76fed525a4
| 475 |
py
|
Python
|
exercises/zh/solution_02_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/zh/solution_02_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/zh/solution_02_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
from spacy.lang.zh import Chinese
nlp = Chinese()
# Import the Doc and Span classes
from spacy.tokens import Doc, Span
words = ["我", "喜欢", "周", "杰伦"]
spaces = [False, False, False, False]
# Create a doc from the words and spaces
doc = Doc(nlp.vocab, words=words, spaces=spaces)
print(doc.text)
# Create a span for "周杰伦" (Jay Chou) in the doc and label it "PERSON"
span = Span(doc, 2, 4, label="PERSON")
print(span.text, span.label_)
# Add this span to the doc's entities
doc.ents = [span]
# Print the text and label of every entity
print([(ent.text, ent.label_) for ent in doc.ents])
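# Editor's note (addition): the Span above uses token indices, not character
# offsets. For character offsets one could use doc.char_span, which returns
# None if the offsets don't align with token boundaries, e.g.:
# span = doc.char_span(3, 6, label="PERSON")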
| 19.791667 | 51 | 0.705263 |
68bb87ab04e5ad43b0606b6bdb17bca3c0bab848
| 8,222 |
py
|
Python
|
PySQL-master/sql.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
PySQL-master/sql.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
PySQL-master/sql.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/usr/bin/env python3
# coding=utf-8
"""
- PySQL
# Usage = python3
# Author = Karjoq-Kun With Dobleh-Kun
# Source : https://github.com/rezadkim
"""
# COLORS #
t = "\033[1;97m"
m = "\033[1;91m"
u = "\033[1;95m"
h = "\033[1;92m"
k = "\033[1;93m"
b = "\033[1;96m"
logo = t+"""
╔═╗"""+m+"""┬ ┬"""+t+"""╔═╗╔═╗ ╦
╠═╝"""+m+"""└┬┘"""+t+"""╚═╗║═╬╗║
╩ """+m+"""┴ """+t+"""╚═╝╚═╝╚╩═╝\nCreator > """+h+"""Rezadkim\n"""+t+"""Source > """+h+"""https://github.com/rezadkim"""
import os,sys,time
import json
import mysql.connector
def tyk():
os.system("clear")
print(logo)
print(k+36*"-")
    q = input(t+"["+h+"?"+t+"] is your localhost server already running [y/n]: "+h)
if q =="y":
login()
else:
exit(t+"["+m+"!"+t+"] Exit")
def login():
global hot,us,ps
try:
os.system("clear")
print(logo)
print(k+36*"-")
hot = input(t+"["+b+"+"+t+"] Host ("+b+"localhost"+t+") : "+h)
us = input(t+"["+b+"+"+t+"] Username : "+h)
ps = input(t+"["+b+"+"+t+"] Password : "+h)
print(t+"["+h+"%"+t+"] Menghubungkan ke Database ...")
time.sleep(2)
db = mysql.connector.connect(host=hot,user=us,passwd=ps)
if db.is_connected():
print(t+"["+h+"*"+t+"] Berhasil terhubung ke database")
p = ("{'host':'"+hot+"', 'username':'"+us+"', 'password':'"+ps+"'}")
s = open("config.json","w")
s.write(p)
s.close()
input("["+b+"Enter"+t+"] Untuk melanjutkan ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
def main():
os.system("clear")
print(logo)
print(k+36*"-")
print(t+"["+b+"*"+t+"] Host "+m+": "+h+hot)
print(t+"["+b+"*"+t+"] User "+m+": "+h+us)
print(k+10*"-")
print(t+"["+h+"1"+t+"] Create Database")
print(t+"["+h+"2"+t+"] Create Table")
print(t+"["+h+"3"+t+"] Insert Data")
print(t+"["+h+"4"+t+"] Show all data from the table")
print(t+"["+h+"5"+t+"] Edit data")
print(t+"["+h+"6"+t+"] Delete data")
print(t+"["+h+"7"+t+"] Search data")
print(t+"["+h+"8"+t+"]"+m+" Exit")
p = input(t+"\n["+b+">>"+t+"] Choose : "+h)
if p =="1":
create_db()
elif p =="2":
create_tb()
elif p =="3":
insert_dt()
elif p =="4":
show_tb()
elif p =="5":
edit()
elif p =="6":
delete()
elif p =="7":
search()
elif p =="8":
print(t+"["+m+"!"+t+"] Exit")
exit()
else:
exit(t+"["+m+"!"+t+"] Exit")
#---------------------------------------------------------------------------------------------------#
def create_db():
try:
db = mysql.connector.connect(host=hot,user=us,passwd=ps)
name = input(t+"\n["+b+"+"+t+"] Name Database : "+h)
print(t+"["+h+"%"+t+"] Membuat Database baru ...")
time.sleep(2)
cursor = db.cursor()
cursor.execute("CREATE DATABASE "+name)
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
print(t+"["+h+"*"+t+"] Database berhasil dibuat : "+u+name)
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
def create_tb():
try:
debe = input(t+"\n["+h+"+"+t+"] Input name database : "+h)
name_tb = input(t+"["+h+"+"+t+"] Create name table : "+h)
db = mysql.connector.connect(host=hot,user=us,passwd=ps,database=debe)
cursor = db.cursor()
sql = "CREATE TABLE "+name_tb+" (user_id INT AUTO_INCREMENT PRIMARY KEY,name VARCHAR(255),address Varchar(255),username Varchar(255),password Varchar(255))"
print(t+"["+h+"%"+t+"] Membuat tabel baru ...")
time.sleep(2)
cursor.execute(sql)
print(t+"["+h+"*"+t+"] Tabel berhasil dibuat : "+u+name_tb)
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
def insert_dt():
try:
debe = input(t+"\n["+h+"+"+t+"] Input name database : "+h)
name_tb = input(t+"["+h+"+"+t+"] Input name table : "+h)
db = mysql.connector.connect(host=hot,user=us,passwd=ps,database=debe)
print(t+"["+h+"?"+t+"] Enter data below to create data table contents")
nama = input(t+"Name : "+h)
address = input(t+"Address : "+h)
username = input(t+"Username : "+h)
password = input(t+"Password : "+h)
val = (nama, address, username, password)
cursor = db.cursor()
sql = "INSERT INTO "+name_tb+" (name, address, username, password) VALUES (%s, %s, %s, %s)"
print(t+"["+h+"%"+t+"] Memasukkan data baru ...")
time.sleep(2)
cursor.execute(sql, val)
db.commit()
print(t+"["+h+"*"+t+"] data telah ditambahkan")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
def show_tb():
try:
debe = input(t+"\n["+h+"+"+t+"] Input name database : "+h)
tb = input(t+"["+h+"+"+t+"] Input name table : "+h)
print(t+"["+h+"%"+t+"] Dump all data ...")
time.sleep(2)
db = mysql.connector.connect(host=hot,user=us,passwd=ps,database=debe)
cursor = db.cursor()
sql = ("SELECT * FROM "+tb)
cursor.execute(sql)
hasil = cursor.fetchall()
        if not hasil:
            print(t+"["+k+"!"+t+"] No data found")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
else:
print(k+40*"-"+h)
for isi in hasil:
print(isi)
print(k+40*"-"+h)
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
def edit():
try:
debe = input(t+"\n["+h+"+"+t+"] Input name database : "+h)
tb = input(t+"["+h+"+"+t+"] Input name table : "+h)
db = mysql.connector.connect(host=hot,user=us,passwd=ps,database=debe)
cursor = db.cursor()
sql = ("SELECT * FROM "+tb)
cursor.execute(sql)
hasil = cursor.fetchall()
        if not hasil:
            print(t+"["+k+"!"+t+"] No data found")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
else:
print(k+40*"-"+h)
for isi in hasil:
print(isi)
print(k+40*"-"+h)
user_id = input(t+"["+h+"+"+t+"] Choose id user : "+h)
print(t+"["+h+"?"+t+"] Update data contents")
nama = input(t+"New Name : "+h)
address = input(t+"New Address : "+h)
username = input(t+"New Username : "+h)
password = input(t+"New Password : "+h)
sql = "UPDATE "+tb+" SET name=%s, address=%s, username=%s, password=%s WHERE user_id=%s"
val = (nama, address, username, password, user_id)
cursor.execute(sql, val)
db.commit()
print(t+"["+h+"*"+t+"] data telah diperbarui")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
def delete():
try:
debe = input(t+"\n["+h+"+"+t+"] Input name database : "+h)
tb = input(t+"["+h+"+"+t+"] Input name table : "+h)
db = mysql.connector.connect(host=hot,user=us,passwd=ps,database=debe)
cursor = db.cursor()
sql = ("SELECT * FROM "+tb)
cursor.execute(sql)
hasil = cursor.fetchall()
        if not hasil:
            print(t+"["+k+"!"+t+"] No data found")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
else:
print(k+40*"-"+h)
for isi in hasil:
print(isi)
print(k+40*"-"+h)
user_id = input(t+"["+h+"+"+t+"] Choose id user : "+h)
sql = "DELETE FROM "+tb+" WHERE user_id=%s"
val = (user_id,)
cursor.execute(sql, val)
db.commit()
print(t+"["+h+"*"+t+"] data telah dihapus")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
def search():
try:
debe = input(t+"\n["+h+"+"+t+"] Input name database : "+h)
tb = input(t+"["+h+"+"+t+"] Input name table : "+h)
db = mysql.connector.connect(host=hot,user=us,passwd=ps,database=debe)
cursor = db.cursor()
key = input(t+"["+h+"+"+t+"] Query : "+h)
sql = "SELECT * FROM "+tb+" WHERE name LIKE %s OR address LIKE %s"
val = ("%{}%".format(key), "%{}%".format(key))
cursor.execute(sql, val)
hasil = cursor.fetchall()
        if not hasil:
            print(t+"["+k+"!"+t+"] No data found")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
else:
print(k+40*"-"+h)
for isi in hasil:
print(isi)
print(k+40*"-"+h)
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
except:
print(t+"["+m+"!"+t+"] Error")
input(t+"["+b+"Enter"+t+"] Untuk kembali ke menu ...")
main()
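# Editor's note (not in the original script): database and table names above
# are concatenated directly into SQL statements, which is injectable. A
# defensive sketch is to whitelist identifiers before interpolating them:
def safe_identifier(name):
    if not name.isidentifier():
        raise ValueError("invalid SQL identifier: " + name)
    return name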
tyk()
| 29.898182 | 158 | 0.541109 |
68bec5f62665ea179108d683571a7b699a736f26
| 1,902 |
py
|
Python
|
code/selfish_proxy/strategy/strategies.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/selfish_proxy/strategy/strategies.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/selfish_proxy/strategy/strategies.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | 1 |
2019-06-05T09:10:30.000Z
|
2019-06-05T09:10:30.000Z
|
selfish_mining_strategy = \
[
[ # irrelevant
['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],
['w', '*', 'a', '*', '*', '*', '*', '*', '*', '*'],
['w', 'w', '*', 'a', '*', '*', '*', '*', '*', '*'],
['w', 'w', 'w', '*', 'a', '*', '*', '*', '*', '*'],
['w', 'w', 'w', 'w', '*', 'a', '*', '*', '*', '*'],
['w', 'w', 'w', 'w', 'w', '*', 'a', '*', '*', '*'],
['w', 'w', 'w', 'w', 'w', 'w', '*', 'a', '*', '*'],
['w', 'w', 'w', 'w', 'w', 'w', 'w', '*', 'a', '*'],
['w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', '*', 'a'],
['w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', '*']
],
[ # relevant
['*', 'a', '*', '*', '*', '*', '*', '*', '*', '*'],
['*', 'm', 'a', '*', '*', '*', '*', '*', '*', '*'],
['*', 'o', 'm', 'a', '*', '*', '*', '*', '*', '*'],
['*', 'w', 'o', 'm', 'a', '*', '*', '*', '*', '*'],
['*', 'w', 'w', 'o', 'm', 'a', '*', '*', '*', '*'],
['*', 'w', 'w', 'w', 'o', 'm', 'a', '*', '*', '*'],
['*', 'w', 'w', 'w', 'w', 'o', 'm', 'a', '*', '*'],
['*', 'w', 'w', 'w', 'w', 'w', 'o', 'm', 'a', '*'],
['*', 'w', 'w', 'w', 'w', 'w', 'w', 'o', 'm', 'a'],
['*', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'o', 'm']
],
[ # match
['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],
['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],
['*', 'w', '*', '*', '*', '*', '*', '*', '*', '*'],
['*', 'w', 'w', '*', '*', '*', '*', '*', '*', '*'],
['*', 'w', 'w', 'w', '*', '*', '*', '*', '*', '*'],
['*', 'w', 'w', 'w', 'w', '*', '*', '*', '*', '*'],
['*', 'w', 'w', 'w', 'w', 'w', '*', '*', '*', '*'],
['*', 'w', 'w', 'w', 'w', 'w', 'w', '*', '*', '*'],
['*', 'w', 'w', 'w', 'w', 'w', 'w', 'w', '*', '*'],
['*', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', '*']
]
]
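# Editor's sketch (not part of the original module; the index order and
# letter meanings are assumptions based on the usual selfish-mining action
# set): 'a' adopt, 'o' override, 'm' match, 'w' wait, '*' unreachable.
def lookup_action(fork_state, length_private, length_public):
    return selfish_mining_strategy[fork_state][length_private][length_public]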
| 47.55 | 59 | 0.099369 |
68f158bf3e6c2594b09794878631c6ca447de9ed
| 15,681 |
py
|
Python
|
cloudmesh/volume/oracle/Provider.py
|
cloudmesh/cloudmesh-volume
|
a7502d077f2fb7c6b1b7986da9595a36591b06cd
|
[
"Apache-2.0"
] | 1 |
2020-03-12T09:49:53.000Z
|
2020-03-12T09:49:53.000Z
|
cloudmesh/volume/oracle/Provider.py
|
cloudmesh/cloudmesh-volume
|
a7502d077f2fb7c6b1b7986da9595a36591b06cd
|
[
"Apache-2.0"
] | 1 |
2020-03-25T00:47:12.000Z
|
2020-03-30T23:32:00.000Z
|
cloudmesh/volume/oracle/Provider.py
|
cloudmesh/cloudmesh-volume
|
a7502d077f2fb7c6b1b7986da9595a36591b06cd
|
[
"Apache-2.0"
] | 5 |
2020-02-20T01:04:24.000Z
|
2020-05-05T14:36:30.000Z
|
import oci
from cloudmesh.common.console import Console
from cloudmesh.common.dotdict import dotdict
from cloudmesh.configuration.Config import Config
from cloudmesh.volume.VolumeABC import VolumeABC
from cloudmesh.mongo.CmDatabase import CmDatabase
class Provider(VolumeABC):
kind = "oracle"
sample = """
cloudmesh:
volume:
{name}:
cm:
active: true
heading: {name}
host: TBD
label: {name}
kind: oracle
version: TBD
service: volume
credentials:
version: TBD
user: TBD
fingerprint: TBD
key_file: oci_api_key.pem
pass_phrase: TBD
tenancy: TBD
compartment_id: TBD
region: TBD
availability_domain: TBD
default:
"""
output = {
"volume": {
"sort_keys": ["cm.name"],
"order": ["cm.name",
"cm.cloud",
"cm.kind",
"availability_domain",
"time_created",
"size_in_gbs",
"lifecycle_state",
"id"
],
"header": ["Name",
"Cloud",
"Kind",
"Availability Zone",
"Created At",
"Size(Gb)",
"Status",
"Id"
],
}
}
def update_dict(self, results):
"""
This function adds a cloudmesh cm dict to each dict in the list
elements.
        The cloud driver returns an object or a list of objects. With the
        dict method each object is converted to a dict. Typically this
        method is used internally.
:param results: the original dicts.
:return: The list with the modified dicts
"""
if results is None:
return None
d = []
for entry in results:
display_name = entry.__getattribute__("display_name")
availability_domain = entry.__getattribute__("availability_domain")
time_created = entry.__getattribute__("time_created")
size_in_gbs = entry.__getattribute__("size_in_gbs")
lifecycle_state = entry.__getattribute__("lifecycle_state")
attribute_id = entry.__getattribute__("id")
entry = {
"availability_domain": availability_domain,
"time_created": time_created,
"size_in_gbs": size_in_gbs,
"id": attribute_id,
"lifecycle_state": lifecycle_state
}
if "cm" not in entry:
entry['cm'] = {}
entry["cm"].update({
"cloud": self.cloud,
"kind": "volume",
"name": display_name,
})
d.append(entry)
return d
def __init__(self, name):
"""
Initialize provider. The default parameters are read from the
configuration file that is defined in yaml format.
:param name: name of cloud
"""
self.cloud = name
self.config = Config()["cloudmesh.volume.oracle.credentials"]
self.defaults = Config()["cloudmesh.volume.oracle.default"]
self.cm = CmDatabase()
def get_volume_id_from_name(self, block_storage, name):
"""
        This function gets the volume id from a volume name
:param block_storage: Block storage client object
:param name: volume name
:return: volume id
"""
v = block_storage.list_volumes(self.config['compartment_id'])
results = v.data
volume_id = None
for entry in results:
display_name = entry.__getattribute__("display_name")
if name == display_name:
volume_id = entry.__getattribute__("id")
break
return volume_id
def get_attachment_id_from_name(self, block_storage, name):
"""
        This function gets the attachment id from a volume name
:param block_storage: Block storage client object
:param name: Name of the volume
:return: Volume attachment id
"""
v = block_storage.list_volumes(self.config['compartment_id'])
results = v.data
attachment_id = None
for entry in results:
display_name = entry.__getattribute__("display_name")
if name == display_name:
tags = entry.__getattribute__("freeform_tags")
attachment_id = tags['attachment_id']
break
return attachment_id
def status(self, name):
"""
        This function gets the volume status, such as "in-use" or "available"
:param name: Volume name
:return: Volume_status
"""
try:
block_storage = oci.core.BlockstorageClient(self.config)
v = block_storage.list_volumes(self.config['compartment_id'])
volumes = v.data
result = []
entry = None
for entry in volumes:
display_name = entry.__getattribute__("display_name")
if name == display_name:
break
result.append(entry)
result = self.update_dict(result)
except Exception as e:
Console.error("Problem finding status", traceflag=True)
print(e)
raise RuntimeError
return result
def list(self, **kwargs):
"""
        This function lists volumes as follows:
        If NAME (volume_name) is specified, it prints info for NAME.
        If NAME (volume_name) is not specified, it prints info for all
        volumes.
:param kwargs: contains name of volume
:return: Dictionary of volumes
"""
try:
if kwargs and kwargs['refresh'] is False:
result = self.cm.find(cloud=self.cloud, kind='volume')
for key in kwargs:
if key == 'NAME' and kwargs['NAME']:
result = self.cm.find_name(name=kwargs['NAME'])
elif key == 'NAMES' and kwargs['NAMES']:
result = self.cm.find_names(names=kwargs['NAMES'])
else:
block_storage = oci.core.BlockstorageClient(self.config)
if kwargs and kwargs['NAME']:
v = block_storage.list_volumes(
self.config['compartment_id'])
results = v.data
entry = None
for entry in results:
display_name = entry.__getattribute__("display_name")
if kwargs["NAME"] == display_name:
break
result = [entry]
result = self.update_dict(result)
else:
v = block_storage.list_volumes(
self.config['compartment_id'])
results = v.data
result = self.update_dict(results)
except Exception as e:
Console.error("Problem listing volume", traceflag=True)
print(e)
raise RuntimeError
return result
def create(self, **kwargs):
"""
This function creates a new volume with default size of 50gb.
Default parameters are read from self.config.
:param kwargs: Contains Volume name
:return: Volume dictionary
"""
try:
arguments = dotdict(kwargs)
block_storage = oci.core.BlockstorageClient(self.config)
result = block_storage.create_volume(
oci.core.models.CreateVolumeDetails(
compartment_id=self.config['compartment_id'],
availability_domain=self.config['availability_domain'],
display_name=arguments.NAME
))
# wait for availability of volume
oci.wait_until(
block_storage,
block_storage.get_volume(result.data.id),
'lifecycle_state',
'AVAILABLE'
).data
v = block_storage.list_volumes(self.config['compartment_id'])
results = v.data
result = self.update_dict(results)
except Exception as e:
Console.error("Problem creating volume", traceflag=True)
print(e)
raise RuntimeError
return result
def attach(self, names=None, vm=None):
"""
This function attaches a given volume to a given instance
:param names: Names of Volumes
:param vm: Instance name
:return: Dictionary of volumes
"""
try:
compute_client = oci.core.ComputeClient(self.config)
# get instance id from VM name
i = compute_client.list_instances(self.config['compartment_id'])
instances = i.data
instance_id = None
for entry in instances:
display_name = entry.__getattribute__("display_name")
if vm == display_name:
instance_id = entry.__getattribute__("id")
break
# get volumeId from Volume name
block_storage = oci.core.BlockstorageClient(self.config)
volume_id = self.get_volume_id_from_name(block_storage, names[0])
# attach volume to vm
a = compute_client.attach_volume(
oci.core.models.AttachIScsiVolumeDetails(
display_name='IscsiVolAttachment',
instance_id=instance_id,
volume_id=volume_id
)
)
# tag volume with attachment id. This needed during detach.
block_storage.update_volume(
volume_id,
oci.core.models.UpdateVolumeDetails(
freeform_tags={'attachment_id': a.data.id},
))
# wait until attached
oci.wait_until(
compute_client,
compute_client.get_volume_attachment(
a.data.id),
'lifecycle_state',
'ATTACHED'
)
# return result after attach
v = block_storage.list_volumes(self.config['compartment_id'])
results = v.data
results = self.update_dict(results)
except Exception as e:
Console.error("Problem attaching volume", traceflag=True)
print(e)
raise RuntimeError
return results
def detach(self, name=None):
"""
This function detaches a given volume from an instance
:param name: Volume name
:return: Dictionary of volumes
"""
try:
compute_client = oci.core.ComputeClient(self.config)
block_storage = oci.core.BlockstorageClient(self.config)
attachment_id = self.get_attachment_id_from_name(block_storage,
name)
compute_client.detach_volume(attachment_id)
# wait for detachment
oci.wait_until(
compute_client,
compute_client.get_volume_attachment(attachment_id),
'lifecycle_state',
'DETACHED'
)
# return result after detach
v = block_storage.list_volumes(self.config['compartment_id'])
results = v.data
results = self.update_dict(results)
except Exception as e:
Console.error("Problem detaching volume", traceflag=True)
print(e)
raise RuntimeError
return results[0]
def delete(self, name=None):
"""
        This function deletes one volume.
:param name: Volume name
:return: Dictionary of volumes
"""
try:
block_storage = oci.core.BlockstorageClient(self.config)
volume_id = self.get_volume_id_from_name(block_storage, name)
if volume_id is not None:
block_storage.delete_volume(volume_id=volume_id)
# wait for termination
oci.wait_until(
block_storage,
block_storage.get_volume(volume_id),
'lifecycle_state',
'TERMINATED'
).data
v = block_storage.list_volumes(self.config['compartment_id'])
results = v.data
result = self.update_dict(results)
except Exception as e:
Console.error("Problem deleting volume", traceflag=True)
print(e)
raise RuntimeError
return result
def add_tag(self, **kwargs):
"""
        This function adds a tag to a volume.
:param kwargs:
NAME: name of volume
key: name of tag
value: value of tag
:return: Dictionary of volume
"""
try:
name = kwargs['NAME']
key = kwargs['key']
value = kwargs['value']
block_storage = oci.core.BlockstorageClient(self.config)
volume_id = self.get_volume_id_from_name(block_storage, name)
block_storage.update_volume(
volume_id,
oci.core.models.UpdateVolumeDetails(
freeform_tags={key: value},
)
)
result = self.list(NAME=name, refresh=True)[0]
except Exception as e:
Console.error("Problem adding tag", traceflag=True)
print(e)
raise RuntimeError
return result
def migrate(self,
name=None,
fvm=None,
tvm=None,
fregion=None,
tregion=None,
fservice=None,
tservice=None,
fcloud=None,
tcloud=None,
cloud=None,
region=None,
service=None):
"""
Migrate volume from one vm to another vm.
:param name: name of volume
:param fvm: name of vm where volume will be moved from
:param tvm: name of vm where volume will be moved to
:param fregion: the region where the volume will be moved from
:param tregion: region where the volume will be moved to
:param fservice: the service where the volume will be moved from
:param tservice: the service where the volume will be moved to
:param fcloud: the provider where the volume will be moved from
:param tcloud: the provider where the volume will be moved to
:param cloud: the provider where the volume will be moved within
:param region: the region where the volume will be moved within
:param service: the service where the volume will be moved within
:return: dict
"""
raise NotImplementedError
def sync(self,
volume_id=None,
zone=None,
cloud=None):
"""
sync contents of one volume to another volume
:param volume_id: id of volume A
:param zone: zone where new volume will be created
:param cloud: the provider where volumes will be hosted
:return: str
"""
raise NotImplementedError
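# Editor's illustrative sketch (not part of the original module): typical
# driver code for this Provider, assuming a valid "oracle" section in the
# cloudmesh configuration. The volume name is a hypothetical example.
def _example_provider_usage():
    provider = Provider(name="oracle")
    provider.create(NAME="demo-volume")         # new volume (default 50gb)
    print(provider.status(name="demo-volume"))  # poll its lifecycle state
    provider.delete(name="demo-volume")         # terminate and clean up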
| 34.846667 | 79 | 0.536254 |
2e41059737f4d221e7f02145402803f82b9e1f7c
| 2,196 |
py
|
Python
|
tests/MyFuncLogin.py
|
mribrgr/StuRa-Mitgliederdatenbank
|
87a261d66c279ff86056e315b05e6966b79df9fa
|
[
"MIT"
] | 8 |
2019-11-26T13:34:46.000Z
|
2021-06-21T13:41:57.000Z
|
src/tests/MyFuncLogin.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | 93 |
2019-12-16T09:29:10.000Z
|
2021-04-24T12:03:33.000Z
|
src/tests/MyFuncLogin.py
|
Sumarbrander/Stura-Mitgliederdatenbank
|
691dbd33683b2c2d408efe7a3eb28e083ebcd62a
|
[
"MIT"
] | 2 |
2020-12-03T12:43:19.000Z
|
2020-12-22T21:48:47.000Z
|
from django.urls import reverse
def loginAsLukasAdmin(self):
"""
    Opens a browser instance and logs in as admin with the account testlukasadmin.
:param self:
:type self:
:return: No return Value
"""
    # Open a browser
try:
self.browser.get(self.live_server_url)
except BaseException:
print('Error in opening login page')
    # Find all required elements on the page
try:
entUsername = self.browser.find_element_by_id('id_username')
entPassword = self.browser.find_element_by_id('id_password')
btnLogin = self.browser.find_element_by_id('btn-login')
except BaseException:
print("Es wurden nicht alle Objekte auf der Seite gefunden")
    # Enter the login credentials
entUsername.send_keys('testlukasadmin')
entPassword.send_keys('0123456789test')
btnLogin.click()
# Check Login Success
self.assertEqual(
self.browser.current_url,
self.live_server_url +
reverse('mitglieder:homepage'),
msg="Konnte nicht angemeldet werden bzw. Weiterleitung nicht erfolgt")
pass
def loginAsLukasUser(self):
"""
    Opens a browser instance and logs in as user with the account testlukas.
:param self:
:type self:
:return: No return Value
"""
    # Open a browser
try:
self.browser.get(self.live_server_url)
except BaseException:
print('Error in opening login page')
    # Find all required elements on the page
try:
entUsername = self.browser.find_element_by_id('id_username')
entPassword = self.browser.find_element_by_id('id_password')
btnLogin = self.browser.find_element_by_id('btn-login')
except BaseException:
print("Es wurden nicht alle Objekte auf der Seite gefunden")
    # Enter the login credentials
entUsername.send_keys('testlukas')
entPassword.send_keys('0123456789test')
btnLogin.click()
# Check Login Success
self.assertEqual(
self.browser.current_url,
self.live_server_url +
reverse('mitglieder:homepage'),
msg="Konnte nicht angemeldet werden bzw. Weiterleitung nicht erfolgt")
pass
| 28.894737 | 84 | 0.670765 |
cf122f5b78a949507c5b594af369c2e1f590face
| 2,027 |
py
|
Python
|
src/main/python/tools/json_builder.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/tools/json_builder.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/tools/json_builder.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | 1 |
2021-04-14T00:45:38.000Z
|
2021-04-14T00:45:38.000Z
|
import numpy
import json
from cluster.json_viewer import ClusterViwer
from context import resource_manager
def cluster_view_json_builder(clusters=[]):
"""
    Store the JSON-format objects passed in by the user in a JSON file.
    newlist (a list) first reads the existing data from the file to avoid
    losing it, then the newly added JSON objects are appended to newlist
    as list entries.
:param clusters:
:return:
"""
for c in clusters :
if not isinstance(c, ClusterViwer):
            raise Exception("Wrong data type: not a ClusterViwer")
else:
newlist=[]
try:
with open(resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+'data'+resource_manager.getSeparator()+'json'+resource_manager.getSeparator()+'ssdfdssdf.json','r')as R:
readed=json.load(R)
for r in readed:
newlist.append(r)
except:
print('The file is empty!')
newlist.append(c.tolist())
with open(resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+'data'+resource_manager.getSeparator()+'json'+resource_manager.getSeparator()+'ssdfdssdf.json','w+')as f:
f.write(json.dumps(newlist,skipkeys=True,sort_keys=True,indent=2))
def cluster_json_builder(clusterviwer=ClusterViwer(),x=numpy.array([]),y=numpy.array([])):
"""
    Puts x and y into the data attribute of the given ClusterViwer object and converts the object to JSON format.
:param clusterviwer:[ClusterViwer(name=)]
:param x:
:param y:
:return:
"""
clusterviwer.setData(x,y)
a=clusterviwer.toJson()
print(a)
if __name__ == "__main__":
c1=ClusterViwer()
c2=ClusterViwer()
l=[]
l.append(c1)
l.append(c2)
cluster_json_builder(c1,x=numpy.array([1,2,3]),y=numpy.array([1,2,4]))
cluster_json_builder(c2,x=numpy.array([3,3,3]),y=numpy.array([2,2,2]))
cluster_view_json_builder(l)
| 37.537037 | 213 | 0.579674 |
51189f3815d7180ee5894183b2274ad8f0190351
| 543 |
py
|
Python
|
Curso_Python/Secao5-modulos-uteis/133_web-scraping/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao5-modulos-uteis/133_web-scraping/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao5-modulos-uteis/133_web-scraping/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests # instala o html css
# pip install requests // instalando web scraping
# pip install beautifulsoup4
url = 'https://pt.stackoverflow.com/questions'
response = requests.get(url)
html = BeautifulSoup(response.text, 'html.parser')
for question in html.select('.question-summary'):
title = question.select_one('.question-hyperlink')
date = question.select_one('.relativetime')
wishes = question.select_one('.vote-count-post ')
print(date.text, title.text, wishes.text, sep='\t')
| 30.166667 | 55 | 0.734807 |
5ac094cbdfc2122c9de740888c1e1218a43a62ad
| 4,412 |
py
|
Python
|
Openharmony v1.0/third_party/ltp/testcases/realtime/tools/ftqviz.py
|
clkbit123/TheOpenHarmony
|
0e6bcd9dee9f1a2481d762966b8bbd24baad6159
|
[
"MIT"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/ltp/testcases/realtime/tools/ftqviz.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/third_party/ltp/testcases/realtime/tools/ftqviz.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Filename: ftqviz.py
# Author: Darren Hart <[email protected]>
# Description: Plot the time and frequency domain plots of a times and
# counts log file pair from the FTQ benchmark.
# Prerequisites: numpy, scipy, and pylab packages. For debian/ubuntu:
# o python-numeric
# o python-scipy
# o python-matplotlib
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Copyright (C) IBM Corporation, 2007
#
# 2007-Aug-30: Initial version by Darren Hart <[email protected]>
from numpy import *
from numpy.fft import *
from scipy import *
from pylab import *
from sys import *
from getopt import *
NS_PER_S = 1000000000
NS_PER_MS = 1000000
NS_PER_US = 1000
def smooth(x, wlen):
if x.size < wlen:
raise ValueError("Input vector needs to be bigger than window size.")
# reflect the signal to avoid transients... ?
s = r_[2*x[0]-x[wlen:1:-1], x, 2*x[-1]-x[-1:-wlen:-1]]
w = hamming(wlen)
# generate the smoothed signal
y = convolve(w/w.sum(), s, mode='same')
    # recenter the smoothed signal over the original (slide along x)
y1 = y[wlen-1:-wlen+1]
return y1
def my_fft(x, sample_hz):
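    # Shift the zero-frequency bin to the centre and normalise the amplitude
    # by the signal length so spectra of different lengths are comparable.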
X = abs(fftshift(fft(x)))
freq = fftshift(fftfreq(len(x), 1.0/sample_hz))
return array([freq, abs(X)/len(x)])
def smooth_fft(timefile, countfile, sample_hz, wlen):
# The higher the sample_hz, the larger the required wlen (used to generate
# the hamming window). It seems that each should be adjusted by roughly the
# same factor
    ns_per_sample = NS_PER_S // sample_hz  # integer, used for index arithmetic below
print("Interpolated Sample Rate: ", sample_hz, " HZ")
print("Hamming Window Length: ", wlen)
t = fromfile(timefile, dtype=int64, sep='\n')
x = fromfile(countfile, dtype=int64, sep='\n')
# interpolate the data to achieve a uniform sample rate for use in the fft
    xi_len = (t[-1] - t[0]) // ns_per_sample
xi = zeros(xi_len)
last_j = 0
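    # Linearly interpolate the counts between the original (non-uniform)
    # timestamps so the signal has a constant sample rate for the FFT.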
for i in range(0, len(t)-1):
        j = (t[i] - t[0]) // ns_per_sample
xi[j] = x[i]
m = (xi[j]-xi[last_j])/(j-last_j)
for k in range(last_j + 1, j):
xi[k] = m * (k - last_j) + xi[last_j]
last_j = j
# smooth the signal (low pass filter)
try:
y = smooth(xi, wlen)
except ValueError as e:
exit(e)
# generate the fft
X = my_fft(xi, sample_hz)
Y = my_fft(y, sample_hz)
# plot the hamming window
subplot(311)
plot(hamming(wlen))
axis([0,wlen-1,0,1.1])
title(str(wlen)+" Point Hamming Window")
# plot the signals
subplot(312)
ts = arange(0, len(xi), dtype=float)/sample_hz # time signal in units of seconds
plot(ts, xi, alpha=0.2)
plot(ts, y)
legend(['interpolated', 'smoothed'])
title("Counts (interpolated sample rate: "+str(sample_hz)+" HZ)")
xlabel("Time (s)")
ylabel("Units of Work")
# plot the fft
subplot(313)
plot(X[0], X[1], ls='steps', alpha=0.2)
plot(Y[0], Y[1], ls='steps')
ylim(ymax=20)
xlim(xmin=-3000, xmax=3000)
legend(['interpolated', 'smoothed'])
title("FFT")
xlabel("Frequency")
ylabel("Amplitude")
show()
def usage():
print("usage: "+argv[0]+" -t times-file -c counts-file [-s SAMPLING_HZ] [-w WINDOW_LEN] [-h]")
if __name__=='__main__':
try:
opts, args = getopt(argv[1:], "c:hs:t:w:")
except GetoptError:
usage()
exit(2)
sample_hz = 10000
wlen = 25
times_file = None
counts_file = None
for o, a in opts:
if o == "-c":
counts_file = a
if o == "-h":
usage()
exit()
if o == "-s":
sample_hz = int(a)
if o == "-t":
times_file = a
if o == "-w":
wlen = int(a)
if not times_file or not counts_file:
usage()
exit(1)
smooth_fft(times_file, counts_file, sample_hz, wlen)
| 28.101911 | 102 | 0.607208 |
7a298f862f68bd7b75a12e7274f9edf27416d649
| 99 |
py
|
Python
|
examples/myqueryset/simplerelate/apps.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5 |
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
examples/myqueryset/simplerelate/apps.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7 |
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
examples/myqueryset/simplerelate/apps.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1 |
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from django.apps import AppConfig
class SimplerelateConfig(AppConfig):
name = 'simplerelate'
| 16.5 | 36 | 0.777778 |
890040b5d1c2982f33dddb934f9f149e4f4ed646
| 21,451 |
py
|
Python
|
Packs/AzureStorageTable/Integrations/AzureStorageTable/AzureStorageTable.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/AzureStorageTable/Integrations/AzureStorageTable/AzureStorageTable.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 87 |
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/AzureStorageTable/Integrations/AzureStorageTable/AzureStorageTable.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import copy
from requests import Response
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
account_sas_token = ""
storage_account_name = ""
class Client:
"""
API Client
"""
def __init__(self, server_url, verify, proxy, account_sas_token, storage_account_name, api_version):
self.ms_client = MicrosoftStorageClient(server_url, verify, proxy, account_sas_token, storage_account_name,
api_version)
def create_table_request(self, table_name: str) -> dict:
"""
Creates a new table in a storage account.
Args:
table_name (str): Table name.
Returns:
dict: API response from Azure.
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json;odata=nometadata'}
data = {"TableName": table_name}
response = self.ms_client.http_request(method='POST', url_suffix='Tables', headers=headers, resp_type="json",
json_data=data)
return response
def delete_table_request(self, table_name: str) -> Response:
"""
Delete the specified table and any data it contains.
Args:
table_name (str): Table name.
Returns:
Response: API response from Azure.
"""
url_suffix = f'Tables(\'{table_name}\')'
response = self.ms_client.http_request(method='DELETE', url_suffix=url_suffix, return_empty_response=True)
return response
def query_tables_request(self, limit: str = None, query_filter: str = None, next_table: str = None) -> Response:
"""
List tables under the specified account.
Args:
limit (str): Retrieve top n tables.
query_filter (str): Query expression.
next_table (str): Identifies the portion of the list to be returned.
Returns:
Response: API response from Azure.
"""
headers = {'Accept': 'application/json;odata=nometadata'}
params = remove_empty_elements({"$top": limit, "$filter": query_filter, "NextTableName": next_table})
response = self.ms_client.http_request(method='GET', url_suffix='Tables', headers=headers, params=params,
return_empty_response=True)
return response
def insert_entity_request(self, table_name: str, entity_fields: dict) -> dict:
"""
Insert a new entity into a table.
Args:
table_name (str): Table name.
entity_fields (dict): Entity fields data.
Returns:
dict: API response from Azure.
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json;odata=nometadata'}
response = self.ms_client.http_request(method='POST', url_suffix=f'{table_name}', headers=headers,
resp_type="json", json_data=entity_fields)
return response
def update_entity_request(self, table_name: str, partition_key: str, row_key: str, entity_fields: dict) -> Response:
"""
Update an existing entity in a table.
Args:
table_name (str): Table name.
partition_key (str): Unique identifier for the partition within a given table.
row_key (str): Unique identifier for an entity within a given partition.
entity_fields (dict): Entity fields data.
Returns:
Response: API response from Azure.
"""
headers = {'Content-Type': 'application/json'}
url_suffix = f'{table_name}(PartitionKey=\'{partition_key}\',RowKey=\'{row_key}\')'
response = self.ms_client.http_request(method='MERGE', url_suffix=url_suffix,
headers=headers, return_empty_response=True, json_data=entity_fields)
return response
def replace_entity_request(self, table_name: str, partition_key: str, row_key: str,
entity_fields: dict) -> Response:
"""
Replace an existing entity in a table.
Args:
table_name (str): Table name.
partition_key (str): Unique identifier for the partition within a given table.
row_key (str): Unique identifier for an entity within a given partition.
entity_fields (dict): Entity fields data.
Returns:
Response: API response from Azure.
"""
headers = {'Content-Type': 'application/json'}
url_suffix = f'{table_name}(PartitionKey=\'{partition_key}\',RowKey=\'{row_key}\')'
response = self.ms_client.http_request(method='PUT', url_suffix=url_suffix,
headers=headers, return_empty_response=True, json_data=entity_fields)
return response
def query_entity_request(self, table_name: str, partition_key: str = None, row_key: str = None,
query_filter: str = None, select: str = None, limit: str = None,
next_partition_key: str = None, next_row_key: str = None) -> Response:
"""
Query entities in a table.
Args:
table_name (str): Table name.
partition_key (str): Unique identifier for the partition within a given table.
row_key (str): Unique identifier for an entity within a given partition.
query_filter (str): Query expression.
select (str): Entity properties to return.
limit (str): Retrieve top n entities.
next_partition_key (str): Identifies the portion of the list to be returned.
next_row_key (str): Identifies the portion of the list to be returned.
Returns:
Response: API response from Azure.
"""
headers = {'Accept': 'application/json;odata=nometadata'}
params = remove_empty_elements({"$filter": query_filter,
"$select": select,
"$top": limit,
"NextPartitionKey": next_partition_key,
"NextRowKey": next_row_key})
url_suffix = f'{table_name}(PartitionKey=\'{partition_key}\',RowKey=\'{row_key}\')' if partition_key \
else f'{table_name}()'
response = self.ms_client.http_request(method='GET', url_suffix=url_suffix,
params=params, headers=headers, return_empty_response=True)
return response
def delete_entity_request(self, table_name: str, partition_key: str, row_key: str) -> Response:
"""
Delete an existing entity in a table
Args:
table_name (str): Table name.
partition_key (str): Unique identifier for the partition within a given table.
row_key (str): Unique identifier for an entity within a given partition.
Returns:
Response: API response from Azure.
"""
headers = {"If-Match": "*"}
url_suffix = f'{table_name}(PartitionKey=\'{partition_key}\',RowKey=\'{row_key}\')'
response = self.ms_client.http_request(method='DELETE', url_suffix=url_suffix, headers=headers,
return_empty_response=True)
return response
def create_table_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Creates a new table in a storage account.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
table_name_regex = "^[A-Za-z][A-Za-z0-9]{2,62}$"
# Rules for naming tables can be found here:
# https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-the-table-service-data-model
if not re.search(table_name_regex, table_name):
raise Exception('The specified table name is invalid.')
response = client.create_table_request(table_name)
outputs = {"name": response.get("TableName")}
command_results = CommandResults(
readable_output=f'Table {table_name} successfully created.',
outputs_prefix='AzureStorageTable.Table',
outputs_key_field='name',
outputs=outputs,
raw_response=response
)
return command_results
def delete_table_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Delete the specified table and any data it contains.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
client.delete_table_request(table_name)
command_results = CommandResults(
readable_output=f'Table {table_name} successfully deleted.'
)
return command_results
def query_tables_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
List tables under the specified account.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
limit = args.get('limit') or '50'
query_filter = args.get('filter')
page = arg_to_number(args.get('page') or '1')
next_table = None
    readable_message = f'Tables List:\n Current page size: {limit}\n Showing page {page} out of others that may exist'
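    # Azure Table storage has no offset parameter: to reach page N the code first
    # requests the preceding (page - 1) * limit tables and reads the continuation
    # token (x-ms-continuation-NextTableName) from the response headers.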
if page > 1: # type: ignore
offset = int(limit) * (page - 1) # type: ignore
response = client.query_tables_request(str(offset), query_filter)
response_headers = response.headers
next_table = response_headers.get('x-ms-continuation-NextTableName')
if not next_table:
return CommandResults(
readable_output=readable_message,
outputs_prefix='AzureStorageTable.Table',
outputs=[],
raw_response=[]
)
raw_response = client.query_tables_request(limit, query_filter, next_table).json()
outputs = []
for table in raw_response.get("value"):
outputs.append({"name": table.get("TableName")})
readable_output = tableToMarkdown(
readable_message,
outputs,
headerTransform=pascalToSpace
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='AzureStorageTable.Table',
outputs_key_field='name',
outputs=outputs,
raw_response=raw_response
)
return command_results
def convert_dict_time_format(data: dict, keys: list):
"""
Convert dictionary data values time format.
Args:
data (dict): Data.
keys (list): Keys list to convert
"""
for key in keys:
if data.get(key):
str_time = data.get(key)[:-2] + 'Z' # type: ignore
iso_time = FormatIso8601(datetime.strptime(str_time, DATE_FORMAT))
data[key] = iso_time
def insert_entity_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Insert a new entity into a table.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
partition_key = args['partition_key']
row_key = args['row_key']
entity_fields = args['entity_fields']
try:
entity_fields = json.loads(entity_fields)
except ValueError:
raise ValueError('Failed to parse entity_fields argument. Please provide valid JSON format entity data.')
entity_fields['PartitionKey'] = partition_key
entity_fields['RowKey'] = row_key
response = client.insert_entity_request(table_name, entity_fields)
outputs = {"name": table_name, "Entity": [copy.deepcopy(response)]}
convert_dict_time_format(outputs.get('Entity')[0], ['Timestamp']) # type: ignore
readable_output = tableToMarkdown(
f'Entity Fields for {table_name} Table:',
outputs.get('Entity'),
headerTransform=pascalToSpace
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='AzureStorageTable.Table',
outputs_key_field='name',
outputs=outputs,
raw_response=response
)
return command_results
def replace_entity_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Replace an existing entity in a table.
The Replace Entity operation replace the entire entity and can be used to remove properties.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
partition_key = args['partition_key']
row_key = args['row_key']
entity_fields = args['entity_fields']
try:
entity_fields = json.loads(entity_fields)
except ValueError:
raise ValueError('Failed to parse entity_fields argument. Please provide valid JSON format entity data.')
client.replace_entity_request(table_name, partition_key, row_key, entity_fields)
command_results = CommandResults(
readable_output=f'Entity in {table_name} table successfully replaced.'
)
return command_results
def update_entity_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Update an existing entity in a table.
This operation does not replace the existing entity.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
partition_key = args['partition_key']
row_key = args['row_key']
entity_fields = args['entity_fields']
try:
entity_fields = json.loads(entity_fields)
except ValueError:
raise ValueError('Failed to parse entity_fields argument. Please provide valid JSON format entity data.')
client.update_entity_request(table_name, partition_key, row_key, entity_fields)
command_results = CommandResults(
readable_output=f'Entity in {table_name} table successfully updated.'
)
return command_results
def create_query_entity_output(table_name: str, raw_response: dict, is_entity_query: bool) -> dict:
"""
Create query_entity_command outputs.
Args:
table_name (str): Command table name.
raw_response (str): API response from Azure.
is_entity_query (bool): Indicates to path to the response data.
Returns:
dict: Command response.
"""
outputs = {"name": table_name}
response_copy = copy.deepcopy(raw_response)
if is_entity_query:
outputs["Entity"] = [response_copy] # type: ignore
else:
outputs["Entity"] = response_copy.get('value') # type: ignore
for entity in outputs.get("Entity"): # type: ignore
convert_dict_time_format(entity, ['Timestamp']) # type: ignore
return outputs
def query_entity_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Query entities in a table.
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
partition_key = args.get('partition_key')
row_key = args.get('row_key')
query_filter = args.get('filter')
select = args.get('select')
limit = None if partition_key else args.get('limit') or '50'
page = None if partition_key else arg_to_number(args.get('page') or '1')
next_partition_key = None
next_row_key = None
if (partition_key and not row_key) or (row_key and not partition_key):
        raise Exception('Please provide both \'partition_key\' and \'row_key\' arguments, or neither of them.')
    readable_message = f'Entity Fields for {table_name} table:\n Current page size: {limit or 50}\n ' \
                       f'Showing page {page or 1} out of others that may exist'
if page and page > 1:
offset = int(limit) * (page - 1) # type: ignore
response = client.query_entity_request(table_name, partition_key, row_key, query_filter, select, str(offset))
response_headers = response.headers
next_partition_key = response_headers.get('x-ms-continuation-NextPartitionKey')
next_row_key = response_headers.get('x-ms-continuation-NextRowKey')
if not next_partition_key:
return CommandResults(
readable_output=readable_message,
outputs_prefix='AzureStorageTable.Table',
outputs=[],
raw_response=[]
)
raw_response = client.query_entity_request(table_name, partition_key, row_key, query_filter, select, limit,
next_partition_key, next_row_key).json()
outputs = create_query_entity_output(table_name, raw_response, is_entity_query=partition_key is not None)
readable_output = tableToMarkdown(
readable_message,
outputs.get("Entity"),
headerTransform=pascalToSpace
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='AzureStorageTable.Table',
outputs_key_field='name',
outputs=outputs,
raw_response=raw_response
)
return command_results
def delete_entity_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Delete an existing entity in a table
Args:
client (Client): Azure Table Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
table_name = args['table_name']
partition_key = args['partition_key']
row_key = args['row_key']
client.delete_entity_request(table_name, partition_key, row_key)
command_results = CommandResults(
readable_output=f'Entity in {table_name} table successfully deleted.'
)
return command_results
def test_module(client: Client) -> None:
"""
Tests API connectivity and authentication.
Args:
client (Client): Azure Table API client.
Returns:
str : 'ok' if test passed, anything else will fail the test.
"""
try:
client.query_tables_request()
except Exception as exception:
if 'ResourceNotFound' in str(exception):
return return_results('Authorization Error: make sure API Credentials are correctly set')
if 'Error Type' in str(exception):
return return_results(
'Verify that the storage account name is correct and that you have access to the server from your host.')
raise exception
return_results('ok')
def main() -> None:
"""
Main function
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
global account_sas_token
global storage_account_name
account_sas_token = params['credentials']['password']
storage_account_name = params['credentials']['identifier']
api_version = "2020-10-02"
base_url = f'https://{storage_account_name}.table.core.windows.net/'
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client: Client = Client(base_url, verify_certificate, proxy, account_sas_token, storage_account_name,
api_version)
commands = {
'azure-storage-table-create': create_table_command,
'azure-storage-table-delete': delete_table_command,
'azure-storage-table-query': query_tables_command,
'azure-storage-table-entity-insert': insert_entity_command,
'azure-storage-table-entity-update': update_entity_command,
'azure-storage-table-entity-query': query_entity_command,
'azure-storage-table-entity-delete': delete_entity_command,
'azure-storage-table-entity-replace': replace_entity_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
from MicrosoftAzureStorageApiModule import * # noqa: E402
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| 33.622257 | 121 | 0.639131 |
56c697e3c813a98f5c52b4da82cf6709ec7ed9f8
| 702 |
py
|
Python
|
prak_2/draw.py
|
Paeti/krypto
|
f71b794a73170aeae4aa17712eea199a8e46afc0
|
[
"MIT"
] | null | null | null |
prak_2/draw.py
|
Paeti/krypto
|
f71b794a73170aeae4aa17712eea199a8e46afc0
|
[
"MIT"
] | null | null | null |
prak_2/draw.py
|
Paeti/krypto
|
f71b794a73170aeae4aa17712eea199a8e46afc0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
__author__ = "Patrick Reckeweg"
__copyright__ = ""
import matplotlib as mlp
import matplotlib.pyplot as plt
from random import *
import array as arr
import miller_rabbin as ml
def draw_experiment():
x = arr.array('i', [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000])
a = arr.array('i', [1, 2, 3, 4, 5, 6, 7, 8, 9])
    b = [10**i for i in range(1, 10)]
plt.figure(2)
plt.subplot(211)
plt.plot(a, b, 'y--')
#plt.xlim(1, 9)
plt.ylim(0, 10**9)
plt.grid(True)
plt.subplot(212)
plt.plot(a, b, 'y--')
#plt.xlim(1, 9)
plt.ylim(10**1, 10**9)
plt.yscale("log")
plt.grid(True)
plt.show()
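

# Hypothetical entry point (not part of the original script) so the plot is
# drawn when the file is executed directly.
if __name__ == "__main__":
    draw_experiment()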
| 20.057143 | 96 | 0.591168 |
a4671a928c0178fbe90016c417d1223e45dca1ef
| 346 |
py
|
Python
|
abfahrt/classes/__init__.py
|
Team-Zugig-zum-Erfolg/InformatiCup
|
788076ac38bf6d8f462465b7fb96db14d13bed30
|
[
"MIT"
] | 1 |
2022-01-30T14:30:02.000Z
|
2022-01-30T14:30:02.000Z
|
abfahrt/classes/__init__.py
|
Team-Zugig-zum-Erfolg/InformatiCup
|
788076ac38bf6d8f462465b7fb96db14d13bed30
|
[
"MIT"
] | null | null | null |
abfahrt/classes/__init__.py
|
Team-Zugig-zum-Erfolg/InformatiCup
|
788076ac38bf6d8f462465b7fb96db14d13bed30
|
[
"MIT"
] | null | null | null |
"""
This is the classes package that includes all entity classes
"""
from abfahrt.classes.Station import *
from abfahrt.classes.Passenger import *
from abfahrt.classes.Line import *
from abfahrt.classes.Train import *
from abfahrt.classes.TrainInLine import *
from abfahrt.classes.TrainInStation import *
from abfahrt.classes.Travel import *
| 31.454545 | 64 | 0.794798 |
74e23aa0849fabd80d8166930d324fc90dbd8401
| 339 |
py
|
Python
|
Online-Judges/DimikOJ/Python/54-hash-key.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/DimikOJ/Python/54-hash-key.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/DimikOJ/Python/54-hash-key.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def hash_key(string):
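    # Multiply the code points of every character and reduce modulo 97; two
    # strings are treated as a match when their keys collide.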
multiplication = 1
for i in string:
multiplication *= ord(i)
return (multiplication % 97)
for i in range(int(input())):
str1, str2 = input().split()
str1_key = hash_key(str1)
str2_key = hash_key(str2)
if str1_key == str2_key:
print("YES")
else:
print("NO")
| 18.833333 | 32 | 0.584071 |
74883fb137b6d9b8c4bddfc5b03ee2e5d4283ca7
| 245 |
py
|
Python
|
exercises/ja/exc_02_05_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/ja/exc_02_05_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/ja/exc_02_05_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
nlp = spacy.blank("ja")
# Docクラスをインポート
from ____ import ____
# 作りたいテキスト:「さあ、始めよう!」
words = ["さあ", "、", "初めよう", "!"]
spaces = [____, ____, ____, ____]
# wordsとspacesからDocを作成
doc = ____(____, ____=____, ____=____)
print(doc.text)
| 16.333333 | 38 | 0.661224 |
bb28eda778d2dcf8196aafa9adc32135b8939c8f
| 942 |
py
|
Python
|
examples/miniportal/portal/MacrosLoader.py
|
Jumpscale/web
|
8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb
|
[
"Apache-2.0"
] | 1 |
2015-10-26T10:38:32.000Z
|
2015-10-26T10:38:32.000Z
|
examples/miniportal/portal/MacrosLoader.py
|
Jumpscale/web
|
8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb
|
[
"Apache-2.0"
] | null | null | null |
examples/miniportal/portal/MacrosLoader.py
|
Jumpscale/web
|
8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb
|
[
"Apache-2.0"
] | null | null | null |
import os
import glob
import imp
from portal import Page
class Macro(object):
pass
class MacrosLoader(object):
def __init__(self, path):
self.path = path
self.macros = None
def load_macros(self, blueprint, actors):
self.actors = actors
for macro in os.listdir(self.path):
if not self.is_valid(macro):
continue
macro_path = os.path.join(self.path, macro)
module = imp.load_source(macro[:-3], macro_path)
blueprint.add_app_template_global(self.getCallback(getattr(module, macro[:-3])), macro[:-3])
# app.add_template_global(getattr(module, macro[:-3]), macro[:-3])
def is_valid(self, macro):
if macro.startswith("__"):
return False
return True
def getCallback(self, fn):
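        # Wrap the macro so every template call receives the shared actors
        # object as its first argument.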
def wrapper(*args, **kwargs):
return fn(self.actors, *args, **kwargs)
return wrapper
| 29.4375 | 104 | 0.605096 |
79793c4cf5656f47193ba07ea804afb6dcf0520f
| 4,086 |
py
|
Python
|
tests/addons/log2reqs/test_csv.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 11 |
2017-10-02T01:29:12.000Z
|
2022-03-31T08:37:22.000Z
|
tests/addons/log2reqs/test_csv.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 79 |
2017-07-16T14:47:17.000Z
|
2022-03-31T08:49:14.000Z
|
tests/addons/log2reqs/test_csv.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 2 |
2019-01-28T06:11:58.000Z
|
2021-01-25T07:21:21.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pytest
from owlmixin.util import load_yaml
from jumeaux.addons.log2reqs.csv import Executor
from jumeaux.models import Log2ReqsAddOnPayload, HttpMethod
CSV = """
"name1","GET","/path1","q1=1&q2=2","header1=1&header2=2"
"name2","POST","/path2","q1=1&q2=&q3","header1=1&header2=&header3"
"name3","GET","/path3","q1=1"
"name4","POST","/path4",,"header1=1&header2=2"
"name5","GET","/path5"
""".strip()
TSV = """
"name1" "GET" "/path1" "q1=1&q2=2" "header1=1&header2=2"
"name2" "POST" "/path2" "q1=1&q2=&q3" "header1=1&header2=&header3"
"name3" "GET" "/path3" "q1=1"
"name4" "POST" "/path4" "header1=1&header2=2"
"name5" "GET" "/path5"
""".strip()
OVER5 = """
"name","GET","/path","q1=1","header1=1","evil"
""".strip()
def create_expected(no: int, method: HttpMethod, qs: dict, headers: dict) -> dict:
return {
"name": f"name{no}",
"method": method.to_value(True, True),
"path": f"/path{no}",
"qs": qs,
"headers": headers,
"url_encoding": "utf-8",
}
class TestExec:
@pytest.mark.parametrize(
"title, requests, config_yml, expected",
[
(
"CSV",
CSV,
"""
""",
[
create_expected(
1,
HttpMethod.GET,
{"q1": ["1"], "q2": ["2"]},
{"header1": "1", "header2": "2"},
),
create_expected(2, HttpMethod.POST, {"q1": ["1"]}, {"header1": "1"}),
create_expected(3, HttpMethod.GET, {"q1": ["1"]}, {}),
create_expected(4, HttpMethod.POST, {}, {"header1": "1", "header2": "2"}),
create_expected(5, HttpMethod.GET, {}, {}),
],
),
(
"TSV",
TSV,
"""
dialect: excel-tab
""",
[
create_expected(
1,
HttpMethod.GET,
{"q1": ["1"], "q2": ["2"]},
{"header1": "1", "header2": "2"},
),
create_expected(2, HttpMethod.POST, {"q1": ["1"]}, {"header1": "1"}),
create_expected(3, HttpMethod.GET, {"q1": ["1"]}, {}),
create_expected(4, HttpMethod.POST, {}, {"header1": "1", "header2": "2"}),
create_expected(5, HttpMethod.GET, {}, {}),
],
),
(
"CSV with keep_blank true",
CSV,
"""
keep_blank: True
""",
[
create_expected(
1,
HttpMethod.GET,
{"q1": ["1"], "q2": ["2"]},
{"header1": "1", "header2": "2"},
),
create_expected(
2,
HttpMethod.POST,
{"q1": ["1"], "q2": [""], "q3": [""]},
{"header1": "1", "header2": "", "header3": ""},
),
create_expected(3, HttpMethod.GET, {"q1": ["1"]}, {}),
create_expected(4, HttpMethod.POST, {}, {"header1": "1", "header2": "2"}),
create_expected(5, HttpMethod.GET, {}, {}),
],
),
],
)
def test(self, create_tmpfile_from, title, requests, config_yml, expected):
tmp = create_tmpfile_from(requests)
actual = Executor(load_yaml(config_yml)).exec(Log2ReqsAddOnPayload.from_dict({"file": tmp}))
assert actual.to_dicts() == expected
def test_length_over_5(self, create_tmpfile_from):
tmp = create_tmpfile_from(OVER5)
with pytest.raises(ValueError):
Executor({"encoding": "utf8"}).exec(Log2ReqsAddOnPayload.from_dict({"file": tmp}))
| 34.336134 | 100 | 0.424131 |
30bfd99b18a3b42bf378f11e32343322a618917a
| 664 |
py
|
Python
|
Python/M01_ProgrammingBasics/L05_WhileLoop/Exercises/Solutions/P04_Walking.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L05_WhileLoop/Exercises/Solutions/P04_Walking.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L05_WhileLoop/Exercises/Solutions/P04_Walking.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
target_steps = 10000
steps_done = 0
while True:
line = input()
if line == "Going home":
home_steps = int(input())
steps_done = steps_done + home_steps
if steps_done >= target_steps:
print("Goal reached! Good job!")
print(f"{steps_done - target_steps} steps over the goal!")
break
else:
print(f"{target_steps - steps_done} more steps to reach goal.")
break
steps = int(line)
steps_done += steps
if steps_done >= target_steps:
print("Goal reached! Good job!")
print(f"{steps_done - target_steps} steps over the goal!")
break
| 25.538462 | 75 | 0.579819 |
d6a5d004fb389a41c39d360372bf5b4b81ef363b
| 753 |
py
|
Python
|
envs/base.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | 2 |
2021-01-07T01:10:49.000Z
|
2022-01-21T09:37:16.000Z
|
envs/base.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
envs/base.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
# Base environment implementation
from enum import Enum
class StateFormat(Enum):
"""环境观测/状态格式"""
VECTOR = 0 # 一维向量
MATRIX = 1 # 二维矩阵
class BaseEnvironment:
def step(self, action):
"""Perform one timestep.
Args:
action: action to be performed.
Returns:
new_state: The new environment state.
reward: The reward for the action.
game_over: Whether current round of game is over.
For example, if the actor is dead or when the actor
win the game, then current round of game is over.
"""
raise NotImplementedError
def reset(self):
"""Reset the environment.
"""
raise NotImplementedError
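

# A minimal sketch (not part of the original module) showing how a concrete
# environment could subclass BaseEnvironment; the state, reward and
# termination values below are illustrative assumptions only.
class _ConstantEnvironment(BaseEnvironment):
    def step(self, action):
        # Ignore the action and end the round immediately.
        return [0.0], 0.0, True

    def reset(self):
        return [0.0]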
| 24.290323 | 67 | 0.588313 |
ba663b9659fd12553abbb8dd2cc8f6730dac18b8
| 1,399 |
py
|
Python
|
wagtail_localize/migrations/0010_overridablesegment.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 6 |
2019-09-10T19:53:55.000Z
|
2019-11-14T16:57:07.000Z
|
wagtail_localize/migrations/0010_overridablesegment.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 17 |
2019-07-11T11:17:37.000Z
|
2019-11-19T16:40:31.000Z
|
wagtail_localize/migrations/0010_overridablesegment.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 2 |
2019-09-30T20:23:39.000Z
|
2019-10-31T14:09:31.000Z
|
# Generated by Django 3.1 on 2020-08-28 10:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("wagtail_localize", "0009_stringtranslation_errors"),
]
operations = [
migrations.CreateModel(
name="OverridableSegment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("order", models.PositiveIntegerField()),
("data_json", models.TextField()),
(
"context",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="wagtail_localize.translationcontext",
),
),
(
"source",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="wagtail_localize.translationsource",
),
),
],
options={
"abstract": False,
},
),
]
| 29.145833 | 68 | 0.419585 |
233fcc6f11287ea68423d2a8c5aa6ad65a9a8978
| 7,908 |
py
|
Python
|
src/onegov/activity/matching/score.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/activity/matching/score.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/activity/matching/score.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import hashlib
from onegov.activity.models import Activity, Attendee, Booking, Occasion
from onegov.user import User
from sqlalchemy import func
class Scoring(object):
""" Provides scoring based on a number of criteria.
    Each criterium is a callable which takes a booking and returns a score.
The final score is the sum of all criteria scores.
"""
def __init__(self, criteria=None):
self.criteria = criteria or [PreferMotivated()]
def __call__(self, booking):
return sum(criterium(booking) for criterium in self.criteria)
@classmethod
def from_settings(cls, settings, session):
scoring = cls()
# always prefer groups
scoring.criteria.append(PreferGroups.from_session(session))
if settings.get('prefer_in_age_bracket'):
scoring.criteria.append(
PreferInAgeBracket.from_session(session))
if settings.get('prefer_organiser'):
scoring.criteria.append(
PreferOrganiserChildren.from_session(session))
if settings.get('prefer_admins'):
scoring.criteria.append(
PreferAdminChildren.from_session(session))
return scoring
@property
def settings(self):
classes = {c.__class__ for c in self.criteria}
settings = {}
if PreferInAgeBracket in classes:
settings['prefer_in_age_bracket'] = True
if PreferOrganiserChildren in classes:
settings['prefer_organiser'] = True
if PreferAdminChildren in classes:
settings['prefer_admins'] = True
return settings
class PreferMotivated(object):
""" Scores "motivated" bookings higher. A motivated booking is simply a
booking with a higher priority (an attendee would favor a booking he's
excited about.)
"""
@classmethod
def from_session(cls, session):
return cls()
def __call__(self, booking):
return booking.priority
class PreferInAgeBracket(object):
""" Scores bookings whose attendees fall into the age-bracket of the
occasion higher.
If the attendee falls into the age-bracket, the score is 1.0. Each year
difference results in a penalty of 0.1, until 0.0 is reached.
"""
def __init__(self, get_age_range, get_attendee_age):
self.get_age_range = get_age_range
self.get_attendee_age = get_attendee_age
@classmethod
def from_session(cls, session):
attendees = None
occasions = None
def get_age_range(booking):
nonlocal occasions, session
if occasions is None:
occasions = {
o.id: o.age
for o in session.query(Occasion.id, Occasion.age)
.filter(Occasion.period_id == booking.period_id)}
return (
occasions[booking.occasion_id].lower,
occasions[booking.occasion_id].upper - 1
)
def get_attendee_age(booking):
nonlocal attendees, session
if attendees is None:
attendees = {a.id: a.age for a in session.query(
Attendee.id, Attendee.age)}
return attendees[booking.attendee_id]
return cls(get_age_range, get_attendee_age)
def __call__(self, booking):
min_age, max_age = self.get_age_range(booking)
attendee_age = self.get_attendee_age(booking)
if min_age <= attendee_age and attendee_age <= max_age:
return 1.0
else:
difference = min(
abs(min_age - attendee_age),
abs(max_age - attendee_age)
)
return 1.0 - min(1.0, difference / 10.0)
class PreferOrganiserChildren(object):
""" Scores bookings of children higher if their parents are organisers.
This is basically an incentive to become an organiser. A child whose parent
is an organiser gets a score of 1.0, if the parent is not an organiser
a score 0.0 is returned.
"""
def __init__(self, get_is_organiser_child):
self.get_is_organiser_child = get_is_organiser_child
@classmethod
def from_session(cls, session):
organisers = None
def get_is_organiser_child(booking):
nonlocal organisers
if organisers is None:
organisers = {
a.username
for a in session.query(Activity.username)
.filter(Activity.id.in_(
session.query(Occasion.activity_id)
.filter(Occasion.period_id == booking.period_id)
.subquery()
))
}
return booking.username in organisers
return cls(get_is_organiser_child)
def __call__(self, booking):
return self.get_is_organiser_child(booking) and 1.0 or 0.0
class PreferAdminChildren(object):
""" Scores bookings of children higher if their parents are admins. """
def __init__(self, get_is_association_child):
self.get_is_association_child = get_is_association_child
@classmethod
def from_session(cls, session):
members = None
def get_is_association_child(booking):
nonlocal members
if members is None:
members = {
u.username for u in session.query(User)
.filter(User.role == 'admin')
.filter(User.active == True)
}
return booking.username in members
return cls(get_is_association_child)
def __call__(self, booking):
return self.get_is_association_child(booking) and 1.0 or 0.0
class PreferGroups(object):
""" Scores group bookings higher than other bookings. Groups get a boost
by size:
- 2 people: 1.0
- 3 people: 0.8
- 4 people: 0.6
- more people: 0.5
This preference gives an extra boost to unprioritised bookings, to somewhat
level out bookings in groups that used no star (otherwise a group
might be split up because someone didn't star the booking).
Additionally a unique boost between 0.010000000 to 0.099999999 is given to
each group depending on the group name. This should ensure that competing
groups generally do not have the same score. So an occasion will generally
prefer the members of one group over members of another group.
"""
def __init__(self, get_group_score):
self.get_group_score = get_group_score
@classmethod
def from_session(cls, session):
group_scores = None
def unique_score_modifier(group_code):
digest = hashlib.sha1(group_code.encode('utf-8')).hexdigest()[:8]
number = int(digest, 16)
return float('0.0' + str(number)[:8])
def get_group_score(booking):
nonlocal group_scores
if group_scores is None:
query = session.query(Booking).with_entities(
Booking.group_code,
func.count(Booking.group_code).label('count')
).filter(
Booking.group_code != None,
Booking.period_id == booking.period_id
).group_by(
Booking.group_code
).having(
func.count(Booking.group_code) > 1
)
group_scores = {
r.group_code:
max(.5, 1.0 - 0.2 * (r.count - 2))
+ unique_score_modifier(r.group_code)
for r in query
}
return group_scores.get(booking.group_code, 0)
        return cls(get_group_score)
def __call__(self, booking):
offset = 0 if booking.priority else 1
return self.get_group_score(booking) + offset
| 30.183206 | 79 | 0.609004 |
cc86da43000f9585fe4c9e409e4413dc2512339f
| 8,995 |
py
|
Python
|
Apps/Auswertung/tests/test_comparison.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | 1 |
2021-04-13T10:00:46.000Z
|
2021-04-13T10:00:46.000Z
|
Apps/Auswertung/tests/test_comparison.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | null | null | null |
Apps/Auswertung/tests/test_comparison.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
import sys
sys.path.append('../')
# import project libs
from compare_annotations import compare_annotations_in_sentences
test_cases =[
{
'reference': [{'annotation': {'label': 'PER', 'length': 2}, 'term': 'Wolfgang'}, {'term': 'Schwetz'}, {'term': ','}, {'term': 'Inhaber'}, {'term': 'der'}, {'annotation': {'label': 'COM', 'length': 2}, 'term': 'Schwetz'}, {'term': 'Consulting'}, {'term': 'und'}, {'term': 'Mitglied'}, {'term': 'des'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Expertenrates'}, {'term': ','}, {'term': 'betont'}, {'term': ':'}, {'term': '``'}, {'term': 'Ziel'}, {'term': 'unserer'}, {'term': 'Initiative'}, {'term': 'ist'}, {'term': 'eine'}, {'term': 'Qualitätssicherung'}, {'term': 'der'}, {'term': 'Praxistauglichkeit'}, {'term': 'von'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Softwarelösungen'}, {'term': '.'}],
'annotation': [{'annotation': {'label': 'COM', 'length': 2}, 'term': 'Wolfgang'}, {'term': 'Schwetz'}, {'term': ','}, {'term': 'Inhaber'}, {'annotation': {'label': 'PER', 'length': 3}, 'term': 'der'}, {'term': 'Schwetz'}, {'term': 'Consulting'}, {'term': 'und'}, {'term': 'Mitglied'}, {'term': 'des'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Expertenrates'}, {'term': ','}, {'term': 'betont'}, {'term': ':'}, {'term': '``'}, {'term': 'Ziel'}, {'term': 'unserer'}, {'term': 'Initiative'}, {'term': 'ist'}, {'term': 'eine'}, {'term': 'Qualitätssicherung'}, {'term': 'der'}, {'term': 'Praxistauglichkeit'}, {'term': 'von'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Softwarelösungen'}, {'term': '.'}],
'target_output': [2, 3]
},
{
'reference': [{'annotation': {'label': 'COM', 'length': 2}, 'term': 'Schwetz'}, {'term': 'Consulting'}, {'term': 'bescheinigt'}, {'annotation': {'label': 'COM', 'length': 1}, 'term': 'WICE'}, {'term': 'X'}, {'term': 'insgesamt'}, {'term': 'eine'}, {'term': 'überdurchschnittlich'}, {'term': 'gute'}, {'term': 'Bewertung'}, {'term': 'und'}, {'term': 'stellt'}, {'term': 'dabei'}, {'term': 'einige'}, {'term': 'Aspekte'}, {'term': 'besonders'}, {'term': 'heraus'}, {'term': '.'}],
'annotation': [{'term': 'Schwetz'}, {'annotation': {'label': 'PER', 'length': 0}, 'term': 'Consulting'}, {'term': 'bescheinigt'}, {'term': 'WICE'}, {'term': 'X'}, {'term': 'insgesamt'}, {'term': 'eine'}, {'term': 'überdurchschnittlich'}, {'term': 'gute'}, {'term': 'Bewertung'}, {'term': 'und'}, {'term': 'stellt'}, {'term': 'dabei'}, {'term': 'einige'}, {'term': 'Aspekte'}, {'term': 'besonders'}, {'term': 'heraus'}, {'term': '.'}],
'target_output': [3, 5]
},
{
'reference': [{'term': 'Nähere'}, {'term': 'Informationen'}, {'term': 'zur'}, {'term': 'Integration'}, {'term': 'von'}, {'term': 'Social'}, {'term': 'Media'}, {'term': 'und'}, {'term': 'anderen'}, {'term': 'Aspekten'}, {'term': 'von'}, {'annotation': {'label': 'COM', 'length': 1}, 'term': 'WICE'}, {'term': 'X'}, {'term': 'sind'}, {'term': 'nachzulesen'}, {'term': 'auf'}, {'term': 'www.wice.de'}, {'term': '.'}],
'annotation': [{'term': 'Nähere'}, {'term': 'Informationen'}, {'term': 'zur'}, {'term': 'Integration'}, {'annotation': {'label': 'COM', 'length': 2}, 'term': 'von'}, {'term': 'Social'}, {'term': 'Media'}, {'term': 'und'}, {'term': 'anderen'}, {'term': 'Aspekten'}, {'term': 'von'}, {'term': 'WICE'}, {'term': 'X'}, {'term': 'sind'}, {'term': 'nachzulesen'}, {'term': 'auf'}, {'term': 'www.wice.de'}, {'term': '.'}],
'target_output': [4, 5]
},
{
'reference': [{'annotation': {'label': 'COM', 'length': 1}, 'term': 'meinestadt.de'}, {'term': 'liefert'}, {'term': 'Internetnutzern'}, {'term': 'lokale'}, {'term': ','}, {'term': 'kulturelle'}, {'term': ','}, {'term': 'wirtschaftliche'}, {'term': 'und'}, {'term': 'touristische'}, {'term': 'Informationen'}, {'term': '.'}],
'annotation': [{'term': 'meinestadt.de'}, {'annotation': {'label': 'PER', 'length': 1}, 'term': 'liefert'}, {'term': 'Internetnutzern'}, {'term': 'lokale'}, {'term': ','}, {'term': 'kulturelle'}, {'term': ','}, {'term': 'wirtschaftliche'}, {'term': 'und'}, {'term': 'touristische'}, {'term': 'Informationen'}, {'term': '.'}],
'target_output': [5, 4]
},
{
'reference': [{'annotation': {'label': 'PER', 'length': 2}, 'term': 'Wolfgang'}, {'term': 'Schwetz'}, {'term': ','}, {'term': 'Inhaber'}, {'term': 'der'}, {'annotation': {'label': 'COM', 'length': 2}, 'term': 'Schwetz'}, {'term': 'Consulting'}, {'term': 'und'}, {'term': 'Mitglied'}, {'term': 'des'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Expertenrates'}, {'term': ','}, {'term': 'betont'}, {'term': ':'}, {'term': '``'}, {'term': 'Ziel'}, {'term': 'unserer'}, {'term': 'Initiative'}, {'term': 'ist'}, {'term': 'eine'}, {'term': 'Qualitätssicherung'}, {'term': 'der'}, {'term': 'Praxistauglichkeit'}, {'term': 'von'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Softwarelösungen'}, {'term': '.'}],
'annotation': [{'term': 'Wolfgang'}, {'term': 'Schwetz'}, {'term': ','}, {'term': 'Inhaber'}, {'term': 'der'}, {'term': 'Schwetz'}, {'term': 'Consulting'}, {'term': 'und'}, {'term': 'Mitglied'}, {'term': 'des'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Expertenrates'}, {'term': ','}, {'term': 'betont'}, {'term': ':'}, {'term': '``'}, {'term': 'Ziel'}, {'term': 'unserer'}, {'term': 'Initiative'}, {'term': 'ist'}, {'term': 'eine'}, {'term': 'Qualitätssicherung'}, {'term': 'der'}, {'term': 'Praxistauglichkeit'}, {'term': 'von'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Softwarelösungen'}, {'term': '.'}],
'target_output': [5, 5]
},
{
'reference': [{'term': 'Der'}, {'term': 'vollständige'}, {'term': 'Bericht'}, {'term': 'der'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Software'}, {'term': '-'}, {'term': 'Zertifizierung'}, {'term': 'steht'}, {'term': 'sowohl'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Webseite'}, {'term': 'der'}, {'annotation': {'label': 'COM', 'length': 2}, 'term': 'WICE'}, {'term': 'GmbH'}, {'term': 'unter'}, {'term': 'www.wice.de'}, {'term': 'als'}, {'term': 'auch'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Homepage'}, {'term': 'von'}, {'annotation': {'label': 'COM', 'length': 2}, 'term': 'schwetz'}, {'term': 'consulting'}, {'term': 'auf'}, {'term': 'www.schwetz.de'}, {'term': 'zum'}, {'term': 'Download'}, {'term': 'bereit'}, {'term': '.'}],
'annotation': [{'term': 'Der'}, {'term': 'vollständige'}, {'term': 'Bericht'}, {'term': 'der'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Software'}, {'term': '-'}, {'term': 'Zertifizierung'}, {'term': 'steht'}, {'term': 'sowohl'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Webseite'}, {'term': 'der'}, {'annotation': {'label': 'COM', 'length': 2}, 'term': 'WICE'}, {'term': 'GmbH'}, {'term': 'unter'}, {'term': 'www.wice.de'}, {'term': 'als'}, {'term': 'auch'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Homepage'}, {'term': 'von'}, {'term': 'schwetz'}, {'term': 'consulting'}, {'term': 'auf'}, {'term': 'www.schwetz.de'}, {'term': 'zum'}, {'term': 'Download'}, {'term': 'bereit'}, {'term': '.'}], 'target_output': [0, 5]
},
{
'reference': [{'term': 'Der'}, {'term': 'vollständige'}, {'term': 'Bericht'}, {'term': 'der'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Software'}, {'term': '-'}, {'term': 'Zertifizierung'}, {'term': 'steht'}, {'term': 'sowohl'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Webseite'}, {'term': 'der'}, {'annotation': {'label': 'COM', 'length': 1}, 'term': 'WICE'}, {'term': 'GmbH'}, {'term': 'unter', 'annotation': {'label': 'PER', 'length': 1}}, {'term': 'www.wice.de'}, {'term': 'als'}, {'term': 'auch'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Homepage'}, {'term': 'von'}, {'term': 'schwetz'}, {'term': 'consulting'}, {'term': 'auf'}, {'term': 'www.schwetz.de'}, {'term': 'zum'}, {'term': 'Download'}, {'term': 'bereit'}, {'term': '.'}],
'annotation': [{'term': 'Der'}, {'term': 'vollständige'}, {'term': 'Bericht'}, {'term': 'der'}, {'term': 'CRM'}, {'term': '-'}, {'term': 'Software'}, {'term': '-'}, {'term': 'Zertifizierung'}, {'term': 'steht'}, {'term': 'sowohl'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Webseite'}, {'term': 'der'}, {'annotation': {'label': 'COM', 'length': 3}, 'term': 'WICE'}, {'term': 'GmbH'}, {'term': 'unter'}, {'term': 'www.wice.de'}, {'term': 'als'}, {'term': 'auch'}, {'term': 'auf'}, {'term': 'der'}, {'term': 'Homepage'}, {'term': 'von'}, {'term': 'schwetz'}, {'term': 'consulting'}, {'term': 'auf'}, {'term': 'www.schwetz.de'}, {'term': 'zum'}, {'term': 'Download'}, {'term': 'bereit'}, {'term': '.'}],
'target_output': [1, 5]
}
]
for index, test in enumerate(test_cases):
analyzed_annotations = compare_annotations_in_sentences(test['reference'], test['annotation'])
if analyzed_annotations == test['target_output']:
print('✓ Test', index)
else:
print('✗ Test', index, '- should be', test['target_output'], 'but is', analyzed_annotations)
break
| 157.807018 | 754 | 0.498388 |
d1ff7f65e6d9186d05dcdff849a26b3ad31393e5
| 1,382 |
py
|
Python
|
code/snake/Algorithms/SitationsechnerDavid.py
|
BogyMitutoyoCTL/AI-Preparation
|
ef535741816b02e5e63d426a3232a688c9abd726
|
[
"MIT"
] | 1 |
2020-03-30T09:25:53.000Z
|
2020-03-30T09:25:53.000Z
|
code/snake/Algorithms/SitationsechnerDavid.py
|
BogyMitutoyoCTL/AI-Preparation
|
ef535741816b02e5e63d426a3232a688c9abd726
|
[
"MIT"
] | 2 |
2020-02-05T14:00:23.000Z
|
2020-03-30T19:57:19.000Z
|
code/snake/Algorithms/SitationsechnerDavid.py
|
BogyMitutoyoCTL/AI-Preparation
|
ef535741816b02e5e63d426a3232a688c9abd726
|
[
"MIT"
] | null | null | null |
from Field import Field
from GameData import GameData
from Snake import Snake
# 1 2 4
#
# 8 - 16
#
# 32 64 128
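# Note (derived from the tests below): cells masked out with '0' contribute
# nothing and do not advance the weight, so with mask "111 101 111" the cell
# below the head carries weight 64 instead of 128.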
def situation_number(kantenlänge: int, maske: str, field: GameData):
maske = maske.replace(" ", "")
beginn = int(- (kantenlänge - 1) / 2)
ende = int((kantenlänge - 1) / 2)
auschnitt = []
for deltay in range(beginn, ende + 1):
for deltax in range(beginn, ende + 1):
print(deltax, deltay)
kästchen_x = field.head_x + deltax
kästchen_y = field.head_y + deltay
leer = field.can_move_to(kästchen_x, kästchen_y)
if leer:
auschnitt.append(0)
else:
auschnitt.append(1)
    # Step 2: convert the sampled cells into a single number
wertigkeit = 1
summe = 0
# for Binearziffer in auschnitt:
for stelle in range(kantenlänge ** 2):
Binearziffer = auschnitt[stelle]
maskiert = int(maske[stelle])
if maskiert != 0:
if Binearziffer == 1:
summe += wertigkeit
wertigkeit = wertigkeit * 2
return summe
if __name__ == "__main__":
field = Field(10, 20)
snake = Snake(field)
number = situation_number(3, "111 111 111", snake.get_info())
assert number == 16 + 128
number = situation_number(3, "111 101 111", snake.get_info())
assert number == 64
| 27.64 | 68 | 0.575977 |
ae52a5c6c2e194377121a5e35198ab56ccf887f1
| 707 |
py
|
Python
|
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/74_map-75_filter-76_reduce/75_filter.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/74_map-75_filter-76_reduce/75_filter.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/74_map-75_filter-76_reduce/75_filter.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
from dados import produtos, pessoas, lista
# new_list = filter(lambda x: x > 5, lista)  # done with filter
# new_list = [x for x in lista if x > 5]  # done with a list comprehension
# print(list(new_list))
# another example
# def new_filter(produto):
# if produto['preco'] > 50:
# produto['e_caro'] = True
# return True
#
#
# new_list = filter(new_filter, produtos)
#
# for c in new_list:
# print(c)
# Another example
def new_filter(pessoas):
if pessoas['idade'] >= 18:
pessoas['Maior_de_idade'] = pessoas['idade']
else:
pessoas['Menor_de_idade'] = pessoas['idade']
return True
new_list = filter(new_filter, pessoas)
for produto in new_list:
print(produto)
| 20.794118 | 71 | 0.649222 |
ae747704dbd6c1724819b013e6d1bdc702378234
| 95 |
py
|
Python
|
musicbeats/apps.py
|
VICTOR4046/MusiX
|
4a13d849e9db9a20b3ef8f286a8d047c0de86170
|
[
"MIT"
] | null | null | null |
musicbeats/apps.py
|
VICTOR4046/MusiX
|
4a13d849e9db9a20b3ef8f286a8d047c0de86170
|
[
"MIT"
] | null | null | null |
musicbeats/apps.py
|
VICTOR4046/MusiX
|
4a13d849e9db9a20b3ef8f286a8d047c0de86170
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MusicbeatsConfig(AppConfig):
name = 'musicbeats'
| 15.833333 | 34 | 0.768421 |
884ccd57b51e086d5e8dfa1675a25cda717c5b31
| 5,343 |
py
|
Python
|
app/models/forms.py
|
brunomileto/gamebet_website
|
b315569ff5c18538cbc374d19011591edb5f8b26
|
[
"MIT"
] | null | null | null |
app/models/forms.py
|
brunomileto/gamebet_website
|
b315569ff5c18538cbc374d19011591edb5f8b26
|
[
"MIT"
] | null | null | null |
app/models/forms.py
|
brunomileto/gamebet_website
|
b315569ff5c18538cbc374d19011591edb5f8b26
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from flask_login import LoginManager
from flask_wtf import FlaskForm
from wtforms import StringField, FileField, PasswordField, IntegerField, SelectField
from wtforms.validators import Email, DataRequired, InputRequired
from wtforms.fields.html5 import DateField, TelField
from wtforms_alchemy import PhoneNumberField
login_manager = LoginManager()
GAME_CHOICES = [('', "Escolha um jogo:"), ('1', 'FIFA19'), ('2', 'FIFA20'), ('3', 'FIFA21')]
PLATFORM_CHOICES = [('', "Escolha uma plataforma:"), ('1', 'XOne'), ('2', 'PS4')]
BET_VALUE_CHOICES = [('', "Escolha um valor de aposta:"), ('1', 5), ('2', 10), ('3', 15), ('4', 20)]
RULES_CHOICES = [('', "Escolha uma regra, se quiser:"), ('1', 'REGRA 1'), ('2', 'REGRA 2'), ('3', 'REGRA 3')]
GAME_MODE_CHOICES = [('', "Escolha um modo de jogo:"), ('1', 'Elencos Online 1'), ('2', 'Ultimate Team')]
MATCH_RESULT_CHOICES = [('', "Qual foi o Seu Resultado, na partida?"), ('1', 'Vitória'), ('2', 'Derrota'), ('3', 'Empate')]
USER_STATUS_CHOICES = [('', 'Escolha uma opção'), ('1', 'Reabilitar'), ('2', 'Bloquear'), ('3', 'Excluir')]
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
class RegisterForm(FlaskForm):
name = StringField('Name')
# first_name = StringField('First_name', validators=[DataRequired()])
# last_name = StringField('Last_name', validators=[DataRequired()])
# phone = IntegerField('Phone', validators=[DataRequired()])
# cpf = IntegerField('cpf', validators=[DataRequired()])
# birth_date = StringField('Birth_date', validators=[DataRequired()])
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
class EditProfileForm(FlaskForm):
first_name = StringField('First_name', validators=[DataRequired()])
last_name = StringField('Last_name', validators=[DataRequired()])
phone = StringField('phone', validators=[DataRequired()])
cpf = StringField('cpf', validators=[DataRequired()])
rg = StringField('rg', validators=[DataRequired()])
birth_date = StringField('birth_date', validators=[DataRequired()])
xbox_gametag = StringField('xbox_gametag')
psn_gametag = StringField('psn_gametag')
bank_name = StringField('bank_name')
bank_account = StringField('bank_account')
bank_agency = StringField('bank_agency')
class MatchCreationForm(FlaskForm):
game_name = SelectField('game', choices=GAME_CHOICES, validators=[DataRequired()])
platform = SelectField('platform', choices=PLATFORM_CHOICES, validators=[DataRequired()])
bet_value = SelectField('bet_value', choices=BET_VALUE_CHOICES, validators=[DataRequired()])
game_tag = StringField('game_tag', validators=[DataRequired()])
rules = SelectField('rules', choices=RULES_CHOICES)
comments = StringField('comments')
game_mode = SelectField('game_mode', choices=GAME_MODE_CHOICES, validators=[DataRequired()])
class MatchEditForm(FlaskForm):
game_name = SelectField('game_name', choices=GAME_CHOICES, validators=[DataRequired()])
platform = SelectField('platform', choices=PLATFORM_CHOICES, validators=[DataRequired()])
bet_value = SelectField('bet_value', choices=BET_VALUE_CHOICES, validators=[DataRequired()])
game_rules = SelectField('game_rules', choices=RULES_CHOICES)
comment = StringField('comment')
game_mode = SelectField('game_mode', choices=GAME_MODE_CHOICES, validators=[DataRequired()])
class InsertResults(FlaskForm):
# match_result = StringField('match_result', validators=[DataRequired()])
match_result = SelectField('match_result', choices=MATCH_RESULT_CHOICES, validators=[DataRequired()])
match_creator_goals = IntegerField('match_creator_goals', validators=[InputRequired()])
competitor_goals = IntegerField('competitor_goals', validators=[InputRequired()])
# print = FileField('image', validators=[FileRequired(), FileAllowed(images, 'Somente Imagens!')])
# images = FileField('images')
class GetMoneyForm(FlaskForm):
value_wanted = IntegerField('value_wanted', validators=[InputRequired()])
class InsertGameTagForm(FlaskForm):
gametag = StringField('gametag', validators=[DataRequired()])
class ChangeUserStatusForm(FlaskForm):
user_status = SelectField('user_status', choices=USER_STATUS_CHOICES)
def match_winner_form(form, current_match_users):
match_winner_choices = [('', 'Escolha a GameTag do Ganhador ou o Status da Partida!'), ('1', str(current_match_users[0])),
('2', str(current_match_users[1])), ('3', 'Empatar'), ('4', 'Excluir'),
('5', 'Manter em Análise')]
class MatchWinnerForm(FlaskForm):
match_winner = SelectField('match_winner', choices=match_winner_choices, validators=[DataRequired()])
form = MatchWinnerForm(form)
returned_list = [form, match_winner_choices]
return returned_list
class RequestDefinitionForm(FlaskForm):
request_definition = SelectField('request_definition', choices=[('', 'Defina: '), ('1', 'Aceitar'), ('2', 'Recusar')], validators=[DataRequired()])
| 49.018349 | 151 | 0.706532 |
ee7a59d63c1301156ba63eb8ab5e480b3fbf2ae8
| 492 |
py
|
Python
|
playlist/playlist.py
|
samcost/POO
|
5c280407abb7aa9db1c82e52c34fd372465e8fe2
|
[
"MIT"
] | null | null | null |
playlist/playlist.py
|
samcost/POO
|
5c280407abb7aa9db1c82e52c34fd372465e8fe2
|
[
"MIT"
] | null | null | null |
playlist/playlist.py
|
samcost/POO
|
5c280407abb7aa9db1c82e52c34fd372465e8fe2
|
[
"MIT"
] | null | null | null |
import random
class Playlist:
def __init__ (self, musicas):
self.__list = musicas
def imprime (self):
print('____________________')
for i in self.__list:
print(i)
print('____________________')
def adiciona (self, newmusic):
self.__list.append(newmusic)
def toca_proxima (self):
print('Tocando agora: ', self.__list[0])
self.__list.pop(0)
def embaralha (self):
random.shuffle(self.__list)
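# Minimal usage sketch (not part of the original file); the song names are
# illustrative placeholders.
if __name__ == '__main__':
    pl = Playlist(['Song A', 'Song B', 'Song C'])
    pl.adiciona('Song D')
    pl.embaralha()
    pl.imprime()
    pl.toca_proxima()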
| 23.428571 | 48 | 0.603659 |
c9acbc80efc0911e35853cee9202a5f449141a0f
| 718 |
py
|
Python
|
leetcode/121-Best-Time-to-Buy-and-Sell-Stock/BestTimetoBuyandSellStock_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-12-29T03:26:39.000Z
|
2016-12-29T03:26:39.000Z
|
leetcode/121-Best-Time-to-Buy-and-Sell-Stock/BestTimetoBuyandSellStock_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/121-Best-Time-to-Buy-and-Sell-Stock/BestTimetoBuyandSellStock_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
# @param {integer[]} prices
# @return {integer}
def maxProfit(self, prices):
if len(prices) < 2:
return 0
minn = prices[:-1]
maxn = prices[1:]
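        # minn[i] will become the minimum buy price among prices[0..i] and
        # maxn[i] the maximum sell price among prices[i+1..]; the answer is
        # the largest maxn[i] - minn[i] over all split points.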
mi = minn[0]
for i in range(1, len(minn)):
if minn[i] > mi:
minn[i] = mi
else:
mi = minn[i]
ma = maxn[-1] # the last element
for i in range(len(maxn) - 1, 0, -1):
if maxn[i] > ma:
ma = maxn[i]
else:
maxn[i] = ma
res = 0
for i in range(len(maxn)):
if maxn[i] - minn[i] > res:
res = maxn[i] - minn[i]
return res
| 23.933333 | 45 | 0.401114 |
a004ca33400e52f820d9e4c60d713b54a8f7f962
| 658 |
py
|
Python
|
7-assets/past-student-repos/Whiteboard-Pairing-master/LargestContiguousSum/model_solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Whiteboard-Pairing-master/LargestContiguousSum/model_solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Whiteboard-Pairing-master/LargestContiguousSum/model_solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# We'll use a greedy algorithm to check to see if we have a
# new max sum as we iterate along the along. If at any time
# our sum becomes negative, we reset the sum.
def largestContiguousSum(arr):
maxSum = 0
currentSum = 0
for i, _ in enumerate(arr):
currentSum += arr[i]
maxSum = max(currentSum, maxSum)
if currentSum < 0:
currentSum = 0
return maxSum
# Tests
print(largestContiguousSum([5, -9, 6, -2, 3])) # should print 7
print(largestContiguousSum([1, 23, 90, 0, -9])) # should print 114
print(largestContiguousSum([2, 3, -8, -1, 2, 4, -2, 3])) # should print 7
| 27.416667 | 75 | 0.604863 |
094582dc87e2e3cd9431c8c9b63c3633a0aea3b3
| 187 |
py
|
Python
|
DataCollections/Twitter/Config.py
|
moasgh/BumbleBee
|
2b0aae7970ab316c7b8b12dd4032b41ee1772aad
|
[
"MIT"
] | 7 |
2020-03-06T05:53:43.000Z
|
2022-01-30T17:31:18.000Z
|
DataCollections/Twitter/Config.py
|
moasgh/BumbleBee
|
2b0aae7970ab316c7b8b12dd4032b41ee1772aad
|
[
"MIT"
] | null | null | null |
DataCollections/Twitter/Config.py
|
moasgh/BumbleBee
|
2b0aae7970ab316c7b8b12dd4032b41ee1772aad
|
[
"MIT"
] | null | null | null |
class Twitter():
Consumer_Key = ''
Consumer_Secret = ''
Access_Token = ''
Access_Token_Secret = ''
CONNECTION_STRING = "sqlite:///Twitter.db"
LANG = ["en"]
| 23.375 | 47 | 0.57754 |
11ec5e2e101318dcaf3d5d623b008c975f33454c
| 8,380 |
py
|
Python
|
shippolink.py
|
fundthmcalculus/reserautomation
|
31fb18815885815dffbb775dd019e28f57ac9228
|
[
"MIT"
] | null | null | null |
shippolink.py
|
fundthmcalculus/reserautomation
|
31fb18815885815dffbb775dd019e28f57ac9228
|
[
"MIT"
] | null | null | null |
shippolink.py
|
fundthmcalculus/reserautomation
|
31fb18815885815dffbb775dd019e28f57ac9228
|
[
"MIT"
] | null | null | null |
import logging
import shippo
from datetime import datetime
from typing import List, Dict, Union, Set, Iterator, Tuple
import requests
from smartetailing import objects
from httpconnection import HttpConnectionBase
class ShippoConstants:
US_DOLLAR = "USD"
US = "US"
UNITED_STATES = "United States"
POUND = "lb"
INCH = "in"
class ShippoOrderStatus:
PAID = "PAID"
class ShippoConnection(HttpConnectionBase):
SHIPPO_BASE_URL = "https://api.goshippo.com/orders/"
def __init__(self, api_key: str, skip_shipping_classification=None, include_order_status=None):
if skip_shipping_classification is None:
skip_shipping_classification = ["in-store pickup", "store pickup"]
if include_order_status is None:
include_order_status = ["recieved", "processing"]
shippo.config.api_key = api_key
self.__api_key = api_key
self.__existing_shippo_order_ids: Set[str] = set()
self.__skip_shipping_classification = skip_shipping_classification
self.__include_order_status = include_order_status
@property
def existing_shippo_order_ids(self) -> Set[str]:
if not self.__existing_shippo_order_ids or len(self.__existing_shippo_order_ids) == 0:
self.__existing_shippo_order_ids = self.__get_existing_shippo_order_ids()
return self.__existing_shippo_order_ids
def send_to_shippo(self, return_address: Dict[str, str], orders: Iterator[objects.Order]) -> List[str]:
shippo_orders: Iterator[objects.Order] = list(self.skip_existing_orders(
self.use_only_received_orders(
self.skip_in_store_pickup(orders))))
created_orders = []
for order in shippo_orders:
order_json = create_shippo_order(return_address, order)
self.__create_order(order_json)
created_orders.append(order.id)
return created_orders
def skip_existing_orders(self, orders: Iterator[objects.Order]) -> Iterator[objects.Order]:
for order in orders:
if '#' + order.id in self.existing_shippo_order_ids:
logging.info(f"SKIPPED: Order #{order.id} already in Shippo")
else:
yield order
def skip_in_store_pickup(self, orders: Iterator[objects.Order]) -> Iterator[objects.Order]:
for order in orders:
if order.shipping.classification.lower() in self.__skip_shipping_classification:
logging.info(f"SKIPPED: Order #{order.id} shipping={order.shipping.classification}")
else:
yield order
def use_only_received_orders(self, orders: Iterator[objects.Order]) -> Iterator[objects.Order]:
for order in orders:
if order.status.lower() in self.__include_order_status:
yield order
else:
logging.info(f"SKIPPED: Order #{order.id} in status={order.status}")
def __get_existing_shippo_order_ids_paged(self, page=1, page_size=50) -> Tuple[Set[str],bool]:
response = requests.get(ShippoConnection.SHIPPO_BASE_URL, headers={
"Authorization": f"ShippoToken {self.__api_key}",
}, params={'results': str(page_size), 'page': str(page)})
self._handle_response(response)
response_json = response.json()
order_numbers = set([obj["order_number"] for obj in response_json["results"]])
return order_numbers, len(order_numbers) == page_size
def __get_existing_shippo_order_ids(self) -> Set[str]:
order_number_set = set()
# Shippo doesn't currently report the paging correctly, so just request until we get an empty list back.
page = 1
has_next_page = True
while has_next_page:
page_set, has_next_page = self.__get_existing_shippo_order_ids_paged(page)
order_number_set = order_number_set.union(page_set)
page += 1
return order_number_set
def __create_order(self, order_json: dict) -> None:
response = requests.post(ShippoConnection.SHIPPO_BASE_URL, headers={
"Authorization": f"ShippoToken {self.__api_key}",
}, json=order_json)
# Assert success
self._handle_response(response)
logging.info(f"Created shippo order {order_json['order_number']}")
def create_shippo_order(return_address: Dict[str, str], order: objects.Order) -> Dict:
# Get the shipment, load the addresses
ship_to_address = create_address(order.ship_address)
# Load the items list
line_items: List[Dict[str, str]] = [create_line_item(item) for item in order.items]
order_json = create_order(order.id,
ship_to_address,
return_address,
line_items,
order.order_total,
order.shipping.method)
return order_json
def create_address(address: objects.AddressInfo) -> Dict[str, str]:
# TODO - Debug remove
address_name = address.name.full
if address.address1 == "648 Monmouth St":
address_name = "Reser Bicycle"
return {
"name": address_name,
"street1": address.address1,
"street2": address.address2,
"city": address.city,
"state": address.state,
"zip": address.zip,
"country": address.country,
"phone": address.phone
}
def create_parcel(weight: float = 0, length: int = 1, width: int = 1, height: int = 1, line_items: List[dict] = None) \
-> Dict[str, Union[str, List[dict]]]:
"""
Create the parcel object with a defined weight and dimensions
:param line_items: items in the parcel - optional
:param height: inches
:param width: inches
:param length: inches
:param weight: lbs
:return:
"""
weight = override_weight(line_items, weight)
return {
"length": f"{length:.1f}",
"width": f"{width:.1f}",
"height": f"{height:.1f}",
"distance_unit": ShippoConstants.INCH,
"weight": f"{weight:.1f}",
"mass_unit": ShippoConstants.POUND,
"line_items": line_items
}
def override_weight(line_items, weight):
if weight == 0:
if len(line_items) == 0:
            logging.warning("Define weight or line items!")
return 1
weight = sum([float(x["weight"]) for x in line_items])
return weight
def create_customs_item(item: objects.Item) -> dict:
return {
"description": item.description,
"quantity": item.quantity,
"net_weight": item.weight,
"mass_unit": ShippoConstants.POUND,
"value_amount": item.unit_price,
"value_currency": ShippoConstants.US_DOLLAR,
"origin_country": ShippoConstants.US,
"tariff_number": ""
}
def create_line_item(item: objects.Item) -> dict:
return {
"title": f"{item.description}",
"sku": item.mpn,
"quantity": item.quantity,
"total_price": format_dollar(item.unit_price),
"currency": ShippoConstants.US_DOLLAR,
"weight": f"{item.weight:.2f}",
"weight_unit": ShippoConstants.POUND,
"manufacture_country": ShippoConstants.US
}
def format_dollar(value: float) -> str:
"""
Return a proper 2 decimal currency value
:param value: Currency amount
:return: currency value string
"""
return f"{value:0.2f}"
def create_order(order_number: int, to_address: dict, from_address: dict, line_items: List[dict],
price_data: objects.OrderTotal, shipping_method: str) -> dict:
weight = override_weight(line_items, 0)
return {
"order_number": f"#{order_number}",
"order_status": ShippoOrderStatus.PAID,
"to_address": to_address,
"from_address": from_address,
"line_items": line_items,
"placed_at": datetime.now().isoformat(),
"weight": f"{weight:.2f}",
"weight_unit": ShippoConstants.POUND,
"shipping_method": shipping_method,
"shipping_cost": format_dollar(price_data.shipping),
"shipping_cost_currency": ShippoConstants.US_DOLLAR,
"subtotal_price": format_dollar(price_data.subtotal),
"total_price": format_dollar(price_data.total),
"total_tax": format_dollar(price_data.tax),
"currency": ShippoConstants.US_DOLLAR
}
| 37.244444 | 119 | 0.647375 |
eefabbd72b3541cd3d6daeab01811065a4c9bd82
| 64 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 883 |
2020-11-12T11:46:46.000Z
|
2022-03-31T18:27:10.000Z
|
paddlevideo/metrics/ActivityNet/__init__.py
|
arkofgalaxy/PaddleVideo
|
64251233c83b7eb681061b454da198a9082309a6
|
[
"Apache-2.0"
] | 233 |
2020-12-09T06:04:59.000Z
|
2022-03-28T08:16:51.000Z
|
paddlevideo/metrics/ActivityNet/__init__.py
|
arkofgalaxy/PaddleVideo
|
64251233c83b7eb681061b454da198a9082309a6
|
[
"Apache-2.0"
] | 225 |
2020-11-13T06:21:55.000Z
|
2022-03-31T05:36:11.000Z
|
from .anet_prop import ANETproposal
__all__ = ['ANETproposal']
| 16 | 35 | 0.78125 |
0166891b3193276fa9611a81173e7df62690b314
| 846 |
py
|
Python
|
book/_build/jupyter_execute/docs/1006_Vielseitige_Kunststoffe.py
|
tom-tubeless/Chemie
|
bcd10e84b341121c260526c306f86b1556a6c034
|
[
"MIT"
] | null | null | null |
book/_build/jupyter_execute/docs/1006_Vielseitige_Kunststoffe.py
|
tom-tubeless/Chemie
|
bcd10e84b341121c260526c306f86b1556a6c034
|
[
"MIT"
] | null | null | null |
book/_build/jupyter_execute/docs/1006_Vielseitige_Kunststoffe.py
|
tom-tubeless/Chemie
|
bcd10e84b341121c260526c306f86b1556a6c034
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # 1006_Vielseitige Kunststoffe (Versatile Plastics)
#
# **4h**
#
# ## Guiding questions
#
# - Why are certain plastics used in everyday life?
#
# ## Content areas & key content topics
#
# - **IF10: Organic chemistry**
#   - Macromolecules: selected plastics
#
# ## Focus areas of competency development
#
# - **UF2 Selection and application**
#   - goal-directed application of chemical subject knowledge
# - **B3 Weighing and decision-making**
#   - selecting options for action by weighing criteria and assessing the consequences for nature, the individual, and society
# - **B4 Position-taking and reflection**
#   - arguing in support of evaluations
# - **K4 Argumentation**
#   - fact-based argumentation grounded in chemical findings and scientific ways of thinking
| 31.333333 | 145 | 0.741135 |
28cc5b607436dfa9c83e8ee3d5d1e23f10aad297
| 3,334 |
py
|
Python
|
src/onegov/winterthur/collections/mission_report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/winterthur/collections/mission_report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/winterthur/collections/mission_report.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import sedate
from datetime import datetime, date
from onegov.core.collection import GenericCollection, Pagination
from onegov.org.models.file import ImageFileCollection
from onegov.winterthur.models import MissionReport
from onegov.winterthur.models import MissionReportFile
from onegov.winterthur.models import MissionReportVehicle
from sqlalchemy import and_, or_, desc, func
class MissionReportFileCollection(ImageFileCollection):
def __init__(self, session, report):
super().__init__(session)
self.type = 'mission-report-file'
self.report = report
@property
def id(self):
return self.report.id
@property
def association(self):
return MissionReportFile.registered_links['linked_mission_reports']
def add(self, *args, **kwargs):
file = super().add(*args, **kwargs)
self.report.pictures.append(file)
return file
def query(self):
query = super().query()
table = self.association.table
query = query.filter(MissionReportFile.id.in_(
self.session.query(table)
.with_entities(table.c.missionreportfile_id)
.filter(table.c.mission_reports_id == self.report.id)
.subquery()
))
return query
class MissionReportCollection(GenericCollection, Pagination):
def __init__(self, session, page=0, include_hidden=False, year=None):
self.session = session
self.page = page
self.include_hidden = include_hidden
self.year = year or date.today().year
@property
def model_class(self):
return MissionReport
def __eq__(self, other):
return self.page == other.page
def by_id(self, id):
# use the parent to get a report by id, so the date filter is
# not included, which is not desirable on this lookup
return super().query().filter(self.primary_key == id).first()
def query(self):
query = super().query()
if not self.include_hidden:
query = query.filter(or_(
MissionReport.meta['access'] == 'public',
MissionReport.meta['access'] == None
))
query = self.filter_by_year(query)
return query.order_by(desc(MissionReport.date))
def subset(self):
return self.query()
@property
def page_index(self):
return self.page
def page_by_index(self, index):
return self.__class__(self.session, page=index, year=self.year)
def filter_by_year(self, query):
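        # Builds half-open, timezone-aware year bounds; e.g. year=2020 keeps
        # 2020-01-01T00:00 <= date < 2021-01-01T00:00 in Europe/Zurich.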
timezone = 'Europe/Zurich'
start = sedate.replace_timezone(datetime(self.year, 1, 1), timezone)
end = sedate.replace_timezone(datetime(self.year + 1, 1, 1), timezone)
return query.filter(and_(
start <= MissionReport.date, MissionReport.date < end
))
def mission_count(self):
""" The mission count, including hidden missions. """
query = self.filter_by_year(super().query())
return query.with_entities(
func.sum(MissionReport.mission_count)).scalar()
class MissionReportVehicleCollection(GenericCollection):
@property
def model_class(self):
return MissionReportVehicle
def query(self):
return super().query().order_by(MissionReportVehicle.name)
| 28.991304 | 78 | 0.653269 |
e9446dd64157de23d9e03272a7526e470c5228be
| 7,620 |
py
|
Python
|
Packs/Carbon_Black_Enterprise_Response/Scripts/CBLiveGetFile_V2/CBLiveGetFile_V2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Carbon_Black_Enterprise_Response/Scripts/CBLiveGetFile_V2/CBLiveGetFile_V2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Carbon_Black_Enterprise_Response/Scripts/CBLiveGetFile_V2/CBLiveGetFile_V2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
from contextlib import contextmanager
from typing import Tuple, Union
'''Globals'''
ERROR_SENSOR = -1
ERROR_SESSION = -1
''' STANDALONE FUNCTION '''
def search_sensor_id(endpoint: str) -> int:
""" Retrieve list of connected sensors from:
Integration: VMware Carbon Black EDR (Live Response API).
Command: cb-list-sensors.
Args:
endpoint: Endpoint name - hostname/IP
Returns:
        int: sensor id if found, else ERROR_SENSOR (-1).
"""
sensor_id = ERROR_SENSOR
# Execute command and extract sensors
output = demisto.executeCommand("cb-list-sensors", {})
sensors = dict_safe_get(output, [0, 'EntryContext', 'CbResponse.Sensors(val.CbSensorID==obj.CbSensorID)'],
default_return_value=[], return_type=list) # type: ignore
# Search for sensor with endpoint or ip
for sensor in sensors:
is_same_ipaddress = endpoint in dict_safe_get(sensor, ["IPAddress", "IPAddresses"],
default_return_value=[], return_type=list)
is_same_endpoint = sensor.get("Hostname") == endpoint
if is_same_endpoint or is_same_ipaddress:
sensor_id = sensor.get("CbSensorID", ERROR_SENSOR)
break
return sensor_id
def search_active_session(sensor_id: int) -> int:
""" Search if exists current active session to sensor (It exists will use this session).
Args:
sensor_id: Sensor id to search session for.
Returns:
        int: Existing active session id, or ERROR_SESSION (-1) if none exists.
"""
output = demisto.executeCommand("cb-list-sessions", {'sensor': sensor_id, 'status': 'active'})
session_id = dict_safe_get(output, [0, 'EntryContext', 'CbLiveResponse.Sessions(val.CbSessionID==obj.CbSessionID)',
0, 'CbSessionID'], ERROR_SESSION, int)
return session_id
def create_active_session(sensor_id: int, timeout: str) -> int:
""" Create active session to sensor.
Args:
sensor_id: Sensor to create new session for.
timeout: Session timeout.
Returns:
        int: New active session id, or ERROR_SESSION (-1) if the session could not be created.
"""
session_id = ERROR_SESSION
for trial in range(3):
try:
output = demisto.executeCommand("cb-session-create-and-wait", {'sensor': sensor_id, 'command-timeout': timeout})
raw_response = json.loads(dict_safe_get(output, [0, 'Contents']))
session_id = dict_safe_get(raw_response, ["id"], ERROR_SESSION)
break
except json.JSONDecodeError:
# Session could be failing due to Carbon Response bug, We retry to get session 3 times, Before failing.
if trial == 2:
raise Exception("Unable to parse entry context while creating session, try to raise timeout argument.")
return session_id
def close_session(session_id):
""" Close sensor session.
Args:
session_id: Session id to be closed
"""
demisto.executeCommand("cb-session-close", {'session': session_id})
@contextmanager
def open_session(endpoint: str, timeout: str):
""" Handler to Carbon Black sessions.
Enter:
1. Translate endpoint name to sensor id.
2. Search for current active session to sensor id.
3. If not exists -> Create new active session.
Args:
endpoint: Endpoint name to be handled.
timeout: Session timeout.
Yields:
int: active session id.
Raises:
        Exception: If the session was not successfully established.
"""
active_session = ERROR_SESSION
try:
# Get sensor id from endpoint name (IP/Hostname)
sensor_id = search_sensor_id(endpoint)
if sensor_id == ERROR_SENSOR:
raise Exception(f"Sensor with {endpoint} is not connected!")
# Get session to communicate with sensor.
active_session = search_active_session(sensor_id)
if active_session == ERROR_SESSION:
active_session = create_active_session(sensor_id, timeout)
        # Validate that the session was established successfully
if active_session == ERROR_SESSION:
raise Exception(f"Unable to establish active session to {endpoint}, sensor: {sensor_id}")
# Yield active session for communication.
yield active_session
except Exception as e:
raise Exception(f"Unable to establish session to endpoint {endpoint}.\nError:{e}")
finally:
close_session(active_session)
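# Illustrative usage of the context manager (the hostname and path are
# made-up examples):
#
#     with open_session('host-01', timeout='60') as session_id:
#         get_file_from_endpoint_path(session_id, 'C:\\temp\\a.txt')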
def get_file_from_endpoint_path(session_id: str, path: str) -> Union[dict, list]:
    """ Get a file from the session (endpoint/sensor).
    Args:
        session_id: Active session id.
        path: Path of file to be retrieved.
    Returns:
        dict/list: entry context.
    Raises:
        Exception: If the file can't be retrieved.
    """
try:
# Get file from enpoint
output = demisto.executeCommand("cb-get-file-from-endpoint", {'session': session_id, 'path': path})
entry_context = dict_safe_get(output, [0, 'EntryContext'])
# Output file to war-room as soon as possible, But removing human-readable so it will be a single summary in the end.
output[0]['HumanReadable'] = ""
demisto.results(output)
except Exception as e:
raise Exception(f"Session established but file can't retrieved from endpoint.\nError:{e}")
return entry_context
def cb_live_get_file(endpoint: str, path: str, timeout: str):
""" Download list of files from endpoint.
Args:
endpoint: Endpoint name to be handled.
path: List of file paths to download from endpoint.
timeout: Session timeout.
Returns:
list: collected entry contexts from command "cb-get-file-from-endpoint".
"""
entry_contexts = []
with open_session(endpoint, timeout) as active_session:
for single_path in argToList(path):
entry_context = get_file_from_endpoint_path(active_session, single_path)
entry_contexts.append(entry_context)
return entry_contexts
def build_table_dict(entry_contexts: List[dict]) -> List[dict]:
""" Create table from all retirieved entry context.
Args:
entry_contexts: List of entry contexts from command "cb-get-file-from-endpoint"
Returns:
list: filtered list with modified headers
"""
table = []
for ec in entry_contexts:
table_entry = {}
for file_ec in ec.values():
for key, value in file_ec.items():
if key == "FileID":
table_entry["File ID"] = value
elif key == "OperandObject":
table_entry["File path"] = value
table.append(table_entry)
return table
''' COMMAND FUNCTION '''
def cb_live_get_file_command(**kwargs) -> Tuple[str, dict, dict]:
entry_contexts = cb_live_get_file(**kwargs)
human_readable = tableToMarkdown(name=f"Files downloaded from endpoint {kwargs.get('endpoint')}",
t=build_table_dict(entry_contexts))
return human_readable, {}, {}
''' MAIN FUNCTION '''
def main():
try:
return_outputs(*cb_live_get_file_command(**demisto.args()))
except Exception as e:
return_error(f'Failed to execute CBLiveGetFile_v2. Error: {str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 32.288136 | 125 | 0.64895 |
3ae10265edf0fc5cefc9438100a7b04177bb505a
| 245 |
py
|
Python
|
src/hw_raid_3ware.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
src/hw_raid_3ware.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
src/hw_raid_3ware.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
#!/usr/bin/python
"""
3ware raid monitoring item script
Copyright (c) 2010-2011 Vladimir Rusinov <[email protected]>
Copyright (c) 2010 Murano Software [http://muranosoft.com]
"""
import ztc.hw
tw = ztc.hw.RAID_3Ware()
tw.get('status')
| 18.846154 | 66 | 0.730612 |
6e7ba1174b5e55b041392d08b91c01037d8502e0
| 10,692 |
py
|
Python
|
Utils/py/ActionSelection/experimental/QLearning/gamefield.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/ActionSelection/experimental/QLearning/gamefield.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/ActionSelection/experimental/QLearning/gamefield.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from tools import field_info as field
from Tkinter import *
import threading
import time
master = Tk()
# create functions to draw field + decisions made / q values ranging from green to red
# draw field + update field
square_size = 200 # relative to field
field_x = field.x_field_length + 200 # add a few pixels, for better devision
field_y = field.y_field_length + 200 # no consequences for the calculation since outside the gamefield
columns = field_x / square_size
rows = field_y / square_size
square_display_size = 10
board_x = columns * square_display_size
board_y = rows * square_display_size
actions = ["right" ,"up" ,"down", "left"]
field = {} # general field i.e. cells and arrows
specials = {} # goals, outlying area
def fill_specials():
for i in range(columns):
for j in range(rows):
# add own goal
if (3000 <= j * square_size < 4600) and (0 <= i * square_size < 800):
specials[(i, j)] = (-1., "red")
elif (3000 <= j * square_size < 4600) and (9800 <= i * square_size < 10600):
specials[(i, j)] = (1., "green")
elif j * square_size < 800 or j * square_size >= 6800 or i * square_size < 800 or i * square_size >= 9800:
specials[(i, j)] = (-0.7, "orange")
fill_specials()
agent_start_position = (30,30)
agent_position = agent_start_position
reset = False
score_min, score_max = -0.2, 0.2
score = 1
move_cost = -0.01
def move_cost_action():
    # move cost in relation to the action
pass
board = Canvas(master, width=board_x, height=board_y)
def create_world():
# create world elements
global square_display_size, rows, columns, field
for j in range(rows):
for i in range(columns):
field[(i,j)] = []
# create fields
# add special fields
if (i,j) in specials:
field[(i, j)].append(board.create_rectangle(i * square_display_size, j * square_display_size,
(i + 1) * square_display_size, (j + 1) * square_display_size, fill=specials[(i,j)][1], width=1))
# fill everything else white - denoted as neutral
else:
field[(i, j)].append(board.create_rectangle(i * square_display_size, j * square_display_size,
(i + 1) * square_display_size, (j + 1) * square_display_size, fill="white", width=1))
# add arrows in fields
field[(i,j)].append(board.create_line(i * square_display_size + 2, j * square_display_size + 0.5 * square_display_size,
(i + 1) * square_display_size - 2 , j * square_display_size + 0.5 * square_display_size,
fill="black"))
def update_arrow(coords, action):
(i, j) = coords
# update arrow(heads)
if action == "up":
board.coords(field[coords][1], i*square_display_size + 0.5*square_display_size, (j+1)*square_display_size,
i*square_display_size + 0.5*square_display_size, j*square_display_size)
board.itemconfigure(field[coords][1],arrow="last")
elif action == "down":
board.coords(field[coords][1], i*square_display_size + 0.5*square_display_size, j*square_display_size,
i*square_display_size + 0.5*square_display_size, (j+1)*square_display_size)
board.itemconfigure(field[coords][1],arrow="last")
elif action == "right":
board.coords(field[coords][1], i*square_display_size, j*square_display_size + 0.5*square_display_size,
(i+1)*square_display_size, j*square_display_size + 0.5*square_display_size)
board.itemconfigure(field[coords][1],arrow="last")
elif action == "left":
board.coords(field[coords][1], (i+1)*square_display_size, j*square_display_size + 0.5*square_display_size,
i*square_display_size, j*square_display_size + 0.5*square_display_size)
board.itemconfigure(field[coords][1],arrow="last")
else:
print "no arrow drawn for field: ", (i,j)
def set_cell_color(coords, val):
global score_min, score_max
cell = field[coords][0]
# color setting
green_dec = int(min(255, max(0, (val - score_min) * 255.0 / (score_max - score_min))))
green = hex(green_dec)[2:]
red = hex(255-green_dec)[2:]
    # zero-pad on the left so e.g. hex digit "5" becomes "05", not "50"
    if len(red) == 1:
        red = "0" + red
    if len(green) == 1:
        green = "0" + green
color = "#" + red + green + "00"
board.itemconfigure(cell, fill=color)
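# Example: val == score_max maps to "#00ff00" (pure green) and
# val == score_min maps to "#ff0000" (pure red); values in between blend.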
def convert((x,y)):
    # TODO: Not correct like this, since the kickoff point is the field center
    # converts a field position to a cell in the grid
i = int( x / square_size )
j = int( y / square_size )
return (i,j)
def move_agent(absolute=None, relative=None):
    global agent_position
    # set new coords for the agent
    if absolute is not None:
        agent_position = absolute
    if relative is not None:
        agent_position = (agent_position[0] + relative[0], agent_position[1] + relative[1])
# display agent at new place
board.coords(agent_display, agent_position[0] * square_display_size, agent_position[1] * square_display_size,
(agent_position[0] + 1) * square_display_size, (agent_position[1] + 1) * square_display_size)
def reset_agent():
global agent_position, score, agent_display, reset, agent_start_position
agent_position = agent_start_position
score = 1 # don't know yet what this is for, remains since it could be forgotten otherwise
reset = False
board.coords(agent_display, agent_position[0] * square_display_size, agent_position[1] * square_display_size,
(agent_position[0] + 1) * square_display_size, (agent_position[1] + 1) * square_display_size)
def check_infield(coords):
(i,j) = coords
if (800 <= j * square_size < 6800) and (800 <= i * square_size < 9800):
return True
else:
return False
# TODO: def check_special for special events like goal or out
def check_special(coords):
if coords in specials:
return True
else:
return False
"""
def render_grid():
global square_display_size, rows, columns
for i in range(rows):
for j in range(columns):
# own and opp goal as green and red fields
if (3000 <= j*square_size < 4600) and (0 <= i*square_size < 800):
board.create_rectangle(i * square_display_size, j * square_display_size, (i + 1) * square_display_size,
(j + 1) * square_display_size, fill="red", width=1)
elif (3000 <= j*square_size < 4600) and (9800 <= i*square_size < 10600):
board.create_rectangle(i * square_display_size, j * square_display_size, (i + 1) * square_display_size,
(j + 1) * square_display_size, fill="green", width=1)
# outer field in orange
elif j*square_size < 800 or j*square_size >= 6800 or i*square_size < 800 or i*square_size >= 9800:
board.create_rectangle(i * square_display_size, j * square_display_size, (i + 1) * square_display_size,
(j + 1) * square_display_size, fill="orange", width=1)
# innerfield white
else:
board.create_rectangle(i*square_display_size, j*square_display_size, (i+1)*square_display_size,
(j+1)*square_display_size, fill="white", width=1)
"""
"""
def draw_arrow(coords,action):
(i, j) = coords
if action == "up":
board.create_line(i*square_display_size + 0.5*square_display_size, (j+1)*square_display_size, i*square_display_size + 0.5*square_display_size, j*square_display_size, fill="black", arrow="last")
elif action == "down":
board.create_line(i*square_display_size + 0.5*square_display_size, j*square_display_size, i*square_display_size + 0.5*square_display_size, (j+1)*square_display_size, fill="black", arrow="last")
elif action == "right":
board.create_line(i*square_display_size, j*square_display_size + 0.5*square_display_size, (i+1)*square_display_size, j*square_display_size + 0.5*square_display_size, fill="black", arrow="last")
elif action == "left":
board.create_line((i+1)*square_display_size, j*square_display_size + 0.5*square_display_size, i*square_display_size, j*square_display_size + 0.5*square_display_size, fill="black", arrow="last")
else:
print "no arrow drawn for field: ", (i,j)
"""
"""
def create_arrows():
global arrows
for i in range(rows):
for j in range(columns):
if (800 <= j * square_size < 6800) and (800 <= i * square_size < 9800):
arrows[(i,j)] = board.create_line(i * square_display_size + 2, j * square_display_size + 0.5 * square_display_size,
(i + 1) * square_display_size - 2 , j * square_display_size + 0.5 * square_display_size,
fill="black")
"""
# TODO: add try_move function
def try_move(relative_coords):
global agent_position, reset, score, specials
    if reset:
        reset_agent()  # used in special occasions e.g. force stop or enemies / opponents
(dx ,dy) = relative_coords
if agent_position[0] + dx < agent_start_position[0]-1: # don't allow to go behind starting position
reset = True
score -= 1.
print "don't go back!"
return
if check_special((agent_position[0] + dx, agent_position[1] + dy)):
special_field = specials[(agent_position[0] + dx, agent_position[1] + dy)]
score += move_cost
score += special_field[0]
if score > 1:
print "GOAL!"
else:
print "next time..."
print "Final score: ", score
reset = True
return
elif check_infield((agent_position[0] + dx, agent_position[1] + dy)):
score += move_cost
move_agent(relative=(dx, dy))
else:
print "unkown situation \n resetting agent..."
reset = True
return
if score < -40.:
reset = True
#print "score: ", score
create_world()
board.grid(row=0, column=0)
agent_display = board.create_oval(agent_position[0]*square_display_size,agent_position[1]*square_display_size,
(agent_position[0]+1)*square_display_size,(agent_position[1]+1)*square_display_size,fill="orange", width=1, tag="agent")
def test_run():
global agent_position
# run routine
while True:
try_move((1,1))
time.sleep(0.05)
def start_field():
master.mainloop()
"""
t = threading.Thread(target=test_run)
t.daemon = True
t.start()
master.mainloop()
"""
| 36.868966 | 202 | 0.62832 |
6e94312e6d8456f053a4ac2ed87ef108234d1a39
| 1,137 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch02_math/solutions/ex08_combinatorics_cubic.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch02_math/solutions/ex08_combinatorics_cubic.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch02_math/solutions/ex08_combinatorics_cubic.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import math
def solve_cubic_simple():
for a in range(1, 100):
for b in range(1, 100):
for c in range(1, 100):
for d in range(1, 100):
if a * a + b * b == c * c + d * d:
print("a =", a, " / b =", b, " / c =", c, " / d =", d)
def solve_cubic():
for a in range(1, 100):
for b in range(1, 100):
for c in range(1, 100):
value = a * a + b * b - c * c
if value > 0:
d = int(math.sqrt(value))
if d < 100 and a * a + b * b == c * c + d * d:
print("a =", a, " / b =", b, " / c =", c, " / d =", d)
def solve_cubic_shorter():
return [(a,b,c,d) for a in range(1, 100) for b in range(1, 100)
for c in range(1, 100) for d in range(1, 100)
if a * a + b * b == c * c + d * d]
def main():
solve_cubic_simple()
solve_cubic()
print(solve_cubic_shorter())
if __name__ == "__main__":
main()
| 25.840909 | 78 | 0.431838 |
28789533552f9226dee5e6ef9f83705b84e57305
| 5,573 |
py
|
Python
|
2017/11/elex17-results-va-20171107/custom_filters.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2017/11/elex17-results-va-20171107/custom_filters.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2017/11/elex17-results-va-20171107/custom_filters.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
"""
Custom filters for shaping election results for use by front-end code.
All of this comes from the google-spreadsheet-election-loader
project:
https://github.com/nprapps/google-spreadsheet-election-loader/blob/master/loader.py
"""
# In Python 3.0, 5 / 2 will return 2.5 and 5 // 2 will return 2.
# We want this to work in Python 2 as well.
from __future__ import division
import functools
import json
def compose(*functions):
"""
Compose multiple functions into a single function
Create a function that calls a series of functions, passing the output of
one function as the input of the next.
See https://mathieularose.com/function-composition-in-python/
"""
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
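# Example: compose(str.strip, str.lower)(" ABC ") lowercases first, then
# strips, returning "abc" (the right-most function is applied first).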
convert_to_int = compose(int, float)
def convert_to_dict(sheet):
"""Convert the copytext Sheet object into a JSON serializeable value"""
dict_keys = [
'name',
'party',
'winner',
'vote_count',
'precincts_reporting',
'precincts_total',
'updated_date',
'updated_time',
'aggregate_as_other',
]
converters = {
'vote_count': convert_to_int,
'precincts_reporting': convert_to_int,
'precincts_total': convert_to_int,
}
output = []
for row in sheet:
output_row = {}
for k in dict_keys:
val = row[k]
try:
val = converters[k](val)
except KeyError:
# There's no converter for this particular key
pass
output_row[k] = val
output.append(output_row)
return output
def calculate_vote_pct(results):
total_votes = 0
output_results = []
for row in results:
total_votes += row['vote_count']
for row in results:
updated_row = dict(**row)
if total_votes == 0:
updated_row['vote_pct'] = 0
else:
updated_row['vote_pct'] = (row['vote_count'] / total_votes) * 100
output_results.append(updated_row)
return output_results
def calculate_pct_precincts_reporting(precincts_reporting, precincts_total):
if precincts_total == 0:
return 0
precincts_pct_raw = (precincts_reporting / precincts_total) * 100
precincts_pct = round(precincts_pct_raw, 0)
if precincts_pct_raw > 0 and precincts_pct == 0:
precincts_pct = '<1'
elif precincts_pct_raw < 100 and precincts_pct == 100:
precincts_pct = '>99'
else:
precincts_pct = round(precincts_pct_raw, 1)
return precincts_pct
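# Examples: (1, 250) -> '<1', (249, 250) -> '>99', (143, 250) -> 57.2.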
def calculate_precinct_pct(results):
output_results = []
precincts_total = results[0]['precincts_total']
precincts_reporting = results[0]['precincts_reporting']
precincts_pct = calculate_pct_precincts_reporting(precincts_reporting, precincts_total)
# Return
for row in results:
updated_row = dict(**row)
updated_row['precincts_pct'] = precincts_pct
output_results.append(updated_row)
return output_results
def aggregate_other(results):
"""
Aggregate third-party candidate votes into an "Other" candidate.
Some third-party candidates receive so few votes that editors requested
that their votes be rolled up into an "Other" pseudo-candidate.
"""
output_rows = []
# Declare other row as dict
other_row = {
'name': 'Other',
'party': '',
'winner': '',
'vote_count': 0,
'vote_pct': 0,
'precincts_reporting': 0,
'precincts_total': 0,
'precincts_pct': 0,
'updated_date': '',
'updated_time': '',
}
# Track whether we actually have third party candidates that we're
# rolling up into an "Other" candidate.
aggregated_other = False
for row in results:
if row['aggregate_as_other'] != 'yes':
# If this is just a normal candidate, just pass this row straight
# to the output and move on to the next row.
output_rows.append(row)
continue
# Candidate is a third-party candidate that we want to aggregate into an
# "Other" pseudo-candidate
aggregated_other = True
other_row['vote_count'] += row['vote_count']
other_row['vote_pct'] += row.get('vote_pct', 0)
        # If we haven't set the common fields yet, set them (updated_date
        # starts out as the empty string)
        if other_row['updated_date'] == '':
other_row['precincts_reporting'] = int(row['precincts_reporting'])
other_row['precincts_total'] = int(row['precincts_total'])
other_row['updated_date'] = str(row['updated_date'])
# TODO: Check if updated time is same or after, then update
other_row['updated_time'] = str(row['updated_time'])
# If we encountered any third-party candidates that were aggregated into
# an "Other" pseudo-candidate, append the pseudo-candidate row to the output
# data.
if aggregated_other:
output_rows.append(other_row)
return output_rows
# Create a single transformation function that runs a series of other
# transformation functions. Note that the functions are called in reverse
# order.
_transform_results = compose(
aggregate_other,
calculate_vote_pct,
calculate_precinct_pct,
convert_to_dict,
)
# HACK: Lambda functions don't work as a filter, apparently
def transform_results(val):
return _transform_results(val)
def jsonify(val):
return json.dumps(val)
FILTERS = [
transform_results,
jsonify,
]
| 29.026042 | 91 | 0.649202 |
c2e49174047ddd3601739cc66a9b0b3cedcdaac4
| 371 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/agriculture/doctype/crop/test_crop.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/agriculture/doctype/crop/test_crop.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/agriculture/doctype/crop/test_crop.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_dependencies = ["Fertilizer"]
class TestCrop(unittest.TestCase):
def test_crop_period(self):
basil = frappe.get_doc('Crop', 'Basil from seed')
self.assertEqual(basil.period, 15)
| 26.5 | 68 | 0.754717 |
6602daa1722c3e7d7d8c85a350c6861f178286f6
| 3,065 |
py
|
Python
|
code/game_board.py
|
obernbef/vwa
|
7d8edaefe6a6e38235b696112fde11ce0eba2a86
|
[
"MIT"
] | null | null | null |
code/game_board.py
|
obernbef/vwa
|
7d8edaefe6a6e38235b696112fde11ce0eba2a86
|
[
"MIT"
] | null | null | null |
code/game_board.py
|
obernbef/vwa
|
7d8edaefe6a6e38235b696112fde11ce0eba2a86
|
[
"MIT"
] | 1 |
2021-11-10T10:36:01.000Z
|
2021-11-10T10:36:01.000Z
|
"""
This module handles the game board.
TODO
----
* Add `win_test()` function to `Board` class
"""
# %%
from __future__ import annotations
__author__ = "Florian Obernberger"
__all__ = ['Board', 'Position', 'Stone']
from typing import List, NamedTuple, Tuple
from enum import IntEnum
import numpy as np
from numba import njit
class Position(NamedTuple):
"""A position consisting of a `x` and a `y` Value.
Inherits from `NamedTuple`.
Parameters
----------
x : int
Posiition in `x` direction.
y : int
Position in `y` direction.
"""
x: int
y: int = 0
P = Position # Set alias for `Position`
class Stone(IntEnum):
empty: int = 0
p_red: int = 1
p_yel: int = 2
_mark: int = 9
WIDTH: int = 7
HEIGHT: int = 6
class Board:
"""Creates a connect four gameboard and handles wintesting.
"""
__slots__ = ['board']
def __init__(self) -> None:
self.board: List[List[int]] = np.zeros((HEIGHT, WIDTH), dtype=np.int8)
def drop(self) -> None:
"""Automatically drops every Stone in the board to the lowest
possible position.
"""
for _ in range(HEIGHT + 1):
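            # Repeat enough passes for a stone to fall the full board height;
            # each pass moves every floating stone down by one row.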
for row_number in range(HEIGHT - 1, -1, -1):
for stone_pos in range(len(self.board[row_number])):
try:
if self.board[row_number][stone_pos] != Stone.empty \
and self.board[row_number + 1][stone_pos] == Stone.empty:
self.board[row_number + 1][stone_pos] = self.board[
row_number][stone_pos]
self.board[row_number][stone_pos] = Stone.empty
except IndexError:
... # who cares?
    def win_test(self) -> Tuple[bool, Stone]:
...
def __eq__(self, other: Board):
return self.board == other.board
def __setitem__(self, key, value):
self.board[key] = value
def __getitem__(self, key) -> Stone:
return self.board[key]
def set(self, pos: Position, stone: Stone, static: bool = False) -> None:
"""Place a Stone on the board at the given Position.
Parameters
----------
pos
The Position of the Stone.
stone
The type of Stone to be placed.
static : optional
If True calls `self.drop`, by default False.
"""
self.board[pos.y][pos.x] = np.intp(stone)
if not static:
self.drop()
def get(self, pos: Position) -> Stone:
"""Get the type of Stone at the given Position.
Parameters
----------
pos
The Position of the Stone.
Returns
-------
Stone
The type of Stone.
"""
return Stone(self.board[pos.y][pos.x])
def __repr__(self) -> str:
return str(self.board)
# %%
if __name__ == "__main__":
board = Board()
board.set(P(1), Stone.p_red)
print(board)
# %%
| 23.945313 | 85 | 0.54062 |
05ab2309b718229dae4c0d1f17407cce4f2eda5a
| 716 |
py
|
Python
|
01.DataStructure/Stack&Queue/NY/B1158_NY.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1 |
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
01.DataStructure/Stack&Queue/NY/B1158_NY.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2 |
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
01.DataStructure/Stack&Queue/NY/B1158_NY.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
# Baekjoon - 1158 (Josephus problem)
import sys
num, k = map(int, sys.stdin.readline().split())
circleQ = []
arr = []
removeIndex = k - 1
# insert 1..n into the circular queue
for n in range(num) :
circleQ.append(n+1)
while len(circleQ) > 0 :
if len(circleQ) < k :
removeIndex = k % len(circleQ) - 1
arr.append(circleQ.pop(removeIndex))
for i in range (0, removeIndex, 1) :
circleQ.append(circleQ.pop(0))
else :
arr.append(circleQ.pop(removeIndex))
for i in range (0, removeIndex, 1) :
circleQ.append(circleQ.pop(0))
result = ""
for i in range (len(arr)) :
if i == len(arr) - 1:
result += str(arr[i])
else :
result += str(arr[i]) + ", "
print("<%s>" %result)
| 20.457143 | 47 | 0.548883 |
3c53a51fb85e7ea9eee1667f168aaac337b056ac
| 5,345 |
py
|
Python
|
TimeSeries/not_finished/vae_time_series/time_series_gen.py
|
stanton119/data-analysis
|
b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9
|
[
"Apache-2.0"
] | null | null | null |
TimeSeries/not_finished/vae_time_series/time_series_gen.py
|
stanton119/data-analysis
|
b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9
|
[
"Apache-2.0"
] | 1 |
2021-02-11T23:44:52.000Z
|
2021-02-11T23:44:52.000Z
|
TimeSeries/not_finished/vae_time_series/time_series_gen.py
|
stanton119/data-analysis
|
b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9
|
[
"Apache-2.0"
] | 1 |
2021-12-16T01:02:23.000Z
|
2021-12-16T01:02:23.000Z
|
# %%
"""
Fit a VAE to time series data
Generate novel time series from the latent space
"""
# %%
import numpy as np
import pandas as pd
from time import process_time, time
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
# %% Load stocks data
df = pd.read_csv("aapl_us.csv")
df.dtypes
df["Date"] = pd.to_datetime(df["Date"])
df = df.set_index("Date")
df["Open"].plot()
# %%
def gen_time_series(df: pd.Series, ts_len: int = 500) -> np.array:
start_idx = np.random.randint(low=0, high=len(df) - ts_len)
return df.iloc[start_idx : start_idx + ts_len].to_numpy()
# %% Generate loads of time series
x_ts = []
n = 100
for ii in range(n):
x_ts.append(gen_time_series(df["Open"]))
x_ts = np.stack(x_ts).transpose()
# %% Scale
ts = x_ts[:, np.random.randint(n)]
def scale_ts(ts: np.ndarray) -> np.ndarray:
    return (ts - ts.mean()) / ts.std()
plt.plot(ts)
plt.plot(scale_ts(ts))
for ii in range(n):
x_ts[:,ii] = scale_ts(x_ts[:,ii])
x_ts.mean(axis=0)
x_ts.std(axis=0)
# %% Train/test split
test_frac = 0.2
x_ts_train = x_ts[:, :-int(n * test_frac)]
x_ts_test = x_ts[:, -int(n * test_frac):]
# %%
"""
%load_ext autoreload
%autoreload 2
"""
import vae as models
vae, encoder, decoder = models.create_vae_model_2()
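# NOTE: train_dataset / eval_dataset are assumed to be prepared elsewhere
# (e.g. tf.data pipelines built from x_ts_train / x_ts_test); they are not
# defined in this script.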
_ = vae.fit(train_dataset,
epochs=15,
validation_data=eval_dataset)
# %% Create dummy data
if 1:
df = pd.DataFrame()
df["ds"] = pd.date_range(start="2010-01-01", end="2025-01-01", freq="1D")
df["y"] = np.random.rand(df.shape[0], 1)
df["x1"] = np.random.rand(df.shape[0], 1)
else:
data_location = (
"https://raw.githubusercontent.com/ourownstory/neural_prophet/master/"
)
df = pd.read_csv(data_location + "example_data/wp_log_peyton_manning.csv")
df_train = df.iloc[: int(df.shape[0] / 2)]
df_test = df.iloc[int(df.shape[0] / 2) :]
# %% AR data
def gen_time_series():
df = pd.DataFrame()
df["ds"] = pd.date_range(start="2010-01-01", end="2025-01-01", freq="1D")
freq = np.random.rand() * 20
df["x1"] = np.sin(
        np.linspace(start=0, stop=freq * 2 * np.pi, num=df.shape[0])
)
freq = np.random.rand() * 20
df["x2"] = np.sin(
        np.linspace(start=0, stop=freq * 2 * np.pi, num=df.shape[0])
)
df["y"] = df["x1"] + df["x2"]
return df
# %% Generate loads of time series
x_ts = []
for ii in range(100):
df = gen_time_series()
x_ts.append(df["y"].to_numpy())
x_ts = np.stack(x_ts).transpose()
# plt.plot(x_ts[:,3])
# %%
# %% Fit VAE
# %% Explore latent space
# %% Fit CNN
df.set_index("ds")["y"].plot()
df_train = df.iloc[: int(df.shape[0] / 2)]
df_test = df.iloc[int(df.shape[0] / 2) :]
# %%
# univariate data preparation
from numpy import array
# split a univariate sequence into samples
def split_sequence(sequence, n_steps, n_forecast=1):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence) - 1 - n_forecast:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix + n_forecast - 1]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
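# Worked example: split_sequence([1, 2, 3, 4, 5], n_steps=2, n_forecast=1)
# yields X = [[1, 2], [2, 3]] and y = [3, 4].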
# define input sequence
# choose a number of time steps
n_steps = 3
n_features = 1
n_forecast = 50
# split into samples
X_train, y_train = split_sequence(df_train["y"].to_numpy(), n_steps, n_forecast)
def shape_tf_input(X):
return X.reshape((X.shape[0], X.shape[1], n_features))
# %%
import tensorflow as tf
# define model
model = tf.keras.Sequential()
model.add(
tf.keras.layers.Conv1D(
filters=64,
kernel_size=2,
activation="relu",
input_shape=(n_steps, n_features),
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(20, activation="relu"))
model.add(tf.keras.layers.Dense(1))
model.compile(
loss="mse",
optimizer=tf.optimizers.Adam(),
# optimizer=tf.optimizers.SGD(learning_rate=0.01),
# metrics=["mae"],
)
# model.compile(optimizer='adam', loss='mse')
model.summary()
# %%
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(
patience=20, monitor="val_loss", restore_best_weights=True
)
# fit model
history = model.fit(
shape_tf_input(X_train),
y_train,
validation_split=0.25,
batch_size=64,
epochs=1000,
verbose=True,
callbacks=[early_stopping],
)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
# %% Predict
y_train_hat = model.predict(shape_tf_input(X_train), verbose=1)
df_results_train = pd.DataFrame(index=df_train["ds"])
df_results_train["y"] = df_train["y"].to_numpy()
df_results_train["y_hat"] = np.nan
# positional iloc assignment avoids pandas' chained-indexing pitfall
df_results_train.iloc[
    n_steps + n_forecast :, df_results_train.columns.get_loc("y_hat")
] = y_train_hat.flatten()
df_results_train
df_results_train.plot()
X_test, y_test = split_sequence(df_test["y"].to_numpy(), n_steps, n_forecast)
y_test_hat = model.predict(shape_tf_input(X_test), verbose=1)
df_results_test = pd.DataFrame(index=df_test["ds"])
df_results_test["y"] = df_test["y"].to_numpy()
df_results_test["y_hat"] = np.nan
# same positional-assignment fix as on the training frame above
df_results_test.iloc[
    n_steps + n_forecast :, df_results_test.columns.get_loc("y_hat")
] = y_test_hat.flatten()
df_results_test
df_results_test.plot()
# %%
| 23.038793 | 80 | 0.656314 |
594219c5331ba33121655c55bd601f632ff2b6ae
| 5,892 |
py
|
Python
|
oldp/apps/search/views.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | 3 |
2020-06-27T08:19:35.000Z
|
2020-12-27T17:46:02.000Z
|
oldp/apps/search/views.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
oldp/apps/search/views.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
import logging
from math import ceil
from urllib.parse import urlparse
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import MultiMatch
from oldp.apps.cases.models import Case
from oldp.apps.laws.models import Law
from oldp.apps.search.models import SearchableContent, SearchQuery
logger = logging.getLogger(__name__)
class Searcher(object):
PER_PAGE = 10
MAX_PAGES = 10
es = None
es_index = None
es_use_ssl = False
es_urls = None
query = None
response = None
took = 0 # Milliseconds for query execution
hits = None
total_hits = 0 # Total number of found documents
page = 1 # Current page
doc_type = None # Filter for document type (None = all types)
models = {
'law': Law,
'case': Case,
}
def __init__(self, query: str):
if query is None:
query = ''
self.query = query.lower()
def parse_es_url(self):
es_url = settings.ES_URL
self.es_urls = []
for url in str(es_url).split(','):
parsed = urlparse(url)
if parsed.scheme == 'https':
self.es_use_ssl = True
self.es_index = parsed.path.replace('/', '')
self.es_urls.append(parsed.scheme + '://' + parsed.netloc)
if self.es_index is None:
raise ValueError('Cannot parse ES url from: {}'.format(es_url))
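    # Illustrative example with a hypothetical URL (added): ES_URL =
    # "https://es.example.com:9243/oldp" parses to es_use_ssl=True,
    # es_index="oldp", es_urls=["https://es.example.com:9243"].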
def get_es_urls(self):
if self.es_urls is None:
self.parse_es_url()
return self.es_urls
def get_es_index(self):
if self.es_index is None:
self.parse_es_url()
return self.es_index
def get_es(self):
if self.es is None:
            # parse ES_URL first; otherwise es_urls is still None and the
            # client silently falls back to localhost:9200
            self.es = Elasticsearch(self.get_es_urls(), use_ssl=self.es_use_ssl, verify_certs=False)
return self.es
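    # Note (added): verify_certs=False disables TLS certificate validation;
    # handy for self-signed dev clusters, unsafe in production.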
def set_page(self, page):
if page is None:
self.page = 1
return
try:
page = int(page)
except ValueError:
self.page = 1
return
page = max(1, page)
page = min(self.MAX_PAGES, page)
self.page = page
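    # Illustrative behaviour (added): set_page("3") -> page 3; set_page("0"),
    # set_page(None) and set_page("abc") -> page 1; set_page("999") is
    # clamped to MAX_PAGES.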
def set_doc_type(self, doc_type):
if doc_type is not None and doc_type in self.models:
self.doc_type = doc_type
logger.debug('Doc type set to: %s' % doc_type)
def search(self):
# Define search query
q = MultiMatch(query=self.query, fields=['title', 'text', 'slug^4', 'book_slug', 'book_code^2'], type='cross_fields')
logger.debug('ES query: %s' % q)
s = Search(using=self.get_es(), index=self.get_es_index(), doc_type=self.doc_type)\
.highlight('text', fragment_size=50)\
.query(q)
# .query("match", title=self.query)
# .filter('term', author=author)
# s.aggs.bucket('per_tag', 'terms', field='tags') \
# .metric('max_lines', 'max', field='lines')
# Pagination
page_from = (self.page - 1) * self.PER_PAGE
page_to = page_from + self.PER_PAGE
s = s[page_from:page_to]
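        # (Added note) elasticsearch_dsl turns this slice into the ES
        # from/size parameters, e.g. page 2 with PER_PAGE=10 requests
        # hits 10-19.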
self.response = s.execute()
self.took = self.response._d_['took']
self.total_hits = self.response._d_['hits']['total']
# Save query to DB if hits exist
if self.total_hits > 0:
query_obj, created = SearchQuery.objects.get_or_create(query=self.query)
if not created:
query_obj.counter += 1
query_obj.save()
def get_pages(self) -> int:
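        # e.g. 95 hits with PER_PAGE=10 -> ceil(9.5) = 10 pages; larger
        # result sets are capped at MAX_PAGES (illustrative arithmetic,
        # added here).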
return min(self.MAX_PAGES, ceil(self.total_hits / self.PER_PAGE))
def get_page_range(self):
return range(1, self.get_pages() + 1)
def get_results(self):
# Handle aggregations (for filters)
# for tag in response.aggregations.per_tag.buckets:
# print(tag.key, tag.max_lines.value)
# logger.debug('ES response length: %s' % len(self.response))
self.results = []
# Handle search hits
for hit in self.response:
source = hit._d_
if hit.meta.doc_type in self.models:
item_model = self.models[hit.meta.doc_type]
item = item_model().from_hit(source) # type: SearchableContent
item.set_search_score(hit.meta.score)
# logger.debug('Search hit (score=%f): %s' % (item.get_search_score(), item.get_title()))
if hasattr(hit.meta, 'highlight'):
# logger.debug('-- Highlight: %s' % hit.meta.highlight['text'])
item.set_search_snippet(' ... '.join(hit.meta.highlight['text']))
else:
# Can happen if match is not in 'text' field
# logger.debug('NO highlight')
pass
self.results.append(item)
else:
raise ValueError('Search returned unsupported document type: %s' % hit.meta.doc_type)
# logger.debug('Search results: %s' % self.results)
return self.results
def search_view(request):
query = request.GET.get('query') or request.GET.get('q')
search = Searcher(query)
search.set_doc_type(request.GET.get('type'))
search.set_page(request.GET.get('page'))
try:
search.search()
items = search.get_results()
except ConnectionError:
items = []
messages.error(request, _('Search service is currently not available.'))
return render(request, 'search/index.html', {
'items': items,
'title': _('Search') + ' %s' % search.query,
'searchQuery': search.query,
'search': search
})
| 29.757576 | 125 | 0.59131 |