Dataset columns (dtype and observed range; ⌀ marks nullable columns):

| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40..40 |
| size | int64 | 6 .. 782k |
| ext | string | 7 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4..237 |
| max_stars_repo_name | string | length 6..72 |
| max_stars_repo_head_hexsha | string | length 40..40 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 .. 53k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24..24 ⌀ |
| max_issues_repo_path | string | length 4..184 |
| max_issues_repo_name | string | length 6..72 |
| max_issues_repo_head_hexsha | string | length 40..40 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 .. 27.1k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24..24 ⌀ |
| max_forks_repo_path | string | length 4..184 |
| max_forks_repo_name | string | length 6..72 |
| max_forks_repo_head_hexsha | string | length 40..40 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 .. 12.2k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24..24 ⌀ |
| content | string | length 6 .. 782k |
| avg_line_length | float64 | 2.75 .. 664k |
| max_line_length | int64 | 5 .. 782k |
| alphanum_fraction | float64 | 0 .. 1 |
hexsha: 3caa8c2fe47cf10713e3e66ed8e6985477f4487d | size: 206 | ext: py | lang: Python
max_stars: Licence 1/I11/TP3/tp3_2_3.py | axelcoezard/licence | 1ed409c4572dea080169171beb7e8571159ba071 | ["MIT"] | count: 8 | 2020-11-26T20:45:12.000Z .. 2021-11-29T15:46:22.000Z
max_issues: Licence 1/I11/TP3/tp3_2_3.py | axelcoezard/licence | 1ed409c4572dea080169171beb7e8571159ba071 | ["MIT"] | count: null | null .. null
max_forks: Licence 1/I11/TP3/tp3_2_3.py | axelcoezard/licence | 1ed409c4572dea080169171beb7e8571159ba071 | ["MIT"] | count: 6 | 2020-10-23T15:29:24.000Z .. 2021-05-05T19:10:45.000Z
content:
from pocketnoobj import *
characters = input("saisir une chaine de caracteres :")
contains = 0
for char in characters:
    if char == " ":
        contains += 1
print("la chaine contient", contains, "fois ' '")
avg_line_length: 20.6 | max_line_length: 55 | alphanum_fraction: 0.674757
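For comparison, a minimal sketch of the same space count using the built-in `str.count`, with an assumed sample string standing in for the original `input()` call:

```python
characters = "une chaine de caracteres"  # stand-in for the input() call
contains = characters.count(" ")         # counts non-overlapping occurrences of " "
print("la chaine contient", contains, "fois ' '")
```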
hexsha: 3cdba889124713972e3312d0ad3587989af46e37 | size: 225 | ext: py | lang: Python
max_stars: Kapitel 1/Kugel.py | mqng/HS-CO_WS2122_IF_FProg | b52470e0991bdbaeba22b154c4029e6cded51fd7 | ["MIT"] | count: null | null .. null
max_issues: Kapitel 1/Kugel.py | mqng/HS-CO_WS2122_IF_FProg | b52470e0991bdbaeba22b154c4029e6cded51fd7 | ["MIT"] | count: null | null .. null
max_forks: Kapitel 1/Kugel.py | mqng/HS-CO_WS2122_IF_FProg | b52470e0991bdbaeba22b154c4029e6cded51fd7 | ["MIT"] | count: null | null .. null
content:
import math
r = float(input("Geben Sie den Radius einer Kugel ein: "))
v = (4/3) * math.pi * math.pow(r,3)
o = math.pi * 4 * math.pow(r,2)
print("Radius: {} | Volumen: {:.3f} | Oberflächenvolumen: {:.3f} ".format(r, v, o))
avg_line_length: 28.125 | max_line_length: 83 | alphanum_fraction: 0.6
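The script prints the volume V = (4/3)*pi*r^3 and the surface area A = 4*pi*r^2 (the original label "Oberflächenvolumen" meant the surface area). A quick numeric self-check under an assumed radius, using the fact that A equals dV/dr:

```python
import math

r = 2.0                           # assumed radius for the check
v = (4 / 3) * math.pi * r ** 3    # Volumen
a = 4 * math.pi * r ** 2          # Oberflaeche (surface area)
# The surface area is the derivative of the volume with respect to r.
h = 1e-6
dv_dr = ((4 / 3) * math.pi * (r + h) ** 3 - v) / h
assert math.isclose(dv_dr, a, rel_tol=1e-4)
```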
hexsha: 597df9fd9b80612e97c32372d924a5a116e45ba2 | size: 833 | ext: py | lang: Python
max_stars: ___Python/Jonas/Python/p11_uebungen/m01_excel.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | count: null | null .. null
max_issues: ___Python/Jonas/Python/p11_uebungen/m01_excel.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | count: null | null .. null
max_forks: ___Python/Jonas/Python/p11_uebungen/m01_excel.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | ["Apache-2.0"] | count: null | null .. null
content:
import pandas as pd
from datetime import date, datetime, time
import xlrd
from xlrd import open_workbook, cellname, XL_CELL_TEXT, xldate_as_tuple, xldate_as_datetime
file_location = r"O:\___Python\personen.xlsx"  # raw string avoids invalid escape sequences
book = open_workbook(file_location)
#print(book.nsheets)
#for sheet_index in range(book.nsheets):
# print(book.sheet_by_index(sheet_index))
sheet = book.sheet_by_index(0)
#print(book.sheet_names())
#print(sheet.ncols)
#print(sheet.nrows)
# for row_index in range(sheet.nrows):
# for col_index in range(sheet.ncols):
# print(sheet.cell(row_index,col_index).value)
cell = sheet.cell(0, 0)
for i in range(sheet.ncols-1):
    print(sheet.cell_value(1, i))
date_value = xldate_as_tuple(sheet.cell(1,2).value, book.datemode)
print(datetime(*date_value))
avg_line_length: 24.5 | max_line_length: 92 | alphanum_fraction: 0.720288
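A caveat for reusing this script today: xlrd 2.0 removed .xlsx support, so the code above needs xlrd<2.0 installed. A hedged modern equivalent with pandas (which reads .xlsx through openpyxl), assuming the same workbook layout as the original, with data in row 1 and a date in column 2:

```python
import pandas as pd

# Excel dates arrive as pandas Timestamps, so no xldate_as_tuple step is needed.
df = pd.read_excel(r"O:\___Python\personen.xlsx", sheet_name=0, header=None)
print(df.iloc[1, :-1].tolist())  # row 1, all but the last column
print(df.iloc[1, 2])             # the date cell, already parsed
```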
hexsha: 051f9560c8573bfec1acf813ddc345d417467807 | size: 487 | ext: py | lang: Python
max_stars: Packs/IntegrationsAndIncidentsHealthCheck/Scripts/IncidentsCheck_Widget_UnassignedFailingIncidents/IncidentsCheck_Widget_UnassignedFailingIncidents_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 799 | 2016-08-02T06:43:14.000Z .. 2022-03-31T11:10:11.000Z
max_issues: Packs/IntegrationsAndIncidentsHealthCheck/Scripts/IncidentsCheck_Widget_UnassignedFailingIncidents/IncidentsCheck_Widget_UnassignedFailingIncidents_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 9,317 | 2016-08-07T19:00:51.000Z .. 2022-03-31T21:56:04.000Z
max_forks: Packs/IntegrationsAndIncidentsHealthCheck/Scripts/IncidentsCheck_Widget_UnassignedFailingIncidents/IncidentsCheck_Widget_UnassignedFailingIncidents_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 1,297 | 2016-08-04T13:59:00.000Z .. 2022-03-31T23:43:06.000Z
content:
import pytest
import demistomock as demisto
from IncidentsCheck_Widget_UnassignedFailingIncidents import main
@pytest.mark.parametrize('list_, expected', [
    ([{'Contents': '7,4,1'}], 3),
    ([{'Contents': ''}], 0),
    ([{}], 0)
])
def test_script(mocker, list_, expected):
    mocker.patch.object(demisto, 'executeCommand', return_value=list_)
    mocker.patch.object(demisto, 'results')
    main()
    contents = demisto.results.call_args[0][0]
    assert contents == expected
avg_line_length: 25.631579 | max_line_length: 70 | alphanum_fraction: 0.685832
hexsha: 2f0387b293e47579b3930645936d42460a0c022c | size: 1,892 | ext: py | lang: Python
max_stars: kernel/slovaki/migrations/0005_auto_20180605_2110.py | sageteam/behpack | 3b8afb81dc7da70807308af4c8a2d2ab92b1a133 | ["MIT"] | count: null | null .. null
max_issues: kernel/slovaki/migrations/0005_auto_20180605_2110.py | sageteam/behpack | 3b8afb81dc7da70807308af4c8a2d2ab92b1a133 | ["MIT"] | count: null | null .. null
max_forks: kernel/slovaki/migrations/0005_auto_20180605_2110.py | sageteam/behpack | 3b8afb81dc7da70807308af4c8a2d2ab92b1a133 | ["MIT"] | count: null | null .. null
content:
# Generated by Django 2.0.6 on 2018-06-05 21:10
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('slovaki', '0004_auto_20180605_2105'),
    ]

    operations = [
        migrations.AlterField(
            model_name='slovakiawardscontent',
            name='sku',
            field=models.CharField(default='lw1CGr1PCDE', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
        migrations.AlterField(
            model_name='slovakinews',
            name='sku',
            field=models.CharField(default='iBsM1oTURdY', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
        migrations.AlterField(
            model_name='slovakinewsmovies',
            name='sku',
            field=models.CharField(default='2ltt2vqJJe4', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
        migrations.AlterField(
            model_name='slovakinewsphotos',
            name='sku',
            field=models.CharField(default='Bmlhxg-x_50', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
        migrations.AlterField(
            model_name='slovakiproduct',
            name='sku',
            field=models.CharField(default='kFZdhKfRcko', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
        migrations.AlterField(
            model_name='slovakiproductmovies',
            name='sku',
            field=models.CharField(default='EhWg7JiBUxk', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
        migrations.AlterField(
            model_name='slovakiproductphotos',
            name='sku',
            field=models.CharField(default='p_kWMN38ID8', help_text='Unique code for refrence to supervisors', max_length=15),
        ),
    ]
avg_line_length: 38.612245 | max_line_length: 126 | alphanum_fraction: 0.624207
hexsha: b5d7d9e0ed5498e6c806bfd4075818dd22f1fcff | size: 1,568 | ext: py | lang: Python
max_stars: python/pickle/text_based_rpg_engine/game_engine.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | count: null | null .. null
max_issues: python/pickle/text_based_rpg_engine/game_engine.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | count: null | null .. null
max_forks: python/pickle/text_based_rpg_engine/game_engine.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | count: null | null .. null
content:
import sys, time, random
import pickle
def slow_type(t):
    typing_speed = 75  # wpm
    for l in t:
        sys.stdout.write(l)
        sys.stdout.flush()
        time.sleep(random.random() * 10.0 / typing_speed)
    print("")

def get_input(valid_input: list):
    while True:
        user_entered = input()
        if user_entered not in valid_input:
            print("Invalid input. Please use one of the following inputs:\n")
            print(valid_input)
            user_entered = None
        else:
            return user_entered

def display_page_text(lines: list):
    for line in lines:
        slow_type(line)
        # Make the user press enter to see the next line
        get_input([""])

def get_response(options: list):
    for index, option in enumerate(options):
        print(f"{index}. {option[0]}")
    valid_inputs = [str(num) for num in range(len(options))]
    option_index = int(get_input(valid_inputs))
    return options[option_index][1]

def story_flow(story: dict):
    curr_page = 1
    while curr_page != None:
        page = story.get(curr_page, None)
        if page == None:
            curr_page = None
            break
        display_page_text(page["Text"])
        if len(page["Options"]) == 0:
            curr_page = None
            break
        curr_page = get_response(page["Options"])

if __name__ == "__main__":
    story = {}
    with open("chapter1.ch", "rb") as chapter:
        story = pickle.load(chapter)
    story_flow(story)
avg_line_length: 23.058824 | max_line_length: 78 | alphanum_fraction: 0.567602
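The engine above expects a pickled dict keyed by page number; the page shape ({"Text": [...], "Options": [(label, next_page), ...]}) is inferred from story_flow. A hypothetical two-page chapter file to run it against:

```python
import pickle

# Hypothetical story in the shape story_flow consumes; an option's second
# element is the next page id, and None (or an empty Options list) ends the game.
story = {
    1: {"Text": ["You wake up in a dark cave."],
        "Options": [("Go left", 2), ("Give up", None)]},
    2: {"Text": ["You find the exit. The end."],
        "Options": []},
}

with open("chapter1.ch", "wb") as chapter:
    pickle.dump(story, chapter)
```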
hexsha: 952bdbad783529d6b66d3ff4fa272a3a6c52be6c | size: 1,067 | ext: py | lang: Python
max_stars: Packs/PaloAltoNetworks_IoT/Scripts/iot_alert_post_processing/iot_alert_post_processing_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 799 | 2016-08-02T06:43:14.000Z .. 2022-03-31T11:10:11.000Z
max_issues: Packs/PaloAltoNetworks_IoT/Scripts/iot_alert_post_processing/iot_alert_post_processing_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 9,317 | 2016-08-07T19:00:51.000Z .. 2022-03-31T21:56:04.000Z
max_forks: Packs/PaloAltoNetworks_IoT/Scripts/iot_alert_post_processing/iot_alert_post_processing_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | count: 1,297 | 2016-08-04T13:59:00.000Z .. 2022-03-31T23:43:06.000Z
content:
import demistomock as demisto
import iot_alert_post_processing
from iot_alert_post_processing import iot_resolve_alert
_INCIDENT = {
    'id': 28862,
    'labels': [
        {
            'type': 'id',
            'value': '5ed08587fe03d30d000016e8'
        }
    ]
}

def test_iot_resolve_alert(monkeypatch, mocker):
    """
    Scenario: resolving alert in post processing after closing the XSOAR incident
    Given
    - An alert incident
    When
    - Resolving an alert in IoT Security Portal
    Then
    - Ensure the correct parameters to the iot-security-resolve-alert command
    """
    monkeypatch.setattr(iot_alert_post_processing, "_get_incident", lambda: _INCIDENT)
    execute_mocker = mocker.patch.object(demisto, 'executeCommand')
    expected_command = 'iot-security-resolve-alert'
    expected_args = {
        'id': '5ed08587fe03d30d000016e8',
        'reason': 'resolved by XSOAR incident 28862',
        'reason_type': 'No Action Needed'
    }
    iot_resolve_alert()
    execute_mocker.assert_called_with(expected_command, expected_args)
avg_line_length: 27.358974 | max_line_length: 86 | alphanum_fraction: 0.689784
hexsha: 1f1162ab030c193c3f03ee665b2ca57140c35709 | size: 876 | ext: py | lang: Python
max_stars: Python/Courses/Object-Oriented-Programming.Python-Engineer/01-Class-and-Instance.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | ["Unlicense"] | count: null | null .. null
max_issues: Python/Courses/Object-Oriented-Programming.Python-Engineer/01-Class-and-Instance.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | ["Unlicense"] | count: null | null .. null
max_forks: Python/Courses/Object-Oriented-Programming.Python-Engineer/01-Class-and-Instance.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | ["Unlicense"] | count: null | null .. null
content:
# position, name, age, level, salary
se1 = ["Software Engineer", "Max", 20, "Junior", 5000]
se2 = ["Software Engineer", "Lisa", 25, "Senior", 7000]
# class
class SoftwareEngineer:

    # class attributes
    alias = "Keyboard Magician"

    def __init__(self, name, age, level, salary):
        # instance attributes
        self.name = name
        self.age = age
        self.level = level
        self.salary = salary
# instance
se1 = SoftwareEngineer("Max", 20, "Junior", 5000)
print(se1.name, se1.age)
se2 = SoftwareEngineer("Lisa", 25, "Senior", 7000)
print(se2.alias)
print(se1.alias)
print(SoftwareEngineer.alias)
SoftwareEngineer.alias = "Something Else"
print(se2.alias)
print(se1.alias)
print(SoftwareEngineer.alias)
# Recap
# create a class (blueprint)
# create an instance (object)
# instance attributes: defined in the __init__(self) method
# class attributes: defined in the class body, shared by all instances
avg_line_length: 21.9 | max_line_length: 55 | alphanum_fraction: 0.68379
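A short sketch of the shadowing rule the recap points at: assigning through an instance creates an instance attribute that hides the class attribute, while assigning through the class rebinds it for every instance that has not shadowed it (which is why all three prints changed after SoftwareEngineer.alias was reassigned above):

```python
class Example:
    alias = "Keyboard Magician"   # class attribute, shared

a = Example()
b = Example()

a.alias = "Rockstar"              # creates an instance attribute on `a` only
Example.alias = "Something Else"  # rebinds the class attribute

print(a.alias)  # Rockstar        (instance attribute shadows the class one)
print(b.alias)  # Something Else  (no instance attribute, falls back to class)
```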
hexsha: 1f51d98d09c8a7571a84fdd27efadfc23c78e464 | size: 5,808 | ext: py | lang: Python
max_stars: Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladenextboot_lib.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | ["MIT"] | count: 5 | 2019-11-11T07:57:26.000Z .. 2022-03-28T08:26:53.000Z
max_issues: Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladenextboot_lib.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | ["MIT"] | count: 3 | 2019-09-05T21:47:07.000Z .. 2019-09-17T18:10:45.000Z
max_forks: Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladenextboot_lib.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | ["MIT"] | count: 11 | 2019-07-20T00:16:32.000Z .. 2022-01-11T14:17:48.000Z
content:
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ipmicmd_library import *
class boot_type:
    none = "0x00"
    pxe = "0x04"
    disk = "0x08"
    bios = "0x18"
    floppy = "0x3C"

class persistent:
    legacy_persistent = "0xC0"
    legacy_nonpersistent = "0x80"
    efi_persistent = "0xE0"
    efi_nonpersistent = "0xA0"

def get_nextboot(serverid):
    try:
        interface = get_ipmi_interface(serverid)
        ipmi_cmd = 'chassis bootparam get 5'  # IPMI command to get next boot details
        cmdinterface = interface + ' ' + ipmi_cmd

        get_next_boot = parse_get_nextboot_result(cmdinterface, "getnextboot")

        if get_next_boot is None or not get_next_boot:  # Check empty or none
            return set_failure_dict("Empty data for getnextboot", completion_code.failure)
    except Exception, e:
        #Log_Error("Failed Exception:",e)
        return set_failure_dict("get_nextboot: Exception {0}".format(e), completion_code.failure)
    return get_next_boot

def set_nextboot(serverid, boottype, mode=0, ispersist=0):
    try:
        persistent_val = ''
        if mode == 0 and ispersist == 0:
            persistent_val = persistent.legacy_nonpersistent
        elif mode == 0 and ispersist == 1:
            persistent_val = persistent.legacy_persistent
        elif mode == 1 and ispersist == 0:
            persistent_val = persistent.efi_nonpersistent
        elif mode == 1 and ispersist == 1:
            persistent_val = persistent.efi_persistent

        boot_value = ''
        if boottype == "none":
            boot_value = boot_type.none
        elif boottype == "pxe":
            boot_value = boot_type.pxe
        elif boottype == "disk":
            boot_value = boot_type.disk
        elif boottype == "bios":
            boot_value = boot_type.bios
        elif boottype == "floppy":
            boot_value = boot_type.floppy

        interface = get_ipmi_interface(serverid, ["raw", "0x00", "0x08", "0x05", persistent_val, boot_value, "0x00", "0x00", "0x00"])
        set_next_boot = parse_set_nextboot_result(interface, "setnextboot")

        if set_next_boot is None or not set_next_boot:  # Check empty or none
            return set_failure_dict("Empty data for setnextboot", completion_code.failure)
    except Exception, e:
        #Log_Error("Failed Exception:",e)
        return set_failure_dict("set_nextboot: Exception {0}".format(e), completion_code.failure)
    return set_next_boot

# Parse setnextboot output
def parse_set_nextboot_result(interface, command):
    try:
        output = call_ipmi(interface, command)

        if "ErrorCode" in output:
            return output

        setnextboot = {}
        if output['status_code'] == 0:
            setnextboot[completion_code.cc_key] = completion_code.success
            return setnextboot
        else:
            error_data = output['stderr']
            return set_failure_dict(error_data.split(":")[-1].strip())
    except Exception, e:
        #log.exception("serverNextBoot Command: %s Exception error is: %s ", command, e)
        return set_failure_dict(("SetNextBoot: parse_set_nextboot_result() Exception:", e), completion_code.failure)

# Parse getnextboot output
def parse_get_nextboot_result(interface, command):
    try:
        output = call_ipmi(interface, command)

        if "ErrorCode" in output:
            return output

        getnextboot = {}
        if output['status_code'] == 0:
            getnextbootopt = output['stdout'].split('\n')
            for bootval in getnextbootopt:
                if "Boot Device Selector" in bootval:
                    boot = bootval.split(":")[-1]
                    getnextboot["Next boot is"] = boot
                    getnextboot["BootSourceOverrideTarget"] = boot
                elif "BIOS PC Compatible (legacy) boot" in bootval:
                    getnextboot["BootSourceOverrideMode"] = "Legacy"
                elif "BIOS EFI boot" in bootval:
                    getnextboot["BootSourceOverrideMode"] = "UEFI"
                elif "Options apply to only next boot" in bootval:
                    getnextboot["BootSourceOverrideEnabled"] = "Once"
                elif "Options apply to all future boots" in bootval:
                    getnextboot["BootSourceOverrideEnabled"] = "Persistent"
            getnextboot[completion_code.cc_key] = completion_code.success
            return getnextboot
        else:
            error_data = output['stderr']
            getnextboot[completion_code.cc_key] = completion_code.failure
            getnextboot[completion_code.desc] = error_data.split(":")[-1]
            return getnextboot
    except Exception, e:
        #log.exception("serverNextBoot Command: %s Exception error is: %s ", command, e)
        #print "serverNextBoot: Failed to parse getnextboot output. Exception: ", e
        return set_failure_dict(("GetNextBoot: parse_get_nextboot_result() Exception ", e), completion_code.failure)
avg_line_length: 40.615385 | max_line_length: 139 | alphanum_fraction: 0.579201
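A hedged decoding of the magic constants above, assuming the standard IPMI boot-flags layout (data byte 1: bit 7 = flags valid, bit 6 = persistent, bit 5 = EFI; data byte 2 carries the boot device selector in bits 5:2), which matches every value in the boot_type and persistent classes:

```python
VALID, PERSISTENT, EFI = 0x80, 0x40, 0x20

assert VALID == 0x80                          # legacy_nonpersistent
assert VALID | PERSISTENT == 0xC0             # legacy_persistent
assert VALID | EFI == 0xA0                    # efi_nonpersistent
assert VALID | PERSISTENT | EFI == 0xE0       # efi_persistent

# Device selector lives in bits 5:2 of the second data byte:
assert 0b0001 << 2 == 0x04                    # pxe
assert 0b0010 << 2 == 0x08                    # disk (default HDD)
assert 0b0110 << 2 == 0x18                    # bios setup
assert 0b1111 << 2 == 0x3C                    # floppy / removable media
```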
hexsha: c85ffe1675c528eb5ba2ba2c060da3a824b300a1 | size: 1,225 | ext: py | lang: Python
max_stars: crypto/rotoRSA/src/source.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | ["MIT"] | count: null | null .. null
max_issues: crypto/rotoRSA/src/source.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | ["MIT"] | count: null | null .. null
max_forks: crypto/rotoRSA/src/source.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | ["MIT"] | count: null | null .. null
content:
from sympy import poly, symbols
from collections import deque
import Crypto.Random.random as random
from Crypto.Util.number import getPrime, bytes_to_long, long_to_bytes
def build_poly(coeffs):
    x = symbols('x')
    return poly(sum(coeff * x ** i for i, coeff in enumerate(coeffs)))

def encrypt_msg(msg, poly, e, N):
    return long_to_bytes(pow(poly(msg), e, N)).hex()

p = getPrime(256)
q = getPrime(256)
N = p * q
e = 11

flag = bytes_to_long(open("/challenge/flag.txt", "rb").read())
coeffs = deque([random.randint(0, 128) for _ in range(16)])

welcome_message = f"""
Welcome to RotorSA!
With our state of the art encryption system, you have two options:
1. Encrypt a message
2. Get the encrypted flag
The current public key is
n = {N}
e = {e}
"""

print(welcome_message)
while True:
    padding = build_poly(coeffs)
    choice = int(input('What is your choice? '))
    if choice == 1:
        message = int(input('What is your message? '), 16)
        encrypted = encrypt_msg(message, padding, e, N)
        print(f'The encrypted message is {encrypted}')
    elif choice == 2:
        encrypted_flag = encrypt_msg(flag, padding, e, N)
        print(f'The encrypted flag is {encrypted_flag}')
    coeffs.rotate(1)
avg_line_length: 27.222222 | max_line_length: 70 | alphanum_fraction: 0.679184
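To see what the rotating padding does, a small offline sketch of build_poly plus deque.rotate: rotating the coefficient deque by one shifts each coefficient to the next power of x, which is the only thing that changes between encryptions.

```python
from collections import deque
from sympy import poly, symbols

x = symbols('x')
coeffs = deque([3, 1, 2])

p1 = poly(sum(c * x ** i for i, c in enumerate(coeffs)))
coeffs.rotate(1)  # [3, 1, 2] -> [2, 3, 1]
p2 = poly(sum(c * x ** i for i, c in enumerate(coeffs)))

print(p1)  # Poly(2*x**2 + x + 3, x, domain='ZZ')
print(p2)  # Poly(x**2 + 3*x + 2, x, domain='ZZ')
```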
hexsha: c863807714070600292378e7ba7e81fb0de972c8 | size: 4,188 | ext: py | lang: Python
max_stars: tool_discovery/tool_discoverer/tool_discoverer/html_temp.py | FAIRplus/WP3_FAIR_tooling | 3c6470c4f5fc3d686b4571711bb7ed6f849a9622 | ["Apache-2.0"] | count: null | null .. null
max_issues: tool_discovery/tool_discoverer/tool_discoverer/html_temp.py | FAIRplus/WP3_FAIR_tooling | 3c6470c4f5fc3d686b4571711bb7ed6f849a9622 | ["Apache-2.0"] | count: 13 | 2021-06-01T10:07:02.000Z .. 2022-03-24T12:16:26.000Z
max_forks: tool_discovery/tool_discoverer/tool_discoverer/html_temp.py | FAIRplus/WP3_FAIR_tooling | 3c6470c4f5fc3d686b4571711bb7ed6f849a9622 | ["Apache-2.0"] | count: null | null .. null
content:
template = '''
<head>
<link rel="stylesheet" href="https://cdn.datatables.net/1.10.21/css/jquery.dataTables.min.css">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.21/css/jquery.dataTables.min.css" />
<script src="https://code.jquery.com/jquery-3.5.1.js"></script>
<script src="https://cdn.datatables.net/1.10.21/js/jquery.dataTables.min.js"></script>
<style>
#title {{
font-size: 2em;
font-family: sans-serif;
text-align: center;
}}
#subtitle {{
font-size: 1em;
font-family: sans-serif;
text-align: center;
}}
.parameters {{
font-size: 1em;
font-family: sans-serif;
width: 80%;
margin-left: auto;
margin-right: auto;
}}
.styled-table{{
width: 95%;
margin-left: auto;
margin-right: auto;
font-size: 0.9em;
font-family: sans-serif;
white-space: pre-line;
}}
.dataTable {{
border-collapse: collapse;
font-size: 0.9em;
font-family: sans-serif;
width: 80%;
box-shadow: 0 0 20px rgba(0, 0, 0, 0.15);
}}
.dataTable thead tr {{
background-color: #156094;
color: #ffffff;
text-align: left;
padding-top: 5%;
}}
table.dataTable td {{
min-width: 50px;
box-sizing: border-box;
}}
table.dataTable thead tr th input{{
min-width: 50px;
max-width: 200px;
width:100%;
box-sizing: border-box;
}}
.dataTable tbody tr {{
border-bottom: thin solid #dddddd;
}}
.dataTable tbody tr:nth-of-type(even) {{
background-color: #f3f3f3;
}}
.dataTables_filter{{
margin-top: 1%;
margin-bottom: 1%;
}}
.dataTables_length{{
margin-top: 1%;
margin-bottom: 1%;
}}
.short{{
max-height: 150px;
overflow: hidden;
}}
.link{{
max-width: 200px;
font-size: 0.9em;
padding-left: 2%;
padding-top: 2%;
word-wrap: break-word;
}}
.citations{{
max-width: 50px;
text-align: center;
}}
.name {{
min-width: 120px;
}}
.desc {{
text-align: left;
padding-left: 1.2%;
padding-top: 1.2%;
padding-right:1.2%;
padding-bottom: 1.2%;
font-size: 0.9em;
min-width: 200px;
}}
.type {{
min-width: 180px;
vertical-align: text-top;
}}
.topic {{
min-width: 200px;
vertical-align: text-top;
}}
.operation {{
min-width: 200px;
vertical-align: text-top;
}}
.formats {{
min-width: 200px;
vertical-align: text-top;
}}
</style>
</head>
<body>
<h1 id=title>Tools discovery results</h1>
<h2 id=subtitle>{name}</h2>
<div class=parameters>
<h3> Search parameters: </h3>
<ul>
<li><span style="font-weight: bold">Name</span>: {name}</li>
<li><span style="font-weight: bold">Keywords</span>: {keywords}</li>
</ul>
</div>
<div class=parameters>
<h3> Results: </h3>
<div class="styled-table">
{content}
</div>
</div>
</body>
<script>
$('#my-table').dataTable( {{
"order": [],
}} );
var userSelection = document.getElementsByClassName('click_expand');
for(var i = 0; i < userSelection.length; i++) {{
(function(index) {{
userSelection[index].addEventListener("click", function() {{
console.log("Clicked index: ");
$(this).closest("tr").find('div').toggleClass("short");
}})
}})(i);
}}
$('#my-table thead th').each(function() {{
var title = $('#my-table thead th').eq($(this).index()).text();
$(this).html(title+'</br><input type="text" placeholder="Search"' + title + '/>');
$(this).css('text-align', 'left');
}});
var r = $('#my-table thead th');
r.find('input').each(function(){{
$(this).css('margin', 4);
$(this).css('padding', 4);
}});
// DataTable
var table = $('#my-table').DataTable();
// Apply the search
table.columns().eq(0).each(function(colIdx) {{
$('input', table.column(colIdx).header()).on('keyup change', function() {{
table
.column(colIdx)
.search(this.value)
.draw();
}});
$('input', table.column(colIdx).header()).on('click', function(e) {{
e.stopPropagation();
}});
}});
</script>
'''
avg_line_length: 19.47907 | max_line_length: 117 | alphanum_fraction: 0.56256
hexsha: 23d33235975b56858506cc00736575e9ec781ea3 | size: 4,612 | ext: py | lang: Python
max_stars: official/cv/ADNet/src/utils/get_wrapper_utils.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 77 | 2021-10-15T08:32:37.000Z .. 2022-03-30T13:09:11.000Z
max_issues: official/cv/ADNet/src/utils/get_wrapper_utils.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 3 | 2021-10-30T14:44:57.000Z .. 2022-02-14T06:57:57.000Z
max_forks: official/cv/ADNet/src/utils/get_wrapper_utils.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 24 | 2021-10-15T08:32:45.000Z .. 2022-03-24T18:45:20.000Z
content:
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from mindspore import dataset as ds
from mindspore.communication.management import get_rank, get_group_size
def get_dataLoader(source, opts, args, column_names):
    if args.distributed:
        rank_id = get_rank()
        rank_size = get_group_size()
    if isinstance(source, tuple):
        data_loaders_pos = []
        data_loaders_neg = []
        datasets_pos, datasets_neg = source
        if not args.distributed:
            for dataset_pos in datasets_pos:
                dataset = ds.GeneratorDataset(source=dataset_pos,
                                              column_names=column_names,
                                              num_parallel_workers=args.num_workers, shuffle=True)
                dataset = dataset.batch(batch_size=opts['minibatch_size'])
                data_loaders_pos.append(dataset)
            for dataset_neg in datasets_neg:
                dataset = ds.GeneratorDataset(source=dataset_neg,
                                              column_names=column_names,
                                              num_parallel_workers=args.num_workers, shuffle=True)
                dataset = dataset.batch(batch_size=opts['minibatch_size'])
                data_loaders_neg.append(dataset)
        else:
            for dataset_pos in datasets_pos:
                dataset = ds.GeneratorDataset(source=dataset_pos,
                                              column_names=column_names,
                                              num_parallel_workers=args.num_workers, shuffle=True, num_shards=rank_size,
                                              shard_id=rank_id)
                dataset = dataset.batch(batch_size=opts['minibatch_size'])
                data_loaders_pos.append(dataset)
            for dataset_neg in datasets_neg:
                dataset = ds.GeneratorDataset(source=dataset_neg,
                                              column_names=["im", "bbox", "action_label", "score_label", "vid_idx"],
                                              num_parallel_workers=args.num_workers, shuffle=True, num_shards=rank_size,
                                              shard_id=rank_id)
                dataset = dataset.batch(batch_size=opts['minibatch_size'])
                data_loaders_neg.append(dataset)
        return data_loaders_pos, data_loaders_neg
    if args.distributed:
        dataset = ds.GeneratorDataset(source=source,
                                      column_names=column_names,
                                      num_parallel_workers=args.num_workers, shuffle=True, num_shards=rank_size,
                                      shard_id=rank_id)
        dataset = dataset.batch(batch_size=opts['minibatch_size'])
    else:
        dataset = ds.GeneratorDataset(source=source,
                                      column_names=column_names,
                                      num_parallel_workers=args.num_workers, shuffle=True)
        dataset = dataset.batch(batch_size=opts['minibatch_size'])
    return dataset

def get_groundtruth(gt_path):
    if not os.path.exists(gt_path):
        bboxes = []
        t_sum = 0
        return bboxes, t_sum

    # parse gt
    gtFile = open(gt_path, 'r')
    gt = gtFile.read().split('\n')
    for i in range(len(gt)):
        if gt[i] == '' or gt[i] is None:
            continue
        if ',' in gt[i]:
            separator = ','
        elif '\t' in gt[i]:
            separator = '\t'
        elif ' ' in gt[i]:
            separator = ' '
        else:
            separator = ','
        gt[i] = gt[i].split(separator)
        gt[i] = list(map(float, gt[i]))
    gtFile.close()

    if len(gt[0]) >= 6:
        for gtidx in range(len(gt)):
            if gt[gtidx] == "":
                continue
            x = gt[gtidx][0:len(gt[gtidx]):2]
            y = gt[gtidx][1:len(gt[gtidx]):2]
            gt[gtidx] = [min(x), min(y), max(x) - min(x), max(y) - min(y)]
    return gt
avg_line_length: 43.509434 | max_line_length: 120 | alphanum_fraction: 0.549219
hexsha: b593fa8d45128963d9cc83e5fce6c881c62ee955 | size: 1,977 | ext: py | lang: Python
max_stars: assets/support/faq/docbook-xsl/extensions/xsltproc/python/xslt.py | brnnnfx/openoffice-org | 8b1023c59fd9c7a58d108bb0b01dd1f8884c9163 | ["Apache-2.0"] | count: 5 | 2019-10-14T23:00:48.000Z .. 2021-11-06T22:21:06.000Z
max_issues: assets/support/faq/docbook-xsl/extensions/xsltproc/python/xslt.py | brnnnfx/openoffice-org | 8b1023c59fd9c7a58d108bb0b01dd1f8884c9163 | ["Apache-2.0"] | count: 31 | 2020-11-14T09:27:16.000Z .. 2022-03-08T17:09:15.000Z
max_forks: assets/support/faq/docbook-xsl/extensions/xsltproc/python/xslt.py | brnnnfx/openoffice-org | 8b1023c59fd9c7a58d108bb0b01dd1f8884c9163 | ["Apache-2.0"] | count: 15 | 2020-11-10T17:04:25.000Z .. 2022-01-31T12:12:48.000Z
content:
#!/usr/bin/python -u
# THIS IS ALPHA CODE AND MAY NOT WORK CORRECTLY!
import sys
import string
import libxml2
import libxslt
from docbook import adjustColumnWidths
# Check the arguments
usage = "Usage: %s xmlfile.xml xslfile.xsl [outputfile] [param1=val [param2=val]...]" % sys.argv[0]
xmlfile = None
xslfile = None
outfile = "-"
params = {}
try:
    xmlfile = sys.argv[1]
    xslfile = sys.argv[2]
except IndexError:
    print usage
    sys.exit(1)

try:
    outfile = sys.argv[3]
    if string.find(outfile, "=") > 0:
        name, value = string.split(outfile, "=", 2)
        params[name] = value
    count = 4
    while (sys.argv[count]):
        try:
            name, value = string.split(sys.argv[count], "=", 2)
            if params.has_key(name):
                print "Warning: '%s' re-specified; replacing value" % name
            params[name] = value
        except ValueError:
            print "Invalid parameter specification: '" + sys.argv[count] + "'"
            print usage
            sys.exit(1)
        count = count + 1
except IndexError:
    pass
# ======================================================================
# Memory debug specific
# libxml2.debugMemory(1)
# Setup environment
libxml2.lineNumbersDefault(1)
libxml2.substituteEntitiesDefault(1)
libxslt.registerExtModuleFunction("adjustColumnWidths",
                                  "http://nwalsh.com/xslt/ext/xsltproc/python/Table",
                                  adjustColumnWidths)
# Initialize and run
styledoc = libxml2.parseFile(xslfile)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(xmlfile)
result = style.applyStylesheet(doc, params)
# Save the result
style.saveResultToFilename(outfile, result, 0)
# Free things up
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
# Memory debug specific
#libxslt.cleanup()
#if libxml2.debugMemory(1) != 0:
# print "Memory leak %d bytes" % (libxml2.debugMemory(1))
# libxml2.dumpMemory()
avg_line_length: 25.346154 | max_line_length: 99 | alphanum_fraction: 0.621649
hexsha: 8d9733bda06d92a0de48eb3114bc5ef322a25396 | size: 7,553 | ext: py | lang: Python
max_stars: Hackathons_19_20/Brainwaves 2019/complaint status tracking/comp track 75677.py | aviggithub/Hackathons_20 | a1bbc63cff3bd71982017749a0cc162d684e452b | ["Apache-2.0"] | count: null | null .. null
max_issues: Hackathons_19_20/Brainwaves 2019/complaint status tracking/comp track 75677.py | aviggithub/Hackathons_20 | a1bbc63cff3bd71982017749a0cc162d684e452b | ["Apache-2.0"] | count: null | null .. null
max_forks: Hackathons_19_20/Brainwaves 2019/complaint status tracking/comp track 75677.py | aviggithub/Hackathons_20 | a1bbc63cff3bd71982017749a0cc162d684e452b | ["Apache-2.0"] | count: null | null .. null
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 13:16:36 2019
@author: avi
"""
import pandas as pd #data manipulation and data anlysis (read files)
import numpy as np #transform data into format that model can understand
import sklearn #helps to create machine learning model
import matplotlib.pyplot as plt #visualize data
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
import re
from textblob import Word
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
stop = stopwords.words('english')
stop_f=stopwords.words('spanish')
stop_s=stopwords.words('french')
from nltk.stem.snowball import SnowballStemmer
sbFr = SnowballStemmer('french')
sbEsp = SnowballStemmer('spanish')
sbEng = SnowballStemmer('english')
######################################Pre Process procedure
com_int_cat = {0: 'Closed with explanation',
               1: 'Closed with non-monetary relief',
               2: 'Closed',
               3: 'Closed with monetary relief',
               4: 'Untimely response'}

input_int_cat = {'Closed with explanation': 0,
                 'Closed with non-monetary relief': 1,
                 'Closed': 2,
                 'Closed with monetary relief': 3,
                 'Untimely response': 4}
##############################Data load
file_read_train='D:\\Python project\\brainwaves 2019\\complaint status tracking\\train.csv'
file_read_test='D:\\Python project\\brainwaves 2019\\complaint status tracking\\test.csv'
df_train = pd.read_csv(file_read_train)
df_test = pd.read_csv(file_read_test)
#####bkp copy
df_train_cp=df_train
df_test_cp=df_test
#### Combining text
df_train["processed_summary"]=df_train["Consumer-complaint-summary"].fillna('') +" "+ df_train['Transaction-Type'].fillna('No')+" "+ df_train['Consumer-disputes'].fillna('') + " " +df_train['Company-response'].fillna('')+" "+df_train['Complaint-reason'].fillna('')
df_test["processed_summary"]=df_test["Consumer-complaint-summary"].fillna('') +" "+ df_test['Transaction-Type'].fillna('No')+" "+ df_test['Consumer-disputes'].fillna('') + " " +df_test['Company-response'].fillna('')+" "+df_test['Complaint-reason'].fillna('')
##### Cleaning data
### A: Train
df_train['Complaint_Status'] = df_train['Complaint-Status'].map(input_int_cat)
df_train['processed_summary']= df_train['processed_summary'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
df_train['processed_summary']= df_train['processed_summary'].apply(lambda x: " ".join(x for x in x.split() if x not in stop_f))
df_train['processed_summary']= df_train['processed_summary'].apply(lambda x: " ".join(x for x in x.split() if x not in stop_s))
#df_train['processed_summary']= df_train['processed_summary'].apply(lambda x: " ".join([x for x in x.split() if len(x)>2]))
#### B: Test
#df_test['processed_summary']= df_test['processed_summary'].str.lower()
df_test['processed_summary']= df_test['processed_summary'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
df_test['processed_summary']= df_test['processed_summary'].apply(lambda x: " ".join(x for x in x.split() if x not in stop_f))
df_test['processed_summary']= df_test['processed_summary'].apply(lambda x: " ".join(x for x in x.split() if x not in stop_s))
#df_test['processed_summary']= df_test['processed_summary'].apply(lambda x: " ".join([x for x in x.split() if len(x)>2]))
#stemming
# A - Train Stemming
df_train['processed_summary'] = df_train['processed_summary'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
df_train['processed_summary'] = df_train['processed_summary'].apply(lambda x: " ".join([sbFr.stem(item) for item in x.split()]))
df_train['processed_summary'] = df_train['processed_summary'].apply(lambda x: " ".join([sbEsp.stem(item) for item in x.split()]))
df_train['processed_summary'] = df_train['processed_summary'].str.replace(r"[^a-zA-Z]+", " ")
df_train['processed_summary']=df_train['processed_summary'].str.replace("XXXX"," ")
df_train['processed_summary']=df_train['processed_summary'].str.replace("XX"," ")
df_train['processed_summary']=df_train['processed_summary'].str.replace(",","")
df_train['processed_summary']=df_train['processed_summary'].str.replace(".","")
df_train['processed_summary']=df_train['processed_summary'].str.replace(" "," ")
df_train['processed_summary']=df_train['processed_summary'].str.replace(" "," ")
df_train['processed_summary']=df_train['processed_summary'].str.replace(" "," ")
df_train['processed_summary']=df_train['processed_summary'].str.replace(" "," ")
# B - Test Stemming
df_test['processed_summary'] = df_test['processed_summary'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
df_test['processed_summary'] = df_test['processed_summary'].apply(lambda x: " ".join([sbFr.stem(item) for item in x.split()]))
df_test['processed_summary'] = df_test['processed_summary'].apply(lambda x: " ".join([sbEsp.stem(item) for item in x.split()]))
df_test['processed_summary'] = df_test['processed_summary'].str.replace(r"[^a-zA-Z]+", " ")
df_test['processed_summary']=df_test['processed_summary'].str.replace("XXXX"," ")
df_test['processed_summary']=df_test['processed_summary'].str.replace("XX"," ")
df_test['processed_summary']=df_test['processed_summary'].str.replace(",","")
df_test['processed_summary']=df_test['processed_summary'].str.replace(".","")
df_test['processed_summary']=df_test['processed_summary'].str.replace(" "," ")
df_test['processed_summary']=df_test['processed_summary'].str.replace(" "," ")
df_test['processed_summary']=df_test['processed_summary'].str.replace(" "," ")
df_test['processed_summary']=df_test['processed_summary'].str.replace(" "," ")
##############################################################
### Split data -train into : test-train
"""
df_train_d, df_test_d = train_test_split(df_train,test_size=0.1,random_state=0)
df_test_d['Complaint_Status_acc']=df_test_d['Complaint-Status']
"""
######################## test / train assigment
df_train_d, df_test_d = train_test_split(df_train,test_size=0.0,random_state=0)
df_test_d=df_test
##################################################Model
### Model execution
fr_text_clf=Pipeline([('vect',TfidfVectorizer(norm='l2',ngram_range=(1,5),use_idf=True,smooth_idf=True, sublinear_tf=False)),('clf',LinearSVC(C=1.0,tol=0.1))])
#svc=LinearSVC(C=2.3,tol=0.1)
model = fr_text_clf.fit(df_train_d['processed_summary'],df_train_d['Complaint_Status'])
df_test_d['new_complain_status']=model.predict(df_test_d["processed_summary"])
df_test_d['Complaint-Status'] = df_test_d['new_complain_status'].map(com_int_cat)
df_test_d['Complaint-Status'].value_counts()
#######################################Accuracy Check
"""
df_test_d['Complaint-Status'].value_counts()
from sklearn.metrics import confusion_matrix
confusion_matrix(df_test_d["Complaint_Status_acc"], df_test_d['Complaint-Status'])
accuracy_score(df_test_d["Complaint_Status_acc"], df_test_d["Complaint-Status"])
"""
##################################################
### ##Output File creation
df_test_output= df_test_d[['Complaint-ID','Complaint-Status']]
df_test_output.to_csv("D:\\Python project\\brainwaves 2019\\complaint status tracking\\output_new270119_ra2cbcbxcb.csv", index=False, header=True)
##################################################
avg_line_length: 54.338129 | max_line_length: 266 | alphanum_fraction: 0.699457
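One note on the whitespace cleanup above: each replace("  ", " ") pass only halves the length of a space run, so four passes handle runs up to roughly 16 spaces. A hedged one-pass alternative with a regex (pandas' str.replace accepts a pattern when regex=True):

```python
import pandas as pd

s = pd.Series(["a    b\t\tc", "x  y"])
cleaned = s.str.replace(r"\s+", " ", regex=True)  # collapse any whitespace run
print(cleaned.tolist())  # ['a b c', 'x y']
```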
hexsha: 9e171c2db2ce83e95935911d2bd31e1429302352 | size: 2,347 | ext: py | lang: Python
max_stars: research/cv/ICNet/src/loss.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 77 | 2021-10-15T08:32:37.000Z .. 2022-03-30T13:09:11.000Z
max_issues: research/cv/ICNet/src/loss.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 3 | 2021-10-30T14:44:57.000Z .. 2022-02-14T06:57:57.000Z
max_forks: research/cv/ICNet/src/loss.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | count: 24 | 2021-10-15T08:32:45.000Z .. 2022-03-24T18:45:20.000Z
content:
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Custom losses."""
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from src.losses import SoftmaxCrossEntropyLoss
__all__ = ['ICNetLoss']
class ICNetLoss(nn.Cell):
    """Cross Entropy Loss for ICNet"""

    def __init__(self, aux_weight=0.4, ignore_index=-1):
        super(ICNetLoss, self).__init__()
        self.aux_weight = aux_weight
        self.ignore_index = ignore_index
        self.sparse = True
        self.base_loss = SoftmaxCrossEntropyLoss(num_cls=19, ignore_label=-1)
        self.resize_bilinear = nn.ResizeBilinear()  # input must be 4D

    def construct(self, *inputs):
        """construct"""
        preds, target = inputs
        pred = preds[0]
        pred_sub4 = preds[1]
        pred_sub8 = preds[2]
        pred_sub16 = preds[3]

        # [batch, H, W] -> [batch, 1, H, W]
        expand_dims = ops.ExpandDims()
        if target.shape[0] == 720 or target.shape[0] == 1024:
            target = expand_dims(target, 0).astype(ms.dtype.float32)
            target = expand_dims(target, 0).astype(ms.dtype.float32)
        else:
            target = expand_dims(target, 1).astype(ms.dtype.float32)

        h, w = pred.shape[2:]
        target_sub4 = self.resize_bilinear(target, size=(h / 4, w / 4)).squeeze(1)
        target_sub8 = self.resize_bilinear(target, size=(h / 8, w / 8)).squeeze(1)
        target_sub16 = self.resize_bilinear(target, size=(h / 16, w / 16)).squeeze(1)

        loss1 = self.base_loss(pred_sub4, target_sub4)
        loss2 = self.base_loss(pred_sub8, target_sub8)
        loss3 = self.base_loss(pred_sub16, target_sub16)
        return loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight
avg_line_length: 36.107692 | max_line_length: 85 | alphanum_fraction: 0.643801
hexsha: f557a735dc3b79a38cb10ff4065a1511955a96c3 | size: 476 | ext: py | lang: Python
max_stars: nz_django/day3/db_relation_demo/front/migrations/0004_auto_20200220_1513.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | count: null | null .. null
max_issues: nz_django/day3/db_relation_demo/front/migrations/0004_auto_20200220_1513.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | count: 27 | 2020-02-12T07:55:58.000Z .. 2022-03-12T00:19:09.000Z
max_forks: nz_django/day3/db_relation_demo/front/migrations/0004_auto_20200220_1513.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | count: 2 | 2020-02-18T01:54:55.000Z .. 2020-02-21T11:36:28.000Z
content:
# Generated by Django 2.0 on 2020-02-20 07:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    dependencies = [
        ('front', '0003_auto_20200220_1510'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userextension',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='front.FrontUser'),
        ),
    ]
avg_line_length: 23.8 | max_line_length: 106 | alphanum_fraction: 0.644958
hexsha: 1990ac9337e3bcc737c4073560d05d8460b5b92c | size: 686 | ext: py | lang: Python
max_stars: find-bottom-left-tree-value/find-bottom-left-tree-value.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | count: 2 | 2021-12-05T14:29:06.000Z .. 2022-01-01T05:46:13.000Z
max_issues: find-bottom-left-tree-value/find-bottom-left-tree-value.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | count: null | null .. null
max_forks: find-bottom-left-tree-value/find-bottom-left-tree-value.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | ["MIT"] | count: null | null .. null
content:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findBottomLeftValue(self, root: Optional[TreeNode]) -> int:
        bottomleft = root.val
        stack = [(root, 0)]
        prev_depth = 0
        while stack:
            cur, depth = stack.pop(0)
            if depth != prev_depth:
                bottomleft = cur.val
            if cur.left:
                stack.append((cur.left, depth + 1))
            if cur.right:
                stack.append((cur.right, depth + 1))
            prev_depth = depth
        return bottomleft
avg_line_length: 32.666667 | max_line_length: 67 | alphanum_fraction: 0.542274
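list.pop(0) shifts the whole list, so each dequeue above is O(n); a hedged variant of the same level-order idea with collections.deque makes it O(1). Pushing right before left means the last node dequeued is the leftmost value of the deepest level (TreeNode is defined inline so the sketch runs standalone):

```python
from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def find_bottom_left_value(root):
    queue = deque([root])
    while queue:
        node = queue.popleft()
        # Right child first, so the last node popped is the deepest-leftmost.
        if node.right:
            queue.append(node.right)
        if node.left:
            queue.append(node.left)
    return node.val

root = TreeNode(2, TreeNode(1), TreeNode(3, TreeNode(4)))
print(find_bottom_left_value(root))  # 4
```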
hexsha: 5ff5ff9c9414eec6e594e1649ce3b3f35b8a669a | size: 787 | ext: py | lang: Python
max_stars: 662/tuwulisu_662_tree.py | Leetcode-Secret-Society/warehouse | 40d7969683b1296f361e799cda37f15ceec52af8 | ["MIT"] | count: null | null .. null
max_issues: 662/tuwulisu_662_tree.py | Leetcode-Secret-Society/warehouse | 40d7969683b1296f361e799cda37f15ceec52af8 | ["MIT"] | count: null | null .. null
max_forks: 662/tuwulisu_662_tree.py | Leetcode-Secret-Society/warehouse | 40d7969683b1296f361e799cda37f15ceec52af8 | ["MIT"] | count: null | null .. null
content:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        if not root:
            return 0
        queue = [[root, 0]]
        max_width = 1
        while queue:
            new_queue = []
            for node, node_id in queue:
                if node.left:
                    new_queue.append([node.left, node_id * 2])
                if node.right:
                    new_queue.append([node.right, node_id * 2 + 1])
            if len(new_queue) >= 2:
                max_width = max(max_width, new_queue[-1][1] - new_queue[0][1] + 1)
            queue = new_queue
        return max_width
avg_line_length: 32.791667 | max_line_length: 79 | alphanum_fraction: 0.526048
hexsha: 27b73ab20b9e387550a33ab9205adeaaf4633a19 | size: 613 | ext: py | lang: Python
max_stars: profiles/admin.py | Thames1990/BadBatBets | 8dffb69561668b8991bf4103919e4b254d4ca56a | ["MIT"] | count: null | null .. null
max_issues: profiles/admin.py | Thames1990/BadBatBets | 8dffb69561668b8991bf4103919e4b254d4ca56a | ["MIT"] | count: null | null .. null
max_forks: profiles/admin.py | Thames1990/BadBatBets | 8dffb69561668b8991bf4103919e4b254d4ca56a | ["MIT"] | count: null | null .. null
content:
from django.contrib import admin
from .models import Profile, ForbiddenUser, Feedback
class ProfileAdmin(admin.ModelAdmin):
    fields = ['user', 'account', 'verified', 'accepted_general_terms_and_conditions', 'accepted_privacy_policy']
    list_display = ['user', 'verified']

class ForbiddenUserAdmin(admin.ModelAdmin):
    list_display = ['name', 'has_account']

class FeedbackAdmin(admin.ModelAdmin):
    list_display = ['provided_by', 'feedback', 'resolved']
admin.site.register(Profile, ProfileAdmin)
admin.site.register(ForbiddenUser, ForbiddenUserAdmin)
admin.site.register(Feedback, FeedbackAdmin)
avg_line_length: 29.190476 | max_line_length: 112 | alphanum_fraction: 0.769984
hexsha: d024fc5d3790d7475bbc9e59c08f275bfee78358 | size: 1,333 | ext: py | lang: Python
max_stars: tools/pythonpkg/tests/fast/test_case_alias.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | ["MIT"] | count: 2,816 | 2018-06-26T18:52:52.000Z .. 2021-04-06T10:39:15.000Z
max_issues: tools/pythonpkg/tests/fast/test_case_alias.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | ["MIT"] | count: 1,310 | 2021-04-06T16:04:52.000Z .. 2022-03-31T13:52:53.000Z
max_forks: tools/pythonpkg/tests/fast/test_case_alias.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | ["MIT"] | count: 270 | 2021-04-09T06:18:28.000Z .. 2022-03-31T11:55:37.000Z
content:
import pandas
import numpy as np
import datetime
import duckdb
class TestCaseAlias(object):
    def test_case_alias(self, duckdb_cursor):
        import pandas
        import numpy as np
        import datetime
        import duckdb

        con = duckdb.connect(':memory:')
        df = pandas.DataFrame([{"COL1": "val1", "CoL2": 1.05}, {"COL1": "val3", "CoL2": 17}])

        r1 = con.from_df(df).query('df', 'select * from df').fetchdf()
        assert r1["COL1"][0] == "val1"
        assert r1["COL1"][1] == "val3"
        assert r1["CoL2"][0] == 1.05
        assert r1["CoL2"][1] == 17

        r2 = con.from_df(df).query('df', 'select COL1, COL2 from df').fetchdf()
        assert r2["COL1"][0] == "val1"
        assert r2["COL1"][1] == "val3"
        assert r2["CoL2"][0] == 1.05
        assert r2["CoL2"][1] == 17

        r3 = con.from_df(df).query('df', 'select COL1, COL2 from df ORDER BY COL1').fetchdf()
        assert r3["COL1"][0] == "val1"
        assert r3["COL1"][1] == "val3"
        assert r3["CoL2"][0] == 1.05
        assert r3["CoL2"][1] == 17

        r4 = con.from_df(df).query('df', 'select COL1, COL2 from df GROUP BY COL1, COL2 ORDER BY COL1').fetchdf()
        assert r4["COL1"][0] == "val1"
        assert r4["COL1"][1] == "val3"
        assert r4["CoL2"][0] == 1.05
        assert r4["CoL2"][1] == 17
avg_line_length: 33.325 | max_line_length: 113 | alphanum_fraction: 0.535634
hexsha: ef8cadc119a3ac98d061a16fa77fef11daee59ee | size: 1,946 | ext: py | lang: Python
max_stars: test/test_npu/test_network_ops/test_apply_adam.py | Ascend/pytorch | 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: 1 | 2021-12-02T03:07:35.000Z .. 2021-12-02T03:07:35.000Z
max_issues: test/test_npu/test_network_ops/test_apply_adam.py | Ascend/pytorch | 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: 1 | 2021-11-12T07:23:03.000Z .. 2021-11-12T08:28:13.000Z
max_forks: test/test_npu/test_network_ops/test_apply_adam.py | Ascend/pytorch | 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | ["BSD-3-Clause"] | count: null | null .. null
content:
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from common_utils import TestCase, run_tests
from common_device_type import instantiate_device_type_tests
class TestApplyAdam(TestCase):
    def test_apply_adam_fp32(self, device):
        var = torch.randn(2, 2, 2, 2, dtype=torch.float32).to("npu")
        m = torch.randn(2, 2, 2, 2, dtype=torch.float32).to("npu")
        v = torch.randn(2, 2, 2, 2, dtype=torch.float32).to("npu")
        grad = torch.randn(2, 2, 2, 2, dtype=torch.float32).to("npu")
        bt1p = 1
        bt2p = 1
        lr = 0.2
        bt1 = 0.2
        bt2 = 0.2
        ep = 0.2
        ul = False
        un = False
        var_o, m_o, v_o = torch.npu_apply_adam(var, m, v, bt1p, bt2p, lr, bt1, bt2, ep, grad, ul, un)
        expect_vo = torch.tensor([[[[1.7452, 0.1779],
                                    [1.6296, 3.0590]],
                                   [[1.7282, 0.0648],
                                    [0.6864, 0.4539]]],
                                  [[[1.5883, 2.6426],
                                    [0.3080, 0.1884]],
                                   [[0.3690, 1.9991],
                                    [3.0633, 0.4669]]]], dtype=torch.float32)
        self.assertRtolEqual(expect_vo, v_o.cpu())

instantiate_device_type_tests(TestApplyAdam, globals(), except_for="cpu")
if __name__ == "__main__":
    run_tests()
avg_line_length: 41.404255 | max_line_length: 101 | alphanum_fraction: 0.578109
hexsha: efa3324f837577bb44837764431d5d1ec15540a5 | size: 1,443 | ext: py | lang: Python
max_stars: projects/api/templates.py | Matheus158257/projects | 26a6148046533476e625a872a2950c383aa975a8 | ["Apache-2.0"] | count: null | null .. null
max_issues: projects/api/templates.py | Matheus158257/projects | 26a6148046533476e625a872a2950c383aa975a8 | ["Apache-2.0"] | count: null | null .. null
max_forks: projects/api/templates.py | Matheus158257/projects | 26a6148046533476e625a872a2950c383aa975a8 | ["Apache-2.0"] | count: null | null .. null
content:
# -*- coding: utf-8 -*-
"""Templates blueprint."""
from flask import Blueprint, jsonify, request
from ..controllers.templates import list_templates, create_template, \
get_template, update_template, delete_template
from ..utils import to_snake_case
bp = Blueprint("templates", __name__)
@bp.route("", methods=["GET"])
def handle_list_templates():
"""Handles GET requests to /."""
return jsonify(list_templates())
@bp.route("", methods=["POST"])
def handle_post_templates():
"""Handles POST requests to /."""
kwargs = request.get_json(force=True)
kwargs = {to_snake_case(k): v for k, v in kwargs.items()}
template = create_template(**kwargs)
return jsonify(template)
@bp.route("<template_id>", methods=["GET"])
def handle_get_template(template_id):
"""Handles GET requests to /<template_id>."""
return jsonify(get_template(uuid=template_id))
@bp.route("<template_id>", methods=["PATCH"])
def handle_patch_template(template_id):
"""Handles PATCH requests to /<template_id>."""
kwargs = request.get_json(force=True)
kwargs = {to_snake_case(k): v for k, v in kwargs.items()}
template = update_template(uuid=template_id, **kwargs)
return jsonify(template)
@bp.route("<template_id>", methods=["DELETE"])
def handle_delete_template(template_id):
"""Handles DELETE requests to /<template_id>."""
template = delete_template(uuid=template_id)
return jsonify(template)
avg_line_length: 30.0625 | max_line_length: 70 | alphanum_fraction: 0.704782
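A minimal sketch of mounting this blueprint in an application, assuming the import path implied by the file location above and an illustrative url_prefix:

```python
from flask import Flask
from projects.api.templates import bp  # assumed path, based on the repo layout shown above

def create_app():
    app = Flask(__name__)
    # Serves GET/POST at /templates and GET/PATCH/DELETE at /templates/<template_id>.
    app.register_blueprint(bp, url_prefix="/templates")
    return app
```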
hexsha: 4be7d027414a5f997b3893b769640f5824944d8c | size: 369 | ext: py | lang: Python
max_stars: main.py | pchchv/getqr | f8858d6d539632309841059422004f8f3a6e358e | ["MIT"] | count: null | null .. null
max_issues: main.py | pchchv/getqr | f8858d6d539632309841059422004f8f3a6e358e | ["MIT"] | count: null | null .. null
max_forks: main.py | pchchv/getqr | f8858d6d539632309841059422004f8f3a6e358e | ["MIT"] | count: null | null .. null
content:
def _check_box_size(size):
    if int(size) <= 0:
        raise ValueError("Invalid box size. Must be larger than 0")

def _check_border(size):
    if int(size) <= 0:
        raise ValueError("Invalid border value. Must be larger than 0")

class QRCode:
    def __init__(self, box_size=10, border=2):
        _check_box_size(box_size)
        _check_border(border)
avg_line_length: 24.6 | max_line_length: 72 | alphanum_fraction: 0.663957
hexsha: 3262015dc57743176fd8f07e02c8037b984bbe5f | size: 972 | ext: py | lang: Python
max_stars: tag_1/p_2_4_schaltjahr.py | techrabbit58/uebung_informatik_vorkurs | e99312ae66ccccd6bfe45bfd3c3f43c01690659c | ["Unlicense"] | count: null | null .. null
max_issues: tag_1/p_2_4_schaltjahr.py | techrabbit58/uebung_informatik_vorkurs | e99312ae66ccccd6bfe45bfd3c3f43c01690659c | ["Unlicense"] | count: null | null .. null
max_forks: tag_1/p_2_4_schaltjahr.py | techrabbit58/uebung_informatik_vorkurs | e99312ae66ccccd6bfe45bfd3c3f43c01690659c | ["Unlicense"] | count: null | null .. null
content:
"""
2 If-Abfragen (Tag 1)
2.4 Überprüfe, ob ein vorher festgelegtes Jahr ein Schaltjahr ist.
Hinweise:
- Jahreszahl nicht durch 4 teilbar: kein Schaltjahr
- Jahreszahl durch 4 teilbar: Schaltjahr
- Jahreszahl durch 100 teilbar: kein Schaltjahr
- Jahreszahl durch 400 teilbar: Schaltjahr
Beispiele:
- 2000, 2004 sind Schaltjahre
- 1900, 2006 sind keine Schaltjahre
"""
def ist_schaltjahr(jahr):
return (jahr % 4 == 0 and jahr % 100 != 0) or jahr % 400 == 0
if __name__ == '__main__':
print(f'Das Jahr 1900 ist', " ein" if ist_schaltjahr(1900) else "kein", 'Schaltjahr.')
print(f'Das Jahr 2000 ist', " ein" if ist_schaltjahr(2000) else "kein", 'Schaltjahr.')
print(f'Das Jahr 2001 ist', " ein" if ist_schaltjahr(2001) else "kein", 'Schaltjahr.')
print(f'Das Jahr 2004 ist', " ein" if ist_schaltjahr(2004) else "kein", 'Schaltjahr.')
print(f'Das Jahr 2006 ist', " ein" if ist_schaltjahr(2006) else "kein", 'Schaltjahr.')
avg_line_length: 36 | max_line_length: 90 | alphanum_fraction: 0.682099
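A quick cross-check of ist_schaltjahr against the standard library, since calendar.isleap encodes the same Gregorian rule:

```python
import calendar

def ist_schaltjahr(jahr):
    return (jahr % 4 == 0 and jahr % 100 != 0) or jahr % 400 == 0

for jahr in (1900, 2000, 2001, 2004, 2006, 2100, 2400):
    assert ist_schaltjahr(jahr) == calendar.isleap(jahr)
print("all checks passed")
```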
hexsha: 329899291405ade21285cff7fc99ae518ffc3286 | size: 14,609 | ext: py | lang: Python
max_stars: puppet/puppet_v4.py | pchaos/wanggejiaoyi | 60242d465bf10d4be46ee6eafc99557affc2a52e | ["MIT"] | count: 15 | 2018-05-16T02:39:01.000Z .. 2021-05-22T13:12:55.000Z
max_issues: puppet/puppet_v4.py | pchaos/wanggejiaoyi | 60242d465bf10d4be46ee6eafc99557affc2a52e | ["MIT"] | count: null | null .. null
max_forks: puppet/puppet_v4.py | pchaos/wanggejiaoyi | 60242d465bf10d4be46ee6eafc99557affc2a52e | ["MIT"] | count: 9 | 2018-05-16T00:47:34.000Z .. 2021-11-26T05:39:48.000Z
content:
"""
扯线木偶界面自动化应用编程接口(Puppet UIAutomation API)
技术群:624585416
"""
__author__ = "睿瞳深邃(https://github.com/Raytone-D)"
__project__ = 'Puppet'
__version__ = "0.4.33"
__license__ = 'MIT'
# coding: utf-8
import ctypes
from functools import reduce
import time
import sys
import platform
try:
    import pyperclip
except Exception as e:
    print("{}\n请先在命令行下运行:pip install pyperclip,再使用puppet!".format(e))
MSG = {'WM_SETTEXT': 12,
       'WM_GETTEXT': 13,
       'WM_KEYDOWN': 256,
       'WM_KEYUP': 257,
       'WM_COMMAND': 273,
       'BM_CLICK': 245,
       'CB_GETCOUNT': 326,
       'CB_SETCURSEL': 334,
       'CBN_SELCHANGE': 1,
       'COPY_DATA': 57634}

INIT = {'买入': 161, '卖出': 162, '撤单': 163}

NODE = {'FRAME': (59648, 59649),
        'FORM': (59648, 59649, 1047, 200, 1047),
        'ACCOUNT': (59392, 0, 1711),
        'COMBO': (59392, 0, 2322),
        'BUY': (1032, 1033, 1034, '买入[B]', 1036, 1018),
        'SELL': (1032, 1033, 1034, '卖出[S]', 1036, 1038),
        'CANCEL': (3348, '查询代码', '撤单'),
        'ENTRUSTMENT': 168,
        '撤单': 163,
        '双向委托': 512,
        '新股申购': 554,
        '中签查询': 1070}

TWO_WAY = {'买入代码': 1032,
           '买入价格': 1033,
           '买入数量': 1034,
           '买入': 1006,
           '卖出代码': 1035,
           '卖出价格': 1058,
           '卖出数量': 1039,
           '卖出': 1008,
           '可用余额': 1038,
           '刷新': 32790,
           '报表': 1047}

NEW = {'新股代码': 1032,
       '新股名称': 1036,
       '申购价格': 1033,
       '可申购数量': 1018,
       '申购数量': 1034,
       '申购': 1006}

RAFFLE = ['新股代码', '证券代码', '申购价格']  # , '申购上限']

VKCODE = {'F1': 112,
          'F2': 113,
          'F3': 114,
          'F4': 115,
          'F5': 116,
          'F6': 117}
import platform
sysstr = platform.system()
if (sysstr == "Windows"):
op = ctypes.windll.user32
def switch_combo(index, idCombo, hCombo):
    op.SendMessageW(hCombo, MSG['CB_SETCURSEL'], index, 0)
    op.SendMessageW(op.GetParent(hCombo), MSG['WM_COMMAND'], MSG['CBN_SELCHANGE'] << 16 | idCombo, hCombo)

def click_button(dialog, label):
    handle = op.FindWindowExW(dialog, 0, 0, label)
    id_btn = op.GetDlgCtrlID(handle)
    op.PostMessageW(dialog, MSG['WM_COMMAND'], id_btn, 0)

def fill_in(container, _id_item, _str):
    op.SendDlgItemMessageW(container, _id_item, MSG['WM_SETTEXT'], 0, _str)

def kill_popup(hDlg, name='是(&Y)'):
    for x in range(100):
        time.sleep(0.01)
        popup = op.GetLastActivePopup(hDlg)
        if popup != hDlg and op.IsWindowVisible(popup):
            yes = op.FindWindowExW(popup, 0, 0, name)
            idYes = op.GetDlgCtrlID(yes)
            op.PostMessageW(popup, MSG['WM_COMMAND'], idYes, 0)
            print('popup has killed.')
            break
class Puppet:
"""
界面自动化操控包装类
# 方法 # '委买': buy(), '委卖': sell(), '撤单': cancel(), '打新': raffle(),
# 属性 # '帐号': account, '可用余额': balance, '持仓': position, '成交': deals, '可撤委托': cancelable,
# # '新股': new, '中签': bingo,
"""
def __init__(self, main=None, title='网上股票交易系统5.0'):
print('木偶: 欢迎使用Puppet TraderApi, version {}'.format(__version__))
print('{}\nPython version: {}'.format(platform.platform(), platform.python_version()))
self._main = main or op.FindWindowW(0, title)
self.buff = ctypes.create_unicode_buffer(32)
self.switch = lambda node: op.SendMessageW(self._main, MSG['WM_COMMAND'], node, 0)
if self._main:
self._container = {label: self._get_item(_id) for label, _id in INIT.items()}
self._position, self._cancelable, self._entrustment = None, None, None
self.switch(NODE['双向委托'])
time.sleep(0.5)
self.two_way = reduce(op.GetDlgItem, NODE['FRAME'], self._main)
self.members = {k: op.GetDlgItem(self.two_way, v) for k, v in TWO_WAY.items()}
self._position = reduce(op.GetDlgItem, NODE['FORM'], self._main)
if not self._main:
print("木偶:客户交易端没登录,我先撤了!")
sys.exit('木偶:错误的标题字符串"{}"!'.format(title))
# 获取登录账号
self.account = reduce(op.GetDlgItem, NODE['ACCOUNT'], self._main)
op.SendMessageW(self.account, MSG['WM_GETTEXT'], 32, self.buff)
self.account = self.buff.value
# self.combo = reduce(op.GetDlgItem, NODE['COMBO'], self._main)
# self.count = op.SendMessageW(self.combo, MSG['CB_GETCOUNT'])
def _get_item(self, _id, sec=0.5):
self.switch(_id)
time.sleep(sec)
return reduce(op.GetDlgItem, NODE['FRAME'], self._main)
    def switch_tab(self, hCtrl, keyCode, param=0):  # single click
op.PostMessageW(hCtrl, MSG['WM_KEYDOWN'], keyCode, param)
time.sleep(0.1)
op.PostMessageW(hCtrl, MSG['WM_KEYUP'], keyCode, param)
def copy_data(self, hCtrl, key=0):
"将CVirtualGridCtrl|Custom<n>的数据复制到剪贴板"
_replace = {'参考市值': '市值', '最新市值': '市值'} # 兼容国金/平安"最新市值"、银河“参考市值”。
start = time.time()
if key:
            self.switch(NODE['双向委托'])  # activate the dialog so switching to the fills/orders controls works reliably.
self.switch_tab(self.two_way, key)
for i in range(10):
time.sleep(0.3)
op.SendMessageW(hCtrl, MSG['WM_COMMAND'], MSG['COPY_DATA'], NODE['FORM'][-1])
ret = pyperclip.paste().splitlines()
if len(ret) > 1:
break
temp = (x.split('\t') for x in ret)
header = next(temp)
for tag, value in _replace.items():
if tag in header:
header.insert(header.index(tag), value)
header.remove(tag)
        print('it took {} loops, {} seconds.'.format(i, time.time() - start))
return tuple(dict(zip(header, x)) for x in temp)
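    # --- hedged illustration (not part of the original file) ---
    # copy_data above parses tab-separated clipboard text such as
    # (values made up): '证券代码\t最新市值\n510160\t1234.56'
    # The first row is the header; _replace renames broker-specific
    # headers ('最新市值'/'参考市值' -> '市值') before every remaining row
    # is zipped with the header into a dict:
    #     ({'证券代码': '510160', '市值': '1234.56'},)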
def _wait(self, container, id_item):
        self.buff.value = ''  # start falsy; the loop below waits until it becomes truthy
for n in range(500):
time.sleep(0.01)
op.SendDlgItemMessageW(container, id_item, MSG['WM_GETTEXT'], 64, self.buff)
if self.buff.value:
break
def _order(self, container, id_items, *triple):
# self.switch(NODE['BUY'][0])
        fill_in(container, id_items[0], triple[0])  # security code
        self._wait(container, id_items[-2])         # security name
        fill_in(container, id_items[1], triple[1])  # price
        self._wait(container, id_items[-1])         # available quantity
        fill_in(container, id_items[2], triple[2])  # quantity
        click_button(container, id_items[3])        # order button
        price_str = str(triple[1])
        if '.' in price_str and len(price_str.split('.')[1]) == 3:  # fund prices with 3 decimals trigger a confirm popup
            kill_popup(self._main)
def buy(self, symbol, price, qty):
# self.switch(NODE['BUY'][0])
self._order(self._container['买入'], NODE['BUY'], symbol, price, qty)
def sell(self, symbol, price, qty):
# self.switch(NODE['SELL'][0])
self._order(self._container['卖出'], NODE['SELL'], symbol, price, qty)
    def buy2(self, symbol, price, qty, sec=0.3):  # Buy (B)
self.switch(NODE['双向委托'])
op.SendMessageW(self.members['买入代码'], MSG['WM_SETTEXT'], 0, str(symbol))
time.sleep(0.1)
op.SendMessageW(self.members['买入价格'], MSG['WM_SETTEXT'], 0, str(price))
time.sleep(0.1)
op.SendMessageW(self.members['买入数量'], MSG['WM_SETTEXT'], 0, str(qty))
# op.SendMessageW(self.members['买入'], MSG['BM_CLICK'], 0, 0)
time.sleep(sec)
op.PostMessageW(self.two_way, MSG['WM_COMMAND'], TWO_WAY['买入'], 0)
        if '.' in price and len(price.split('.')[1]) == 3:  # fund prices with 3 decimals trigger a confirm popup
kill_popup(self._main)
    def sell2(self, symbol, price, qty, sec=0.3):  # Sell (S)
self.switch(NODE['双向委托'])
op.SendMessageW(self.members['卖出代码'], MSG['WM_SETTEXT'], 0, str(symbol))
time.sleep(0.1)
op.SendMessageW(self.members['卖出价格'], MSG['WM_SETTEXT'], 0, str(price))
time.sleep(0.1)
op.SendMessageW(self.members['卖出数量'], MSG['WM_SETTEXT'], 0, str(qty))
# op.SendMessageW(self.members['卖出'], MSG['BM_CLICK'], 0, 0)
time.sleep(sec)
op.PostMessageW(self.two_way, MSG['WM_COMMAND'], TWO_WAY['卖出'], 0)
        if '.' in price and len(price.split('.')[1]) == 3:  # fund prices with 3 decimals trigger a confirm popup
kill_popup(self._main)
    def refresh(self):  # refresh (F5)
op.PostMessageW(self.two_way, MSG['WM_COMMAND'], TWO_WAY['刷新'], 0)
def cancel(self, symbol=None, choice='buy'):
# print("请尽快将"buy"改成"cancel_buy", "sell"改成"cancel_sell",并移植到cancel_order方法。")
time.sleep(3)
cases = {'buy': 'cancel_buy', 'sell': 'cancel_sell'}
self.cancel_order(cases.get(choice))
def cancel_order(self, symbol=None, choice='cancel_all', symbolid=3348, nMarket=None, orderId=None):
"""撤销订单,choice选择操作的结果,默认“cancel_all”,可选“cancel_buy”、“cancel_sell”或"cancel"
"cancel"是撤销指定股票symbol的全部委托。
"""
hDlg = self._container['撤单']
        if symbol is not None:
            fill_in(hDlg, symbolid, str(symbol))
for i in range(10):
time.sleep(0.3)
click_button(hDlg, '查询代码')
hButton = op.FindWindowExW(hDlg, 0, 0, '撤单')
            # wait until the Cancel button becomes enabled
if op.IsWindowEnabled(hButton):
break
cases = {
'cancel_all': '全撤(Z /)',
'cancel_buy': '撤买(X)',
'cancel_sell': '撤卖(C)',
'cancel': '撤单'
}
click_button(hDlg, cases.get(choice))
@property
def balance(self):
self.switch(NODE['双向委托'])
self.refresh()
op.SendMessageW(self.members['可用余额'], MSG['WM_GETTEXT'], 32, self.buff)
return float(self.buff.value)
@property
def position(self):
return self.copy_data(self._position, ord('W'))
@property
def market_value(self):
ret = self.position
return sum((float(pair['市值']) for pair in ret)) if ret else 0.0
@property
def deals(self):
return self.copy_data(self._position, ord('E'))
@property
def entrustment(self):
"""
        Today's entrustments (orders).
:return:
"""
if not self._entrustment:
self.switch(NODE['ENTRUSTMENT'])
self._entrustment = reduce(op.GetDlgItem, NODE['FORM'], self._main)
return self.copy_data(self._entrustment)
@property
def cancelable(self):
if not self._cancelable:
self.switch(NODE['撤单'])
self._cancelable = reduce(op.GetDlgItem, NODE['FORM'], self._main)
return self.copy_data(self._cancelable)
# ret = self.entrustment
# return [pair for pair in ret if '已报' in pair['备注']] if ret else ret
@property
def new(self):
self.switch(NODE['新股申购'])
time.sleep(0.5)
self._new = reduce(op.GetDlgItem, NODE['FORM'], self._main)
return self.copy_data(self._new)
@property
def bingo(self):
self.switch(NODE['中签查询'])
time.sleep(0.5)
self._bingo = reduce(op.GetDlgItem, NODE['FORM'], self._main)
return self.copy_data(self._bingo)
    def cancel_all(self):  # cancel all (Z); only the cancel dialog's buttons avoid confirmation popups
print("请用trader.cancel_order('cancel_all') 取代trader.cancel_all()")
click_button(self._container['撤单'], '全撤(Z /)')
    def cancel_buy(self):  # cancel buys (X)
print("请用trader.cancel_order('cancel_buy') 取代trader.cancel_buy()")
click_button(self._container['撤单'], '撤买(X)')
    def cancel_sell(self):  # cancel sells (C)
print("请用trader.cancel_order('cancel_sell') 取代trader.cancel_sell()")
click_button(self._container['撤单'], '撤卖(C)')
    def raffle(self, skip=False):  # subscribe to new-share IPOs
# op.SendMessageW(self._main, MSG['WM_COMMAND'], NODE['新股申购'], 0)
# self._raffle = reduce(op.GetDlgItem, NODE['FORM'], self._main)
        # close_pop()  # the popup needs no closing; it does not block trading.
# schedule = self.copy_data(self._raffle)
ret = self.new
if not ret:
print("是日无新!")
return ret
self._raffle = reduce(op.GetDlgItem, NODE['FRAME'], self._main)
self._raffle_parts = {k: op.GetDlgItem(self._raffle, v) for k, v in NEW.items()}
# new = [x.split() for x in schedule.splitlines()]
        # index = [new[0].index(x) for x in RAFFLE if x in new[0]]  # index mapping: code 0, price 1, quantity 2
# new = map(lambda x: [x[y] for y in index], new[1:])
for new in ret:
symbol, price = [new[y] for y in RAFFLE if y in new.keys()]
if symbol[0] == '3' and skip:
print("跳过创业板新股: {}".format(symbol))
continue
op.SendMessageW(self._raffle_parts['新股代码'], MSG['WM_SETTEXT'], 0, symbol)
time.sleep(0.3)
op.SendMessageW(self._raffle_parts['申购价格'], MSG['WM_SETTEXT'], 0, price)
time.sleep(0.3)
op.SendMessageW(self._raffle_parts['可申购数量'], MSG['WM_GETTEXT'], 32, self.buff)
if not int(self.buff.value):
print('跳过零数量新股:{}'.format(symbol))
continue
op.SendMessageW(self._raffle_parts['申购数量'], MSG['WM_SETTEXT'], 0, self.buff.value)
time.sleep(0.3)
op.PostMessageW(self._raffle, MSG['WM_COMMAND'], NEW['申购'], 0)
        # op.SendMessageW(self._main, MSG['WM_COMMAND'], NODE['双向委托'], 0)  # switch back to the trading console
return [new for new in self.cancelable if '配售申购' in new['操作']]
if __name__ == '__main__':
trader = Puppet()
# trader = Puppet(title='广发证券核新网上交易系统7.60')
if trader.account:
        print(trader.account)       # account number
        print(trader.new)           # today's new-share (IPO) list
        # trader.raffle()           # subscribe to IPOs; skip=True skips ChiNext issues.
        # print(trader.balance)     # available balance
        print(trader.position)      # live positions
        # print(trader.deals)       # today's fills
        # print(trader.cancelable)  # cancelable orders
        print(trader.market_value)
        print(trader.entrustment)   # today's orders (open, filled, and cancelled)
        # print(trader.bingo)       # note: only some brokers are supported!
# trader.cancel_all()
# trader.cancel_buy()
# trader.cancel_sell()
# limit = '510160', '0.557', '1000'
# trader.buy(*limit)
# trader.cancel_order('000001', 'cancel')
# trader.cancel_order(stcode, 'cancel_buy')
stcode = '150153'
limit = stcode, '0.644', '5400'
trader.buy2(*limit)
limit = stcode, '0.678', '1700'
trader.sell2(*limit)
limit = stcode, '0.659', '1600'
# trader.sell2(*limit)
| 36.98481 | 107 | 0.551852 |
eb220d9ee93f34921d37f851cb91dad10b4b4707
| 1,658 |
py
|
Python
|
official/cv/c3d/src/lr_schedule.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/c3d/src/lr_schedule.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/c3d/src/lr_schedule.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
def linear_warmup_learning_rate(lr_max, epoch_step, global_step=0, lr_init=1e-8,
warmup_epochs=0, total_epochs=1, steps_per_epoch=1):
"""Set learning rate."""
lr_each_step = []
total_steps = steps_per_epoch * total_epochs
warmup_steps = steps_per_epoch * warmup_epochs
if warmup_steps != 0:
inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps)
else:
inc_each_step = 0
lr_value = lr_max
for i in range(total_steps):
if i <= warmup_steps:
lr_value = float(lr_init) + inc_each_step * float(i)
else:
if i // steps_per_epoch in epoch_step and i % steps_per_epoch == 0:
lr_value *= 0.1
if lr_value < 0.0:
lr_value = 0.0
lr_each_step.append(lr_value)
lr_each_step = np.array(lr_each_step).astype(np.float32)
learning_rate = lr_each_step[global_step:]
return learning_rate
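# --- hedged usage sketch (not part of the original file) ---
# Example values are made up: 2 warmup epochs ramp the lr from lr_init to
# lr_max, then the lr decays by 10x at the epoch boundaries in epoch_step.
if __name__ == '__main__':
    lr = linear_warmup_learning_rate(lr_max=0.1, epoch_step=[4, 8],
                                     warmup_epochs=2, total_epochs=10,
                                     steps_per_epoch=100)
    assert lr.shape == (1000,)
    assert abs(lr[200] - 0.1) < 1e-6   # warmup reaches lr_max
    assert abs(lr[400] - 0.01) < 1e-6  # first 10x decay at epoch 4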
| 37.681818 | 84 | 0.646562 |
de4246269a934b5715df066dd7198a69067008cf
| 2,225 |
py
|
Python
|
Test.py
|
software-engineering-hsfhh/Asteroids_Team_All-Mann
|
cfc28e81322ce7b5c1b1b111a447714bb3f586d8
|
[
"MIT"
] | 1 |
2020-10-22T14:57:44.000Z
|
2020-10-22T14:57:44.000Z
|
Test.py
|
software-engineering-hsfhh/Asteroids_Team_All-Mann
|
cfc28e81322ce7b5c1b1b111a447714bb3f586d8
|
[
"MIT"
] | 26 |
2020-10-17T09:05:53.000Z
|
2020-11-12T17:57:19.000Z
|
Test.py
|
software-engineering-hsfhh/Asteroids_Team_All-Mann
|
cfc28e81322ce7b5c1b1b111a447714bb3f586d8
|
[
"MIT"
] | null | null | null |
"""
This program shows how to:
* Display a sequence of screens in your game. The "arcade.View"
class makes it easy to separate the code for each screen into
its own class.
* This example shows the absolute basics of using "arcade.View".
See the "different_screens_example.py" for how to handle
screen-specific data.
Make a separate class for each view (screen) in your game.
The class will inherit from arcade.View. The structure will
look like an arcade.Window as each View will need to have its own draw,
update and window event methods. To switch a View, simply create a View
with `view = MyView()` and then use the "self.window.show_view(view)" method.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.view_screens_minimal
"""
import arcade
WIDTH = 1600
HEIGHT = 900
class GameView(arcade.View):
""" Manage the 'game' view for our program. """
def __init__(self):
super().__init__()
# Create variables here
def setup(self):
""" This should set up your game and get it ready to play """
# Replace 'pass' with the code to set up your game
pass
def on_show(self):
""" Called when switching to this view"""
arcade.set_background_color(arcade.color.BLACK)
def on_draw(self):
""" Draw everything for the game. """
arcade.start_render()
arcade.draw_text("Achim Allmann ist auf einer ewig währenden Reise durchs Weltall auf\n"
"der Suche nach dem Sinn des Lebens als plötzlich eine bösartige\n"
"Gruppe Asteroiden auftaucht!\n\n\nDrücke die Leertaste, um fortzufahren", WIDTH/2, HEIGHT/2,
arcade.color.WHITE, font_size=25, anchor_x="center")
def on_key_press(self, key, _modifiers):
"""Drücke Leertaste um fortzufahren"""
if key == arcade.key.SPACE:
self.game_over = True
print ("Game Over")
def main():
""" Startup """
window = arcade.Window(1600, 900, "Asteroids", fullscreen=False)
Game_View = GameView()
window.show_view(Game_View)
arcade.run()
if __name__ == "__main__":
main()
| 32.246377 | 118 | 0.660674 |
a037fcfc8dd8f8addb4268d02c2c36c9fcd33ad9
| 58 |
py
|
Python
|
___Python/Daniel/2018-06-25-VHS-Bielefeld-Python/p09_isbn/m02_init_example.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Daniel/2018-06-25-VHS-Bielefeld-Python/p09_isbn/m02_init_example.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Daniel/2018-06-25-VHS-Bielefeld-Python/p09_isbn/m02_init_example.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
from p10_requests import *
print(FOO)
print(math.pi)
| 11.6 | 27 | 0.706897 |
a0946cf0268b1dae5b2e48f45e274f9d3252cf5e
| 881 |
py
|
Python
|
core/embeds.py
|
Pug234/BytesBump
|
d5ff3130bffae92e1c5c671db4ed8904c403e9dc
|
[
"MIT"
] | 11 |
2020-11-14T17:28:50.000Z
|
2021-05-19T18:21:07.000Z
|
core/embeds.py
|
AnimeDyno/BytesBump
|
a0cf0bfc4c13592c7b10ad46faa46a2a98dc1443
|
[
"MIT"
] | 3 |
2021-01-22T15:48:41.000Z
|
2021-06-22T17:16:50.000Z
|
core/embeds.py
|
zImPinguin/Bump-Bot
|
3f449a4e5581a35a5cff998e94a13ae33dbe2b04
|
[
"MIT"
] | 13 |
2020-11-18T05:20:31.000Z
|
2021-06-19T16:31:30.000Z
|
import random
from discord import Embed, Color
class Embeds:
def __init__(self, message):
self.message = message
def success(self, **kwargs):
embed = Embed(
description=self.message,
color=Color.green()
)
for i in kwargs:
embed.add_field(name=i.replace("_", " "), value=kwargs[i])
return embed
def error(self, **kwargs):
embed = Embed(
description=self.message,
color=Color.red()
)
for i in kwargs:
embed.add_field(name=i.replace("_", " "), value=kwargs[i])
return embed
def warn(self, **kwargs):
embed = Embed(
description=self.message,
color=Color.orange()
)
for i in kwargs:
embed.add_field(name=i.replace("_", " "), value=kwargs[i])
return embed
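# --- hedged usage sketch (not part of the original file) ---
# Kwarg names become field titles, with '_' replaced by ' '. This assumes
# discord.py's Embed.fields accessor; the message and field text are made up.
_demo = Embeds("Server bumped!").success(Next_bump="in 2 hours")
assert _demo.fields[0].name == "Next bump"
# In a bot command this would typically be sent via
# `await ctx.send(embed=Embeds("...").success(...))`.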
| 26.69697 | 70 | 0.53462 |
3e77cfff7cdcfa192004292baf261d166602206d
| 423 |
py
|
Python
|
webapp/data_viewer/streamlit/dataViewer.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
webapp/data_viewer/streamlit/dataViewer.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
webapp/data_viewer/streamlit/dataViewer.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import streamlit as st
import pandas as pd
from pydataset import data
df_data = data().sort_values('dataset_id').reset_index(drop=True)
st.dataframe(df_data) # choices
options = st.selectbox(
    'select the dataset you like best', df_data['dataset_id'])
dataset = data(options)
if isinstance(dataset, (pd.core.frame.DataFrame,
pd.core.series.Series)):
st.dataframe(dataset)
st.line_chart(dataset)
| 23.5 | 65 | 0.728132 |
f2f1ba4eeb291db85d118c86c2e8bf2638aa983a
| 1,714 |
py
|
Python
|
mod/units/eat_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | 1 |
2015-06-28T15:26:52.000Z
|
2015-06-28T15:26:52.000Z
|
mod/units/eat_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | null | null | null |
mod/units/eat_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | 6 |
2015-03-20T16:36:22.000Z
|
2021-08-28T07:58:18.000Z
|
# -*- coding: utf-8 -*-
# @Date : 2015-05-28
import tornado.web
from ..models.eat import Eat
from config import eat_token
import datetime,time
from sqlalchemy.orm.exc import NoResultFound
class EatHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get(self):
self.render('eat.html')
def post(self):
status = self.get_argument('status',default = None)
token = self.get_argument('token',default = None)
if not status or not token:
self.write('请填写完整信息哦')
self.finish()
else:
if not token==eat_token:
self.write('token不正确')
self.finish()
else:
day = time.strftime('%Y-%m-%d',time.localtime(time.time()))
today = time.strftime('%Y-%m-%d-%H',time.localtime(time.time()))
try:
item = self.db.query(Eat).filter(Eat.day == day).one()
item.status = status
item.time = today
except NoResultFound:
eat = Eat(
day = day,
time = today,
status = status)
self.db.add(eat)
try:
self.db.commit()
self.write('success')
self.finish()
except Exception,e:
print str(e)
self.db.rollback()
self.write('发布失败T T')
self.finish()
self.db.close()
| 32.961538 | 81 | 0.446908 |
4b8c97d92940bc9748b145e44bfea9c2dbd8eba9
| 757 |
py
|
Python
|
solution/data_structure/5397/main.py
|
gkgg123/baekjoon
|
4ff8a1238a5809e4958258b5f2eeab7b22105ce9
|
[
"MIT"
] | 2,236 |
2019-08-05T00:36:59.000Z
|
2022-03-31T16:03:53.000Z
|
solution/data_structure/5397/main.py
|
juy4556/baekjoon
|
bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92
|
[
"MIT"
] | 225 |
2020-12-17T10:20:45.000Z
|
2022-01-05T17:44:16.000Z
|
solution/data_structure/5397/main.py
|
juy4556/baekjoon
|
bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92
|
[
"MIT"
] | 602 |
2019-08-05T00:46:25.000Z
|
2022-03-31T13:38:23.000Z
|
# // Authored by : chj3748
# // Co-authored by : -
# // Link : http://boj.kr/471d69f455a544769c6c2fa7199442d1
import sys
from collections import deque
def input():
return sys.stdin.readline().rstrip()
T = int(input())
for test in range(T):
answer_l = deque()
answer_r = deque()
for string in input():
if string == '<':
if answer_l:
temp = answer_l.pop()
answer_r.appendleft(temp)
elif string == '>':
if answer_r:
temp = answer_r.popleft()
answer_l.append(temp)
elif string == '-':
if answer_l:
answer_l.pop()
else:
answer_l.append(string)
print(''.join(answer_l + answer_r))
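# --- hedged illustration (not part of the original solution) ---
# The two deques model a text cursor: answer_l holds characters left of the
# cursor, answer_r those to its right, so '<', '>' and '-' are all O(1).
# A plain-list re-implementation of the same logic, with a made-up input:
def _simulate(commands):
    left, right = [], []
    for ch in commands:
        if ch == '<':
            if left:
                right.append(left.pop())
        elif ch == '>':
            if right:
                left.append(right.pop())
        elif ch == '-':
            if left:
                left.pop()
        else:
            left.append(ch)
    return ''.join(left) + ''.join(reversed(right))
assert _simulate('ab<c') == 'acb'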
| 26.103448 | 58 | 0.532365 |
4b12fcb105f6d3f2213110a67fff9dda133fcce5
| 556 |
py
|
Python
|
Pythonjunior2020/Woche3/Aufgabe_3_3_2.py
|
Zeyecx/HPI-Potsdam
|
ed45ca471cee204dde74dd2c3efae3877ee71036
|
[
"MIT"
] | null | null | null |
Pythonjunior2020/Woche3/Aufgabe_3_3_2.py
|
Zeyecx/HPI-Potsdam
|
ed45ca471cee204dde74dd2c3efae3877ee71036
|
[
"MIT"
] | null | null | null |
Pythonjunior2020/Woche3/Aufgabe_3_3_2.py
|
Zeyecx/HPI-Potsdam
|
ed45ca471cee204dde74dd2c3efae3877ee71036
|
[
"MIT"
] | null | null | null |
# 3.3.2, week 3, block 3, exercise 2
# Import
from daten import satz
from daten import woerterbuch
# Functions
def uebersetze(s):
# Reset x
x = []
w = woerterbuch
    # split the sentence into a list of words
s = s.split(" ")
    # walk through the sentence
for i in range(len(s)):
        # look each word up: woerterbuch[word]
x.append(w[s[i]])
    # combine into one string
for i in range(len(x)):
if i == 0 :
y = x[i]
else:
y += " "+x[i]
    # return the string
return y+"."
# Main
print(uebersetze(satz))
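# --- hedged usage sketch (not part of the original exercise) ---
# The daten module is not shown here; with a made-up dictionary the same
# word-by-word lookup can be written as:
def uebersetze_demo(s, w):
    return " ".join(w[word] for word in s.split(" ")) + "."
assert uebersetze_demo("hallo welt", {"hallo": "hello", "welt": "world"}) == "hello world."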
| 17.935484 | 38 | 0.534173 |
8a5734442f6d89cf63f8f9d805b8fa2c9d2fe877
| 12,706 |
py
|
Python
|
Hackathons_19_20/Club Mahindra DataOlympics/Data Olympics.py
|
aviggithub/Hackathons_20
|
a1bbc63cff3bd71982017749a0cc162d684e452b
|
[
"Apache-2.0"
] | null | null | null |
Hackathons_19_20/Club Mahindra DataOlympics/Data Olympics.py
|
aviggithub/Hackathons_20
|
a1bbc63cff3bd71982017749a0cc162d684e452b
|
[
"Apache-2.0"
] | null | null | null |
Hackathons_19_20/Club Mahindra DataOlympics/Data Olympics.py
|
aviggithub/Hackathons_20
|
a1bbc63cff3bd71982017749a0cc162d684e452b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 09:10:46 2019
@author: avi
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from lightgbm import LGBMClassifier,LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
train_data=pd.read_csv("D:\\Python project\\Club Mahindra DataOlympics\\train.csv")
test_data=pd.read_csv("D:\\Python project\\Club Mahindra DataOlympics\\test.csv")
train_data_c=pd.read_csv("D:\\Python project\\Club Mahindra DataOlympics\\train.csv")
test_data_c=pd.read_csv("D:\\Python project\\Club Mahindra DataOlympics\\test.csv")
#print top rows
train_data.head(5)
#find correlation
train_data.corr()
#summary of data
train_data.describe()
#count null values per variable
train_data.isnull().sum()
#total null values
train_data.isnull().sum().sum()
#fill the null data function
def fillnull_data(df):
return df.fillna(df.mean())
#fill null value using mean
train_data.state_code_residence=fillnull_data(train_data.state_code_residence)
train_data.season_holidayed_code=fillnull_data(train_data.season_holidayed_code)
#find value counts of feature
train_data.channel_code.value_counts()
#plot
sns.countplot(train_data.channel_code)
#bar plot
sns.barplot(train_data.state_code_residence)
#find data type of all features
train_data.dtypes
#find diff data type column names and group them
str_col_datatype=train_data.columns.astype("object")
#replace / in booking date
train_data.booking_date=train_data.booking_date.str.replace("/","")
train_data.booking_date.head()
#function for replace / in dateformate
def replace_bacs(df):
return df.str.replace("/","")
train_data.checkin_date=replace_bacs(train_data.checkin_date)
train_data.checkout_date=replace_bacs(train_data.checkout_date)
train_data.head(5)
#slice day (first 2 chars), month (next 2) and year (rest) from the date string
def get_day(txt):
#txt="1234566"
return txt[:2]
def get_month(txt):
#txt="1234566"
return txt[2:4]
def get_year(txt):
#txt="1234566"
return txt[4:]
#use apply to process each element
#train_data.booking_date=train_data_c.booking_date.apply(get_daymonth)
#train_data.checkin_date=train_data_c.checkin_date.apply(get_daymonth)
#train_data.checkout_date=train_data_c.checkout_date.apply(get_daymonth)
train_data["checkin_date_day"]=train_data.checkin_date.apply(get_day)
train_data["checkin_date_month"]=train_data.checkin_date.apply(get_month)
train_data["checkin_date_year"]=train_data.checkin_date.apply(get_year)
train_data["checkin_date_day"]=train_data["checkin_date_day"].astype("int64")
train_data["checkin_date_month"]=train_data["checkin_date_month"].astype("int64")
train_data["checkin_date_year"]=train_data["checkin_date_year"].astype("int64")
train_data["checkout_date_day"]=train_data.checkout_date.apply(get_day)
train_data["checkout_date_month"]=train_data.checkout_date.apply(get_month)
train_data["checkout_date_year"]=train_data.checkout_date.apply(get_year)
train_data["checkout_date_day"]=train_data["checkout_date_day"].astype("int64")
train_data["checkout_date_month"]=train_data["checkout_date_month"].astype("int64")
train_data["checkout_date_year"]=train_data["checkout_date_year"].astype("int64")
train_data["booking_date_day"]=train_data.booking_date.apply(get_day)
train_data["booking_date_month"]=train_data.booking_date.apply(get_month)
train_data["booking_date_year"]=train_data.booking_date.apply(get_year)
train_data["booking_date_day"]=train_data["booking_date_day"].astype("int64")
train_data["booking_date_month"]=train_data["booking_date_month"].astype("int64")
train_data["booking_date_year"]=train_data["booking_date_year"].astype("int64")
train_data["checkout_date_day"]=train_data.checkout_date.apply(get_day)
train_data["checkout_date_month"]=train_data.checkout_date.apply(get_month)
train_data["checkout_date_year"]=train_data.checkout_date.apply(get_year)
train_data["checkout_date_day"]=train_data["checkin_date_day"].astype("int64")
train_data["checkout_date_month"]=train_data["checkin_date_month"].astype("int64")
train_data["checkout_date_year"]=train_data["checkin_date_year"].astype("int64")
train_data["chkin_chkout_day"]=train_data["checkout_date_day"]-train_data["checkin_date_day"]
#object to int
train_data["booking_date_int"]=train_data["booking_date"].astype("int64")
train_data["checkin_date_int"]=train_data["checkin_date"].astype("int64")
train_data["checkout_date_int"]=train_data["checkout_date"].astype("int64")
#object category to code
def cat_to_codes(df):
df=df.astype("category").cat.codes
return df.astype("int64")
train_data.resort_id.value_counts()
train_data["memberid_code"]=train_data.memberid.astype("category").cat.codes
train_data["member_age_buckets_code"]=cat_to_codes(train_data["member_age_buckets"])
train_data["cluster_code _code"]=cat_to_codes(train_data.cluster_code )
train_data["reservationstatusid_code_code"]=cat_to_codes(train_data.reservationstatusid_code)
train_data["resort_id _code"]=cat_to_codes(train_data.resort_id )
#train_data["booking_date"]=cat_to_codes(train_data.booking_date)
#train_data["checkout_date"]=cat_to_codes(train_data.checkout_date)
train_data["booking_date_code"]=train_data_c.booking_date
train_data["checkin_date_code"]=train_data_c.checkin_date
train_data["checkout_date_code"]=train_data_c.checkout_date
from datetime import datetime
from dateutil.relativedelta import relativedelta
from datetime import date
#convert object to Date time format
train_data["booking_date_code"]=pd.to_datetime(train_data["booking_date_code"], format='%d/%m/%y')
train_data["checkin_date_code"]=pd.to_datetime(train_data["checkin_date_code"], format='%d/%m/%y')
train_data["checkout_date_code"]=pd.to_datetime(train_data["checkout_date_code"],format='%d/%m/%y')
train_data["checkout_date_code"].head()
#find the days stay in there(diff betn checkin and checkout)
train_data['diff_days'] = train_data['checkout_date_code'] - train_data['checkin_date_code']
train_data['diff_days']=train_data['diff_days']/np.timedelta64(1,'D')
train_data['diff_book_check'] = train_data['checkin_date_code'] - train_data['booking_date_code']
train_data['diff_book_check']=train_data['diff_book_check']/np.timedelta64(1,'D')
train_data['diff_book_chkout'] = train_data['checkout_date_code'] - train_data['booking_date_code']
train_data['diff_book_chkout']=train_data['diff_book_chkout']/np.timedelta64(1,'D')
train_data['diff_btn_nights_day'] = train_data['diff_days'] - train_data['roomnights']
train_data['roomnights'].max()
train_data.columns
##############################test data
test_data.isnull().sum()
test_data.state_code_residence=fillnull_data(test_data.state_code_residence)
test_data.season_holidayed_code=fillnull_data(test_data.season_holidayed_code)
test_data.booking_date=replace_bacs(test_data.booking_date)
test_data.checkin_date=replace_bacs(test_data.checkin_date)
test_data.checkout_date=replace_bacs(test_data.checkout_date)
test_data["checkin_date_day"]=test_data.checkin_date.apply(get_day)
test_data["checkin_date_month"]=test_data.checkin_date.apply(get_month)
test_data["checkin_date_year"]=test_data.checkin_date.apply(get_year)
test_data["checkin_date_day"]=test_data["checkin_date_day"].astype("int64")
test_data["checkin_date_month"]=test_data["checkin_date_month"].astype("int64")
test_data["checkin_date_year"]=test_data["checkin_date_year"].astype("int64")
test_data["checkout_date_day"]=test_data.checkout_date.apply(get_day)
train_data["checkout_date_month"]=train_data.checkout_date.apply(get_month)
train_data["checkout_date_year"]=train_data.checkout_date.apply(get_year)
test_data["checkout_date_day"]=test_data["checkout_date_day"].astype("int64")
train_data["checkout_date_month"]=train_data["checkout_date_month"].astype("int64")
train_data["checkout_date_year"]=train_data["checkout_date_year"].astype("int64")
test_data["booking_date_day"]=test_data.booking_date.apply(get_day)
test_data["booking_date_month"]=test_data.booking_date.apply(get_month)
test_data["booking_date_year"]=test_data.booking_date.apply(get_year)
test_data["booking_date_day"]=test_data["booking_date_day"].astype("int64")
test_data["booking_date_month"]=test_data["booking_date_month"].astype("int64")
test_data["booking_date_year"]=test_data["booking_date_year"].astype("int64")
test_data["chkin_chkout_day"]=test_data["checkout_date_day"]-test_data["checkin_date_day"]
test_data["memberid_code"]=test_data.memberid.astype("category").cat.codes
test_data["member_age_buckets_code"]=cat_to_codes(test_data["member_age_buckets"])
test_data["cluster_code _code"]=cat_to_codes(test_data.cluster_code )
test_data["reservationstatusid_code_code"]=cat_to_codes(test_data.reservationstatusid_code)
test_data["resort_id _code"]=cat_to_codes(test_data.resort_id )
test_data["booking_date_code"]=pd.to_datetime(test_data_c["booking_date"], format='%d/%m/%y')
test_data["checkin_date_code"]=pd.to_datetime(test_data_c["checkin_date"], format='%d/%m/%y')
test_data["checkout_date_code"]=pd.to_datetime(test_data_c["checkout_date"],format='%d/%m/%y')
test_data['diff_days'] =(test_data['checkout_date_code']) - (test_data['checkin_date_code'])
test_data['diff_days']=test_data['diff_days']/np.timedelta64(1,'D')
test_data['diff_book_check'] = test_data['checkin_date_code'] - test_data['booking_date_code']
test_data['diff_book_check']=test_data['diff_book_check']/np.timedelta64(1,'D')
test_data['diff_book_chkout'] = test_data['checkout_date_code'] - test_data['booking_date_code']
test_data['diff_book_chkout']= test_data['diff_book_chkout']/np.timedelta64(1,'D')
test_data['diff_btn_nights_day'] = test_data['diff_days'] - test_data['roomnights']
#total persons
train_data["total_persons"]=train_data.numberofadults + train_data.numberofchildren
test_data["total_persons"]=test_data.numberofadults + test_data.numberofchildren
test_data.dtypes
train_data.dtypes
all_inputs=["diff_days","checkin_date_day","checkin_date_month","checkin_date_year","resort_id _code","reservationstatusid_code_code","cluster_code _code","member_age_buckets_code","memberid_code","booking_type_code","total_pax","state_code_resort","state_code_residence","season_holidayed_code","roomnights","room_type_booked_code","resort_type_code","resort_region_code","persontravellingid","numberofchildren","numberofadults","main_product_code","channel_code"]
#all_inputs=["diff_days","checkin_date_day","checkin_date_month","checkin_date_year","resort_id _code","reservationstatusid_code_code","cluster_code _code","member_age_buckets_code","memberid_code","booking_type_code","total_pax","state_code_resort","state_code_residence","season_holidayed_code","roomnights","room_type_booked_code","resort_type_code","resort_region_code","persontravellingid","numberofchildren","numberofadults","main_product_code","channel_code"]
op_var=["amount_spent_per_room_night_scaled"]
new_col=["diff_days","diff_book_check","diff_book_chkout","total_persons","numberofadults","numberofchildren","roomnights","booking_type_code","total_pax","state_code_residence","resort_type_code","resort_id _code","reservationstatusid_code_code"]
X_train, X_test, y_train, y_test = train_test_split(
train_data[all_inputs],train_data["amount_spent_per_room_night_scaled"], test_size=0.2, random_state=42)
#clf=LinearRegression()
#97.99
clf=LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
importance_type='split', learning_rate=0.111, max_depth=-1,
min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,
n_estimators=225, n_jobs=-1, num_leaves=31, objective=None,
random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
#clf=LGBMClassifier()
#clf=LinearSVC()
model=clf.fit(X_train,y_train)
pred=model.predict(X_test)
train_data_c.dtypes
from sklearn.metrics import mean_squared_error
from math import sqrt
rms = sqrt(mean_squared_error(y_test, pred))
print(rms)
model2=clf.fit(train_data[all_inputs],train_data[op_var])
pred_v=model2.predict(test_data[all_inputs])
test_data["amount_spent_per_room_night_scaled"]=pred_v
op_file=test_data[["reservation_id","amount_spent_per_room_night_scaled"]]
#op_file.head(2)
op_file.to_csv("D:\\Python project\\Club Mahindra DataOlympics\\output.csv",index=False,header=True)
| 45.870036 | 467 | 0.79372 |
8a67d40fcf341e06108d4cbf7ff08865af1229bc
| 133 |
py
|
Python
|
Shivani/circle.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/circle.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/circle.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
r=int(input("enter radius\n"))
area=3.14*r*r
print(area)
r=int(input("enter radius\n"))
circumference=2*3.14*r
print(circumference)
| 16.625 | 30 | 0.721805 |
0a5be50a580709abbd3da29e2935ca49e6acb24a
| 4,932 |
py
|
Python
|
src/onegov/directory/collections/directory_entry.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/directory/collections/directory_entry.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/directory/collections/directory_entry.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from itertools import groupby
from onegov.core.collection import GenericCollection, Pagination
from onegov.core.utils import toggle
from onegov.directory.models import DirectoryEntry
from onegov.form import as_internal_id
from sqlalchemy import and_, desc
from sqlalchemy.orm import object_session
from sqlalchemy.dialects.postgresql import array
class DirectoryEntryCollection(GenericCollection, Pagination):
""" Provides a view on a directory's entries.
The directory itself might be a natural place for lots of these methods
to reside, but ultimately we want to avoid mixing the concerns of the
directory model and this view-supporting collection.
"""
def __init__(self, directory, type='*', keywords=None, page=0,
searchwidget=None):
super().__init__(object_session(directory))
self.type = type
self.directory = directory
self.keywords = keywords or {}
self.page = page
self.searchwidget = searchwidget
def __eq__(self, other):
return self.type == other.type and self.page == other.page
def subset(self):
return self.query()
@property
def search(self):
return self.searchwidget and self.searchwidget.name
@property
def search_query(self):
return self.searchwidget and self.searchwidget.search_query
@property
def page_index(self):
return self.page
def page_by_index(self, index):
return self.__class__(
self.directory,
self.type,
self.keywords,
page=index
)
def by_name(self, name):
return self.query().filter_by(name=name).first()
def query(self):
cls = self.model_class
query = super().query().filter_by(directory_id=self.directory.id)
keywords = self.valid_keywords(self.keywords)
def keyword_group(value):
return value.split(':')[0]
values = [
':'.join((keyword, value))
for keyword in keywords
for value in keywords[keyword]
]
values.sort(key=keyword_group)
query = query.filter(and_(
cls._keywords.has_any(array(group_values))
for group, group_values in groupby(values, key=keyword_group)
))
if self.directory.configuration.direction == 'desc':
query = query.order_by(desc(cls.order))
else:
query = query.order_by(cls.order)
if self.searchwidget:
query = self.searchwidget.adapt(query)
return query
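    # --- hedged illustration (not part of the original module) ---
    # query() above ORs values within one keyword group and ANDs across
    # groups: made-up keywords {'color': ['red', 'blue'], 'size': ['xl']}
    # become ['color:blue', 'color:red', 'size:xl'], grouped by the prefix
    # before ':', so the filter reads roughly
    #     _keywords.has_any(['color:blue', 'color:red'])
    #     AND _keywords.has_any(['size:xl'])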
def valid_keywords(self, parameters):
return {
as_internal_id(k): v for k, v in parameters.items()
if k in {
as_internal_id(kw)
for kw in self.directory.configuration.keywords
}
}
@property
def directory_name(self):
return self.directory.name
@property
def model_class(self):
return DirectoryEntry.get_polymorphic_class(self.type, DirectoryEntry)
def available_filters(self, sort_choices=False, sortfunc=None):
""" Retrieve the filters with their choices. Return by default in the
order of how the are defined in the structrue.
To filter alphabetically, set sort_choices=True. """
keywords = tuple(
as_internal_id(k)
for k in self.directory.configuration.keywords or tuple()
)
fields = {f.id: f for f in self.directory.fields if f.id in keywords}
def _sort(values):
if not sort_choices:
return values
if not sortfunc:
return sorted(values)
return sorted(values, key=sortfunc)
return (
(k, fields[k].label, _sort([c.label for c in fields[k].choices]))
for k in keywords if hasattr(fields[k], 'choices')
)
def for_filter(self, singular=False, **keywords):
if not self.directory.configuration.keywords:
return self
parameters = self.keywords.copy()
for keyword, value in self.valid_keywords(keywords).items():
collection = set(parameters.get(keyword, []))
if singular:
collection = set() if value in collection else {value}
else:
collection = toggle(collection, value)
if collection:
parameters[keyword] = list(collection)
elif keyword in parameters:
del parameters[keyword]
return self.__class__(
directory=self.directory,
type=self.type,
searchwidget=self.searchwidget,
keywords=parameters)
def without_keywords(self):
return self.__class__(
directory=self.directory,
type=self.type,
page=self.page,
searchwidget=self.searchwidget
)
| 30.63354 | 78 | 0.615369 |
6a7aabfc4cb21c04ec36ae3668c14375b3193b77
| 7,903 |
py
|
Python
|
hashing/scripts/old/latency_figure5.py
|
ShuhaoZhangTony/WalnutDB
|
9ccc10b23351aa2e6793e0f5c7bd3dd511d7b050
|
[
"MIT"
] | null | null | null |
hashing/scripts/old/latency_figure5.py
|
ShuhaoZhangTony/WalnutDB
|
9ccc10b23351aa2e6793e0f5c7bd3dd511d7b050
|
[
"MIT"
] | null | null | null |
hashing/scripts/old/latency_figure5.py
|
ShuhaoZhangTony/WalnutDB
|
9ccc10b23351aa2e6793e0f5c7bd3dd511d7b050
|
[
"MIT"
] | null | null | null |
import itertools as it
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pylab
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import LogLocator
OPT_FONT_NAME = 'Helvetica'
TICK_FONT_SIZE = 20
LABEL_FONT_SIZE = 22
LEGEND_FONT_SIZE = 24
LABEL_FP = FontProperties(style='normal', size=LABEL_FONT_SIZE)
LEGEND_FP = FontProperties(style='normal', size=LEGEND_FONT_SIZE)
TICK_FP = FontProperties(style='normal', size=TICK_FONT_SIZE)
MARKERS = (['o', 's', 'v', "^", "h", "v", ">", "x", "d", "<", "|", "", "+", "_"])
# you may want to change the color map for different figures
COLOR_MAP = ('#F15854', '#5DA5DA', '#60BD68', '#B276B2', '#DECF3F', '#F17CB0', '#B2912F', '#FAA43A', '#AFAFAF')
# you may want to change the patterns for different figures
PATTERNS = (["|", "\\", "/", "+", "-", ".", "*", "x", "o", "O", "////", ".", "|||", "o", "---", "+", "\\\\", "*"])
LABEL_WEIGHT = 'bold'
LINE_COLORS = COLOR_MAP
LINE_WIDTH = 3.0
MARKER_SIZE = 13.0
MARKER_FREQUENCY = 1000
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['xtick.labelsize'] = TICK_FONT_SIZE
matplotlib.rcParams['ytick.labelsize'] = TICK_FONT_SIZE
matplotlib.rcParams['font.family'] = OPT_FONT_NAME
FIGURE_FOLDER = '/data1/xtra/results/figure'
# there are some embedding problems if directly exporting the pdf figure using matplotlib.
# so we generate the eps format first and convert it to pdf.
def ConvertEpsToPdf(dir_filename):
os.system("epstopdf --outfile " + dir_filename + ".pdf " + dir_filename + ".eps")
os.system("rm -rf " + dir_filename + ".eps")
def DrawLegend(legend_labels, filename):
fig = pylab.figure()
ax1 = fig.add_subplot(111)
FIGURE_LABEL = legend_labels
LINE_WIDTH = 8.0
MARKER_SIZE = 12.0
LEGEND_FP = FontProperties(style='normal', size=26)
figlegend = pylab.figure(figsize=(12, 0.5))
idx = 0
lines = [None] * (len(FIGURE_LABEL))
data = [1]
x_values = [1]
idx = 0
    for group in range(len(FIGURE_LABEL)):  # range, not Python-2 xrange
lines[idx], = ax1.plot(x_values, data,
color=LINE_COLORS[idx], linewidth=LINE_WIDTH,
marker=MARKERS[idx], markersize=MARKER_SIZE, label=str(group))
idx = idx + 1
# LEGEND
figlegend.legend(lines, FIGURE_LABEL, prop=LEGEND_FP,
loc=1, ncol=len(FIGURE_LABEL), mode="expand", shadow=False,
frameon=False, borderaxespad=0.0, handlelength=2)
if not os.path.exists(FIGURE_FOLDER):
os.makedirs(FIGURE_FOLDER)
# no need to export eps in this case.
figlegend.savefig(FIGURE_FOLDER + '/' + filename + '.pdf')
# draw a bar chart
def DrawFigure(x_values, y_values, legend_labels, x_label, y_label, y_min, y_max, filename, allow_legend):
# you may change the figure size on your own.
fig = plt.figure(figsize=(8, 3))
figure = fig.add_subplot(111)
FIGURE_LABEL = legend_labels
if not os.path.exists(FIGURE_FOLDER):
os.makedirs(FIGURE_FOLDER)
# values in the x_xis
index = np.arange(len(x_values))
# the bar width.
# you may need to tune it to get the best figure.
width = 0.1
# draw the bars
bars = [None] * (len(FIGURE_LABEL))
for i in range(len(y_values)):
bars[i] = plt.bar(index + i * width + width / 2,
y_values[i], width,
hatch=PATTERNS[i],
color=LINE_COLORS[i],
label=FIGURE_LABEL[i])
# sometimes you may not want to draw legends.
if allow_legend == True:
plt.legend(bars, FIGURE_LABEL,
prop=LEGEND_FP,
ncol=4,
loc='upper center',
# mode='expand',
shadow=False,
bbox_to_anchor=(0.45, 1.6),
columnspacing=0.1,
handletextpad=0.2,
# bbox_transform=ax.transAxes,
# frameon=True,
# columnspacing=5.5,
# handlelength=2,
)
# you may need to tune the xticks position to get the best figure.
plt.xticks(index + 2.4 * width, x_values)
plt.yscale('log')
plt.grid(axis='y', color='gray')
figure.yaxis.set_major_locator(LogLocator(base=10))
# figure.xaxis.set_major_locator(LinearLocator(5))
figure.get_xaxis().set_tick_params(direction='in', pad=10)
figure.get_yaxis().set_tick_params(direction='in', pad=10)
plt.xlabel(x_label, fontproperties=LABEL_FP)
plt.ylabel(y_label, fontproperties=LABEL_FP)
plt.savefig(FIGURE_FOLDER + "/" + filename + ".pdf", bbox_inches='tight')
# example for reading csv file
def ReadFile():
y = []
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/PRJ_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col1.append(x)
y.append(col1)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/NPJ_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col2.append(x)
y.append(col2)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/MPASS_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col3.append(x)
y.append(col3)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/MWAY_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col4.append(x)
y.append(col4)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/SHJ_JM_NP_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col5.append(x)
y.append(col5)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/SHJ_JBCR_NP_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col6.append(x)
y.append(col6)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/PMJ_JM_NP_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col7.append(x)
y.append(col7)
for id in it.chain(range(20, 25)):
file = '/data1/xtra/results/latency/PMJ_JBCR_NP_{}.txt'.format(id)
f = open(file, "r")
read = f.readlines()
        x = float(read.pop(int(len(read) * 0.99)).strip("\n"))  # 99th-percentile value
col8.append(x)
y.append(col8)
return y
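# --- hedged illustration (not part of the original script) ---
# read.pop(int(len(read) * 0.99)) picks the value 99 % of the way through
# the (assumed pre-sorted) latency file, i.e. the 99th-percentile latency:
_demo_latencies = ['%d\n' % v for v in range(100)]  # made-up sorted values
assert float(_demo_latencies[int(len(_demo_latencies) * 0.99)]) == 99.0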
if __name__ == "__main__":
# x_values = ['Unique', 'Zipf(0)', 'Zipf(0.2)', 'Zipf(0.4)', 'Zipf(0.8)', 'Zipf(1)']
x_values = [0, 0.2, 0.4, 0.8, 1]
y_values = ReadFile()
legend_labels = ['PRJ', 'NPJ', 'M-PASS', 'M-WAY', 'SHJ$^M$', 'SHJ$^B$', 'PMJ$^M$', 'PMJ$^B$']
DrawFigure(x_values, y_values, legend_labels,
'Key Skewness (zipf)', '$99^{th}$ latency (ms)', 0,
400, 'latency_figure5', False)
# DrawLegend(legend_labels, 'factor_legend')
| 34.969027 | 114 | 0.580159 |
7cfc4a289194c5f16e035bb36148a8271cb1250d
| 4,856 |
py
|
Python
|
test/test_npu/test_network_ops/test_nllloss_backward.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_nllloss_backward.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_nllloss_backward.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNlllossbackward(TestCase):
def cpu_op_exec_new(self, input1, target, reduction, ignore_index):
if not ignore_index:
            ignore_index = -100  # default value
input1.requires_grad_(True)
output = torch.nn.functional.nll_loss(input1, target, reduction=reduction, ignore_index=ignore_index)
input_cpu = output.detach().numpy()
output.backward(torch.ones_like(output))
res = input1.grad
res = res.numpy()
return input_cpu, res
def npu_op_exec_new(self, input1, target, reduction, ignore_index):
if not ignore_index:
            ignore_index = -100  # default value
target = target.to(torch.int32)
target = target.to("npu")
input1.requires_grad_(True)
output = torch.nn.functional.nll_loss(input1, target, reduction=reduction, ignore_index=ignore_index)
output.backward(torch.ones_like(output))
input_npu = output.to("cpu")
input_npu = input_npu.detach().numpy()
res = input1.grad.to("cpu")
res = res.numpy()
return input_npu, res
def test_nllloss_shape_format_fp32(self, device):
        # Only positive values are supported here; if np.sum(ignore_index == np_target) == 0, any value of ignore_index has no effect.
ignore_index = 1
for reduction in ['mean', 'none', 'sum']:
shape_format = [
[[np.float32, 0, [256, 100]], [np.int32, 0, [256]], reduction, None],
[[np.float32, 3, [256, 100]], [np.int32, 0, [256]], reduction, ignore_index],
[[np.float32, 0, [4800, 3003]], [np.int32, 0, [4800]], reduction, ignore_index],
[[np.float32, 3, [4800, 3003]], [np.int32, 0, [4800]], reduction, ignore_index],
[[np.float32, 0, [4800, 3003]], [np.int32, 0, [4800]], reduction, None],
]
for item in shape_format:
np_target = np.random.randint(0, item[0][2][1], (item[1][2])).astype(np.long)
target = torch.from_numpy(np_target)
cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
cpu_input, cpu_output = self.cpu_op_exec_new(cpu_input1, target, item[2], item[3])
npu_input, npu_output = self.npu_op_exec_new(npu_input1, target, item[2], item[3])
self.assertRtolEqual(cpu_input, npu_input)
self.assertRtolEqual(cpu_output, npu_output)
def test_nllloss_shape_format_fp16(self, device):
        # Only positive values are supported here; if np.sum(ignore_index == np_target) == 0, any value of ignore_index has no effect.
ignore_index = 1
for reduction in ['mean', 'none', 'sum']:
shape_format = [
[[np.float16, 0, [256, 100]], [np.int32, 0, [256]], reduction, ignore_index],
[[np.float16, 3, [256, 100]], [np.int32, 0, [256]], reduction, ignore_index],
[[np.float16, 0, [4800, 3003]], [np.int32, 0, [4800]], reduction, ignore_index],
[[np.float16, 3, [4800, 3003]], [np.int32, 0, [4800]], reduction, ignore_index],
[[np.float16, 0, [4800, 3003]], [np.int32, 0, [4800]], reduction, None],
]
for item in shape_format:
np_target = np.random.uniform(0, item[0][2][1], (item[1][2])).astype(np.long)
target = torch.from_numpy(np_target)
cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
cpu_input1 = cpu_input1.to(torch.float32)
cpu_input, cpu_output = self.cpu_op_exec_new(cpu_input1, target, item[2], item[3])
npu_input, npu_output = self.npu_op_exec_new(npu_input1, target, item[2], item[3])
cpu_input = cpu_input.astype(np.float16)
cpu_output = cpu_output.astype(np.float16)
self.assertRtolEqual(cpu_input, npu_input)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestNlllossbackward, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 50.061856 | 109 | 0.629736 |
861ea1651ae9d88e8a5ab6e9b805cc0603e2857a
| 30,548 |
py
|
Python
|
src/demo.py
|
bela127/Pruning_with_Saliency_Information
|
d0d67c88c863c49def3011862a9a26e94e6f5bf9
|
[
"MIT"
] | null | null | null |
src/demo.py
|
bela127/Pruning_with_Saliency_Information
|
d0d67c88c863c49def3011862a9a26e94e6f5bf9
|
[
"MIT"
] | null | null | null |
src/demo.py
|
bela127/Pruning_with_Saliency_Information
|
d0d67c88c863c49def3011862a9a26e94e6f5bf9
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from tensorflow import keras
from matplotlib import pyplot as plt
from matplotlib import colors
import keras.backend as K
sess = tf.InteractiveSession()
mnist = keras.datasets.mnist#.fashion_mnist#.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# scale pixel values to the range 0..1
train_images = train_images / 255.0
test_images = test_images / 255.0
# scale pixel values to the range -1..1
train_images = train_images * 2 - 1
test_images = test_images * 2 - 1
# one hot encoding of labels
def one_hot_encode(a, length):
temp = np.zeros((a.shape[0], length))
temp[np.arange(a.shape[0]), a] = 1.0
return temp
# one hot one cold encoding of labels
def one_hot_one_cold_encode(a, length):
temp = np.ones((a.shape[0], length))
temp = temp * -1
temp[np.arange(a.shape[0]), a] = 1.0
return temp
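# --- hedged illustration (not part of the original file) ---
# For labels [2, 0] and length 4 the two encoders produce:
assert (one_hot_encode(np.array([2, 0]), 4)
        == np.array([[0., 0., 1., 0.], [1., 0., 0., 0.]])).all()
assert (one_hot_one_cold_encode(np.array([2, 0]), 4)
        == np.array([[-1., -1., 1., -1.], [1., -1., -1., -1.]])).all()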
labels_size = 10
#apply the encodings to the labels
train_numeric_labels = train_labels
test_numeric_labels = test_labels
train_labels_one_hot = one_hot_encode(train_labels, labels_size)
test_labels_one_hot = one_hot_encode(test_labels, labels_size)
train_labels_one_hot_one_cold = one_hot_one_cold_encode(train_labels, labels_size)
test_labels_one_hot_one_cold = one_hot_one_cold_encode(test_labels, labels_size)
#dataset infos
(ds_size,image_size,_) = train_images.shape
ds_test_size = int(test_labels.shape[-1])
#augmentation
rauschen = True #False ; True
#loading of model
model_to_load = "model_step_0_acc_0.8704000115394592" #"model_step_0_acc_0.932699978351593" # "None";
#learning infos
learning_rate = 0.1
steps_number = 1551
batch_size = 200
#pruning
pruning_loss = False #False ; True
loss_change = False #False ; True
pruning_faktor = 0.95
pruning_steps = 150
#info
display_model = False #False ; True
display_pruning = False #False ; True
train_images = np.reshape(train_images, [-1, image_size*image_size])
test_images = np.reshape(test_images, [-1, image_size*image_size])
def augment_with_gaus(images, mean = 0.0, std = 0.2, max = 1, min = -1):
images_augmented = []
for image in images:
rausch = np.random.normal(mean,std,(image_size*image_size))
images_augmented.append(np.clip(image + rausch, min, max))
return np.asarray(images_augmented)
def augment_with_salt_peper(images, percentage = 0.15, max = 1, min = -1):
images_augmented = []
rausch_count = int(image_size*image_size*percentage)
for image in images:
rausch_index = np.random.randint(0,image_size*image_size,rausch_count)
#salt
image[rausch_index[:rausch_count//2]] = max
#peper
image[rausch_index[rausch_count//2:]] = min
images_augmented.append(image)
return np.asarray(images_augmented)
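# --- hedged usage sketch (not part of the original file) ---
# Both augmenters expect flattened images scaled to [-1, 1]; on a single
# blank image (made up here) the shape is kept and values stay clipped.
# Note: augment_with_salt_peper writes its salt/pepper pixels into the
# passed-in arrays in place.
_blank = np.zeros((1, image_size * image_size))
assert augment_with_gaus(_blank).shape == (1, image_size * image_size)
assert np.abs(augment_with_gaus(_blank)).max() <= 1.0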
if rauschen :
print("augment data")
    # add noise, otherwise MNIST is too easy
    # Gaussian noise: mean 0, std 0.2
train_images = augment_with_gaus(train_images)
test_images = augment_with_gaus(test_images)
    #salt/pepper noise on 15 % of the pixels
train_images = augment_with_salt_peper(train_images)
test_images = augment_with_salt_peper(test_images)
# create dataset objects from the arrays
dx = tf.data.Dataset.from_tensor_slices(train_images)
dy = tf.data.Dataset.from_tensor_slices(train_labels_one_hot_one_cold)
#dy = tf.data.Dataset.from_tensor_slices(train_labels_one_hot)
batches = tf.data.Dataset.zip((dx, dy)).shuffle(30000).batch(batch_size)
test_labels = test_labels_one_hot_one_cold
#test_labels = test_labels_one_hot
# create an initializable iterator
iterator = batches.make_initializable_iterator()
# extract an element
next_element = iterator.get_next()
weight_counts = [[],[],[]]
sparcitys = [[],[],[]]
accuracys = []
def main():
model = create_base_model(image_size*image_size,10)
model_train = create_train_model(model)
model_eval = create_evaluation_model(model)
model_prun = create_pruning_model(model)
saver = tf.train.Saver()
loaded = load_or_init_model(saver)
if not loaded:
train_model(model_train,model_eval,learning_rate, steps_number)
accuracy = evaluate_model(model_eval)
if display_model:
display_model_with_samples(model_prun, 1)
important_weights = calculate_important_weights(model_prun,2000)
if display_pruning:
display_important_weights(important_weights)
pruning_step = 0
while pruning_step < pruning_steps:#True:
if not loaded:
save_model(model,f"step_{pruning_step}_acc_{accuracy}",saver)
pruning_step += 1
prune_model(model_prun,important_weights, pruning_faktor)
train_model(model_train,model_eval,learning_rate, steps_number//2)
accuracy = evaluate_model(model_eval)
important_weights = calculate_important_weights(model_prun,1000)
if display_pruning:
display_important_weights(important_weights)
loaded = False
def create_base_model(inputs, outputs):
x = tf.placeholder(tf.float32, shape=(None, inputs), name="input")
tf.add_to_collection("layer_out",x)
y, mask = connection(x)
y, y_no_act, weights, mask = fc_layer(y, 36, activation=tf.nn.tanh)
y, mask = connection(y)
y, y_no_act, weights, mask = fc_layer(y, 25, activation=tf.nn.tanh)
y, mask = connection(y)
y, y_no_act, weights, mask = fc_layer(y, outputs, activation=tf.nn.tanh)
y, mask = connection(y)
return (x ,y)
def create_train_model(model):
x,y = model
with tf.variable_scope("train"):
ground_truth = tf.placeholder(tf.float32, (None, y.shape[-1]),name="ground_truth")
tf.add_to_collection("train_labels", ground_truth)
with tf.variable_scope("loss"):
loss = tf.losses.mean_squared_error(ground_truth, y)
tf.add_to_collection("train_losses", loss)
# Training step
learning_rate = tf.placeholder(tf.float32, None,name="learning_rate")
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
return x, ground_truth, loss, train_op, learning_rate
def create_pruning_model(model):
x,y = model
with tf.variable_scope("prun"):
ground_truth = tf.placeholder(tf.float32, (None, y.shape[-1]),name="ground_truth")
tf.add_to_collection("prun_labels", ground_truth)
with tf.variable_scope("loss"):
if pruning_loss:
minimum = tf.reduce_min(y)
out = tf.subtract(y,minimum)
masked_out = tf.multiply(ground_truth,out)
loss = tf.reduce_max(masked_out)
else:
loss = tf.losses.mean_squared_error(ground_truth, y)
tf.add_to_collection("prun_losses", loss)
with tf.variable_scope("gradients"):
layer_weights = tf.get_collection("layer_weights")
connection_out = tf.get_collection("connection_out")
for weights in layer_weights:
if loss_change:
weight_grad = tf.multiply(weights,tf.gradients(loss, weights))
else:
weight_grad = tf.gradients(loss, weights)
tf.add_to_collection("weight_grads", weight_grad)
for layer_in in connection_out:
if loss_change:
input_grad = tf.multiply(layer_in,tf.gradients(loss, layer_in))
else:
input_grad = tf.gradients(loss, layer_in)
tf.add_to_collection("input_grads", input_grad)
return x, ground_truth, loss
def create_evaluation_model(model):
x,y = model
with tf.variable_scope("eval"):
ground_truth = tf.placeholder(tf.float32, (None, y.shape[-1]),name="ground_truth")
tf.add_to_collection("eval_labels", ground_truth)
with tf.variable_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(ground_truth, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.add_to_collection("evaluations", accuracy)
return x, ground_truth, [accuracy]
def load_or_init_model(saver):
#saver.restore(sess, "./models/model.ckpt")
try:
saver.restore(sess, f"./models/{model_to_load}.ckpt")
print("======-- model initiliced --======")
return True
except:
print("========-- Warning! --========")
print("= failed to load model =")
print("= initiliced random =")
print("========-- Warning! --========")
sess.run(tf.global_variables_initializer())
return False
def train_model(model_train, model_eval, learning_rate, steps_number):
x_train, gt_train, loss, training_op, lr = model_train
x_eval, gt_eval, [accuracy] = model_eval
# Run the training
sess.run(iterator.initializer)
for step in range(steps_number):
# get batch of images and labels
(batch_x,batch_y) = sess.run(next_element)
feed_dict_train = {x_train: batch_x, gt_train: batch_y, lr: learning_rate}
# Run the training step
training_op.run(feed_dict=feed_dict_train)
# Print the accuracy progress on the batch every 100 steps
if step%100 == 0:
feed_dict_eval = {x_eval: batch_x, gt_eval: batch_y}
train_accuracy = accuracy.eval(feed_dict=feed_dict_eval)
print("Step %d, training batch accuracy %g %%"%(step, train_accuracy*100))
if (step + 1) % (ds_size // batch_size) == 0 and step > 0:
sess.run(iterator.initializer)
def evaluate_model(model_eval):
x_eval, gt_eval, [accuracy] = model_eval
feed_dict_eval = {x_eval: test_images, gt_eval: test_labels}
test_accuracy = accuracy.eval(feed_dict=feed_dict_eval)
print("Test accuracy: %g %%"%(test_accuracy*100))
accuracys.append(test_accuracy*100)
return test_accuracy
def save_model(model,name,saver):
save_path = saver.save(sess, f"./models/model_{name}.ckpt")
print("Model saved in path: %s" % save_path)
def display_model_with_samples(model_prun, samples):
weight_grads = tf.get_collection("weight_grads")
input_grads = tf.get_collection("input_grads")
connection_out = tf.get_collection("connection_out")
#Display some sample images
for i in range(samples):
print(i, " from ", samples)
#choose random sample images
test_image_nr = np.random.randint(0,ds_test_size)
test_image_nr = 11 # NOTE: overrides the random choice above with a fixed image for reproducible inspection
image = np.reshape(test_images[test_image_nr],[1,-1])
if pruning_loss:
label_mask = np.reshape(test_labels_one_hot[test_image_nr],[1,-1])
else:
label_mask = np.reshape(test_labels_one_hot_one_cold[test_image_nr],[1,-1])
x, ground_truth, loss = model_prun
feed_dict = {x: image, ground_truth: label_mask}
# calculate the output (feature map) of each layer, which is the input of the next
# -> perform a layer-wise forward pass
print("Layer Inputs:")
outs = []
for outputs in connection_out:
out = sess.run(outputs, feed_dict=feed_dict)
outs.append(out)
out = out[0]
#display the output
print(outputs.name)
print(outputs.shape)
plt.title(outputs.name)
x = np.sqrt(len(out))
y = np.sqrt(len(out))
plt.xticks(np.arange(0, x))
plt.yticks(np.arange(0, y))
if x * y == len(out):
out = np.reshape(out,[int(x),int(y)])
#print("Graident of inputs\n=", grad)
plt.imshow(out, cmap='binary')
plt.colorbar()
plt.show()
elif 2 * 5 == len(out):
out = np.reshape(out,[2,5])
#print("Graident of inputs\n=", grad)
plt.imshow(out, cmap='binary')
plt.colorbar()
plt.show()
else:
print("Error:",x,"*",y,"=",x * y," != ",len(grad))
print("Weight Importance:")
#calculate impact of weights directly on final loss
for weight_grad in weight_grads:
weight_grad = weight_grad[0]
grad = sess.run(weight_grad, feed_dict=feed_dict)
print(weight_grad.name)
print(weight_grad.shape)
grad = np.abs(grad)
x = np.sqrt(len(grad))
y = np.sqrt(len(grad))
if x * y == len(grad):
show_images(grad,[int(x),int(y)],weight_grad.name)
else:
print("Error:",x,"*",y,"=",x * y," != ",len(grad))
print("Input Gradients:")
#calculate impact of input directly on final loss
for input_grad in input_grads:
input_grad = input_grad[0]
grad = sess.run(input_grad, feed_dict=feed_dict)
grad = grad[0]
print(input_grad.name)
print(input_grad.shape)
plt.title(input_grad.name)
x = np.sqrt(len(grad))
y = np.sqrt(len(grad))
plt.xticks(np.arange(0, x))
plt.yticks(np.arange(0, y))
if x * y == len(grad):
grad = np.reshape(grad,[int(x),int(y)])
plt.imshow(grad, cmap='binary')
plt.colorbar()
plt.show()
elif 2 * 5 == len(grad):
grad = np.reshape(grad,[2,5])
plt.imshow(grad, cmap='binary')
plt.colorbar()
plt.show()
else:
print("Error:",x,"*",y,"=",x * y," != ",len(out))
print("Input Importance:")
#calculate abs impact of input directly on final loss
for input_grad in input_grads:
input_grad = input_grad[0]
grad = sess.run(input_grad, feed_dict=feed_dict)
grad = np.abs(grad[0])
print(input_grad.name)
print(input_grad.shape)
plt.title(input_grad.name)
x = np.sqrt(len(grad))
y = np.sqrt(len(grad))
plt.xticks(np.arange(0, x))
plt.yticks(np.arange(0, y))
if x * y == len(grad):
grad = np.reshape(grad,[int(x),int(y)])
plt.imshow(grad, cmap='binary')
plt.colorbar()
plt.show()
elif 2 * 5 == len(grad):
grad = np.reshape(grad,[2,5])
plt.imshow(grad, cmap='binary')
plt.colorbar()
plt.show()
else:
print("Error:",x,"*",y,"=",x * y," != ",len(out))
print("Scaled Importance:")
#calculate input importance (scale abs grade 0 to 1)
input_importance = []
for input_grad in input_grads:
input_grad = input_grad[0]
grad = sess.run(input_grad, feed_dict=feed_dict)
grad = np.abs(grad[0])
minimum = np.min(grad)
maximum = np.max(grad)
if minimum < maximum:
importance = grad - minimum
importance = importance / (maximum - minimum)
else:
importance = grad - minimum
input_importance.append(importance)
print(input_grad.name)
print(input_grad.shape)
plt.title(input_grad.name)
x = np.sqrt(len(importance))
y = np.sqrt(len(importance))
plt.xticks(np.arange(0, x))
plt.yticks(np.arange(0, y))
if x * y == len(importance):
importance = np.reshape(importance,[int(x),int(y)])
plt.imshow(importance, cmap='binary')
plt.colorbar()
plt.show()
elif 2 * 5 == len(importance):
importance = np.reshape(importance,[2,5])
plt.imshow(importance, cmap='binary')
plt.colorbar()
plt.show()
else:
print("Error:",x,"*",y,"=",x * y," != ",len(grad))
print("Weight Importance:")
#calculate weight importance from input importance
for importance_1, importance_2 in zip(input_importance[:-1],input_importance[1:]):
print(len(importance_1))
print(len(importance_2))
weight_importance = []
for importance in importance_2:
single_weight_importance = importance_1 * importance
weight_importance.append(single_weight_importance)
weight_importance = np.asarray(weight_importance).T
print(weight_importance.shape)
x = np.sqrt(len(weight_importance))
y = np.sqrt(len(weight_importance))
if x * y == len(weight_importance):
show_images(weight_importance,[int(x),int(y)],"weight_importance")
else:
print("Error:",x,"*",y,"=",x * y," != ",len(weight_importance))
def calculate_important_weights(model_prun,samples):
input_grads = tf.get_collection("input_grads")
cummulated_weight_importance=[]
for i in range(samples):
if display_pruning:
print(i, " from ", samples)
test_image_nr = np.random.randint(0,ds_test_size)
image = np.reshape(test_images[test_image_nr],[1,-1])
if pruning_loss:
label_mask = np.reshape(test_labels_one_hot[test_image_nr],[1,-1])
else:
label_mask = np.reshape(test_labels_one_hot_one_cold[test_image_nr],[1,-1])
x, ground_truth, loss = model_prun
feed_dict = {x: image, ground_truth: label_mask}
input_importance = []
for input_grad in input_grads:
input_grad = input_grad[0]
grad = sess.run(input_grad, feed_dict=feed_dict)
grad = np.abs(grad[0])
minimum = np.min(grad)
maximum = np.max(grad)
#Min Max norm of Gradients
if minimum < maximum:
importance = grad - minimum
importance = importance / (maximum - minimum)
else:
importance = grad - minimum
input_importance.append(importance)
all_weight_importance=[]
for importance_1, importance_2 in zip(input_importance[:-1],input_importance[1:]):
weight_importance = []
for importance in importance_2:
single_weight_importance = importance_1 * importance
weight_importance.append(single_weight_importance)
weight_importance = np.asarray(weight_importance).T
all_weight_importance.append(weight_importance)
if len(cummulated_weight_importance) == 0:
cummulated_weight_importance = np.asarray(all_weight_importance)
else:
cummulated_weight_importance += np.asarray(all_weight_importance)
## Mask out pruned weights
layer_masks = tf.get_collection("layer_masks")
layer_masks_values=[]
for layer_mask in layer_masks:
layer_mask_value = layer_mask.eval()
layer_masks_values.append(layer_mask_value)
cummulated_weight_importance = cummulated_weight_importance * np.asarray(layer_masks_values)
return cummulated_weight_importance
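# Note on calculate_important_weights (a reading of the code above, not an
# external spec): per sample, each layer's |d loss / d input| vector is min-max
# normalized to [0, 1]; the importance of weight (i, j) is the outer product
# importance_in[i] * importance_out[j]; these outer products are summed over the
# sampled images and finally zeroed wherever the weight is already pruned.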
def display_important_weights(cummulated_weight_importance):
for weight_importance_sum in cummulated_weight_importance:
x = np.sqrt(len(weight_importance_sum))
y = np.sqrt(len(weight_importance_sum))
if x * y == len(weight_importance_sum):
show_images(weight_importance_sum,[int(x),int(y)])
else:
print("Error:",x,"*",y,"=",x * y," != ",len(weight_importance_sum))
def prune_model(model_prun, important_weights, sparsification_factor):
layer_masks = tf.get_collection("layer_masks")
# Calculate pruning mask
# Go through every layer
for i,(important_weight,layer_mask) in enumerate(zip(important_weights,layer_masks)):
layer_mask_value = layer_mask.eval()
# Go through every neuron
layer_mask_value = layer_mask_value.T
important_weight = important_weight.T
masks = []
for weight,weight_mask in zip(important_weight,layer_mask_value):
maximum = np.max(weight)
# if the neuron still has unmasked, non-zero weights, threshold against the median
if sum(weight_mask) > 0 and maximum > 0:
# inverse density (len(mask)/sum(mask) >= 1), used as the exponent of the sparsification factor
sparsity = len(weight_mask) / sum(weight_mask)
#minimum = np.min(weight[np.nonzero(weight)])
#weight = weight - minimum
median = np.median(weight[np.nonzero(weight)])
mask = weight > pow(sparsification_factor, sparsity) * median
else:
mask = weight_mask
masks.append(mask)
masks = np.asarray(masks).T
if display_pruning:
display_pruning_masks(masks)
print(sum(masks.flatten())," from ", len(masks.flatten())," sparsity: ", sum(masks.flatten())/len(masks.flatten()))
weight_counts[i].append(sum(masks.flatten()))
sparcitys[i].append(sum(masks.flatten())/len(masks.flatten()))
layer_mask.load(masks, sess)
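# Worked example of the threshold rule above (hypothetical numbers): a neuron
# with mask [1, 1, 1, 1] has sparsity 4/4 = 1; with importances
# [0.1, 0.4, 0.6, 0.9] the non-zero median is 0.5, so a sparsification factor of
# 0.8 gives the keep-threshold 0.8**1 * 0.5 = 0.4 and the new mask becomes
# [False, False, True, True].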
def display_pruning_masks(masks):
x = np.sqrt(len(masks))
y = np.sqrt(len(masks))
if x * y == len(masks):
show_images(masks,[int(x),int(y)])
else:
print("Error:",x,"*",y,"=",x * y," != ",len(masks))
def connection(x, name = None):
with tf.variable_scope(name, "connection",[x]):
print(tf.get_variable_scope().name)
print(x.shape)
mask = tf.get_variable("mask",shape=x.shape[-1],initializer=tf.constant_initializer(1),trainable=False)
tf.add_to_collection("connection_masks", mask)
y = tf.multiply(x, mask)
tf.add_to_collection("connection_out", y)
print(y.shape)
return y, mask
def fc_layer(x, outputs, activation = tf.nn.sigmoid, name = None):
with tf.variable_scope(name, "fc_layer", [x]):
print(tf.get_variable_scope().name)
print(x.shape)
weights = tf.get_variable("weights", [x.shape[-1], outputs])
tf.add_to_collection("layer_weights", weights)
print(weights.shape)
biases = tf.get_variable("biases", [outputs])
tf.add_to_collection("layer_biases", biases)
print(biases.shape)
mask = tf.get_variable("mask",[x.shape[-1], outputs],initializer=tf.constant_initializer(1),trainable=False)
tf.add_to_collection("layer_masks", mask)
print(mask.shape)
masked_weights = tf.multiply(weights, mask)
y_no_activation = tf.nn.bias_add(tf.matmul(x, masked_weights), biases)
tf.add_to_collection("layer_out_no_activation", y_no_activation)
if activation == None:
tf.add_to_collection("layer_out", y_no_activation)
print(y_no_activation.shape)
return y_no_activation, y_no_activation, weights, mask
else:
y = activation(y_no_activation)
tf.add_to_collection("layer_out", y)
print(y.shape)
return y, y_no_activation, weights, mask
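# How connection() and fc_layer() compose (a sketch; shapes assume the network
# built in create_base_model with `inputs` input features):
#   x: (None, inputs) --connection--> element-wise masked copy of x
#     --fc_layer(36)--> weights (inputs, 36), masked per weight, output (None, 36)
#     --connection--> masked activations, and so on down to the output layer.
# Both mask kinds start as all-ones; in this script pruning only ever writes
# zeros into the per-weight masks via layer_mask.load(...) in prune_model.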
def show_images(grad,image_shape,titel = 'Multiple images'):
size, neurons = grad.shape
Nc = 5
Nr = int(neurons/Nc)
cmap = 'binary'#'coolwarm_r'#'hot'#'jet'#"cool"
fig, axs = plt.subplots(Nr, Nc)
fig.suptitle(titel)
images = []
for i in range(Nr):
for j in range(Nc):
# select the (i * Nc + j)-th neuron's gradient image for this subplot
neuron_grads = grad[:, i * Nc + j]
data = np.reshape(neuron_grads,image_shape)
images.append(axs[i, j].imshow(data, cmap=cmap))
axs[i, j].set_xticks(np.arange(0, image_shape[0]))
axs[i, j].set_yticks(np.arange(0, image_shape[1]))
axs[i, j].label_outer()
# Find the min and max of all colors for use in setting the color scale.
vmin = min(image.get_array().min() for image in images)
vmax = max(image.get_array().max() for image in images)
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for im in images:
im.set_norm(norm)
fig.colorbar(images[0], ax=axs, orientation='horizontal', fraction=.1)
# Make images respond to changes in the norm of other images (e.g. via the
# "edit axis, curves and images parameters" GUI on Qt), but be careful not to
# recurse infinitely!
def update(changed_image):
for im in images:
if (changed_image.get_cmap() != im.get_cmap()
or changed_image.get_clim() != im.get_clim()):
im.set_cmap(changed_image.get_cmap())
im.set_clim(changed_image.get_clim())
for im in images:
im.callbacksSM.connect('changed', update)
plt.show()
if __name__ == "__main__":
main()
for i in range(3):
values = weight_counts[i]
fig, ax = plt.subplots()
ax.plot(values, color="blue")
ax.set(xlabel='Pruning Step', ylabel='Weight Count',
title=f'Convergence of Weight Count for Layer {i}')
ax.grid()
#fig.savefig("test.png")
plt.show()
values = sparcitys
fig, ax = plt.subplots()
ax.plot(values, color="blue")
ax.set(xlabel='Layer', ylabel='Sparsity',
title='Convergence of Sparsity')
ax.set_xticks([0,1,2])
ax.set_xticklabels(['zero', 'one','two'])
ax.grid()
#fig.savefig("test.png")
plt.show()
values = accuracys
fig, ax = plt.subplots()
ax.plot(values, color="blue")
ax.set(xlabel='Pruning Step', ylabel='Accuracy',
title='Behavior of Accuracy')
ax.grid()
#fig.savefig("test.png")
plt.show()
| 41.114401 | 131 | 0.520296 |
863f9abefe15c05212e7522a515c3db972360a5a
| 18,386 |
py
|
Python
|
data-import/src/main.py
|
FoxComm/highlander
|
1aaf8f9e5353b94c34d574c2a92206a1c363b5be
|
[
"MIT"
] | 10 |
2018-04-12T22:29:52.000Z
|
2021-10-18T17:07:45.000Z
|
data-import/src/main.py
|
FoxComm/highlander
|
1aaf8f9e5353b94c34d574c2a92206a1c363b5be
|
[
"MIT"
] | null | null | null |
data-import/src/main.py
|
FoxComm/highlander
|
1aaf8f9e5353b94c34d574c2a92206a1c363b5be
|
[
"MIT"
] | 1 |
2018-07-06T18:42:05.000Z
|
2018-07-06T18:42:05.000Z
|
#!/usr/bin/python3
#
import argparse
import itertools
import json
import os.path
import logging
import urllib.request
import ssl
from collections import defaultdict
from urllib.error import HTTPError
from adidas_convert import convert_taxonomies, convert_products
class Taxon:
def __init__(self, taxon_id: int, parent_id: int, name: str, taxonomy_id: int):
self.taxonomyId = taxonomy_id
self.parentId = parent_id
self.name = name
self.taxon_id = taxon_id
self.path = [name] if parent_id is None else None
class Taxonomy:
def __init__(self, taxonomy_id, name, taxons):
self.taxons = [] if taxons is None else taxons
self.name = name
self.taxonomy_id = taxonomy_id
self.build_paths()
def get_taxon_by_name(self, taxon_name, parent_id=None):
def matches(taxon: Taxon):
result = taxon.name == taxon_name
if parent_id is not None:
return result and parent_id == taxon.parentId
else:
return result
return next(iter([taxon for taxon in self.taxons if matches(taxon)]), None)
def get_taxon_by_path(self, path: list):
parent_id = None
taxon = None
for name in path:
taxon = self.get_taxon_by_name(name, parent_id)
# descend one level: children of the taxon we just matched carry its id as parentId
parent_id = taxon.taxon_id
return taxon
def get_taxon_by_id(self, taxon_id):
for taxon in self.taxons:
if taxon.taxon_id == taxon_id:
return taxon
return None
def get_path(self, taxon: Taxon):
if taxon.path is None:
parent = self.get_taxon_by_id(taxon.parentId)
taxon.path = parent.path + [taxon.name]
return taxon.path
def build_paths(self):
for taxon in self.taxons:
self.get_path(taxon)
def merge(self, taxonomy, taxon: Taxon):
if taxonomy is not None:
assert type(taxonomy) == Taxonomy
self.taxons = list(set(self.taxons + taxonomy.taxons))
if taxon is not None:
self.taxons.append(taxon)
self.taxons = list(set(self.taxons))
self.build_paths()
return self
class Elasticsearch:
def __init__(self, jwt, host):
self.host = host
self.jwt = jwt
self.taxonomies = {}
def do_query(self, view_name: str):
endpoint = 'https://' + self.host + '/api/search/admin/' + view_name + '/_search/?size=10000&pretty=0'
req = urllib.request.Request(endpoint, headers={"Content-Type": "application/json", "JWT": self.jwt})
try:
context = ssl.create_default_context()
context.check_hostname = False
response = urllib.request.urlopen(req, context=context)
except HTTPError as err:
logging.error(repr(err))
raise
return json.loads(response.read().decode('utf-8'))
def get_taxonomies(self):
response = self.do_query('taxonomies_search_view')
return [(item['name'], item['taxonomyId']) for item in response["result"] if
item['context'] == 'default' and item['archivedAt'] is None]
def get_taxons(self):
def read_item(item):
return Taxon(item['taxonId'], item['parentId'], item['name'], item['taxonomyId'])
response = self.do_query('taxons_search_view')
taxons = [read_item(item) for item in response["result"] if
('context' in item and item['context'] == 'default' and item['archivedAt'] is None)]
return taxons
def get_products(self):
response = self.do_query('products_search_view')
return response['result']
def get_inventory(self):
response = self.do_query('inventory_search_view')
return response['result']
class Phoenix:
def __init__(self, host, user, password, org):
self.host = host
self.user = user
self.password = password
self.org = org
self.prefix = "https://" + host + "/api/v1"
self.jwt = None
def ensure_logged_in(self):
if self.jwt is None:
return self.do_login()
else:
return True
def do_query(self, endpoint_suffix, data, method="GET"):
self.ensure_logged_in()
endpoint = self.prefix + endpoint_suffix
payload = None if (data is None or method == "GET") else json.dumps(data).encode()
req = urllib.request.Request(endpoint, payload, headers={"Content-Type": "application/json", "JWT": self.jwt},
method=method)
try:
context = ssl.create_default_context()
context.check_hostname = False
response = urllib.request.urlopen(req, context=context)
except HTTPError as err:
logging.error("HTTP error. code: {}. message: {}".format(err.code, err.read()))
raise
code = response.getcode()
if code == 204:
return code, None
return code, json.loads(response.read().decode('utf-8'))
def do_login(self):
logging.info("logging in: host:{}, user:{}, organization: {}".format(self.login_endpoint(), self.user, self.org))
payload = json.dumps({'email': self.user, 'password': self.password, 'org': self.org}).encode()
context = ssl.create_default_context()
context.check_hostname = False
req = urllib.request.Request(self.login_endpoint(), payload, method='POST')
req.add_header('Content-Type', 'application/json')
try:
response = urllib.request.urlopen(req, context=context)
except urllib.error.URLError as err:
logging.error("Cannot connect to %s %s", self.login_endpoint(), err)
raise
content = json.loads(response.read().decode('utf-8'))
self.jwt = dict(response.info())['Jwt']
logging.info("logged in: " + self.prefix + " name: " + content['name'] + " scope: " + content['scope'])
return True
def create_taxonomy(self, taxonomy_json):
self.ensure_logged_in()
data = {k: v for k, v in taxonomy_json.items() if k != "taxons"}
code, response = self.do_query("/taxonomies/default", data, method="POST")
logging.info("taxonomy created: id:%d, attributes: %r" % (response['id'], response['attributes']))
return Taxonomy(response['id'], response["attributes"]["name"]["v"], [])
def create_taxon(self, taxon_json, taxonomy_id, parent_id):
self.ensure_logged_in()
if parent_id is not None:
taxon_json = taxon_json.copy()
taxon_json['location'] = {'parent': parent_id}
code, response = self.do_query("/taxonomies/default/" + str(taxonomy_id) + "/taxons", taxon_json, method="POST")
logging.info("taxon created: id:%d, attributes: %r" % (response['id'], response['attributes']))
return Taxon(response['id'], parent_id, taxon_json["attributes"]["name"]["v"], taxonomy_id)
def login_endpoint(self):
return self.prefix + "/public/login"
def upload_product(self, code, product):
logging.info("uploading: " + code)
self.ensure_logged_in()
try:
code, response = self.do_query("/products/default", product, method="POST")
if code != 200:
logging.error("error uploading: " + response)
return False, None
except HTTPError as err:
logging.error("error uploading: " + repr(err))
return False, None
return True, response
def assign_taxon(self, product_id: int, taxon_id: int):
self.ensure_logged_in()
try:
self.do_query("/taxons/default/{}/product/{}".format(taxon_id, product_id), data=None, method="PATCH")
except HTTPError:
logging.error("cannot assign taxon {} to product {}".format(taxon_id, product_id))
def load_taxonomies(file_name):
return json.load(open(file_name, 'r'))
def load_products(file_name):
return json.load(open(file_name, 'r'))
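# Expected JSON shapes (inferred from the readers above and the import functions
# below; field names are as consumed by this script, not an official schema):
# taxonomies.json: {"taxonomies": [{"attributes": {"name": {"t": "string", "v": "Color"}},
#                                   "taxons": [{"attributes": {...}, "children": [...]}]}]}
# products.json:   {"products": [{"skus": [{"attributes": {"code": {"v": "SKU-1"}},
#                                           "taxonomies": {"Color": ["red"]}}]}]}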
def query_es_taxonomies(jwt: str, host: str):
es = Elasticsearch(jwt, host=host)
taxons = es.get_taxons()
taxonomies = es.get_taxonomies()
result = defaultdict(lambda: None)
for (name, taxonomy_id) in taxonomies:
taxonomy_taxons = [taxon for taxon in taxons if taxon.taxonomyId == taxonomy_id]
result[name] = Taxonomy(taxonomy_id, name, taxonomy_taxons)
return result
def assign_taxonomies(p: Phoenix, settings, taxonomies, data_product, product_id):
def get_sku_taxonomies(sku_record):
if 'taxonomies' in sku_record:
return sku_record['taxonomies']
elif 'taxonomies' in sku_record['attributes']:
return sku_record["attributes"]["taxonomies"]["v"]
else:
return []
data_taxonomies = defaultdict(set)
for sku in data_product["skus"]:
product_taxonomies = get_sku_taxonomies(sku)
for (taxonomy, taxon) in product_taxonomies.items():
if type(taxon) is list:
data_taxonomies[taxonomy] = data_taxonomies[taxonomy].union(taxon)
else:
data_taxonomies[taxonomy].add(taxon)
for (taxonomy, taxons) in data_taxonomies.items():
for taxon in taxons:
es_taxonomy, es_taxon = map_to_es_taxon(p, settings, data_product, taxon, taxonomies, taxonomy)
if es_taxonomy is None or es_taxon is None:
logging.info("Skipping taxon '{}' (Taxonomy: '{}')".format(taxon, taxonomy))
else:
p.assign_taxon(product_id, es_taxon.taxon_id)
logging.info("taxon {} is assigned to product {}".format(es_taxon.taxon_id, product_id))
def map_to_es_taxon(p: Phoenix, settings, data_product, taxon, taxonomies, taxonomy):
es_taxonomy = taxonomies[taxonomy]
if es_taxonomy is None:
if settings.unknown_taxonomies[0] == 'fail':
raise ValueError(
"product '{}' references an unknown taxonomy '{}'".format(product_code(data_product), taxonomy))
elif settings.unknown_taxonomies[0] == 'ignore':
return None, None
else:
assert settings.unknown_taxonomies[0] == 'create'
r = create_taxon_from_name(p, taxonomy, taxon)
es_taxonomy = r[0].merge(None, r[1])
taxonomies[taxonomy] = es_taxonomy
es_taxon = next(iter([t for t in es_taxonomy.taxons if t.name == taxon]), None)
if es_taxon is None:
if settings.unknown_taxonomies[0] == 'fail':
raise ValueError(
"product '{}' references an unknown taxon '{}' in taxonomy '{}'".format(product_code(data_product), taxon,
taxonomy))
elif settings.unknown_taxonomies[0] == 'ignore':
return es_taxonomy, None
else:
assert settings.unknown_taxonomies[0] == 'create'
r = create_taxon_from_name(p, es_taxonomy, taxon)
taxonomies[taxonomy] = r[0].merge(es_taxonomy, r[1])
es_taxon = r[1]
return es_taxonomy, es_taxon
def create_taxon_from_name(p: Phoenix, taxonomy, taxon):
if type(taxonomy) == Taxonomy:
es_taxonomy = taxonomy
else:
assert type(taxonomy) == str
es_taxonomy = p.create_taxonomy({"attributes": {"name": {"t": "string", "v": taxonomy}}, "hierarchical": False})
assert type(taxon) == str
es_taxon = p.create_taxon({"attributes": {"name": {"t": "string", "v": taxon}}}, es_taxonomy.taxonomy_id, None)
return es_taxonomy, es_taxon
def import_taxons(p: Phoenix, taxons, existing_taxonomy, parent_id=None):
for taxon in taxons:
name = taxon["attributes"]["name"]["v"]
existing_taxon = existing_taxonomy.get_taxon_by_name(name)
if existing_taxon is None:
t = p.create_taxon(taxon, existing_taxonomy.taxonomy_id, parent_id)
taxon_id = t.taxon_id
else:
logging.info("skipping taxon '{}' as soon as it already exists. id: {}".format(taxon,
existing_taxon.taxon_id))
taxon_id = existing_taxon.taxon_id
if 'children' in taxon and taxon['children'] is not None:
import_taxons(p, taxon['children'], existing_taxonomy, taxon_id)
def import_taxonomies(p: Phoenix, input_dir, import_from_adidas):
print("Importing taxonomies\n")
if import_from_adidas:
taxonomies = convert_taxonomies(input_dir)
else:
taxonomies_json = load_taxonomies(input_dir + "/taxonomies.json")
taxonomies = taxonomies_json["taxonomies"]
if p.ensure_logged_in():
imported = query_es_taxonomies(p.jwt, p.host)
print("about to add {} taxonomies with overall {} taxons".format(len(taxonomies),
sum([len(k["taxons"]) for k in
taxonomies])))
for taxonomy in taxonomies:
name = taxonomy["attributes"]["name"]["v"]
taxons = taxonomy["taxons"]
existing_taxonomy = imported[name]
if existing_taxonomy is None:
existing_taxonomy = p.create_taxonomy(taxonomy)
else:
msg = "skipping taxonomy '{}' as soon as it already exists. id: {}"
logging.info(msg.format(taxonomy, existing_taxonomy.taxonomy_id))
import_taxons(p, taxons, existing_taxonomy)
def import_products(p: Phoenix, settings, max_products, input_dir, import_from_adidas):
print("Importing products\n")
if import_from_adidas:
products = convert_products(input_dir)
else:
products_json = load_products(input_dir + "/products.json")
products = products_json["products"]
cache_dir = "cache"
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
p.ensure_logged_in()
taxonomies = query_es_taxonomies(p.jwt, p.host)
products = products if max_products is None else itertools.islice(products, int(max_products))
for product in products:
code = product_code(product)
cache_file = cache_dir + "/" + code + ".json"
skip = os.path.exists(cache_file)
if not skip:
uploaded, result = p.upload_product(code, product)
if uploaded:
json.dump(product, open(cache_file, 'w'))
assign_taxonomies(p, settings, taxonomies, product, result['id'])
def product_code(product):
return product['skus'][0]['attributes']['code']['v']
def get_inventory(phoenix):
es = Elasticsearch(phoenix.jwt, phoenix.host)
return es.get_inventory()
def add_inventory_to_stock_item(phoenix, stock_item, amount):
typ = stock_item['type']
if typ != 'Sellable':
return
itm = stock_item['stockItem']
item_id = str(itm['id'])
sku = itm['sku']
old_amount = str(stock_item['onHand'])
logging.info(sku + ' (' + item_id + ') ' + old_amount + ' => ' + str(amount))
increment = {"qty": amount, "type": "Sellable", "status": "onHand"}
try:
code, response = phoenix.do_query("/inventory/stock-items/" + item_id + "/increment", increment, method="PATCH")
if code != 204:
logging.error("error adding inventory: " + response)
except HTTPError as err:
logging.error("error adding inventory: " + repr(err))
def add_inventory(phoenix, amount):
phoenix.ensure_logged_in()
inventory = get_inventory(phoenix)
for itm in inventory:
add_inventory_to_stock_item(phoenix, itm, amount)
def config_logging():
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
def main():
config_logging()
options = read_cmd_line()
print("HOST: ", options.host)
print("CMD: ", options.command[0])
if options.max_products is not None:
print("MAX: ", options.max_products[0])
max_products = None if options.max_products is None else options.max_products[0]
p = Phoenix(host=options.host, user='[email protected]', password='password', org='tenant')
if options.command[0] == 'taxonomies':
import_taxonomies(p, options.input[0], options.adidas)
elif options.command[0] == 'products':
import_products(p, options, max_products, options.input[0], options.adidas)
elif options.command[0] == 'both':
import_taxonomies(p, options.input[0], options.adidas)
import_products(p, options, max_products, options.input[0], options.adidas)
elif options.command[0] == 'inventory':
add_inventory(p, options.inventory_amount[0])
else:
print("Valid commands are, 'taxonomies', 'products', 'both', or 'inventory'")
def read_cmd_line():
pp = argparse.ArgumentParser(
description='Data import')
pp.add_argument("--host", type=str, required=True, help="host of the API to import into")
pp.add_argument("--max-products", "-m", nargs=1, type=int,
help="Max products. Provides a way to restrict how many products are imported")
pp.add_argument("--input", "-i", nargs=1, type=str, default=['data'], help="input directory")
pp.add_argument("--inventory_amount", nargs=1, type=int, default=[100], help="inventory amount")
pp.add_argument("--adidas", action='store_true', default=False,
help="treat input directory as container of listing.json and products.json with adidas data")
pp.add_argument("command", nargs=1, choices=['taxonomies', 'products', 'both', 'inventory'],
type=str, help="Command")
pp.add_argument("--unknown-taxonomies", nargs=1, choices=['ignore', 'create', 'fail'], type=str,
help="defines behavior in case if product references on taxonomy/taxon which wasn't created before."
" <ignore> - ignore the taxon and continue import."
" <fail> - stop import. Prints error message."
" <create> - creates the absent taxonomy and taxon. The created taxonomy is flat.",
default='fail')
return pp.parse_args()
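# Example invocations (hypothetical host; see the parser above):
#   python main.py --host admin.example.com taxonomies
#   python main.py --host admin.example.com --max-products 50 --unknown-taxonomies create products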
if __name__ == "__main__":
main()
| 38.707368 | 122 | 0.617426 |
866b520bdb782b0e5f997f32ae5ef0e2833e2651
| 2,723 |
py
|
Python
|
service/api/main.py
|
netzbegruenung/schaufenster
|
c0860570cf6b46dc0fade9cef7562edd2fa7f3a0
|
[
"Apache-2.0"
] | 1 |
2021-07-20T06:56:38.000Z
|
2021-07-20T06:56:38.000Z
|
service/api/main.py
|
netzbegruenung/schaufenster
|
c0860570cf6b46dc0fade9cef7562edd2fa7f3a0
|
[
"Apache-2.0"
] | 1 |
2018-01-23T22:36:49.000Z
|
2018-01-24T18:52:27.000Z
|
service/api/main.py
|
netzbegruenung/schaufenster
|
c0860570cf6b46dc0fade9cef7562edd2fa7f3a0
|
[
"Apache-2.0"
] | 2 |
2018-01-23T21:25:57.000Z
|
2018-01-24T21:46:41.000Z
|
# -*- coding: utf-8 -*-
from . import events
from . import jsonhandler
from . import feeds
from datetime import datetime
from falcon import media
from falcon_cors import CORS
import falcon
import logging
import requests
class IndexResource(object):
def __init__(self):
self.logger = logging.getLogger('api.' + __name__)
def on_get(self, req, resp):
resp.media = {
"message": "Hallo! Hier läuft der Schaufenster-Service",
"url": "https://github.com/netzbegruenung/schaufenster",
"endpoints": [
"/events/",
"/feed/",
"/luftdaten.info/v1/sensor/{sensor_id}/",
],
}
class EventsResource(object):
def __init__(self):
self.logger = logging.getLogger('api.' + __name__)
def on_get(self, req, resp):
"""
Loads an iCal calendar and returns the upcoming events
"""
ical_url = req.get_param("ical_url", required=True)
charset = req.get_param("charset")
num = int(req.get_param("num", required=False, default="10"))
client = events.Client(url=ical_url, charset=charset)
next_events = client.next_events(num)
del client
resp.media = next_events
maxage = 60 * 60  # 1 hour
resp.cache_control = ["max-age=%d" % maxage]  # the Cache-Control directive is spelled max-age
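# Example request against this resource (hypothetical host and calendar URL):
#   GET /events/?ical_url=https://example.org/calendar.ics&num=5
# returns the next 5 events as JSON, cacheable for one hour.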
class FeedResource(object):
def on_get(self, req, resp):
feed_url = req.get_param("url", required=True)
num = int(req.get_param("num", required=False, default="1"))
c = feeds.Client(feed_url)
resp.media = {
"meta": c.metadata(),
"items": c.recent_items(num=num)
}
class ParticleSensorResource(object):
def on_get(self, req, resp, sensor_id):
"""
Delivers data for a particular luftdaten.info sensor
"""
url = "http://api.luftdaten.info/v1/sensor/%s/" % sensor_id
r = requests.get(url)
if r.status_code == 200:
maxage = 60 * 5  # 5 minutes
resp.cache_control = ["max-age=%d" % maxage]
resp.media = r.json()
else:
resp.media = r.text
resp.status = str(r.status_code) + " Unknown Error"
handlers = media.Handlers({
'application/json': jsonhandler.JSONHandler(),
})
cors = CORS(allow_all_origins=True,
allow_all_headers=True)
app = falcon.API(middleware=[cors.middleware])
app.req_options.media_handlers = handlers
app.resp_options.media_handlers = handlers
app.add_route('/events/', EventsResource())
app.add_route('/feed/', FeedResource())
app.add_route('/luftdaten.info/v1/sensor/{sensor_id}/', ParticleSensorResource())
app.add_route('/', IndexResource())
| 28.968085 | 81 | 0.609254 |
07f94d1801ba040a7ccd5691cf1f22424c75214c
| 22,157 |
py
|
Python
|
benchmark/dnn_binary.py
|
zentonllo/tfg-tensorflow
|
095469a906de26984b4d781699e76bec02b1ef75
|
[
"MIT"
] | null | null | null |
benchmark/dnn_binary.py
|
zentonllo/tfg-tensorflow
|
095469a906de26984b4d781699e76bec02b1ef75
|
[
"MIT"
] | null | null | null |
benchmark/dnn_binary.py
|
zentonllo/tfg-tensorflow
|
095469a906de26984b4d781699e76bec02b1ef75
|
[
"MIT"
] | null | null | null |
"""
Module used to model Deep Neural Networks which solve binary classification problems (1 output neuron)
Code obtained and adapted from:
https://www.tensorflow.org/get_started/
https://github.com/ageron/handson-ml/blob/master/11_deep_learning.ipynb
https://github.com/aymericdamien/TensorFlow-Examples
https://github.com/zentonllo/gcom
"""
import tensorflow as tf
import time
import numpy as np
import os
import matplotlib.pyplot as plt
import itertools
from tensorflow.contrib.layers import fully_connected, dropout
from tensorflow.contrib.framework import arg_scope
from sklearn.metrics import auc, roc_auc_score, roc_curve, confusion_matrix
# Disable info warnings from TF
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Source: https://www.tensorflow.org/get_started/summaries_and_tensorboard
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def print_execution_time(start, end):
"""Helper function to print execution times properly formatted."""
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("Execution time:","{:0>2}:{:0>2}:{:0>2}".format(int(hours),int(minutes),int(seconds)))
"""
n_inputs: Número de variables de entrada
n_outputs: Número de clases objetivo (2 problemas clasificación binaria, >2 problemas clasificación multiclase)
learning_rate: Tasa de aprendizaje (suele ser 0.001)
hidden_list: Lista de capas ocultas (incluidas neuronas input y outpt, ej: [n_input, 300, 300, n_output])
activation_function: Funciones de activación (ej: tf.nn.relu, tf.nn.elu, tf.nn.sigmoid, tf.nn.tanh, tf.nn.identity)
keep_prob: Keep probability para el Dropout (suele ser 0.5)
regularizer: Regularizer a usar (ej: tf.contrib.layers.l1_regularizer(scale=beta, scope=None), tf.contrib.layers.l2_regularizer(scale=beta, scope=None))
normalizer_fn: función de normalización (None o batch_norm para realizar batch normalization)
normalizer_params = {
'is_training': None,
# 0.9 o 0.99 o 0.999 o 0.9999 ...
# Segun performance guide de TF: menor si va bien en training y peor en validation/test
# Según A.Geron, aumentar cuando el dataset es grande y los batches pequeños
'decay': 0.9,
'updates_collections': None,
# Si usamos funciones de activacion que no sean relus --> scale_term debe ser True
'scale': scale_term,
# Aumenta rendimiento según la performance guide de TF
'fused': True
# Try zero_debias_moving_mean=True for improved stability
# 'zero_debias_moving_mean':True
}
optimizer: = tf.train.AdamOptimizer, tf.train.RMSPropOptimizer, tf.train.AdadeltaOptimizer, tf.train.AdagradOptimizer, tf.train.MomentumOptimizer (este requiere cambios)
El optimizer debe estar instanciado, ej: tf.train.AdamOptimizer(learning_rate=0.001, name='optimizer')
"""
class DNN(object):
"""Class that models a Deep Neural Network with just one output neuron
There are training and predicting methods, as well as tools that generate plots.
Most of the neural network hyperparameters are set when a class object is instantiated.
Mostly similar to the DNN class in dnn_multiclass (it might be better to merge both classes into one)
Attributes
----------
file_writer :
tf.summary.FileWriter object which adds summaries to TensorBoard
saver :
tf.train.Saver() used to save the model
merged :
TF node that if it is executed will generate the TensorBoard summaries
hidden_list :
List with the following shape [input_neurons, neurons_hidden_layer_1, neurons_hidden_layer_2, ..., 1]
activation_function :
TF activation function (tf.nn.relu, tf.nn.elu, tf.nn.sigmoid, tf.nn.tanh, tf.nn.identity, etc.)
keep_prob :
Probability to keep a neuron active during dropout (that is, 1 - dropout_rate, use None to avoid dropout)
regularizer :
TF regularizer to use (tf.contrib.layers.l1_regularizer(scale=beta, scope=None), tf.contrib.layers.l2_regularizer(scale=beta, scope=None))
normalizer_fn :
Normalizer function to use. Use batch_norm for batch normalization and None to avoid normalizer functions
normalizer_params :
Extra parameters for the normalizer function
optimizer :
TF Optimizer during Gradient Descent (tf.train.AdamOptimizer, tf.train.RMSPropOptimizer, tf.train.AdadeltaOptimizer or tf.train.AdagradOptimizer)
log_dir :
Path used to save all the needed TensorFlow and TensorBoard information to save (graph, models, etc.)
batch_size :
Batch size to be used during training
y_casted :
Label column (NP array) casted to float (casted must be made in order to use the TF cross entropy function)
predictions :
NP array with class predictions (0.5 threshold used)
"""
def __init__(self,
log_dir,
hidden_list,
activation_function=tf.nn.relu,
keep_prob = None,
regularizer = None,
normalizer_fn = None,
normalizer_params = None,
optimizer = tf.train.AdamOptimizer(learning_rate=0.001, name='optimizer')
):
"""__init__ method for the DNN class
Saves the hyperparameters as attributes and instantiates a deep neural network
Parameters
----------
log_dir :
Path used to save all the needed TensorFlow and TensorBoard information to save (graph, models, etc.)
hidden_list :
List with the following shape [input_neurons, neurons_hidden_layer_1, neurons_hidden_layer_2, ..., 1]
activation_function :
TF activation function (tf.nn.relu, tf.nn.elu, tf.nn.sigmoid, tf.nn.tanh, tf.nn.identity, etc.)
keep_prob :
Probability to keep a neuron active during dropout (that is, 1 - dropout_rate, use None to avoid dropout)
regularizer :
TF regularizer to use (tf.contrib.layers.l1_regularizer(scale=beta, scope=None), tf.contrib.layers.l2_regularizer(scale=beta, scope=None))
normalizer_fn :
Normalizer function to use. Use batch_norm for batch normalization and None to avoid normalizer functions
normalizer_params :
Extra parameters for the normalizer function
optimizer :
TF Optimizer during Gradient Descent (tf.train.AdamOptimizer, tf.train.RMSPropOptimizer, tf.train.AdadeltaOptimizer or tf.train.AdagradOptimizer)
"""
# Create a new TF graph from scratch
tf.reset_default_graph()
self.file_writer = None
self.saver = None
self.merged = None
self.hidden_list = hidden_list
self.activation_function = activation_function
self.keep_prob = keep_prob
self.regularizer = regularizer
self.normalizer_fn = normalizer_fn
self.normalizer_params = normalizer_params
self.optimizer = optimizer
self.log_dir = log_dir
self.batch_size = None
self.y_casted = None
self.predictions = None
# Instantiate the neural network
self.create_net()
def create_net(self):
"""Method that instatiates a neural network using the hyperparameters passed to the DNN object.
Most of the code was obtained and adapted from
https://github.com/ageron/handson-ml/blob/master/11_deep_learning.ipynb
"""
hidden_list = self.hidden_list
n_inputs = hidden_list[0]
# Hardcoded for binary classification: hidden_list[-1] should be 1 (worth asserting here)
n_outputs = 1
self.X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
self.y = tf.placeholder(tf.int64, shape=(None), name="y")
self.is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
if self.normalizer_params is not None:
self.normalizer_params['is_training'] = self.is_training
with tf.name_scope("dnn"):
he_init = tf.contrib.layers.variance_scaling_initializer()
with arg_scope(
[fully_connected],
activation_fn=self.activation_function,
weights_initializer=he_init,
normalizer_fn=self.normalizer_fn,
normalizer_params=self.normalizer_params):
# Build the fully-connected layers
Z = self.X
n_iter = len(hidden_list[1:])
for i in range(1,n_iter):
name_scope = "hidden" + str(i)
Z = fully_connected(inputs=Z, num_outputs=hidden_list[i], scope=name_scope)
if self.keep_prob is not None:
Z = dropout(Z, self.keep_prob, is_training=self.is_training)
self.logits = fully_connected(inputs=Z, num_outputs=n_outputs, activation_fn=None, weights_initializer=he_init, normalizer_fn=self.normalizer_fn, normalizer_params=self.normalizer_params, scope="outputs")
with tf.name_scope("softmaxed_output"):
self.softmaxed_logits = tf.nn.sigmoid(self.logits)
with tf.name_scope("loss"):
y_casted = tf.cast(self.y, tf.float32)
self.y_casted = tf.reshape(y_casted, [-1,1])
# Compute cross_entropy from logits (that is, dnn output without applying the sigmoid function)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y_casted, logits=self.logits)
self.loss = tf.reduce_mean(xentropy)
if self.regularizer is not None:
self.loss += tf.contrib.layers.apply_regularization(self.regularizer, tf.trainable_variables())
tf.summary.scalar('cross_entropy', self.loss)
with tf.name_scope("train"):
opt = self.optimizer
# Minimize the loss function
self.train_step = opt.minimize(self.loss, name='train_step')
with tf.name_scope("eval"):
self.predictions = tf.round(self.softmaxed_logits)
incorrect = tf.abs(tf.subtract(self.predictions, self.y_casted))
incorrect_casted = tf.cast(incorrect, tf.float32)
# accuracy in percent: 100 * (1 - error rate)
self.accuracy = 100.0 * (1.0 - tf.reduce_mean(incorrect_casted))
tf.summary.scalar('accuracy', self.accuracy)
# TensorBoard summaries for the hidden layers weights
for i in range(1,n_iter):
with tf.variable_scope('hidden'+str(i), reuse=True):
variable_summaries(tf.get_variable('weights'))
with tf.variable_scope('outputs', reuse=True):
variable_summaries(tf.get_variable('weights'))
self.merged = tf.summary.merge_all()
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
def feed_dict(self, dataset, mode):
"""Method that builds a dictionary to feed the neuronal network.
Parameters
----------
dataset :
Dataset object
mode :
String that points which feed dictionary we want to get. Possible values: 'batch_training', 'training_test', 'validation_test'
Returns
-------
fd
Dictionary that feeds the TensorFlow model
"""
fd = None
if mode == 'batch_training':
x_batch, y_batch = dataset.next_batch(self.batch_size)
fd = {self.is_training: True, self.X: x_batch, self.y: y_batch}
elif mode == 'training_test':
fd = {self.is_training: False, self.X: dataset.x_train, self.y: dataset.y_train}
elif mode == 'validation_test':
fd = {self.is_training: False, self.X: dataset.x_val, self.y: dataset.y_val}
return fd
def train(self, dataset, model_path, train_path, nb_epochs=100, batch_size=10, silent_mode=False):
"""Method that trains a deep neuronal network.
Parameters
----------
dataset :
Dataset object
model_path :
Path where the optimal TensorFlow model will be saved
train_path :
Path where the training TensorFlow models will be saved. After the training process, the model trained in the very last epoch will
be the only one saved
nb_epochs :
Number of epochs to train the model
batch_size :
Batch size to be used during training
silent_mode :
Flag which enables whether to print progress on the terminal during training.
Returns
-------
None
"""
start_time = time.time()
x_training = dataset.x_train
y_training = dataset.y_train
x_validation = dataset.x_val
y_validation = dataset.y_val
nb_data = dataset._num_examples
self.batch_size = batch_size
nb_batches = nb_data // batch_size  # integer division
# Records best validation AUC during training, which will allow to save that model as optimal
best_auc = 0
self.aucs = []
with tf.Session() as sess:
sess.run(self.init)
self.file_writer = tf.summary.FileWriter(self.log_dir, sess.graph)
for epoch in range(nb_epochs):
# Iterate through batches and keep training
for batch in range(nb_batches):
sess.run(self.train_step,
feed_dict=self.feed_dict(dataset, mode='batch_training'))
self.saver.save(sess, train_path)
# Get the summaries for TensorBoard
summary = sess.run(self.merged, feed_dict=self.feed_dict(dataset, mode='training_test'))
self.file_writer.add_summary(summary, epoch)
# We use a sklearn function to compute AUC. Couldn't manage to make tf.metrics.auc work due to some odd 'local variables'
cur_auc = self.auc_roc(x_validation, y_validation, train_path)
summary_auc = tf.Summary(value=[tf.Summary.Value(tag="AUCs_Validation", simple_value=cur_auc)])
self.file_writer.add_summary(summary_auc, epoch)
# Only save best model if it gets the best AUC over the validation set
if cur_auc > best_auc:
best_auc = cur_auc
self.saver.save(sess, model_path)
if not silent_mode:
acc_train = sess.run(self.accuracy, feed_dict=self.feed_dict(dataset, mode='training_test'))
auc_train = self.auc_roc(x_training, y_training, train_path)
acc_val = sess.run(self.accuracy, feed_dict=self.feed_dict(dataset, mode='validation_test'))
print("Epoch:", (epoch+1), "Train accuracy:", acc_train, "Train AUC:", auc_train )
print("Validation accuracy:", acc_val, "Validation AUC:", cur_auc, "Best Validation AUC:", best_auc, "\n")
self.file_writer.close()
print_execution_time(start_time, time.time())
if silent_mode:
print("Best Validation AUC:", best_auc)
def predict(self, x_test, model_path):
"""Method that gets predictions from a trained deep neuronal network.
Get a Numpy array of predictions P(y=1| W) for all the x_test
Parameters
----------
x_test :
Numpy array with data test to get predictions for
model_path :
Path where the TensorFlow model is located
Returns
-------
Numpy array with predictions (probabilities between 0 and 1)
"""
with tf.Session() as sess:
self.saver.restore(sess, model_path)
y_pred = sess.run(self.softmaxed_logits, feed_dict={self.is_training: False, self.X: x_test})
return y_pred
def predict_class(self, x_test, model_path):
"""Method that gets predicted classes (0 or 1) from a trained deep neuronal network.
Get a Numpy array of predicted classes for all the x_test
Parameters
----------
x_test :
Numpy array with data test to get their predicted classes
model_path :
Path where the TensorFlow model is located
Returns
-------
Numpy array with predicted classes (0 or 1)
"""
with tf.Session() as sess:
self.saver.restore(sess, model_path)
y_pred = sess.run(self.predictions, feed_dict={self.is_training: False, self.X: x_test})
return y_pred
def test(self, x_test, y_test, model_path):
"""Method that prints accuracy and AUC for test data after getting predictions a trained deep neuronal network.
Parameters
----------
x_test :
Numpy array with data test to get their predicted classes
y_test :
Numpy array with the labels belonging to x_test
model_path :
Path where the TensorFlow model is located
Returns
-------
None
"""
start_time = time.time()
with tf.Session() as sess:
self.saver.restore(sess, model_path)
acc_test = sess.run(self.accuracy, feed_dict={self.is_training: False, self.X: x_test, self.y: y_test})
print("Test accuracy:", acc_test)
auc_test = self.auc_roc(x_test, y_test, model_path)
print("Test AUC:", auc_test)
print_execution_time(start_time, time.time())
def auc_roc(self, x_test, y_test, model_path):
"""Method that computes AUC for some data after getting predictions from a trained deep neural network.
Parameters
----------
x_test :
Numpy array with data test to get their predicted classes
y_test :
Numpy array with the labels belonging to x_test
model_path :
Path where the TensorFlow model is located
Returns
-------
AUC value for the test data (x_test and y_test)
"""
y_score = self.predict(x_test, model_path)
auc = roc_auc_score(y_true=y_test, y_score=y_score)
return auc*100
def save_roc(self, x_test, y_test, model_path, roc_path):
"""Method that computes a ROC curve from a model and save it as a png file.
Parameters
----------
x_test :
Numpy array with data test to get their predicted classes
y_test :
Numpy array with the labels belonging to x_test
model_path :
Path where the TensorFlow model is located
roc_path :
Path that points where to save the png file with the ROC curve
Returns
-------
None
"""
y_score = self.predict(x_test, model_path)
fpr, tpr, thresholds = roc_curve(y_true=y_test, y_score=y_score)
roc_auc = auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % (roc_auc * 100))
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig(roc_path, bbox_inches='tight')
def save_cm(self, x_test, y_test, model_path, cm_path, classes, normalize=True):
"""Method that computes a confusion matrix from a model and save it as a png file.
Parameters
----------
x_test :
Numpy array with data test to get their predicted classes
y_test :
Numpy array with the labels belonging to x_test
model_path :
Path where the TensorFlow model is located
cm_path :
Path that points where to save the png file with the confusion matrix
classes :
List with labels for the confusion matrix rows and columns. For instance: ['Normal Transactions', 'Fraudulent transactions']
Returns
-------
None
"""
y_pred = self.predict_class(x_test, model_path)
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
plt.figure()
cmap=plt.cm.Blues
if normalize:
plt.title('Normalized confusion matrix')
# normalize before plotting so the image and the cell annotations agree
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
else:
plt.title('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(cm_path, bbox_inches='tight')
| 39.922523 | 216 | 0.623099 |
71f58f5d351fc9e9245f841e5c5e5119531cad91
| 274 |
py
|
Python
|
src/lcdoc/const.py
|
axiros/docutools
|
f99874a64afba8f5bc740049d843151ccd9ceaf7
|
[
"BSD-2-Clause"
] | 24 |
2021-10-04T22:11:59.000Z
|
2022-02-02T21:51:43.000Z
|
src/lcdoc/const.py
|
axiros/docutools
|
f99874a64afba8f5bc740049d843151ccd9ceaf7
|
[
"BSD-2-Clause"
] | 2 |
2021-10-04T21:51:30.000Z
|
2021-10-05T14:15:31.000Z
|
src/lcdoc/const.py
|
axiros/docutools
|
f99874a64afba8f5bc740049d843151ccd9ceaf7
|
[
"BSD-2-Clause"
] | null | null | null |
import time
AttrDict = dict
# class AttrDict(dict):
# def __getattr__(self, k):
# self[k] = 0
# return 0
Stats = AttrDict()
PageStats = {}
LogStats = {}
now_ms = lambda: int(time.time() * 1000)
t0 = [now_ms()]
lprunner_sep = ['<!-- lprunner -->']
| 13.7 | 40 | 0.569343 |
139cf6e95d1772ad057c4aa1758d2566aceb2dfe
| 404 |
py
|
Python
|
cs/python/python_general/30-seconds-of-python-code/test/bubble_sort/bubble_sort.test.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
cs/python/python_general/30-seconds-of-python-code/test/bubble_sort/bubble_sort.test.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/python/python_general/30-seconds-of-python-code/test/bubble_sort/bubble_sort.test.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
import types
import functools
from pytape import test
from bubble_sort import bubble_sort
def bubble_sort_test(t):
t.true(
isinstance(bubble_sort, (types.BuiltinFunctionType, types.FunctionType,
functools.partial)),
'bubble_sort is a function'
)
test('Testing bubble_sort', bubble_sort_test)
| 25.25 | 86 | 0.69802 |
13b3e524813c43b597c3fff3534673dc31d99b19
| 3,270 |
py
|
Python
|
test/test_find_sets.py
|
vidagy/setsolver
|
1d69dc33768ddb5b2110b6321106947de87cb7ac
|
[
"Apache-2.0"
] | null | null | null |
test/test_find_sets.py
|
vidagy/setsolver
|
1d69dc33768ddb5b2110b6321106947de87cb7ac
|
[
"Apache-2.0"
] | null | null | null |
test/test_find_sets.py
|
vidagy/setsolver
|
1d69dc33768ddb5b2110b6321106947de87cb7ac
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from setsolver.board import Board
from setsolver.card import Card, GameSet
from setsolver.properties import Color, Count, Fill, Shape
from setsolver.set_finder import (
find_all_sets,
is_same_or_all_different,
is_set,
)
class TestSetFinder(TestCase):
card1 = Card(Fill.FULL, Count.ONE, Color.RED, Shape.OVAL)
card2 = Card(Fill.STRIPED, Count.TWO, Color.PURPLE, Shape.OVAL)
card3 = Card(Fill.EMPTY, Count.THREE, Color.GREEN, Shape.OVAL)
card4 = Card(Fill.EMPTY, Count.THREE, Color.PURPLE, Shape.WAVE)
card5 = Card(Fill.EMPTY, Count.THREE, Color.RED, Shape.DIAMOND)
card6 = Card(Fill.EMPTY, Count.THREE, Color.PURPLE, Shape.DIAMOND)
card7 = Card(Fill.STRIPED, Count.ONE, Color.RED, Shape.DIAMOND)
card8 = Card(Fill.STRIPED, Count.TWO, Color.GREEN, Shape.DIAMOND)
card9 = Card(Fill.FULL, Count.ONE, Color.GREEN, Shape.WAVE)
card10 = Card(Fill.EMPTY, Count.ONE, Color.RED, Shape.DIAMOND)
card11 = Card(Fill.FULL, Count.TWO, Color.GREEN, Shape.DIAMOND)
card12 = Card(Fill.EMPTY, Count.ONE, Color.PURPLE, Shape.DIAMOND)
def test_is_set(self):
self.assertTrue(is_set(self.card1, self.card2, self.card3))
def test_is_set_false(self):
card1 = Card(Fill.FULL, Count.ONE, Color.GREEN, Shape.OVAL)
card2 = Card(Fill.STRIPED, Count.TWO, Color.PURPLE, Shape.OVAL)
card3 = Card(Fill.EMPTY, Count.THREE, Color.GREEN, Shape.OVAL)
self.assertFalse(is_set(card1, card2, card3))
def test_is_same_or_all_different_same(self):
self.assertTrue(
is_same_or_all_different(Fill.FULL, Fill.EMPTY, Fill.STRIPED)
)
self.assertTrue(
is_same_or_all_different(Count.ONE, Count.TWO, Count.THREE)
)
self.assertTrue(
is_same_or_all_different(Color.PURPLE, Color.RED, Color.GREEN)
)
self.assertTrue(
is_same_or_all_different(Shape.OVAL, Shape.WAVE, Shape.DIAMOND)
)
for p in [
Fill.FULL,
Fill.EMPTY,
Fill.STRIPED,
Count.ONE,
Count.TWO,
Count.THREE,
Color.PURPLE,
Color.RED,
Color.GREEN,
Shape.OVAL,
Shape.WAVE,
Shape.DIAMOND,
]:
self.assertTrue(is_same_or_all_different(p, p, p))
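# The rule being exercised: three cards form a set iff, for every property
# (fill, count, color, shape), the three values are either all equal or all
# different (a restatement of the card game's rule that is_set implements).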
def test_find_all_sets(self):
board = Board(
{
self.card1,
self.card2,
self.card3,
self.card4,
self.card5,
self.card6,
self.card7,
self.card8,
self.card9,
self.card10,
self.card11,
self.card12,
}
)
expected_sets = [
GameSet({self.card1, self.card2, self.card3}),
GameSet({self.card3, self.card4, self.card5}),
GameSet({self.card3, self.card8, self.card9}),
GameSet({self.card2, self.card5, self.card9}),
GameSet({self.card1, self.card4, self.card8}),
GameSet({self.card6, self.card7, self.card11}),
]
self.assertCountEqual(expected_sets, find_all_sets(board))
| 35.543478 | 75 | 0.596636 |
13cedf07d8effbd73a16410582b2e0bae1bfe8f9
| 12,840 |
py
|
Python
|
test/test_npu/test_network_ops/test_renorm.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_renorm.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_renorm.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestRenorm(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input_x = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input = torch.from_numpy(input_x)
return npu_input
def get_p0_result_cpu(self, input_x, dim, maxnorm=1.0):
input_x = input_x.numpy()
dims = len(input_x.shape)
shape_list = []
for i in range(dims):
if(i != dim):
shape_list = shape_list + [i]
shape_list = tuple(shape_list)
tmp = (input_x!=0)
N = np.sum(tmp, shape_list, keepdims=True)
N = np.where(N > maxnorm, maxnorm/(N+1e-7), 1.0)
output = input_x * N
return output
def cpu_op_exec(self, input_x, p, dim, maxnorm):
if(p==0):
output = self.get_p0_result_cpu(input_x, dim, maxnorm)
else:
output = torch.renorm(input_x, p, dim, maxnorm)
output = output.numpy()
return output.astype(np.float32)
def npu_op_exec(self, input_x, p, dim, maxnorm):
input1 = input_x.to("npu")
output = torch.renorm(input1, p, dim, maxnorm)
output = output.to("cpu")
output = output.numpy()
return output
def npu_op_exec_out(self, input_x, p, dim, maxnorm, output_y):
input_x = input_x.to("npu")
output_y = output_y.to("npu")
torch.renorm(input_x, p, dim, maxnorm, out=output_y)
output_y = output_y.to("cpu")
output_y = output_y.numpy()
return output_y
def npu_op_exec_inplace(self, input_x, p, dim, maxnorm):
input_x = input_x.to("npu")
input_x.renorm_(p, dim, maxnorm)
output = input_x.to("cpu")
output = output.numpy()
return output
def test_renorm_3_3_4_0_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 4, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 1, 1)
npu_output1 = self.npu_op_exec(input_x1, 1, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_0_0_1_float16(self, device):
input_x1 = self.generate_data(-10, 10, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 0, 0, 1).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 0, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_0_0_1(self, device):
input_x1 = self.generate_data(-10, 10, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 0, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 0, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_4_0_1_float16(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 4, 0, 1).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 4, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1_float16(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 1, 1, 1).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 1, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_0_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 1, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_1_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 1, 1)
npu_output1 = self.npu_op_exec(input_x1, 3, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_2_2_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 2, 1)
npu_output1 = self.npu_op_exec(input_x1, 2, 2, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_2_0_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 2, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_3_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 3, 1)
npu_output1 = self.npu_op_exec(input_x1, 3, 3, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_4_4_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 4, 1)
npu_output1 = self.npu_op_exec(input_x1, 4, 4, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_4_0_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 0, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 4, 0, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 1, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 1, 1, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_0_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 0, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 1, 0, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_1_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 1, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 3, 1, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_30_40_50_2_1_1_out_fp16(self, device):
input_x1 = self.generate_data(-1, 1, (30, 40, 50), np.float16)
output_y = self.generate_data(-1, 1, (30, 40, 50), np.float16)
input_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_cpu, 2, 1, 1)
cpu_output1 = cpu_output1.astype(np.float16)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 1, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_30_40_50_2_0_2_out_fp16(self, device):
input_x1 = self.generate_data(-1, 1, (30, 40, 50), np.float16)
output_y = self.generate_data(-1, 1, (30, 40, 50), np.float16)
input_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_cpu, 2, 0, 2)
cpu_output1 = cpu_output1.astype(np.float16)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 0, 2, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_2_2_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 2, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 2, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_2_0_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 0, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 0, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_3_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 3, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 3, 3, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_4_4_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 4, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 4, 4, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_4_0_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 0, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 4, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 1, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 1, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_0_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 0, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 1, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_1_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 1, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 3, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_2_2_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 2, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 2, 2, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_2_0_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 0, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 2, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_3_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 3, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 3, 3, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_4_4_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 4, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 4, 4, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
instantiate_device_type_tests(TestRenorm, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
| 47.032967 | 80 | 0.657399 |
13e3d41212376737db1f2bc90807c8cf5cb99f96
| 38 |
py
|
Python
|
python_lessons/freecodecamp_python/009_fruit_banana.py
|
1986MMartin/coding-sections-markus
|
e13be32e5d83e69250ecfb3c76a04ee48a320607
|
[
"Apache-2.0"
] | null | null | null |
python_lessons/freecodecamp_python/009_fruit_banana.py
|
1986MMartin/coding-sections-markus
|
e13be32e5d83e69250ecfb3c76a04ee48a320607
|
[
"Apache-2.0"
] | null | null | null |
python_lessons/freecodecamp_python/009_fruit_banana.py
|
1986MMartin/coding-sections-markus
|
e13be32e5d83e69250ecfb3c76a04ee48a320607
|
[
"Apache-2.0"
] | null | null | null |
fruit = "banana"
x = fruit[1]
print(x)
| 12.666667 | 16 | 0.631579 |
b9249ca4ced9ac73dad0aa8a6e0d563081958b02
| 500 |
py
|
Python
|
Zh3r0/2021/crypto/1n_jection/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
Zh3r0/2021/crypto/1n_jection/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
Zh3r0/2021/crypto/1n_jection/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
from secret import flag
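# nk2n folds a list of integers into one integer by recursively applying the
# Cantor pairing function pi(i, j) = (i + j)*(i + j + 1)//2 + j, a bijection
# N x N -> N, so the printed number can be unwound pair by pair to recover
# the flag.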
def nk2n(nk):
l = len(nk)
if l==1:
return nk[0]
elif l==2:
i,j = nk
return ((i+j)*(i+j+1))//2 +j
return nk2n([nk2n(nk[:l-l//2]), nk2n(nk[l-l//2:])])
print(nk2n(flag))
#2597749519984520018193538914972744028780767067373210633843441892910830749749277631182596420937027368405416666234869030284255514216592219508067528406889067888675964979055810441575553504341722797908073355991646423732420612775191216409926513346494355434293682149298585
| 35.714286 | 266 | 0.754 |
b9a8464dd110c56545caf49b7733cce22bf42c9f
| 1,276 |
py
|
Python
|
src/main/python/correlation/convert.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/correlation/convert.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/correlation/convert.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | 1 |
2021-04-14T00:45:38.000Z
|
2021-04-14T00:45:38.000Z
|
from PIL import Image
filename='test-50x50.jpg'
im = Image.open(filename)
import numpy
from pandas import DataFrame
# pixels = list(im.getdata())
width, height = im.size
#imgarray=numpy.array(img)
# pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]
# pixels = numpy.asarray(im)
img = Image.open(filename)
# img = img.convert("LA")
img = img.convert("RGB")
pixdata = img.load()
rows=img.size[0]
cols=img.size[1]
#scan by cols
"""
for y in range(cols):
for x in range(rows):
pixdata[x,y]=0 if pixdata[x,y]>=128 else 255
"""
x_variable=[]
y_variable=[]
pixels=numpy.zeros((rows,cols))
tag=0
# NOTE: the original read ``for i in range():``, which is a SyntaxError, and
# ``tag`` is never advanced, so all per-column counts land in row 0 of
# ``pixels``; a single pass is assumed here just to make the script run.
for i in range(1):
for width_x in range(img.size[0]):
count=0
for height_y in range(img.size[1]):
pixel = img.getpixel((width_x, height_y))
# print('start' + str(img.getpixel((width_x, height_y))))
gray = pixel[0] * 0.299 + pixel[1] * 0.587 + pixel[2] * 0.114
if (gray >10):
# pixels[height_y][width_x]=1
count=count+1
# z = (255 - z) / 255 * 255
pixels[tag][width_x]=count
pixels=DataFrame(pixels)
pixels.to_csv('pixel_data-01.csv')
# numpy.savetxt("pixel_data.csv", pixels, delimiter=",")
| 29.674419 | 74 | 0.590125 |
b9e1656ac04bb19843019aa1e296d7b670033b74
| 1,576 |
py
|
Python
|
101-symmetric-tree/101-symmetric-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
101-symmetric-tree/101-symmetric-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
101-symmetric-tree/101-symmetric-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSymmetric(self, root: Optional[TreeNode]) -> bool:
        # neither child exists
if not root.left and not root.right:
return True
# one exists - left or right
elif not root.left or not root.right:
return False
        # both left and right exist
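        # BFS the left subtree left-child-first and the right subtree
        # right-child-first; the tree is symmetric iff both level-order
        # sequences match. 101 serves as the null sentinel because node
        # values here never reach 101.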
stack1=[root.left]
output1=[root.left.val]
stack2=[root.right]
output2=[root.right.val]
# append left first
while(stack1):
cur = stack1.pop(0)
if cur.left:
stack1.append(cur.left)
output1.append(cur.left.val)
else:
output1.append(101) # 101 = null
if cur.right:
stack1.append(cur.right)
output1.append(cur.right.val)
else:
output1.append(101)
# append right first
while(stack2):
cur = stack2.pop(0)
if cur.right:
output2.append(cur.right.val)
stack2.append(cur.right)
else:
output2.append(101)
if cur.left:
output2.append(cur.left.val)
stack2.append(cur.left)
else:
output2.append(101)
if output1==output2:
return True
else:
return False
| 30.901961 | 60 | 0.499365 |
6a26d705512ea29e001746622e4f91c9eb18ed9c
| 273 |
py
|
Python
|
programm/skype.py
|
team172011/ps_cagebot
|
ab6f7bdbc74ad3baee3feebc4b7b0fa4f726b179
|
[
"MIT"
] | null | null | null |
programm/skype.py
|
team172011/ps_cagebot
|
ab6f7bdbc74ad3baee3feebc4b7b0fa4f726b179
|
[
"MIT"
] | null | null | null |
programm/skype.py
|
team172011/ps_cagebot
|
ab6f7bdbc74ad3baee3feebc4b7b0fa4f726b179
|
[
"MIT"
] | null | null | null |
"""
Script to start video chat by calling a skype contact
@author: wimmer, simon-justus
"""
import subprocess
def call(username):
command = "C:\Users\ITM2\Surrogate\ps_cagebot\programm\callsimelton91.cmd {}".format(username)
subprocess.call(command, shell=False)
| 24.818182 | 98 | 0.747253 |
6a3559781bbede3aaca8efbc6d2bcbb75ca6c516
| 2,972 |
py
|
Python
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/utils/label_converter.py
|
huaweicloud/ModelArts-Lab
|
75d06fb70d81469cc23cd422200877ce443866be
|
[
"Apache-2.0"
] | 1,045 |
2019-05-09T02:50:43.000Z
|
2022-03-31T06:22:11.000Z
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/utils/label_converter.py
|
huaweicloud/ModelArts-Lab
|
75d06fb70d81469cc23cd422200877ce443866be
|
[
"Apache-2.0"
] | 1,468 |
2019-05-16T00:48:18.000Z
|
2022-03-08T04:12:44.000Z
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/utils/label_converter.py
|
huaweicloud/ModelArts-Lab
|
75d06fb70d81469cc23cd422200877ce443866be
|
[
"Apache-2.0"
] | 1,077 |
2019-05-09T02:50:53.000Z
|
2022-03-27T11:05:32.000Z
|
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from xml.dom import minidom
import random
import cv2
import os
def generateXml(xml_path, boxes, w, h, d):
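    # Emits a Pascal-VOC-style annotation: one <size> block plus an <object>
    # with a <bndbox> per entry in ``boxes`` (name, xmin, ymin, xmax, ymax).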
impl = minidom.getDOMImplementation()
doc = impl.createDocument(None, None, None)
rootElement = doc.createElement('annotation')
sizeElement = doc.createElement("size")
width = doc.createElement("width")
width.appendChild(doc.createTextNode(str(w)))
sizeElement.appendChild(width)
height = doc.createElement("height")
height.appendChild(doc.createTextNode(str(h)))
sizeElement.appendChild(height)
depth = doc.createElement("depth")
depth.appendChild(doc.createTextNode(str(d)))
sizeElement.appendChild(depth)
rootElement.appendChild(sizeElement)
for item in boxes:
objElement = doc.createElement('object')
nameElement = doc.createElement("name")
nameElement.appendChild(doc.createTextNode(str(item[0])))
objElement.appendChild(nameElement)
difficultElement = doc.createElement("difficult")
difficultElement.appendChild(doc.createTextNode(str(0)))
objElement.appendChild(difficultElement)
bndElement = doc.createElement('bndbox')
xmin = doc.createElement('xmin')
xmin.appendChild(doc.createTextNode(str(item[1])))
bndElement.appendChild(xmin)
ymin = doc.createElement('ymin')
ymin.appendChild(doc.createTextNode(str(item[2])))
bndElement.appendChild(ymin)
xmax = doc.createElement('xmax')
xmax.appendChild(doc.createTextNode(str(item[3])))
bndElement.appendChild(xmax)
ymax = doc.createElement('ymax')
ymax.appendChild(doc.createTextNode(str(item[4])))
bndElement.appendChild(ymax)
objElement.appendChild(bndElement)
rootElement.appendChild(objElement)
doc.appendChild(rootElement)
f = open(xml_path, 'w')
doc.writexml(f, addindent=' ', newl='\n')
f.close()
Index = 0
exp_path='./DeepLeague100K/origin_data/train'
def export(npz_file_name, exp_path):
global Index
np_obj = np.load(npz_file_name)
    print(len(np_obj['images']))
for image, boxes in zip(np_obj['images'], np_obj['boxes']):
img = Image.fromarray(image)
img = np.array(img, dtype = np.uint8)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
generateXml(exp_path + '/Annotations/' + str(Index) + '.xml', boxes, img.shape[0], img.shape[1], img.shape[2])
cv2.imwrite(exp_path + '/Images/' + str(Index) + '.jpg', img)
Index += 1
if __name__ == '__main__':
root_path = './DeepLeague100K/clusters_cleaned/train/'
npz_names = os.listdir(root_path)
for item in npz_names:
export(os.path.join(root_path, item), './DeepLeague100K/lol/train')
root_path = './DeepLeague100K/clusters_cleaned/val/'
npz_names = os.listdir(root_path)
for item in npz_names:
export(os.path.join(root_path, item), './DeepLeague100K/lol/eval')
| 36.691358 | 118 | 0.68607 |
e02989408f61397fb05fddf021831b6b7fab7062
| 27,549 |
py
|
Python
|
src/onegov/swissvotes/models/vote.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/models/vote.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/models/vote.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from cached_property import cached_property
from collections import OrderedDict
from onegov.core.orm import Base
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import content_property
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import JSON
from onegov.core.utils import Bunch
from onegov.pdf.utils import extract_pdf_info
from onegov.swissvotes import _
from onegov.swissvotes.models.actor import Actor
from onegov.swissvotes.models.file import FileSubCollection
from onegov.swissvotes.models.file import LocalizedFile
from onegov.swissvotes.models.file import LocalizedFiles
from onegov.swissvotes.models.policy_area import PolicyArea
from onegov.swissvotes.models.region import Region
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy import Text
from sqlalchemy_utils import observes
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy.orm import deferred
from urllib.parse import urlparse
from urllib.parse import urlunparse
class encoded_property(object):
""" A shorthand property to return the label of an encoded value. Requires
the instance the have a `codes`-lookup function. Creates the SqlAlchemy
Column (with a prefixed underline).
Example:
class MyClass(object):
value = encoded_property()
def codes(self, attributes):
return {0: 'None', 1: 'One'}
"""
def __init__(self, nullable=True):
self.nullable = nullable
def __set_name__(self, owner, name):
self.name = name
assert not hasattr(owner, f'_{name}')
setattr(
owner, f'_{name}', Column(name, Integer, nullable=self.nullable)
)
def __get__(self, instance, owner):
value = getattr(instance, f'_{self.name}')
return instance.codes(self.name).get(value)
class localized_property(object):
""" A shorthand property to return a localized attribute. Requires at least
a `xxx_de` attribute and falls back to this.
Example:
class MyClass(object):
value_de = Column(Text)
value_fr = Column(Text)
value = localized_property()
"""
def __set_name__(self, owner, name):
self.name = name
def __get__(self, instance, owner):
lang = instance.session_manager.current_locale[:2]
attribute = f'{self.name}_{lang}'
if hasattr(instance, attribute):
return getattr(instance, attribute)
return getattr(instance, f'{self.name}_de', None)
class SwissVote(Base, TimestampMixin, LocalizedFiles, ContentMixin):
""" A single vote as defined by the code book.
There are a lot of columns:
- Some general, ready to be used attributes (bfs_number, ...)
    - Encoded attributes, where the raw integer value is stored prefixed with
      an underscore and the attribute returns a translatable label by using the
      ``codes`` function, e.g. ``_legal_form``, ``legal_form`` and
      ``codes('legal_form')``.
- Descriptors, easily accessible by using ``policy_areas``.
- A lot of lazy loaded, cantonal results only used when importing/exporting
the dataset.
    - Recommendations from different parties and associations. Internally
      stored as JSON and easily accessible and grouped by slogan with
``recommendations_parties``, ``recommendations_divergent_parties`` and
``recommendations_associations``.
- Different localized attachments, some of them indexed for full text
search.
- Metadata from external information sources such as Museum für Gestaltung
can be stored in the content or meta field provided by the
``ContentMixin``.
"""
__tablename__ = 'swissvotes'
ORGANIZATION_NO_LONGER_EXISTS = 9999
@staticmethod
def codes(attribute):
""" Returns the codes for the given attribute as defined in the code
book.
"""
if attribute == 'legal_form':
return OrderedDict((
(1, _("Mandatory referendum")),
(2, _("Optional referendum")),
(3, _("Popular initiative")),
(4, _("Direct counter-proposal")),
(5, _("Tie-breaker")),
))
if attribute == 'result' or attribute.endswith('_accepted'):
return OrderedDict((
(0, _("Rejected")),
(1, _("Accepted")),
(3, _("Majority of the cantons not necessary")),
(8, _("Counter-proposal preferred")),
(9, _("Popular initiative preferred")),
))
if attribute in (
'position_council_of_states',
'position_federal_council',
'position_national_council',
'position_parliament',
):
return OrderedDict((
(1, _("Accepting")),
(2, _("Rejecting")),
(3, _("None")),
(8, _("Preference for the counter-proposal")),
(9, _("Preference for the popular initiative")),
))
if attribute == 'recommendation':
# Sorted by how it should be displayed in strengths table
return OrderedDict((
(1, _("Yea")),
(9, _("Preference for the popular initiative")),
(2, _("Nay")),
(8, _("Preference for the counter-proposal")),
(4, _("Empty")),
(5, _("Free vote")),
(3, _("None")),
(66, _("Neutral")),
(9999, _("Organization no longer exists")),
(None, _("unknown"))
))
@staticmethod
def metadata_codes(attribute):
if attribute == 'position':
return OrderedDict((
('yes', _("Yes")),
('no', _("No")),
('neutral', _("Neutral")),
('mixed', _("Mixed")),
))
if attribute == 'language':
return OrderedDict((
('de', _('German')),
('fr', _('French')),
('it', _('Italian')),
('rm', _('Rhaeto-Romanic')),
('mixed', _('Mixed')),
))
if attribute == 'doctype':
return OrderedDict((
('argument', _('Argumentarium')),
('article', _('Press article')),
('release', _('Media release')),
('lecture', _('Lecture')),
('leaflet', _('Leaflet')),
('essay', _('Essay')),
('letter', _('Letter')),
('legal', _('Legal text')),
('other', _('Other')),
))
raise RuntimeError(f"No codes available for '{attribute}'")
id = Column(Integer, nullable=False, primary_key=True)
# Formal description
bfs_number = Column(Numeric(8, 2), nullable=False)
date = Column(Date, nullable=False)
title_de = Column(Text, nullable=False)
title_fr = Column(Text, nullable=False)
title = localized_property()
short_title_de = Column(Text, nullable=False)
short_title_fr = Column(Text, nullable=False)
short_title = localized_property()
brief_description_title = Column(Text)
keyword = Column(Text)
legal_form = encoded_property(nullable=False)
initiator = Column(Text)
anneepolitique = Column(Text)
bfs_map_de = Column(Text)
bfs_map_fr = Column(Text)
bfs_map = localized_property()
@property
def bfs_map_host(self):
""" Returns the Host of the BFS Map link for CSP. """
try:
return urlunparse(list(urlparse(self.bfs_map)[:2]) + 4 * [''])
except ValueError:
pass
# Additional links
link_curia_vista_de = content_property()
link_curia_vista_fr = content_property()
link_curia_vista = localized_property()
link_bk_results_de = content_property()
link_bk_results_fr = content_property()
link_bk_results = localized_property()
link_bk_chrono_de = content_property()
link_bk_chrono_fr = content_property()
link_bk_chrono = localized_property()
link_federal_council_de = content_property()
link_federal_council_fr = content_property()
link_federal_council_en = content_property()
link_federal_council = localized_property()
link_federal_departement_de = content_property()
link_federal_departement_fr = content_property()
link_federal_departement_en = content_property()
link_federal_departement = localized_property()
link_federal_office_de = content_property()
link_federal_office_fr = content_property()
link_federal_office_en = content_property()
link_federal_office = localized_property()
link_post_vote_poll_de = content_property()
link_post_vote_poll_fr = content_property()
link_post_vote_poll_en = content_property()
link_post_vote_poll = localized_property()
# space-separated poster URLs coming from the dataset
posters_mfg_yea = Column(Text)
posters_mfg_nay = Column(Text)
posters_sa_yea = Column(Text)
posters_sa_nay = Column(Text)
# Fetched list of image urls using MfG API
posters_mfg_yea_imgs = content_property(default=dict)
posters_mfg_nay_imgs = content_property(default=dict)
# Fetched list of image urls using SA API
posters_sa_yea_imgs = content_property(default=dict)
posters_sa_nay_imgs = content_property(default=dict)
def posters(self, request):
result = {'yea': [], 'nay': []}
for key, attribute, label in (
('yea', 'posters_mfg_yea', _('Link eMuseum.ch')),
('nay', 'posters_mfg_nay', _('Link eMuseum.ch')),
('yea', 'posters_sa_yea', _('Link Social Archives')),
('nay', 'posters_sa_nay', _('Link Social Archives')),
):
images = getattr(self, f'{attribute}_imgs')
urls = (getattr(self, attribute) or '').strip().split(' ')
for url in urls:
image = images.get(url)
if image:
result[key].append(
Bunch(
thumbnail=image,
image=image,
url=url,
label=label
)
)
for key, attribute, label in (
('yea', 'campaign_material_yea', _('Swissvotes database')),
('nay', 'campaign_material_nay', _('Swissvotes database')),
):
for image in getattr(self, attribute):
result[key].append(
Bunch(
thumbnail=request.link(image, 'thumbnail'),
image=request.link(image),
url=None,
label=label
)
)
return result
# Media
media_ads_total = Column(Integer)
media_ads_yea_p = Column(Numeric(13, 10))
media_coverage_articles_total = Column(Integer)
media_coverage_tonality_total = Column(Numeric(13, 10))
# Descriptor
descriptor_1_level_1 = Column(Numeric(8, 4))
descriptor_1_level_2 = Column(Numeric(8, 4))
descriptor_1_level_3 = Column(Numeric(8, 4))
descriptor_2_level_1 = Column(Numeric(8, 4))
descriptor_2_level_2 = Column(Numeric(8, 4))
descriptor_2_level_3 = Column(Numeric(8, 4))
descriptor_3_level_1 = Column(Numeric(8, 4))
descriptor_3_level_2 = Column(Numeric(8, 4))
descriptor_3_level_3 = Column(Numeric(8, 4))
@cached_property
def policy_areas(self):
""" Returns the policy areas / descriptors of the vote. """
def get_level(number, level):
value = getattr(self, f'descriptor_{number}_level_{level}')
if value is not None:
return PolicyArea(value, level)
result = []
for number in (1, 2, 3):
for level in (3, 2, 1):
area = get_level(number, level)
if area:
result.append(area)
break
return result
# Result
result = encoded_property()
result_turnout = Column(Numeric(13, 10))
result_people_accepted = encoded_property()
result_people_yeas_p = Column(Numeric(13, 10))
result_cantons_accepted = encoded_property()
result_cantons_yeas = Column(Numeric(3, 1))
result_cantons_nays = Column(Numeric(3, 1))
result_ag_accepted = encoded_property()
result_ai_accepted = encoded_property()
result_ar_accepted = encoded_property()
result_be_accepted = encoded_property()
result_bl_accepted = encoded_property()
result_bs_accepted = encoded_property()
result_fr_accepted = encoded_property()
result_ge_accepted = encoded_property()
result_gl_accepted = encoded_property()
result_gr_accepted = encoded_property()
result_ju_accepted = encoded_property()
result_lu_accepted = encoded_property()
result_ne_accepted = encoded_property()
result_nw_accepted = encoded_property()
result_ow_accepted = encoded_property()
result_sg_accepted = encoded_property()
result_sh_accepted = encoded_property()
result_so_accepted = encoded_property()
result_sz_accepted = encoded_property()
result_tg_accepted = encoded_property()
result_ti_accepted = encoded_property()
result_ur_accepted = encoded_property()
result_vd_accepted = encoded_property()
result_vs_accepted = encoded_property()
result_zg_accepted = encoded_property()
result_zh_accepted = encoded_property()
@cached_property
def results_cantons(self):
""" Returns the results of all cantons. """
result = {}
for canton in Region.cantons():
value = getattr(self, f'_result_{canton}_accepted')
if value is not None:
result.setdefault(value, []).append(Region(canton))
codes = self.codes('result_accepted')
return OrderedDict([
(codes[key], result[key])
for key in sorted(result.keys())
])
# Authorities
procedure_number = Column(Text)
position_federal_council = encoded_property()
position_parliament = encoded_property()
position_national_council = encoded_property()
position_national_council_yeas = Column(Integer)
position_national_council_nays = Column(Integer)
position_council_of_states = encoded_property()
position_council_of_states_yeas = Column(Integer)
position_council_of_states_nays = Column(Integer)
# Duration
duration_federal_assembly = Column(Integer)
duration_initative_collection = Column(Integer)
duration_referendum_collection = Column(Integer)
signatures_valid = Column(Integer)
# Voting recommendations
recommendations = Column(JSON, nullable=False, default=dict)
recommendations_other_yes = Column(Text)
recommendations_other_no = Column(Text)
recommendations_other_counter_proposal = Column(Text)
recommendations_other_popular_initiative = Column(Text)
recommendations_other_free = Column(Text)
recommendations_divergent = Column(JSON, nullable=False, default=dict)
def get_recommendation(self, name):
""" Get the recommendations by name. """
return self.codes('recommendation').get(
self.recommendations.get(name)
)
def get_recommendation_of_existing_parties(self):
""" Get only the existing parties as when this vote was conducted """
if not self.recommendations:
return {}
return {
k: v for k, v in self.recommendations.items()
if v != self.ORGANIZATION_NO_LONGER_EXISTS
}
def group_recommendations(self, recommendations, ignore_unknown=False):
""" Group the given recommendations by slogan. """
codes = self.codes('recommendation')
recommendation_codes = list(codes.keys())
def by_recommendation(reco):
return recommendation_codes.index(reco)
result = {}
for actor, recommendation in recommendations:
if recommendation == self.ORGANIZATION_NO_LONGER_EXISTS:
continue
if ignore_unknown and recommendation is None:
continue
result.setdefault(recommendation, []).append(actor)
return OrderedDict([
(codes[key], result[key])
for key in sorted(result.keys(), key=by_recommendation)
])
def get_actors_share(self, actor):
assert isinstance(actor, str), 'Actor must be a string'
attr = f'national_council_share_{actor}'
return getattr(self, attr, 0) or 0
@cached_property
def sorted_actors_list(self):
"""
Returns a list of actors of the current vote sorted by:
1. codes for recommendations (strength table)
2. by electoral share (descending)
        It filters out parties that have no electoral share.
"""
result = []
for slogan, actor_list in self.recommendations_parties.items():
actors = (d.name for d in actor_list)
# Filter out those who have None as share
result.extend(
sorted(actors, key=self.get_actors_share, reverse=True)
)
return result
@cached_property
def recommendations_parties(self):
""" The recommendations of the parties grouped by slogans. """
recommendations = self.recommendations or {}
return self.group_recommendations((
(Actor(name), recommendations.get(name))
for name in Actor.parties()
), ignore_unknown=True)
@cached_property
def recommendations_divergent_parties(self, ignore_unknown=True):
""" The divergent recommendations of the parties grouped by slogans.
"""
recommendations = self.recommendations_divergent or {}
return self.group_recommendations((
(
(Actor(name.split('_')[0]), Region(name.split('_')[1])),
recommendation,
)
for name, recommendation in sorted(recommendations.items())
), ignore_unknown=ignore_unknown)
@cached_property
def recommendations_associations(self):
""" The recommendations of the associations grouped by slogans. """
def as_list(attribute, code):
value = getattr(self, f'recommendations_other_{attribute}')
return [
(Actor(name.strip()), code)
for name in (value or '').split(',')
if name.strip()
]
recommendations = self.recommendations or {}
recommendations = [
(Actor(name), recommendations.get(name))
for name in Actor.associations()
]
for attribute, code in (
('yes', 1),
('no', 2),
('free', 5),
('counter_proposal', 8),
('popular_initiative', 9),
):
recommendations.extend(as_list(attribute, code))
return self.group_recommendations(recommendations, ignore_unknown=True)
# Electoral strength
national_council_election_year = Column(Integer)
# drop?
national_council_share_fdp = Column(Numeric(13, 10))
national_council_share_cvp = Column(Numeric(13, 10))
national_council_share_sps = Column(Numeric(13, 10))
national_council_share_svp = Column(Numeric(13, 10))
national_council_share_lps = Column(Numeric(13, 10))
national_council_share_ldu = Column(Numeric(13, 10))
national_council_share_evp = Column(Numeric(13, 10))
national_council_share_csp = Column(Numeric(13, 10))
national_council_share_pda = Column(Numeric(13, 10))
national_council_share_poch = Column(Numeric(13, 10))
national_council_share_gps = Column(Numeric(13, 10))
national_council_share_sd = Column(Numeric(13, 10))
national_council_share_rep = Column(Numeric(13, 10))
national_council_share_edu = Column(Numeric(13, 10))
national_council_share_fps = Column(Numeric(13, 10))
national_council_share_lega = Column(Numeric(13, 10))
national_council_share_kvp = Column(Numeric(13, 10))
national_council_share_glp = Column(Numeric(13, 10))
national_council_share_bdp = Column(Numeric(13, 10))
national_council_share_mcg = Column(Numeric(13, 10))
national_council_share_mitte = Column(Numeric(13, 10))
national_council_share_ubrige = Column(Numeric(13, 10))
national_council_share_yeas = Column(Numeric(13, 10))
national_council_share_nays = Column(Numeric(13, 10))
national_council_share_none = Column(Numeric(13, 10))
national_council_share_empty = Column(Numeric(13, 10))
national_council_share_free_vote = Column(Numeric(13, 10))
national_council_share_neutral = Column(Numeric(13, 10))
national_council_share_unknown = Column(Numeric(13, 10))
@cached_property
def has_national_council_share_data(self):
if self.national_council_election_year:
return True
return False
# attachments
voting_text = LocalizedFile(
label=_('Voting text'),
extension='pdf',
static_views={
'de_CH': 'abstimmungstext-de.pdf',
'fr_CH': 'abstimmungstext-fr.pdf',
}
)
brief_description = LocalizedFile(
label=_('Brief description Swissvotes'),
extension='pdf',
static_views={
'de_CH': 'kurzbeschreibung.pdf',
}
)
federal_council_message = LocalizedFile(
label=_('Federal council message'),
extension='pdf',
static_views={
'de_CH': 'botschaft-de.pdf',
'fr_CH': 'botschaft-fr.pdf',
}
)
parliamentary_debate = LocalizedFile(
label=_('Parliamentary debate'),
extension='pdf',
static_views={
'de_CH': 'parlamentsberatung.pdf',
}
)
voting_booklet = LocalizedFile(
label=_('Voting booklet'),
extension='pdf',
static_views={
'de_CH': 'brochure-de.pdf',
'fr_CH': 'brochure-fr.pdf',
}
)
resolution = LocalizedFile(
label=_('Resolution'),
extension='pdf',
static_views={
'de_CH': 'erwahrung-de.pdf',
'fr_CH': 'erwahrung-fr.pdf',
}
)
realization = LocalizedFile(
label=_('Realization'),
extension='pdf',
static_views={
'de_CH': 'zustandekommen-de.pdf',
'fr_CH': 'zustandekommen-fr.pdf',
}
)
ad_analysis = LocalizedFile(
label=_('Analysis of the advertising campaign by Année Politique'),
extension='pdf',
static_views={
'de_CH': 'inserateanalyse.pdf',
}
)
results_by_domain = LocalizedFile(
label=_('Result by canton, district and municipality'),
extension='xlsx',
static_views={
'de_CH': 'staatsebenen.xlsx',
}
)
foeg_analysis = LocalizedFile(
label=_('Media coverage: fög analysis'),
extension='pdf',
static_views={
'de_CH': 'medienanalyse.pdf',
}
)
post_vote_poll = LocalizedFile(
label=_('Full analysis of post-vote poll results'),
extension='pdf',
static_views={
'de_CH': 'nachbefragung-de.pdf',
'fr_CH': 'nachbefragung-fr.pdf',
}
)
post_vote_poll_methodology = LocalizedFile(
label=_('Questionnaire of the poll'),
extension='pdf',
static_views={
'de_CH': 'nachbefragung-methode-de.pdf',
'fr_CH': 'nachbefragung-methode-fr.pdf',
}
)
post_vote_poll_dataset = LocalizedFile(
label=_('Dataset of the post-vote poll'),
extension='csv',
static_views={
'de_CH': 'nachbefragung.csv',
}
)
post_vote_poll_dataset_sav = LocalizedFile(
label=_('Dataset of the post-vote poll'),
extension='sav',
static_views={
'de_CH': 'nachbefragung.sav',
}
)
post_vote_poll_dataset_dta = LocalizedFile(
label=_('Dataset of the post-vote poll'),
extension='dta',
static_views={
'de_CH': 'nachbefragung.dta',
}
)
post_vote_poll_codebook = LocalizedFile(
label=_('Codebook for the post-vote poll'),
extension='pdf',
static_views={
'de_CH': 'nachbefragung-codebuch-de.pdf',
'fr_CH': 'nachbefragung-codebuch-fr.pdf',
}
)
post_vote_poll_codebook_xlsx = LocalizedFile(
label=_('Codebook for the post-vote poll'),
extension='xlsx',
static_views={
'de_CH': 'nachbefragung-codebuch-de.xlsx',
'fr_CH': 'nachbefragung-codebuch-fr.xlsx',
}
)
post_vote_poll_report = LocalizedFile(
label=_('Technical report of the post-vote poll'),
extension='pdf',
static_views={
'de_CH': 'nachbefragung-technischer-bericht.pdf',
}
)
preliminary_examination = LocalizedFile(
label=_('Preliminary examination'),
extension='pdf',
static_views={
'de_CH': 'vorpruefung-de.pdf',
'fr_CH': 'vorpruefung-fr.pdf',
}
)
campaign_material_yea = FileSubCollection()
campaign_material_nay = FileSubCollection()
campaign_material_other = FileSubCollection()
campaign_material_metadata = Column(JSON, nullable=False, default=dict)
# searchable attachment texts
searchable_text_de_CH = deferred(Column(TSVECTOR))
searchable_text_fr_CH = deferred(Column(TSVECTOR))
indexed_files = {
'voting_text',
'brief_description',
'federal_council_message',
'parliamentary_debate',
# we don't include the voting booklet, resolution and ad analysis
# - they might contain other votes from the same day!
'realization',
'preliminary_examination'
}
def vectorize_files(self):
""" Extract the text from the indexed files and store it. """
for locale, language in (('de_CH', 'german'), ('fr_CH', 'french')):
files = [
SwissVote.__dict__[file].__get_by_locale__(self, locale)
for file in self.indexed_files
]
text = ' '.join([
extract_pdf_info(file.reference.file)[1] or ''
for file in files if file
]).strip()
setattr(
self,
f'searchable_text_{locale}',
func.to_tsvector(language, text)
)
@observes('files')
def files_observer(self, files):
self.vectorize_files()
def get_file(self, name, locale=None, fallback=True):
""" Returns the requested localized file.
Uses the current locale if no locale is given.
Falls back to the default locale if the file is not available in the
requested locale.
"""
get = SwissVote.__dict__.get(name).__get_by_locale__
default_locale = self.session_manager.default_locale
fallback = get(self, default_locale) if fallback else None
result = get(self, locale) if locale else getattr(self, name, None)
return result or fallback
| 35.593023 | 79 | 0.620748 |
e047f5bdcb57a46d7c8cfa4c5ccc97c519816d86
| 1,102 |
pyde
|
Python
|
sketches/imagepalette/imagepalette.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/imagepalette/imagepalette.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/imagepalette/imagepalette.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
# Nach einer Idee von Kevin Workman
# (https://happycoding.io/examples/p5js/images/image-palette)
WIDTH = 800
HEIGHT = 640
palette = ["#264653", "#2a9d8f", "#e9c46a", "#f4a261", "#e76f51"]
def setup():
global img
size(WIDTH, HEIGHT)
this.surface.setTitle("Image Palette")
img = loadImage("akt.jpg")
image(img, 0, 0)
noLoop()
def draw():
    global img  # y is purely local to the loops below
for x in range(width/2):
for y in range(height):
img_color = img.get(x, y)
palette_color = get_palette_color(img_color)
set(x + width/2, y, palette_color)
def get_palette_color(img_color):
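    # Nearest-palette match: compare the pixel to every palette entry by
    # Euclidean distance in RGB space (Processing's dist()) and keep the
    # closest one.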
min_distance = 999999
img_r = red(img_color)
img_g = green(img_color)
img_b = blue(img_color)
for c in palette:
palette_r = red(c)
palette_g = green(c)
palette_b = blue(c)
color_distance = dist(img_r, img_g, img_b,
palette_r, palette_g, palette_b)
if color_distance < min_distance:
target_color = c
min_distance = color_distance
return(target_color)
| 26.878049 | 65 | 0.598004 |
164fa02725b62e0ebdec2154127c81d33176130a
| 505 |
py
|
Python
|
06.BinarySearch/HG/B2003.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1 |
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
06.BinarySearch/HG/B2003.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2 |
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
06.BinarySearch/HG/B2003.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
import sys
N, M = map(int, sys.stdin.readline().split())
arr = list(map(int, sys.stdin.readline().split()))
# print("===================")
s = 0
e = 0
answer = 0
tmp = arr[s]
while s < N and e < N:
if tmp < M:
if e+1 >= N:
break
tmp += arr[e+1]
e += 1
elif tmp == M:
answer += 1
if e+1 >= N:
break
tmp += arr[e+1]
e += 1
else:
tmp -= arr[s]
s += 1
# print(s, e, tmp, answer)
print(answer)
| 16.833333 | 50 | 0.411881 |
16b03a4bdc1791f895a738f3289a159ca31508c3
| 217 |
py
|
Python
|
src/python/py-accepted/50A.py
|
cbarnson/UVa
|
0dd73fae656613e28b5aaf5880c5dad529316270
|
[
"Unlicense",
"MIT"
] | 2 |
2019-09-07T17:00:26.000Z
|
2020-08-05T02:08:35.000Z
|
src/python/py-accepted/50A.py
|
cbarnson/UVa
|
0dd73fae656613e28b5aaf5880c5dad529316270
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/python/py-accepted/50A.py
|
cbarnson/UVa
|
0dd73fae656613e28b5aaf5880c5dad529316270
|
[
"Unlicense",
"MIT"
] | null | null | null |
#! python
# Problem # : 50A
# Created on : 2019-01-14 21:29:26
def Main():
m, n = map(int, input().split(' '))
val = m * n
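    # A 2x1 domino covers two cells, and floor(m*n / 2) dominoes can always
    # be placed on an m x n board, so that count is the answer.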
cnt = int(val / 2)
print(cnt)
if __name__ == '__main__':
Main()
| 13.5625 | 39 | 0.502304 |
bcf6984647315163fa6dfa3c70fcb9ccd945a1c1
| 4,005 |
py
|
Python
|
tests/test_tarifeinschraenkung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
tests/test_tarifeinschraenkung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
tests/test_tarifeinschraenkung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import pytest # type:ignore[import]
from bo4e.com.geraet import Geraet
from bo4e.com.geraeteeigenschaften import Geraeteeigenschaften
from bo4e.com.menge import Menge # type:ignore[import]
from bo4e.com.tarifeinschraenkung import Tarifeinschraenkung, TarifeinschraenkungSchema
from bo4e.enum.geraetemerkmal import Geraetemerkmal
from bo4e.enum.geraetetyp import Geraetetyp
from bo4e.enum.mengeneinheit import Mengeneinheit
from bo4e.enum.voraussetzungen import Voraussetzungen
from tests.serialization_helper import assert_serialization_roundtrip # type:ignore[import]
example_tarifeinschraenkung = Tarifeinschraenkung(
zusatzprodukte=["foo", "bar"],
voraussetzungen=[Voraussetzungen.ALTVERTRAG, Voraussetzungen.DIREKTVERTRIEB],
einschraenkungzaehler=[
Geraet(
geraetenummer="0815",
geraeteeigenschaften=Geraeteeigenschaften(
geraetemerkmal=Geraetemerkmal.GAS_G1000,
geraetetyp=Geraetetyp.MULTIPLEXANLAGE,
),
),
Geraet(geraetenummer="197foo"),
],
einschraenkungleistung=[
Menge(wert=Decimal(12.5), einheit=Mengeneinheit.MWH),
Menge(wert=Decimal(30), einheit=Mengeneinheit.KWH),
],
)
class TestTarifeinschraenkung:
@pytest.mark.parametrize(
"tarifeinschraenkung, expected_json_dict",
[
pytest.param(
Tarifeinschraenkung(),
{
"zusatzprodukte": None,
"voraussetzungen": None,
"einschraenkungzaehler": None,
"einschraenkungleistung": None,
},
id="minimal attributes",
),
pytest.param(
Tarifeinschraenkung(
zusatzprodukte=["foo", "bar"],
voraussetzungen=[Voraussetzungen.ALTVERTRAG, Voraussetzungen.DIREKTVERTRIEB],
einschraenkungzaehler=[
Geraet(
geraetenummer="0815",
geraeteeigenschaften=Geraeteeigenschaften(
geraetemerkmal=Geraetemerkmal.GAS_G1000,
geraetetyp=Geraetetyp.MULTIPLEXANLAGE,
),
),
Geraet(geraetenummer="197foo"),
],
einschraenkungleistung=[
Menge(wert=Decimal(12.5), einheit=Mengeneinheit.MWH),
Menge(wert=Decimal(30), einheit=Mengeneinheit.KWH),
],
),
{
"zusatzprodukte": ["foo", "bar"],
"voraussetzungen": ["ALTVERTRAG", "DIREKTVERTRIEB"],
"einschraenkungzaehler": [
{
"geraetenummer": "0815",
"geraeteeigenschaften": {"geraetemerkmal": "GAS_G1000", "geraetetyp": "MULTIPLEXANLAGE"},
},
{
"geraetenummer": "197foo",
"geraeteeigenschaften": None,
},
],
"einschraenkungleistung": [
{
"wert": "12.5",
"einheit": "MWH",
},
{
"wert": "30",
"einheit": "KWH",
},
],
},
id="maximal attributes",
),
],
)
def test_serialization_roundtrip(self, tarifeinschraenkung: Tarifeinschraenkung, expected_json_dict: dict):
"""
Test de-/serialisation of Tarifeinschraenkung
"""
assert_serialization_roundtrip(tarifeinschraenkung, TarifeinschraenkungSchema(), expected_json_dict)
| 39.653465 | 117 | 0.518352 |
d5fe099416be061df33d43283a52c1995868dbe1
| 1,037 |
py
|
Python
|
2016/day05_how_about_a_nice_game_of_chess/python/src/part2.py
|
tlake/advent-of-code
|
17c729af2af5f1d95ba6ff68771a82ca6d00b05d
|
[
"MIT"
] | null | null | null |
2016/day05_how_about_a_nice_game_of_chess/python/src/part2.py
|
tlake/advent-of-code
|
17c729af2af5f1d95ba6ff68771a82ca6d00b05d
|
[
"MIT"
] | null | null | null |
2016/day05_how_about_a_nice_game_of_chess/python/src/part2.py
|
tlake/advent-of-code
|
17c729af2af5f1d95ba6ff68771a82ca6d00b05d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Docstring."""
from hashlib import md5
from common import get_input
class PasswordFinder(object):
"""."""
def __init__(self):
"""Initialize."""
self.integer = 0
self.password = [None for x in range(8)]
def get_digest(self, source):
"""."""
return md5(source.encode()).hexdigest()
def find_password(self, door_id):
"""."""
while None in self.password:
digest = self.get_digest(str(door_id) + str(self.integer))
if digest[:5] == "00000":
try:
i = int(digest[5])
if i < 8 and self.password[i] is None:
self.password[i] = digest[6]
except ValueError:
pass
self.integer += 1
return ''.join(self.password)
if __name__ == "__main__":
finder = PasswordFinder()
print("Finding password for Door ID {}...".format(get_input()))
print(finder.find_password(get_input()))
| 24.690476 | 70 | 0.529412 |
f8d69f099f5609a2e46585f56b6c19c09e5b0f6e
| 1,460 |
py
|
Python
|
gen.py
|
bivab/-markov-university-classes
|
34e0563646c14e91cab8fb46b220b8b2f8866269
|
[
"MIT"
] | null | null | null |
gen.py
|
bivab/-markov-university-classes
|
34e0563646c14e91cab8fb46b220b8b2f8866269
|
[
"MIT"
] | null | null | null |
gen.py
|
bivab/-markov-university-classes
|
34e0563646c14e91cab8fb46b220b8b2f8866269
|
[
"MIT"
] | null | null | null |
import codecs
import sys
import markovify
BATCH_SIZE = 5
DATASETS = ['cs', 'phil', 'wiwi']
def get_all_models(state_size):
return markovify.combine([get_model(state_size, ds) for ds in DATASETS])
def get_model(state_size, dataset):
units = 'data/{0}/units.txt'.format(dataset)
abstract_units = 'data/{0}/abstract_units.txt'.format(dataset)
#
with codecs.open(units, 'r', 'utf-8') as f:
text = f.read()
model1 = markovify.NewlineText(text, state_size=state_size)
#
with codecs.open(abstract_units, 'r', 'utf-8') as f:
        text = f.read()
model2 = markovify.NewlineText(text, state_size=state_size)
#
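    # Weighted combination: transitions learned from the unit titles count
    # 1.5x as much as those from the abstract unit titles.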
model = markovify.combine([model1, model2], [ 1.5, 1 ])
return model
def main(state_size=1, dataset='phil'):
if dataset == 'ALL':
model = get_all_models(state_size)
else:
model = get_model(state_size, dataset)
for i in range(BATCH_SIZE):
print(model.make_sentence())
print("\n----------------\n")
for i in range(BATCH_SIZE):
print(model.make_short_sentence(140))
print("\n----------------\n")
try:
for i in range(BATCH_SIZE):
print(model.make_sentence_with_start("Die"))
except KeyError:
pass
if __name__ == '__main__':
kwargs = {}
if len(sys.argv) > 1:
kwargs['state_size'] = int(sys.argv[1])
if len(sys.argv) > 2:
kwargs['dataset'] = sys.argv[2]
main(**kwargs)
| 24.333333 | 76 | 0.609589 |
071cf486a8d65cffb0566b38855a718dd197e642
| 107 |
py
|
Python
|
cryptoauthlib/python/tests/__init__.py
|
PhillyNJ/SAMD21
|
0f123422ed0ad183d510add8f5d3472a16f1e8cb
|
[
"MIT"
] | 12 |
2017-11-15T08:29:03.000Z
|
2021-05-22T04:57:20.000Z
|
cryptoauthlib/python/tests/__init__.py
|
PhillyNJ/SAMD21
|
0f123422ed0ad183d510add8f5d3472a16f1e8cb
|
[
"MIT"
] | 2 |
2019-09-22T12:02:07.000Z
|
2021-09-09T22:38:25.000Z
|
cryptoauthlib/python/tests/__init__.py
|
PhillyNJ/SAMD21
|
0f123422ed0ad183d510add8f5d3472a16f1e8cb
|
[
"MIT"
] | 5 |
2019-04-05T13:46:44.000Z
|
2020-11-25T08:58:32.000Z
|
import os
import sys
sys.path.append(os.path.dirname(__file__))
from cryptoauthlib_mock import atcab_mock
| 17.833333 | 42 | 0.831776 |
07252063a568e34edf388de3ddcb8f5db9bbd6e1
| 466 |
py
|
Python
|
backend/api/btb/api/models.py
|
prototypefund/project-c
|
a87a49d7c1317b1e3ec03ddd0ce146ad0391b5d2
|
[
"MIT"
] | 4 |
2020-04-30T16:11:24.000Z
|
2020-06-02T10:08:07.000Z
|
backend/api/btb/api/models.py
|
prototypefund/project-c
|
a87a49d7c1317b1e3ec03ddd0ce146ad0391b5d2
|
[
"MIT"
] | 291 |
2020-04-20T13:11:13.000Z
|
2022-02-10T21:54:46.000Z
|
backend/api/btb/api/models.py
|
prototypefund/project-c
|
a87a49d7c1317b1e3ec03ddd0ce146ad0391b5d2
|
[
"MIT"
] | 2 |
2020-04-19T14:56:01.000Z
|
2020-04-19T18:09:34.000Z
|
from flask import current_app
from sqlalchemy import create_engine, text
from sqlalchemy.pool import NullPool
class DB:
def init_app(self, app):
url = app.config["SQLALCHEMY_DATABASE_URI"]
# lambda uses a single container per request model
# we do the pooling via pgbouncer
self.engine = create_engine(
url,
echo=True,
echo_pool=True,
poolclass=NullPool,
)
db = DB()
| 22.190476 | 58 | 0.622318 |
073eb2a8ae160697bd059d18d8ffe51d0cd0b35a
| 762 |
py
|
Python
|
product/migrations/0002_auto_20201030_1515.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | 1 |
2021-06-18T03:03:42.000Z
|
2021-06-18T03:03:42.000Z
|
product/migrations/0002_auto_20201030_1515.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | null | null | null |
product/migrations/0002_auto_20201030_1515.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.16 on 2020-10-30 07:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='status',
field=models.SmallIntegerField(choices=[(3, '待发货'), (2, '生产中'), (1, '备料'), (4, '订单完成')], default=1, verbose_name='生产状态'),
),
migrations.AlterField(
model_name='product',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='user.User', verbose_name='生产人员'),
),
]
| 30.48 | 144 | 0.581365 |
9194e7651831367cd37c6a412ff3e02cfb2c3b18
| 7,692 |
py
|
Python
|
features/snake_main.py
|
BogyMitutoyoCTL/Riesen-Tetris
|
8bbbaf0b7aeae7890da724d3d72719a7d068237a
|
[
"MIT"
] | 1 |
2019-04-27T07:28:52.000Z
|
2019-04-27T07:28:52.000Z
|
features/snake_main.py
|
BogyMitutoyoCTL/Riesen-Tetris
|
8bbbaf0b7aeae7890da724d3d72719a7d068237a
|
[
"MIT"
] | null | null | null |
features/snake_main.py
|
BogyMitutoyoCTL/Riesen-Tetris
|
8bbbaf0b7aeae7890da724d3d72719a7d068237a
|
[
"MIT"
] | null | null | null |
import time
from datetime import datetime
from random import random
import game_sound
from Score import Score
from features.feature import Feature
from field import Field
from highscorelist import Highscorelist, Highscoreentry
from painter import RGB_Field_Painter, Led_Matrix_Painter
BLACK = [0, 0, 0]
class Snake_Main(Feature):
def __init__(self, field_leds: Field, field_matrix: Field, rgb_field_painter: RGB_Field_Painter,
led_matrix_painter: Led_Matrix_Painter, highscorelist: Highscorelist = Highscorelist("Not_used")):
super(Snake_Main, self).__init__(field_leds, field_matrix, rgb_field_painter, led_matrix_painter, highscorelist)
self.direction = 0
self.is_there_a_direction_change_in_this_tick = False
self.food_is_on_field = False
self.field_for_snake = []
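        # Cell encoding for field_for_snake: 0 = empty, -1 = food, 1 = head,
        # larger values = body segments aged once per tick and cleared when
        # they exceed the snake's length.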
def event(self, eventname: str):
if not self.is_there_a_direction_change_in_this_tick:
if eventname == "move up":
if self.direction != 2:
self.direction = 0
self.is_there_a_direction_change_in_this_tick = True
elif eventname == "move left":
if self.direction != 3:
self.direction = 1
self.is_there_a_direction_change_in_this_tick = True
elif eventname == "move down":
if self.direction != 0:
self.direction = 2
self.is_there_a_direction_change_in_this_tick = True
elif eventname == "move right":
if self.direction != 1:
self.direction = 3
self.is_there_a_direction_change_in_this_tick = True
elif eventname == "rotate left":
self.direction += 1
if self.direction >= 4:
self.direction -= 4
self.is_there_a_direction_change_in_this_tick = True
elif eventname == "rotate right":
self.direction -= 1
if self.direction < 0:
self.direction += 4
self.is_there_a_direction_change_in_this_tick = True
def move_snake_if_possible(self):
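        # Direction codes (see event()): 0 = up, 1 = left, 2 = down, 3 = right.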
if self.direction == 0:
if self.test_for_case_of_block_in_field(self.head_x, self.head_y - 1) <= 0:
self.head_y -= 1
elif self.test_for_case_of_block_in_field(self.head_x, self.head_y - 1) == 1:
self.game_over = True
elif self.direction == 1:
if self.test_for_case_of_block_in_field(self.head_x - 1, self.head_y) <= 0:
self.head_x -= 1
elif self.test_for_case_of_block_in_field(self.head_x - 1, self.head_y) == 1:
self.game_over = True
elif self.direction == 2:
if self.test_for_case_of_block_in_field(self.head_x, self.head_y + 1) <= 0:
self.head_y += 1
elif self.test_for_case_of_block_in_field(self.head_x, self.head_y + 1) == 1:
self.game_over = True
elif self.direction == 3:
if self.test_for_case_of_block_in_field(self.head_x + 1, self.head_y) <= 0:
self.head_x += 1
elif self.test_for_case_of_block_in_field(self.head_x + 1, self.head_y) == 1:
self.game_over = True
if not self.game_over:
if self.test_for_case_of_block_in_field(self.head_x, self.head_y) == -1: # if head eats food
self.food_is_on_field = False
                self.length_of_snake += 1
self.score.score_for_block()
self.field_matrix.set_all_pixels_to_black()
self.score.draw_score_on_field(self.field_matrix)
self.led_matrix_painter.draw(self.field_matrix)
            self.turn_every_pixel_in_snakes_field_once_up()
self.field_for_snake[self.head_y][self.head_x] = 1
else:
game_sound.stop_song()
game_sound.play_sound("game_over")
self.highscorelist.add_entry(Highscoreentry(datetime.today(), self.playername, self.score.get_score_int()))
self.highscorelist.save()
self.led_matrix_painter.show_Message("Game over - Your Points: " + self.score.get_score_str(), 250)
    def turn_every_pixel_in_snakes_field_once_up(self):
for y in range(len(self.field_for_snake)):
for x in range(len(self.field_for_snake[0])):
if self.field_for_snake[y][x] > 0:
self.field_for_snake[y][x] += 1
                    if self.field_for_snake[y][x] > self.length_of_snake:
self.field_for_snake[y][x] = 0
def test_for_case_of_block_in_field(self, x: int, y: int) -> int:
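        # Classify a cell: 0 = free, -1 = food, 1 = collision (snake body
        # or out-of-bounds coordinates, which count as walls).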
if 0 <= x < len(self.field_for_snake[0]) and 0 <= y < len(self.field_for_snake):
if self.field_for_snake[y][x] == 0:
return 0
elif self.field_for_snake[y][x] < 0:
return -1
else:
return 1
else:
return 1
def translate_snakes_field_into_normal_field(self):
self.field_leds.set_all_pixels_to_black()
for y in range(self.field_leds.height):
for x in range(self.field_leds.width):
if self.field_for_snake[y][x] == 1:
self.field_leds.field[y][x] = [255, 0, 0]
elif self.field_for_snake[y][x] > 1:
self.field_leds.field[y][x] = [0, 255, 0]
elif self.field_for_snake[y][x] == -1:
self.field_leds.field[y][x] = [0, 0, 255]
def test_and_print_food(self):
if not self.food_is_on_field:
while not self.food_is_on_field:
self.food_x = int(random()*len(self.field_for_snake[0]))
self.food_y = int(random()*len(self.field_for_snake))
if self.test_for_case_of_block_in_field(self.food_x, self.food_y) == 0:
self.food_is_on_field = True
self.field_for_snake[self.food_y][self.food_x] = -1
def tick(self):
if not self.game_over:
self.move_snake_if_possible()
self.test_and_print_food()
self.translate_snakes_field_into_normal_field()
self.rgb_field_painter.draw(self.field_leds)
self.is_there_a_direction_change_in_this_tick = False
time.sleep(0.5)
else:
self.led_matrix_painter.move_Message()
time.sleep(0.02)
def start(self, playername: str = None):
super().start(playername)
self.prepare_for_start()
def stop(self):
self.game_over = True
def is_game_over(self):
return super(Snake_Main, self).is_game_over()
def prepare_for_start(self):
self.field_leds.set_all_pixels_to_black()
self.field_matrix.set_all_pixels_to_black()
self.field_for_snake = []
for i in range(self.field_leds.height):
self.field_for_snake.append([])
for _ in range(self.field_leds.width):
self.field_for_snake[i].append(0)
self.head_x = 5
self.head_y = 20
self.direction = 0
        self.length_of_snake = 3
self.delay = 0.5
self.game_over = False
self.food_is_on_field = False
self.food_x = 0
self.food_y = 0
self.is_there_a_direction_change_in_this_tick = False
self.score = Score()
self.score.points = 3
self.score.draw_score_on_field(self.field_matrix)
self.rgb_field_painter.draw(self.field_leds)
self.led_matrix_painter.draw(self.field_matrix)
| 41.578378 | 120 | 0.602054 |
91c58aa889ea5118cf58f2a53f95f841a66dbf63
| 405 |
py
|
Python
|
3_Functions/defaults.py
|
felixdittrich92/Python3
|
16b767465e4bdf0adc652c195d15384bb9faa4cf
|
[
"MIT"
] | 1 |
2022-03-02T07:16:30.000Z
|
2022-03-02T07:16:30.000Z
|
3_Functions/defaults.py
|
felixdittrich92/Python3
|
16b767465e4bdf0adc652c195d15384bb9faa4cf
|
[
"MIT"
] | null | null | null |
3_Functions/defaults.py
|
felixdittrich92/Python3
|
16b767465e4bdf0adc652c195d15384bb9faa4cf
|
[
"MIT"
] | null | null | null |
# x, y: arguments
x = 2
y = 3
# a, b: parameters
def function(a, b):
print(a, b)
function(x, y)
# default arguments
def function2(a, b=None):
if b:
print(a, b)
else:
print(a)
function2(x)
function2(x, b=y)  # with default parameters, always pass by keyword (b=...) -> more readable
# functions without a return value always return None!
#return_value = function2(x ,b=y)
#print(return_value)
| 17.608696 | 76 | 0.644444 |
53315be361a8c097c81fd165c0a76c88bf0bd91b
| 1,797 |
py
|
Python
|
examples/text_to_sql/RAT-SQL/script/available_gpu.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/text_to_sql/RAT-SQL/script/available_gpu.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/text_to_sql/RAT-SQL/script/available_gpu.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import traceback
import logging
import nvgpu
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s %(filename)s'
' [%(funcName)s:%(lineno)d][%(process)d] %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=None,
filemode='a')
if __name__ == "__main__":
from argparse import ArgumentParser
try:
arg_parser = ArgumentParser(
description="print available_gpu id, using nvgpu")
arg_parser.add_argument("-b",
"--best",
default=None,
type=int,
help="output best N")
args = arg_parser.parse_args()
if args.best is not None:
gpus = sorted(nvgpu.gpu_info(),
key=lambda x: (x['mem_used'], x['index']))
ids = [x['index'] for x in gpus]
print(','.join(ids[:args.best]))
else:
print(','.join(nvgpu.available_gpus()))
except Exception as e:
traceback.print_exc()
exit(-1)
| 35.235294 | 74 | 0.576516 |
f42cf75aa4b2896d8fd88356432a0be891d51aac
| 5,221 |
py
|
Python
|
python/oneflow/compatible/single_client/nn/modules/slice.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1 |
2021-09-13T02:34:53.000Z
|
2021-09-13T02:34:53.000Z
|
python/oneflow/compatible/single_client/nn/modules/slice.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/compatible/single_client/nn/modules/slice.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1 |
2021-01-17T03:34:39.000Z
|
2021-01-17T03:34:39.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Sequence, Tuple
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.nn.module import Module
from oneflow.compatible.single_client.ops.array_ops import (
GetSliceAttrs,
check_slice_tup_list,
)
class Slice(Module):
def __init__(
self, start: Tuple[int, ...], stop: Tuple[int, ...], step: Tuple[int, ...]
) -> None:
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, x):
return flow.F.slice(x, start=self.start, stop=self.stop, step=self.step)
def slice_op(x, slice_tup_list: Sequence[Tuple[int, int, int]]):
"""Extracts a slice from a tensor.
The `slice_tup_list` assigns the slice indices in each dimension, the format is (start, stop, step).
The operator will slice the tensor according to the `slice_tup_list`.
Args:
x: A `Tensor`.
slice_tup_list: A list of slice tuple, indicate each dimension slice (start, stop, step).
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.compatible.single_client.experimental as flow
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.random.randn(3, 6, 9).astype(np.float32))
>>> tup_list = [[None, None, None], [0, 5, 2], [0, 6, 3]]
>>> y = flow.slice(input, slice_tup_list=tup_list)
>>> y.shape
flow.Size([3, 3, 2])
"""
(start, stop, step) = check_slice_tup_list(slice_tup_list, x.shape)
return Slice(start, stop, step)(x)
class SliceUpdate(Module):
def __init__(
self, start: Tuple[int, ...], stop: Tuple[int, ...], step: Tuple[int, ...]
) -> None:
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, x, update):
return flow.F.slice_update(
x, update, start=self.start, stop=self.stop, step=self.step
)
def slice_update_op(x, update, slice_tup_list: Sequence[Tuple[int, int, int]]):
"""Update a slice of tensor `x`. Like `x[start:stop:step] = update`.
Args:
x: A `Tensor`, whose slice will be updated.
update: A `Tensor`, indicate the update content.
slice_tup_list: A list of slice tuple, indicate each dimension slice (start, stop, step).
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.compatible.single_client.experimental as flow
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.array([1, 1, 1, 1, 1]).astype(np.float32))
>>> update = flow.Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> y = flow.slice_update(input, update, slice_tup_list=[[1, 4, 1]])
>>> y.numpy()
array([1., 2., 3., 4., 1.], dtype=float32)
"""
(start, stop, step) = GetSliceAttrs(slice_tup_list, x.shape)
return SliceUpdate(start, stop, step)(x, update)
class LogicalSliceAssign(Module):
def __init__(
self, start: Tuple[int, ...], stop: Tuple[int, ...], step: Tuple[int, ...]
) -> None:
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, x, update):
if update.dtype != x.dtype:
update = update.to(dtype=x.dtype)
return flow.F.logical_slice_assign(
x, update, start=self.start, stop=self.stop, step=self.step
)
def logical_slice_assign_op(x, update, slice_tup_list: Sequence[Tuple[int, int, int]]):
"""Update a slice of tensor `x`(in-place). Like `x[start:stop:step] = update`.
Args:
x: A `Tensor`, whose slice will be updated.
update: A `Tensor`, indicate the update content.
slice_tup_list: A list of slice tuple, indicate each dimension slice (start, stop, step).
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.compatible.single_client.experimental as flow
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.array([1, 1, 1, 1, 1]).astype(np.float32))
>>> update = flow.Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> y = flow.tmp.logical_slice_assign(input, update, slice_tup_list=[[1, 4, 1]])
"""
"[summary]\n\n Returns:\n [type]: [description]\n "
(start, stop, step) = GetSliceAttrs(slice_tup_list, x.shape)
return LogicalSliceAssign(start, stop, step)(x, update)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 33.683871 | 104 | 0.633787 |
be64c57399a0e2f019955ddd3b92ec663af63efa
| 948 |
py
|
Python
|
314/Fcat_314_bfs.py
|
Leetcode-Secret-Society/warehouse
|
40d7969683b1296f361e799cda37f15ceec52af8
|
[
"MIT"
] | null | null | null |
314/Fcat_314_bfs.py
|
Leetcode-Secret-Society/warehouse
|
40d7969683b1296f361e799cda37f15ceec52af8
|
[
"MIT"
] | null | null | null |
314/Fcat_314_bfs.py
|
Leetcode-Secret-Society/warehouse
|
40d7969683b1296f361e799cda37f15ceec52af8
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
from typing import List
from collections import defaultdict, deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def verticalOrder(self, root) -> List[List[int]]:
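        # BFS over the tree while tracking each node's column offset: a
        # left child shifts the column by -1, a right child by +1, so the
        # nodes of one column are collected in top-to-bottom order.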
if not root: return []
position_mapping = defaultdict(list)
bound = [0,0]
bfs = deque([(root, 0)])
while bfs:
node, pos = bfs.popleft()
position_mapping[pos].append(node.val)
if node.left:
bound[0] = min(bound[0], pos - 1)
bfs.append((node.left, pos - 1))
if node.right:
bound[1] = max(bound[1], pos + 1)
bfs.append((node.right, pos + 1))
result = []
for i in range(bound[0], bound[1]+1):
result.append(position_mapping[i])
return result
| 30.580645 | 53 | 0.542194 |
fea3b772b61a812c02365208b2da226f3efb7fdf
| 7,561 |
py
|
Python
|
Crawler/crawl_divi_public.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 15 |
2020-04-24T20:18:11.000Z
|
2022-01-31T21:05:05.000Z
|
Crawler/crawl_divi_public.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 2 |
2021-05-19T07:15:09.000Z
|
2022-03-07T08:29:34.000Z
|
Crawler/crawl_divi_public.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 4 |
2020-04-27T16:20:13.000Z
|
2021-02-23T10:39:42.000Z
|
#!/usr/bin/env python
# coding: utf-8
# author: Max Fischer
import os
import logging
import jsonschema as jsonschema
import psycopg2 as pg
import psycopg2.extensions
import psycopg2.extras
import requests
import json
from datetime import datetime, timezone
# noinspection PyUnresolvedReferences
import loadenv
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
from db_config import SQLALCHEMY_DATABASE_URI, get_connection
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger(__name__)
logger.info('Crawler for divi public data')
STORAGE_PATH = "/var/divi_public/"
URL_API = "https://www.intensivregister.de/api/public/intensivregister"
header_base = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/86.0.4298.4 Safari/537.36 '
}
# start session
session = requests.Session()
session.headers.update(header_base)
# prepare bearer token
headers = {
'Content-Type': 'application/json',
}
session.headers.update(headers)
JSONPAYLOAD = {"criteria":
{"bundesland": None,
"standortId": None,
"standortBezeichnung":"",
"bettenStatus":[],
"bettenKategorie":[],
# only look for beds for adults since otherwise it always uses the best possible status
# i.e., there are beds for kids available but none for adults: overall status is still available
# this request is also the default on the DIVI website
"behandlungsschwerpunktL1":["ERWACHSENE"],
"behandlungsschwerpunktL2":[],
"behandlungsschwerpunktL3":[]
},
"pageNumber":0,
"pageSize": 3000
}
logger.info('Assembling bearer and downloading data...')
# get private api data
x = session.post(URL_API, json=JSONPAYLOAD)
data = x.json()
# infos
count = data['rowCount']
logger.info(f'Downloaded data from {count} hospitals.')
#
# store data
#
if os.name == 'nt': # debug only
STORAGE_PATH = './'
if not os.path.isdir(STORAGE_PATH):
logger.error(f"Storage path {STORAGE_PATH} does not appear to be a valid directory")
exit(1)
current_update = datetime.now(timezone.utc)
filepath = STORAGE_PATH + current_update.strftime("divi-public-%Y-%m-%dT%H-%M-%S") + '.json'
logger.info(f'Storing data on pvc: {filepath}')
with open(filepath, 'w') as outfile:
json.dump(data, outfile)
with open('./divi_public.schema.json') as schema:
logger.info('Validate json data with schema')
jsonschema.validate(data, json.load(schema))
logger.info(f'Loading the data into the database')
# logger.debug(data)
conn, cur = get_connection('crawl_divi_public')
# noinspection PyShadowingNames
def insert_data(data):
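    # Upsert the hospital sites and their latest reports; the INSERT ...
    # ON CONFLICT clauses update existing rows, so repeated crawls stay
    # idempotent.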
query_krankenhaus_standorte = f'INSERT INTO divi_krankenhaus_standorte ' \
f'(id, bezeichnung, strasse, hausnummer, plz, ort, bundesland, iknummer, ' \
f'position) ' \
f'VALUES %s ON CONFLICT ON CONSTRAINT divi_krankenhaus_standorte_pk DO ' \
f'UPDATE SET ' \
f'bezeichnung = EXCLUDED.bezeichnung, ' \
f'strasse = EXCLUDED.strasse, ' \
f'hausnummer = EXCLUDED.hausnummer, ' \
f'plz = EXCLUDED.plz, ' \
f'ort = EXCLUDED.ort, ' \
f'bundesland = EXCLUDED.bundesland, ' \
f'iknummer = EXCLUDED.iknummer, ' \
f'position = EXCLUDED.position;'
entries_kh_standorte = []
for d in data['data']:
e = d['krankenhausStandort']
e['pos_lon'] = e['position']['longitude']
e['pos_lat'] = e['position']['latitude']
entries_kh_standorte.append(e)
# print(entries_kh_standorte)
psycopg2.extras.execute_values(
cur,
query_krankenhaus_standorte,
entries_kh_standorte,
template='(%(id)s, %(bezeichnung)s, %(strasse)s, %(hausnummer)s, %(plz)s, %(ort)s, %(bundesland)s, '
'%(ikNummer)s, ST_SetSRID(ST_POINT(%(pos_lon)s, %(pos_lat)s), 4326))',
page_size=500
)
conn.commit()
query_krankenhaus_meldungen = f'INSERT INTO divi_meldungen ' \
f'(private, meldezeitpunkt, kh_id, meldebereiche, statuseinschaetzunglowcare, ' \
f'statuseinschaetzunghighcare, statuseinschaetzungecmo, behandlungsschwerpunktl1, ' \
f'behandlungsschwerpunktl2, behandlungsschwerpunktl3) ' \
f'VALUES %s ON CONFLICT ON CONSTRAINT divi_meldungen_pk DO ' \
f'UPDATE SET ' \
f'meldebereiche = EXCLUDED.meldebereiche, ' \
f'statuseinschaetzunglowcare = EXCLUDED.statuseinschaetzunglowcare, ' \
f'statuseinschaetzunghighcare = EXCLUDED.statuseinschaetzunghighcare, ' \
f'statuseinschaetzungecmo = EXCLUDED.statuseinschaetzungecmo, ' \
f'behandlungsschwerpunktl1 = EXCLUDED.behandlungsschwerpunktl1, ' \
f'behandlungsschwerpunktl2 = EXCLUDED.behandlungsschwerpunktl2, ' \
f'behandlungsschwerpunktl3 = EXCLUDED.behandlungsschwerpunktl3;'
entries_meldunden = []
for d in data['data']:
e = {'id': d['krankenhausStandort']['id'], 'meldezeitpunkt': d['letzteMeldezeitpunkt'],
'statusEinschaetzungLowcare': d['maxBettenStatusEinschaetzungLowCare'],
'statusEinschaetzungHighcare': d['maxBettenStatusEinschaetzungHighCare'],
'statusEinschaetzungEcmo': d['maxBettenStatusEinschaetzungEcmo'],
'meldebereiche': list(map(lambda x: x['meldebereichBezeichnung'], d['meldebereiche'])),
'behandlungsschwerpunktL1': list(map(lambda x: x['behandlungsschwerpunktL1'], d['meldebereiche'])),
'behandlungsschwerpunktL2': list(map(lambda x: x['behandlungsschwerpunktL2'], d['meldebereiche'])),
'behandlungsschwerpunktL3': list(map(lambda x: x['behandlungsschwerpunktL3'], d['meldebereiche']))}
if d['krankenhausStandort']['id'] == '773017':
print(e)
entries_meldunden.append(e)
psycopg2.extras.execute_values(
cur,
query_krankenhaus_meldungen,
entries_meldunden,
template='(false, %(meldezeitpunkt)s, %(id)s, %(meldebereiche)s, %(statusEinschaetzungLowcare)s, '
'%(statusEinschaetzungHighcare)s, %(statusEinschaetzungEcmo)s, %(behandlungsschwerpunktL1)s, '
'%(behandlungsschwerpunktL2)s, %(behandlungsschwerpunktL3)s)',
page_size=500
)
conn.commit()
try:
# load the newest data into the DB to overwrite the latest data
insert_data(data)
logger.info('Refreshing materialized view')
cur.execute('set time zone \'UTC\'; REFRESH MATERIALIZED VIEW filled_hospital_timeseries_with_fix;')
conn.commit()
cur.close()
conn.close()
logger.info('Done. Exiting...')
except Exception as e:
if cur:
cur.close()
if conn:
conn.close()
raise e
| 39.586387 | 119 | 0.606269 |
4309776c2b978f8f9b79f48f9888d944549162cd
| 490 |
py
|
Python
|
backend/search/client.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
backend/search/client.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
backend/search/client.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
from algoliasearch_django import algolia_engine
def get_client():
return algolia_engine.client
def get_index(index_name="_Product"):
client = get_client()
index = client.init_index(index_name)
return index
def perform_search(query, **kwargs):
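    # An optional `tags` keyword argument is forwarded to Algolia as the
    # tagFilters search parameter.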
index = get_index()
params = {}
if "tags" in kwargs:
tags = kwargs.pop("tags") or None
if tags:
params["tagFilters"] = tags
results = index.search(query, params)
return results
| 21.304348 | 47 | 0.661224 |
4394a86b457382c9c24852be7b2e32365ad50ee3
| 1,941 |
py
|
Python
|
MAIN/STM32F405/V18/peripheral.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405/V18/peripheral.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405/V18/peripheral.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
from pyb import Pin, Timer
import utime
ESP32RESET = Pin('B0', Pin.OUT_PP)
IGNITION = Pin('Y7', Pin.IN, Pin.PULL_DOWN)
startTime = 0
stopTime = 0
ignitionFlag = False
ignitionTriggerValue = 250
def IGNITIONCallback():
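    # Debounce the ignition input: the pin level has to stay stable for
    # ignitionTriggerValue milliseconds before the flag flips.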
global startTime
global stopTime
global ignitionFlag
global ignitionTriggerValue
if IGNITION.value() == 1 and ignitionFlag == False:
stopTime = 0
if startTime == 0:
startTime = utime.ticks_ms()
if (utime.ticks_ms() - startTime) > ignitionTriggerValue:
ignitionFlag = True
elif IGNITION.value() == 0 and ignitionFlag == True:
startTime = 0
if stopTime == 0:
stopTime = utime.ticks_ms()
if (utime.ticks_ms() - stopTime) > ignitionTriggerValue:
ignitionFlag = False
return ignitionFlag
buzPin = Pin('X1')
buzTimer = Timer(2, freq=1000)
buzChannel = buzTimer.channel(1, Timer.PWM, pin=buzPin)
buzzerOrderList = []
def buzzer(value=0):
buzChannel.pulse_width_percent(value)
toggleValue = False
def buzzerToggle(timer):
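    # Timer callback: alternately switch the buzzer PWM on and off, then
    # stop and deinit the timer once the requested beeps have played.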
global toggleValue
global buzzerOrderList
if toggleValue == False:
buzzer(50)
toggleValue = True
else:
buzzer(0)
toggleValue = False
# if toggleValue == True:
# timer.freq(1000 / buzzerOrderList[1])
# else:
# timer.freq(1000 / buzzerOrderList[2])
if buzzerOrderList[0] > 0 and toggleValue == True:
buzzerOrderList[0] -= 1
elif buzzerOrderList[0] < 1 and toggleValue == False:
buzzer(0)
buzzerOrderList = []
timer.callback(None)
timer.deinit()
def buzzerObject(replay=1, onTime=100, offTime=100, priority=1):
global buzzerOrderList
global toggleValue
buzzerOrderList = [replay, onTime, offTime, priority]
toggleValue = False
periodicTimer = Timer(4, freq=1000 / buzzerOrderList[1])
periodicTimer.callback(buzzerToggle)
| 23.670732 | 65 | 0.64915 |
78e0a1d18183e5a58cf26302f999bb3cf45215bc
| 493 |
py
|
Python
|
INBa/2015/Semyenov_A_N/task_3_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Semyenov_A_N/task_3_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Semyenov_A_N/task_3_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
#Task 3. Variant 24
#Write a program that prints the name "Maximilian Goldmann" and asks for his pseudonym. The program must concatenate these two strings and print the result, separating the name and the pseudonym with a dash.
#Semyenov A.N.
#08.03.2016
print("Сегодня речь пойдёт про Максимилиана Гольдмана")
print("Под каким именем мы знаем этого человека?"); print("Ваш ответ: Макс Рейнхардт")
print("Всё верно: Максимилиан Гольдман - Макс Рейнхардт")
input('\nНажмите Enter для выхода')
| 54.777778 | 205 | 0.787018 |
60635645dc074ef9ef9fb9011fa7e487e8c19733
| 697 |
py
|
Python
|
python/primary/模块/demo_list2.py
|
EstherLacan/jiangfw
|
a449b1925742873c76dc1b3284aedb359204bc76
|
[
"Apache-2.0"
] | 1 |
2020-07-29T16:43:46.000Z
|
2020-07-29T16:43:46.000Z
|
python/primary/模块/demo_list2.py
|
EstherLacan/jiangfw
|
a449b1925742873c76dc1b3284aedb359204bc76
|
[
"Apache-2.0"
] | null | null | null |
python/primary/模块/demo_list2.py
|
EstherLacan/jiangfw
|
a449b1925742873c76dc1b3284aedb359204bc76
|
[
"Apache-2.0"
] | null | null | null |
#A Python Program for List and tuple
import sys
#Make a User-Passwd Login
database = [
['admin', 123456],
['guest', 123],
['Tom', 'tom123'],
['Alice', 'alice123']
]
username = raw_input("User name: ")
passwd = raw_input("Password: ")
if [username, passwd] in database:
print "Access granted!"
else:
print "Access denyed!"
sys.exit()
#After login...
x = [3, 5, 2, 8, 9, 10, 56, 99]
print "List X is: "
print x
y = x  # x and y point to the same list object (same reference)
z = x[:]
x.sort()
print "List y is after x.sort(): "
print y
print "List z is after x.sort(): "
print z
y.reverse()
print "List x after y.reverse()"
print x
raw_input("Please Enter for Exit...")
| 20.5 | 37 | 0.583931 |
716a14d0e6073ccb71412d7a8340df8c3f9f7421
| 5,603 |
py
|
Python
|
calibration.py
|
danielvh8/RP-granzyme-B
|
fcb29321f8ad55bfaa56e31f45eeab907e1ed1af
|
[
"MIT"
] | null | null | null |
calibration.py
|
danielvh8/RP-granzyme-B
|
fcb29321f8ad55bfaa56e31f45eeab907e1ed1af
|
[
"MIT"
] | null | null | null |
calibration.py
|
danielvh8/RP-granzyme-B
|
fcb29321f8ad55bfaa56e31f45eeab907e1ed1af
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
from controls import getTestPerformance, File2Matches, IDfromFASTA, getPerformance
from pathlib import Path
from pipeline import PipeLine
import matplotlib.pyplot as plt
from time import sleep
import pickle
from tqdm import tqdm
def ROC(path, parser, lbl, clr):
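    # Sweep the match-threshold parameter, re-run the pipeline for each
    # value and collect one (FPR, sensitivity) point per threshold.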
parser.read(Path('In/parameters.ini'))
#thresholds = [0, 0.5e-10, 1e-10, 0.5e-9, 1e-9, 0.5e-8, 1e-8, 0.5e-7, 1e-7, 0.5e-6, 1e-6, 0.5e-5, 1e-5, 0.5e-4, 1e-4, 0.5e-3, 1e-3, 0.5e-2, 1e-2, 0.5e-1, 1e-1, 1]
thresholds = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
sensitivity = []
FPR = []
GrB = PipeLine.GenerateTarget([Path('In/neo-N-terminal.csv'), Path('In/neo-C-terminal.csv')], "Granzyme B")
for n,i in enumerate(thresholds):
print(f"\nTesting {n+1} of {len(thresholds)}...\n")
parser.set('threshold', 'match', str(i))
with open(Path('In/parameters.ini'), 'w') as f:
parser.write(f)
RunTest(sensitivity, FPR, GrB)
# print(sensitivity)
# print(FPR)
plt.plot(FPR, sensitivity, '--bo', label=lbl, color=clr)
# plt.plot([0,1], [0,1], linestyle="--", color='gray')
# plt.title('ROC-curve threshold parameter')
# plt.ylabel('True Positive Rate')
# plt.xlabel('False Positive Rate')
# plt.savefig(path)
# plt.clf()
def setROCinfo(sensitivity, FPR, performance):
sensitivity.append(performance.sensitivity)
FPR.append(1-performance.specificity)
def PlotROC(FPR, sensitivity, path):
plt.plot(FPR, sensitivity, '--bo')
plt.plot([0,1], [0,1], linestyle="--", color='gray')
plt.title('ROC-curve threshold parameter')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig(path)
plt.clf()
def RunTest(sensitivity, FPR, GrB):
MEROPScontrol = File2Matches(Path("In/Positives from MEROPS (incl Mouse).csv"), IDfromFASTA(Path("In/MEROPS HS proteins.gz")))
MEROPS = PipeLine.Run(Path("In/MEROPS HS proteins.gz"), Path('Out/MEROPS HS.csv'), GrB)
performance = getTestPerformance(MEROPScontrol, MEROPS)
sensitivity.append(performance.sensitivity)
FPR.append(1-performance.specificity)
def PerformanceCalibration(path, control):
parser = ConfigParser()
parser.read(Path("In/parameters.ini"))
thresholds = [0, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1]
#thresholds = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
performance = []
sensitivity = []
FPR = []
GrB = PipeLine.GenerateTarget([Path('In/neo-N-terminal.csv'), Path('In/neo-C-terminal.csv')], "Granzyme B")
for i in tqdm(thresholds):
parser.set('threshold', 'match', str(i))
with open(Path('In/parameters.ini'), 'w') as f:
parser.write(f)
MEROPS = PipeLine.Run(Path("In/MEROPS HS proteins.gz"), Path('Out/MEROPS HS.csv'), GrB)
instance = getPerformance(control, MEROPS)
performance.append(instance)
#setROCinfo(sensitivity, FPR, instance)
#PlotROC(FPR, sensitivity, path)
return performance
def ChangeParam(S, E, parser):
endstring = '1'
for i in range(1,E):
endstring += ',1'
startstring = '1'
for i in range(1, S):
startstring += ',1'
parser.set('target', 'impstart', startstring)
parser.set('target', 'impend', endstring)
parser.set('target', 'lengthstart', str(S))
parser.set('target', 'lengthend', str(E))
with open(Path("In/parameters.ini"), 'w') as f:
parser.write(f)
def ROCbayes():
parser = ConfigParser()
parser.read(Path("In/parameters.ini"))
ChangeParam(4, 1, parser)
ROC(Path(), parser, '4|1', 'blue')
ChangeParam(4, 4, parser)
ROC(Path(), parser, '4|4', 'orange')
ChangeParam(6, 1, parser, )
ROC(Path(), parser, '6|1', 'green')
ChangeParam(6, 4, parser, )
ROC(Path(), parser, '6|4', 'red')
ChangeParam(6, 6, parser, )
ROC(Path(), parser, '6|6', 'black')
plt.plot([0,1], [0,1], linestyle="--", color='gray', label='random')
plt.title('ROC-curve threshold parameter')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend()
plt.savefig(Path("ROC/Sum over Max threshold/Combined.png"))
if __name__ == "__main__":
#ROCbayes()
parser = ConfigParser()
parser.read(Path("In/parameters.ini"))
#ROC(Path("ROC/Sum over Max threshold/ROC S6E4.png"), parser)
performance = []
MEROPScontrol = File2Matches(Path("In/Positives from MEROPS (incl Mouse).csv"), IDfromFASTA(Path("In/MEROPS HS proteins.gz")))
for start in tqdm(range(10, 0, -1)):
out = []
for end in tqdm(range(1, 11)):
endstring = '1'
for i in range(1,end):
endstring += ',1'
startstring = '1'
for i in range(1, start):
startstring += ',1'
parser.set('target', 'impstart', startstring)
parser.set('target', 'impend', endstring)
parser.set('target', 'lengthstart', str(start))
parser.set('target', 'lengthend', str(end))
with open(Path("In/parameters.ini"), 'w') as f:
parser.write(f)
out += PerformanceCalibration(Path(f"ROC/Sum over Max threshold/ROC S{start}E{end}.png"), MEROPScontrol)
performance += out
with open(Path(f'Out/cal_S{start}E1_S{start}E10 bayes score.pickle'), 'wb') as f:
pickle.dump(out, f)
with open(Path('Out/cal_all parameters bayes score.pickle'), 'wb') as f:
pickle.dump(performance, f)
| 38.641379 | 166 | 0.611637 |
71792ae01e9b62dad9e5eeb2f9e01801ac5a285f
| 171 |
py
|
Python
|
Exercicios/ex05v3.py
|
BoltzBit/LP
|
f84d36d1bdee9a20c197cebec2810234c5311fb8
|
[
"MIT"
] | null | null | null |
Exercicios/ex05v3.py
|
BoltzBit/LP
|
f84d36d1bdee9a20c197cebec2810234c5311fb8
|
[
"MIT"
] | null | null | null |
Exercicios/ex05v3.py
|
BoltzBit/LP
|
f84d36d1bdee9a20c197cebec2810234c5311fb8
|
[
"MIT"
] | null | null | null |
nome = input('Enter your name: ')
cpf = input('Enter your CPF: ')
rg = input('Enter your RG: ')
msg = '{0}, your CPF is {1} and your RG is {2}'
print(msg.format(nome, cpf, rg))
| 21.375 | 41 | 0.602339 |
71bc160070e2e3bb0f09b32ba1b1d5dcecacda1f
| 2,378 |
py
|
Python
|
INBa/2015/SOSNOVY_M_S/task_10_26.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/SOSNOVY_M_S/task_10_26.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/SOSNOVY_M_S/task_10_26.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
POINT = 30
ochki = 30
person = {"Strength": "0", "Health": "0", "Wisdom": "0", "Agility": "0"}
points = 0
choice = None
while choice != 0:
    print("""
    0 - Exit
    1 - Add points to an attribute
    2 - Remove points from an attribute
    3 - View attributes
    """)
    choice = int(input("Select a menu item: "))
    if choice == 1:
        print("Please enter an attribute. ", len(person), "attributes:")
        for item in person:
            print(item)
        char = str(input("\n:"))
        char = char.title()
        while char not in person:
            print("There is no such attribute, this is not WoW: ")
            char = str(input("\n:"))
            char = char.title()
        else:
            print("\nEnter the number of points. You have", ochki, "free points")
            points = int(input("\n:"))
            while points > ochki or points < 0:
                print("You cannot assign that many points.", "Available:", ochki, "free points")
                points = int(input("\n:"))
            person[char] = int(person[char]) + points  # accumulate instead of overwriting
            print(points, "points were added to", char)
            ochki -= points
    elif choice == 2:
        print("Please enter the attribute name.", "Changes available for: ")
        for item in person:
            if int(person[item]) > 0:
                print(item)
        char = str(input("\n:"))
        char = char.title()
        while char not in person:
            print("There is no such attribute, this is not WoW: ")
            char = str(input("\n:"))
            char = char.title()
        else:
            print("\nEnter the number of points. Available:", person[char], "points:")
            points = int(input("\n:"))
            while points > int(person[char]) or points < 0:
                print("Cannot remove that many points. Available:", person[char], "points")
                points = int(input("\n:"))
            person[char] = int(person[char]) - points  # subtract instead of overwriting
            print(points, "points were removed")
            ochki += points
    elif choice == 3:
        print("\nYour hero's attributes")
        for item in person:
            print(item, "\t\t", person[item])
    elif choice == 0:
        print("Bye")
    elif choice == 100500:
        ochki += 99999999
        print("You entered a cheat code for 99999999 points")
    else:
        print("There is no such menu item")
| 36.584615 | 112 | 0.549622 |
1c0606e7099727a34a4353c69a714cea307ce1e3
| 167 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/tu.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/tu.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/tu.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
def find_min_max(t):
    maxvalue = t[0]
    minvalue = t[0]
    for i in range(len(t)):
        if t[i] > maxvalue:
            maxvalue = t[i]
        if t[i] < minvalue:
            minvalue = t[i]
    print maxvalue,
    print minvalue,
| 15.181818 | 21 | 0.550898 |
1c59d651b58bb97890a30e93422a86f59f708a63
| 2,618 |
py
|
Python
|
src/bo4e/enum/leistungstyp.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/leistungstyp.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/leistungstyp.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-module-docstring
from bo4e.enum.strenum import StrEnum
# pylint:disable=empty-docstring
# no docstring in official docs as of 2021-12-01
class Leistungstyp(StrEnum):
""" """
    ARBEITSPREIS_WIRKARBEIT = "ARBEITSPREIS_WIRKARBEIT" #: Energy rate for billing the active energy
    LEISTUNGSPREIS_WIRKLEISTUNG = "LEISTUNGSPREIS_WIRKLEISTUNG" #: Demand rate for billing the active power
    ARBEITSPREIS_BLINDARBEIT_IND = (
        "ARBEITSPREIS_BLINDARBEIT_IND" #: Energy rate for billing inductive reactive energy
    )
    ARBEITSPREIS_BLINDARBEIT_KAP = (
        "ARBEITSPREIS_BLINDARBEIT_KAP" #: Energy rate for billing capacitive reactive energy
    )
    GRUNDPREIS = "GRUNDPREIS" #: Base price (per unit of time)
    GRUNDPREIS_ARBEIT = "GRUNDPREIS_ARBEIT" #: Base price charged on the energy (for RLM)
    GRUNDPREIS_LEISTUNG = "GRUNDPREIS_LEISTUNG" #: Base price charged on the power (for RLM)
    MEHRMINDERMENGE = "MEHRMINDERMENGE" #: Over- or under-delivered quantity
    MESSSTELLENBETRIEB = "MESSSTELLENBETRIEB" #: Metering-point operation, price per unit of time
    MESSDIENSTLEISTUNG = "MESSDIENSTLEISTUNG" #: Metering service, price per unit of time
    MESSDIENSTLEISTUNG_INKL_MESSUNG = (
        "MESSDIENSTLEISTUNG_INKL_MESSUNG" #: Metering service including the measurement (from 2017), price per unit of time
    )
    ABRECHNUNG = "ABRECHNUNG" #: Billing, price per unit of time
    KONZESSIONS_ABGABE = "KONZESSIONS_ABGABE" #: Concession fee
    KWK_UMLAGE = "KWK_UMLAGE" #: CHP (KWK) levy
    OFFSHORE_UMLAGE = "OFFSHORE_UMLAGE" #: Offshore liability levy
    ABLAV_UMLAGE = "ABLAV_UMLAGE" #: Levy for interruptible loads (AbLaV)
    SONDERKUNDEN_UMLAGE = "SONDERKUNDEN_UMLAGE" #: Levy under §19 StromNEV
    REGELENERGIE_UMLAGE = "REGELENERGIE_UMLAGE" #: Balancing energy levy
    BILANZIERUNG_UMLAGE = "BILANZIERUNG_UMLAGE" #: Balancing levy
    AUSLESUNG_ZUSAETZLICH = "AUSLESUNG_ZUSAETZLICH" #: Additional meter readout (per event)
    ABLESUNG_ZUSAETZLICH = "ABLESUNG_ZUSAETZLICH" #: Additional meter reading (per event)
    ABRECHNUNG_ZUSAETZLICH = "ABRECHNUNG_ZUSAETZLICH" #: Additional billing (per event)
    SPERRUNG = "SPERRUNG" #: Disconnection of a consumption point
    ENTSPERRUNG = "ENTSPERRUNG" #: Reconnection of a consumption point
    MAHNKOSTEN = "MAHNKOSTEN" #: Dunning costs
    INKASSOKOSTEN = "INKASSOKOSTEN" #: Debt collection costs
    EEG_UMLAGE = "EEG_UMLAGE" #: EEG (renewable energy) levy
    ENERGIESTEUER = "ENERGIESTEUER" #: Electricity or natural gas tax
    NETZPREIS = "NETZPREIS" #: Grid price
    MESSPREIS = "MESSPREIS" #: Metering price
    SONSTIGER_PREIS = "SONSTIGER_PREIS" #: Other price
| 55.702128 | 114 | 0.747899 |
4672b6107575d1fc5b202803509017068f526d19
| 356 |
py
|
Python
|
Data Structures/DataStructures-Problems/Arrays/Micro and Array Update/microandarrayupdate.py
|
Nidita/Data-Structures-Algorithms
|
7b5198c8d37e9a70dd0885c6eef6dddd9d85d74a
|
[
"MIT"
] | 26 |
2019-07-17T11:05:43.000Z
|
2022-02-06T08:31:40.000Z
|
Data Structures/DataStructures-Problems/Arrays/Micro and Array Update/microandarrayupdate.py
|
Nidita/Data-Structures-Algorithms
|
7b5198c8d37e9a70dd0885c6eef6dddd9d85d74a
|
[
"MIT"
] | 7 |
2019-07-16T19:52:25.000Z
|
2022-01-08T08:03:44.000Z
|
Data Structures/DataStructures-Problems/Arrays/Micro and Array Update/microandarrayupdate.py
|
Nidita/Data-Structures-Algorithms
|
7b5198c8d37e9a70dd0885c6eef6dddd9d85d74a
|
[
"MIT"
] | 19 |
2020-01-14T02:44:28.000Z
|
2021-12-27T17:31:59.000Z
|
if __name__=="__main__":
t=int(input())
while(t>0):
(n, k) = map(int, input().split())
li=list(map(int, input().split()[:n]))
        minimum = min(li)  # use the builtin instead of shadowing min
        print(max(k - minimum, 0))
t=t-1
| 23.733333 | 46 | 0.407303 |
31adec41c7ef52b094097933816262e9bf48c6d1
| 1,882 |
py
|
Python
|
0-notes/job-search/Cracking the Coding Interview/C07ObjectOrientedDesign/python/7.10-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C07ObjectOrientedDesign/python/7.10-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C07ObjectOrientedDesign/python/7.10-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
'''
Design Amazon / Flipkart (an online shopping platform)
Beyond the basic functionality (signup, login etc.), interviewers will be
looking for the following:
Discoverability:
How will the buyer discover a product?
How will the search surface results?
Cart & Checkout: Users expect the cart and checkout to behave in a certain way.
How will the design adhere to such known best practices while also introducing
innovative checkout semantics like One-Click-Purchase?
Payment Methods: Users can pay using credit cards, gift cards, etc.
How will the payment method work with the checkout process?
Product Reviews & Ratings: When can a user post a review and a rating?
How are useful reviews tracked and less useful reviews de-prioritized?
'''
# Objects
# Customer
# account, cart, order
# add_item_to_cart(item), remove_item_from_cart(item), place_order(order)
# Account
# username, password, status, name, shipping_address, email, phone, credit_cards
# add_product(product), product_review(review)
# Cart
# items
# add_item(item), remove_item(item), update_item_quantity(item, quantity),
# get_items, checkout
# Item
# item, product_id, quantity, price
# update_quantity(quantity)
# Product
# product_id, name, description, price, category, available_item_count, seller
# ProductCategory
# name, description
# Order
# status (unshipped, pending, shipped, completed, canceled), order_logs,
# order_number, status, order_date
# send_for_shipment, make_payment(payment), add_order_log(order_log)
# Order Log
# order_number, creation_date, status
# Shipping
# shipment_number, shipment_date, estimated_arrival, shipment_method,
# order_details
| 41.822222 | 88 | 0.690223 |
31bcd25aad4c846bd2e353275be40d498f9d9c5e
| 1,833 |
py
|
Python
|
sorting_algorithms.py
|
caul1flower/alg
|
a9eaae99798df24fa611a83e7280c6ae2dde974e
|
[
"MIT"
] | null | null | null |
sorting_algorithms.py
|
caul1flower/alg
|
a9eaae99798df24fa611a83e7280c6ae2dde974e
|
[
"MIT"
] | null | null | null |
sorting_algorithms.py
|
caul1flower/alg
|
a9eaae99798df24fa611a83e7280c6ae2dde974e
|
[
"MIT"
] | null | null | null |
def selection_sort(arr):
comparisons = 1
for i in range(len(arr)):
comparisons += 1
min_idx = i
comparisons += 1
for j in range(i + 1, len(arr)):
comparisons += 2
if arr[min_idx] > arr[j]:
min_idx = j
arr[i], arr[min_idx] = arr[min_idx], arr[i]
return comparisons
def insertion_sort(arr):
comparisons = 1
for i in range(1, len(arr)):
comparisons += 1
key = arr[i]
j = i - 1
comparisons += 1
while j >= 0 and key < arr[j]:
comparisons += 2
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = key
return comparisons
def merge_sort(lst):
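    # Top-down merge sort; the comparison counts of the two recursive
    # calls are added to the merge comparisons of this call.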
comparisons = 0
if len(lst) > 1:
middle = len(lst) // 2
left = lst[:middle]
right = lst[middle:]
        comparisons += merge_sort(left)
        comparisons += merge_sort(right)
i = j = k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
lst[k] = left[i]
i += 1
else:
lst[k] = right[j]
j += 1
k += 1
comparisons += 1
while i < len(left):
lst[k] = left[i]
i += 1
k += 1
while j < len(right):
lst[k] = right[j]
j += 1
k += 1
return comparisons
def shell_sort(lst):
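    # Shell sort using Knuth's gap sequence h = 3*h + 1.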
length = len(lst)
h = 1
comparisons = 0
while (h < (length//3)):
h = 3*h + 1
while (h >= 1):
for i in range(h, length):
for j in range(i, h-1, -h):
comparisons += 1
if (lst[j] < lst[j-h]):
lst[j], lst[j-h] = lst[j-h], lst[j]
else:
break
h = h//3
return comparisons
| 24.118421 | 55 | 0.414075 |
735131118b9ebe08eb5ab32dbba90663b0dde82b
| 1,003 |
py
|
Python
|
Aggregator/agg_parkhaeuser.py
|
cfleschhut/virushack
|
2fe7ded0be8672b066edef7fed52573794db2ba5
|
[
"Apache-2.0"
] | null | null | null |
Aggregator/agg_parkhaeuser.py
|
cfleschhut/virushack
|
2fe7ded0be8672b066edef7fed52573794db2ba5
|
[
"Apache-2.0"
] | null | null | null |
Aggregator/agg_parkhaeuser.py
|
cfleschhut/virushack
|
2fe7ded0be8672b066edef7fed52573794db2ba5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 16:47:26 2020
@author: Peter
"""
from datetime import datetime
import boto3
import json
import csv
def aggregate(date):
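    # Fetch the hourly parking-garage snapshot from S3 and map every
    # district name onto its AGS key via the plz/ort/landkreis CSV.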
s3_client = boto3.client('s3')
response = s3_client.get_object(Bucket='sdd-s3-basebucket', Key='parkhaeuser/{}/{}/{}/{}'.format(
str(date.year).zfill(4), str(date.month).zfill(2), str(date.day).zfill(2), str(date.hour).zfill(2)))
    payload = json.loads(response["Body"].read())
    results = []
    # read the mapping once so that every city can scan the full row list
    with open('zuordnung_plz_ort_landkreis.csv', encoding='utf-8') as f:
        rows = list(csv.DictReader(f))
    for result in payload:
        # each payload entry is expected to carry 'landkreis' and 'Auslastung'
        city_name = result['landkreis']
        ags = None
        for row in rows:
            if row['ort'].startswith(city_name):
                ags = row['ags']
                break
        data = {'landkreis': ags,
                'parkhaeuser_score': result['Auslastung']
                }
        results.append(data)
return results
| 30.393939 | 109 | 0.555334 |
7353ce7d1b8334c9673af5ee74690556543399c0
| 836 |
py
|
Python
|
content/labs/lab8/solutions/exercise_2_sol.py
|
yankeesong/2021-CS109A
|
0fea6b4411092446719d09379c6a12815aa91ab2
|
[
"MIT"
] | 19 |
2021-08-29T21:23:48.000Z
|
2022-03-16T14:38:25.000Z
|
docs/labs/lab8/solutions/exercise_2_sol.py
|
SBalas/2021-CS109A
|
0f57c3d80b7cef99d660f6a77c0166cffc1253e8
|
[
"MIT"
] | null | null | null |
docs/labs/lab8/solutions/exercise_2_sol.py
|
SBalas/2021-CS109A
|
0f57c3d80b7cef99d660f6a77c0166cffc1253e8
|
[
"MIT"
] | 22 |
2021-09-01T13:03:05.000Z
|
2022-03-31T14:34:36.000Z
|
# init exercise 2 solution
# Using an approach similar to what was used in the Iris example
# we can identify appropriate boundaries for our meshgrid by
# referencing the actual wine data
x_1_wine = X_wine_train[predictors[0]]
x_2_wine = X_wine_train[predictors[1]]
x_1_min_wine, x_1_max_wine = x_1_wine.min() - 0.2, x_1_wine.max() + 0.2
x_2_min_wine, x_2_max_wine = x_2_wine.min() - 0.2, x_2_wine.max() + 0.2
# Then we use np.arange to generate our interval arrays
# and np.meshgrid to generate our actual grids
xx_1_wine, xx_2_wine = np.meshgrid(
np.arange(x_1_min_wine, x_1_max_wine, 0.003),
np.arange(x_2_min_wine, x_2_max_wine, 0.003)
)
# Now we have everything we need to generate our plot
plot_wine_2d_boundaries(
X_wine_train,
y_wine_train,
predictors,
model1_wine,
xx_1_wine,
xx_2_wine,
)
| 27.866667 | 71 | 0.742823 |
7d903782bb6421913de305607c7f084a6e880976
| 4,849 |
py
|
Python
|
PSA/psaEE.py
|
SECURED-FP7/secured-psa-nsm
|
20c8f790ebc2d2aa8c33bda1e047f8f29275a0be
|
[
"Apache-2.0"
] | null | null | null |
PSA/psaEE.py
|
SECURED-FP7/secured-psa-nsm
|
20c8f790ebc2d2aa8c33bda1e047f8f29275a0be
|
[
"Apache-2.0"
] | null | null | null |
PSA/psaEE.py
|
SECURED-FP7/secured-psa-nsm
|
20c8f790ebc2d2aa8c33bda1e047f8f29275a0be
|
[
"Apache-2.0"
] | null | null | null |
# -*- Mode:Python;indent-tabs-mode:nil; -*-
#
# File: psaEE.py
# Created: 27/08/2014
# Author: BSC
# Author: jju / VTT Technical Research Centre of Finland Ltd., 2016
#
# Description:
# Web service running on the PSA interacting with the PSC
#
#
import falcon
#import json
import Config
import logging
import subprocess
from execInterface import execInterface
from getConfiguration import getConfiguration
from psaExceptions import psaExceptions
from dumpLogFile import dumpLogFile
import os.path
conf = Config.Configuration()
date_format = "%m/%d/%Y %H:%M:%S"
log_format = "[%(asctime)s.%(msecs)d] [%(module)s] %(message)s"
logging.basicConfig( filename = conf.LOG_FILE,
level = logging.DEBUG,
format = log_format,
datefmt = date_format )
# Enforce logging level even if handlers had already
# been added into the root logger:
logger = logging.getLogger()
logger.setLevel( logging.DEBUG )
#pscAddr = conf.PSC_ADDRESS
#configsPath = conf.PSA_CONFIG_PATH
#psaID = conf.PSA_ID
#confID = conf.CONF_ID
if conf.TEST_MODE:
logging.info( 'Test Mode enabled' )
logging.info( "--------" )
logging.info( "PSA EE init." )
logging.info( "PSA ID: " + str( conf.PSA_ID ) )
logging.info( "PSA NAME: " + str( conf.PSA_NAME ) )
logging.info( "PSA VERSION: " + str( conf.PSA_VERSION ) )
logging.info( "PSA-PSC API version: " + str( conf.PSA_API_VERSION ) )
logging.info( "PSA log location: " + str( conf.PSA_LOG_LOCATION ) )
logging.info( "--------" )
# instantiate class object to manage REST interface to the PSC
execIntf = execInterface( conf.PSA_HOME,
conf.PSA_CONFIG_PATH,
conf.PSA_SCRIPTS_PATH,
conf.PSA_LOG_LOCATION,
conf.PSA_ID,
conf.PSC_ADDRESS,
str(conf.PSA_API_VERSION))
#confHand = getConfiguration(pscAddr, configsPath, confID, psaID)
confHand = None
if not conf.TEST_MODE:
confHand = getConfiguration( conf.PSC_ADDRESS,
conf.PSA_CONFIG_PATH,
conf.PSA_SCRIPTS_PATH,
conf.PSA_ID,
str(conf.PSA_API_VERSION) )
# start the HTTP falcon proxy and adds reachable resources as routes
app = falcon.API()
base = '/' + str( conf.PSA_API_VERSION ) + '/execInterface/'
app.add_route( base + '{command}', execIntf )
dumpLog = dumpLogFile()
#FOR DEBUGGING ONLY, REMOVE IN PRODUCTION
app.add_route( base + 'dump-log-ctrl', dumpLog )
logging.info("execInterface routes added.")
# Inform our PSC that we are up
#TODO
'''
try:
start_res = confHand.send_start_event()
# We don't need to enable anything
#proc = subprocess.Popen(confScript, stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
except psaExceptions as exc:
pass
'''
# Pull configuration and start the PSA.
try:
if not conf.TEST_MODE:
confScript = confHand.pullPSAconf( execIntf )
else: # Do local test setup
# Check that some psaconf file exists
if not os.path.isfile( conf.PSA_CONFIG_PATH + '/psaconf' ):
raise psaExceptions.confRetrievalFailed()
execIntf.callInitScript()
if conf.TEST_MODE_IP != None:
# Only run ip_conf.sh if all the parameters are present
if ( conf.TEST_MODE_DNS == None
or conf.TEST_MODE_NETMASK == None
or conf.TEST_MODE_GATEWAY == None ):
raise psaExceptions.confRetrievalFailed()
logging.info( 'PSA requires IP, configuring...' )
ip = conf.TEST_MODE_IP
dns = conf.TEST_MODE_DNS
netmask = conf.TEST_MODE_NETMASK
gateway = conf.TEST_MODE_GATEWAY
logging.info( 'ip: ' + str( ip ) )
logging.info( 'gateway: ' + str( gateway ) )
logging.info( 'dns: ' + str( dns ) )
logging.info( 'netmask: ' + str( netmask ) )
ret = subprocess.call( [ conf.PSA_SCRIPTS_PATH + 'ip_conf.sh',
ip, gateway, dns, netmask ] )
logging.info( 'Result of setting config: ' + str( ret ) )
else:
logging.info( "PSA doesn't require IP, skipping configuration." )
logging.info('PSA '+ conf.PSA_ID + ' configuration registered' )
execIntf.callStartScript()
except psaExceptions.confRetrievalFailed as e:
    print(e)
logging.info( "PSA start done." )
# http request to ask for the configuration and start the script
'''
try:
confScript = confHand.pullPSAconf()
proc = subprocess.Popen(confScript, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
except psaExceptions as exc:
pass
'''
| 32.763514 | 77 | 0.620953 |
81574b9b3bc094b3a084cb740cf3436263355003
| 8,561 |
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/github/tests/BadAttributes.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/github/tests/BadAttributes.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/github/tests/BadAttributes.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2017 Hugo <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import Framework
import github
# Replay data is forged to simulate bad things returned by Github
class BadAttributes(Framework.TestCase):
def testBadSimpleAttribute(self):
user = self.g.get_user("klmitch")
self.assertEqual(user.created_at, datetime.datetime(2011, 3, 23, 15, 42, 9))
raised = False
try:
user.name
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, 42)
self.assertEqual(e.expected_type, (str, unicode))
self.assertEqual(e.transformation_exception, None)
self.assertTrue(raised)
def testBadAttributeTransformation(self):
user = self.g.get_user("klmitch")
self.assertEqual(user.name, "Kevin L. Mitchell")
raised = False
try:
user.created_at
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, "foobar")
self.assertEqual(e.expected_type, (str, unicode))
self.assertEqual(e.transformation_exception.__class__, ValueError)
self.assertEqual(e.transformation_exception.args, ("time data 'foobar' does not match format '%Y-%m-%dT%H:%M:%SZ'",))
self.assertTrue(raised)
def testBadTransformedAttribute(self):
user = self.g.get_user("klmitch")
self.assertEqual(user.name, "Kevin L. Mitchell")
raised = False
try:
user.updated_at
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, 42)
self.assertEqual(e.expected_type, (str, unicode))
self.assertEqual(e.transformation_exception, None)
self.assertTrue(raised)
def testBadSimpleAttributeInList(self):
hook = self.g.get_hook("activecollab")
self.assertEqual(hook.name, "activecollab")
raised = False
try:
hook.events
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, ["push", 42])
self.assertEqual(e.expected_type, [(str, unicode)])
self.assertEqual(e.transformation_exception, None)
self.assertTrue(raised)
def testBadAttributeInClassAttribute(self):
repo = self.g.get_repo("klmitch/turnstile")
owner = repo.owner
self.assertEqual(owner.id, 686398)
raised = False
try:
owner.avatar_url
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, 42)
self.assertTrue(raised)
def testBadTransformedAttributeInList(self):
commit = self.g.get_repo("klmitch/turnstile").get_commit("38d9082a898d0822b5ccdfd78f3a536e2efa6c26")
raised = False
try:
commit.files
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, [42])
self.assertEqual(e.expected_type, [dict])
self.assertEqual(e.transformation_exception, None)
self.assertTrue(raised)
def testBadTransformedAttributeInDict(self):
gist = self.g.get_gist("6437766")
raised = False
try:
gist.files
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, {"test.py": 42})
self.assertEqual(e.expected_type, {(str, unicode): dict})
self.assertEqual(e.transformation_exception, None)
self.assertTrue(raised)
def testIssue195(self):
hooks = self.g.get_hooks()
# We can loop on all hooks as long as we don't access circleci's events attribute
self.assertListKeyEqual(hooks, lambda h: h.name, [u'activecollab', u'acunote', u'agilebench', u'agilezen', u'amazonsns', u'apiary', u'apoio', u'appharbor', u'apropos', u'asana', u'backlog', u'bamboo', u'basecamp', u'bcx', u'blimp', u'boxcar', u'buddycloud', u'bugherd', u'bugly', u'bugzilla', u'campfire', u'cia', u'circleci', u'codeclimate', u'codeportingcsharp2java', u'codeship', u'coffeedocinfo', u'conductor', u'coop', u'copperegg', u'cube', u'depending', u'deployhq', u'devaria', u'docker', u'ducksboard', u'email', u'firebase', u'fisheye', u'flowdock', u'fogbugz', u'freckle', u'friendfeed', u'gemini', u'gemnasium', u'geocommit', u'getlocalization', u'gitlive', u'grmble', u'grouptalent', u'grove', u'habitualist', u'hakiri', u'hall', u'harvest', u'hipchat', u'hostedgraphite', u'hubcap', u'hubci', u'humbug', u'icescrum', u'irc', u'irker', u'ironmq', u'ironworker', u'jabber', u'jaconda', u'jeapie', u'jenkins', u'jenkinsgit', u'jira', u'jqueryplugins', u'kanbanery', u'kickoff', u'leanto', u'lechat', u'lighthouse', u'lingohub', u'loggly', u'mantisbt', u'masterbranch', u'mqttpub', u'nma', u'nodejitsu', u'notifo', u'ontime', u'pachube', u'packagist', u'phraseapp', u'pivotaltracker', u'planbox', u'planio', u'prowl', u'puppetlinter', u'pushalot', u'pushover', u'pythonpackages', u'railsbp', u'railsbrakeman', u'rally', u'rapidpush', u'rationaljazzhub', u'rationalteamconcert', u'rdocinfo', u'readthedocs', u'redmine', u'rubyforge', u'scrumdo', u'shiningpanda', u'sifter', u'simperium', u'slatebox', u'snowyevening', u'socialcast', u'softlayermessaging', u'sourcemint', u'splendidbacon', u'sprintly', u'sqsqueue', u'stackmob', u'statusnet', u'talker', u'targetprocess', u'tddium', u'teamcity', u'tender', u'tenxer', u'testpilot', u'toggl', u'trac', u'trajectory', u'travis', u'trello', u'twilio', u'twitter', u'unfuddle', u'web', u'weblate', u'webtranslateit', u'yammer', u'youtrack', u'zendesk', u'zohoprojects'])
for hook in hooks:
if hook.name != "circleci":
hook.events
raised = False
for hook in hooks:
if hook.name == "circleci":
try:
hook.events
except github.BadAttributeException, e:
raised = True
self.assertEqual(e.actual_value, [["commit_comment", "create", "delete", "download", "follow", "fork", "fork_apply", "gist", "gollum", "issue_comment", "issues", "member", "public", "pull_request", "pull_request_review_comment", "push", "status", "team_add", "watch"]])
self.assertEqual(e.expected_type, [(str, unicode)])
self.assertEqual(e.transformation_exception, None)
self.assertTrue(raised)
| 56.322368 | 1,932 | 0.579372 |
c4a3d62e2a0153457c02eb939e51f5a90406ad51
| 2,837 |
py
|
Python
|
Termux-Login-master/Termux-Lock.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
Termux-Login-master/Termux-Lock.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
Termux-Login-master/Termux-Lock.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
import stdiomask as sm
import os,sys
# coded by AnonyminHack5
flag = True
endc = '\033[0m'
black = '\033[30m'
red = '\033[31m'
green = '\033[32m'
yellow = '\033[33m'
blue = '\033[34m'
magneto = '\033[36m'
os.system('figlet -c -k -f slant Termux-Lock|lolcat')
print ( magneto +'\n\t\t[ ★ Termux - Lock ★ ]\n',endc)
print ( green +'\t\tcoded by - AnonyminHack5\n',endc)
def main_menu():
dash = '-'
print(blue +'\n'+ dash*15 +'Main-Menu'+ dash*15)
print(yellow +'''
1.Register
2.Login
3.Remove Lock
4.Exit\n''',endc)
print(blue +'\n'+ dash*13 +'Select option'+ dash*13)
def register():
dash = '-'
global usr,pw
print(blue +'\n'+ dash*15 +'Register'+ dash*15)
usr = input(blue +'\nEnter username : ')
pw = input(green +'\nEnter password : ')
rpw = input(green +'\nRetype password : ')
if pw == rpw:
os.chdir('/data/data/com.termux/files/usr/share')
usrpwd = open("usr_nd_pwd.txt",'w')
usrpwd.writelines(usr+'\n')
usrpwd.writelines(pw+'\n')
usrpwd.close()
print(magneto +'\nRegistered Successfully...')
os.chdir('/data/data/com.termux/files/home')
else:
print(red +"Password doesn't match")
print(blue +'\n'+ dash*15 +'Complete'+ dash*15)
def check_usr_pass():
dash = '-'
global flag,usr,pw
print(blue +'\n'+ dash*15 +'Login'+ dash*15)
username = input(yellow + '\n\t[+] Username : ')
password = sm.getpass(prompt=yellow + '\n\t[*] Password : ',mask='*')
print(blue +'\n'+ dash*13 +'Completed'+ dash*13)
usrpwd = open("/data/data/com.termux/files/usr/share/usr_nd_pwd.txt")
lines = usrpwd.readlines()
usrpwd.close()
if(len(lines) >= 2):
usr = lines[0]
pwd = lines[1]
if username+'\n' == usr and password+'\n' == pwd:
print(green + '\n\t\t[★] Welcome to the termux [★]\n',endc)
flag = False
else:
print(red + '\n\t\t[×] Invalid username or password [×]',endc)
else:
print(red +'\n\tYou have removed your lock')
print(blue +'\tso, First register to login')
def remove():
dash = '-'
readFile = open("/data/data/com.termux/files/usr/share/usr_nd_pwd.txt")
lines = readFile.readlines()
readFile.close()
print(blue +'\n'+dash*40)
if(len(lines) >= 2):
        w = open("/data/data/com.termux/files/usr/share/usr_nd_pwd.txt", 'w')  # write back to the same file that was read
w.writelines([item for item in lines[:-2]])
w.close()
print(magneto +'\n\tTermux-Lock disabled successfully...')
else:
print(red +'\n\tYou have already removed your lock')
print(blue +'\tso, First register to login')
print(blue +'\n'+dash*40)
def exit():
    global flag
    print(blue +'\n\tThank you for using my tool...',endc)
    flag = False
if len(sys.argv) >=2:
arg = sys.argv[1]
if arg == '-l':
check_usr_pass()
while flag == True:
menu = {1:register,2:check_usr_pass,3:remove,4:exit}
main_menu()
choice = int(input(magneto +'\nEnter choice : '))
menu[choice]()
avg_line_length: 27.813725 | max_line_length: 72 | alphanum_fraction: 0.627776
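The login path masks the password with stdiomask, while register() echoes it through plain input(). A minimal sketch of masking the registration prompts the same way (a suggested tweak, not part of the original script; the variable names reuse the file's own):

# hypothetical tweak for register(): mask both password prompts like check_usr_pass() does
pw = sm.getpass(prompt=green + '\nEnter password : ', mask='*')
rpw = sm.getpass(prompt=green + '\nRetype password : ', mask='*')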
hexsha: f20d99db7e934d0e282b0c5cdd817e4191209f22 | size: 1,075 | ext: py | lang: Python
path: src/test/summarizeResults.py | repo: visit-dav/vis @ c08bc6e538ecd7d30ddc6399ec3022b9e062127e | licenses: ["BSD-3-Clause"]
stars: 226 (2018-12-29T01:13:49.000Z to 2022-03-30T19:16:31.000Z) | issues: 5,100 (2019-01-14T18:19:25.000Z to 2022-03-31T23:08:36.000Z) | forks: 84 (2019-01-24T17:41:50.000Z to 2022-03-10T10:01:46.000Z)
#!/bin/env python
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: summarizeResults.py
description: prints a summary of test results contained in results.json
author: Kathleen Biagas
date: Tue Feb 11 08:57:54 PST 2014
"""
# ----------------------------------------------------------------------------
# Modifications:
#
# ----------------------------------------------------------------------------
import os
import json
if (os.path.isfile("results.json")):
full = json.load(open("results.json"))
for r in full["results"]:
if "status" in r:
print("%s: %s/%s"%(r["status"],r["category"],r["base"]))
if r["status"] != "succeeded":
for s in r["details"]["sections"]:
for c in s["cases"]:
print(" %s: %s"%(c["status"],c["name"]))
else:
print("results.json does not exist.")
exit()
avg_line_length: 34.677419 | max_line_length: 78 | alphanum_fraction: 0.52093
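The script above relies on only a handful of keys: results[*].status, category, base, and, for failures, details.sections[*].cases[*]. A minimal results.json that exercises both branches might be generated like this (field values are illustrative, not real VisIt output):

import json

# illustrative results.json with one passing and one failing test
sample = {"results": [
    {"status": "succeeded", "category": "plots", "base": "curve"},
    {"status": "failed", "category": "plots", "base": "mesh",
     "details": {"sections": [{"cases": [{"status": "failed", "name": "mesh_0.png"}]}]}},
]}
with open("results.json", "w") as f:
    json.dump(sample, f)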
hexsha: f218761745689b7a14ddf565b2107df04ffa02aa | size: 3,381 | ext: py | lang: Python
path: Python/zzz_training_challenge/Python_Challenge/solutions/ch05_datastructures/solutions/ex06_longest_subsequence.py | repo: Kreijeck/learning @ eaffee08e61f2a34e01eb8f9f04519aac633f48c | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import sys
def find_longest_growing_sequence(values):
longest_subsequence = []
current_subsequence = []
last_value = sys.maxsize
for current_value in values:
if current_value >= last_value:
last_value = current_value
current_subsequence.append(current_value)
else:
            # end of this run; start a new one
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
current_subsequence = []
last_value = current_value
current_subsequence.append(current_value)
    # important: otherwise the last run might never be considered
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
return longest_subsequence
def find_longest_growing_sequence_mini_opt(values):
longest_subsequence = []
current_subsequence = []
last_value = sys.maxsize
for current_value in values:
if current_value < last_value:
            # end of this run; start a new one
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
current_subsequence = []
last_value = current_value
current_subsequence.append(current_value)
    # important: otherwise the last run might never be considered
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
return longest_subsequence
def find_longest_growing_sequence_optimized(values):
    if len(values) == 0:
        return values
    # longest stores (start, end) indices of the best run, end exclusive
    longest = (0, 0)
    start_current = 0
    for end_current in range(1, len(values)):
        if values[end_current] < values[end_current - 1]:
            if end_current - start_current > longest[1] - longest[0]:
                longest = (start_current, end_current)
            start_current = end_current
    # the final run reaches to the end of the list
    if len(values) - start_current > longest[1] - longest[0]:
        longest = (start_current, len(values))
    return values[longest[0]: longest[1]]
def main():
print(find_longest_growing_sequence([7, 2, 7, 1, 2, 5, 7, 1])) # [1, 2, 5, 7]
    print(find_longest_growing_sequence([7, 2, 7, 1, 2, 3, 8, 1, 2, 3, 4, 5]))  # [1, 2, 3, 4, 5]
print(find_longest_growing_sequence([1, 1, 2, 2, 2, 3, 3, 3, 3])) # [1, 1, 2, 2, 2, 3, 3, 3, 3]
print(find_longest_growing_sequence([])) # []
print(find_longest_growing_sequence_mini_opt([7, 2, 7, 1, 2, 5, 7, 1])) # [1, 2, 5, 7]
    print(find_longest_growing_sequence_mini_opt([7, 2, 7, 1, 2, 3, 8, 1, 2, 3, 4, 5]))  # [1, 2, 3, 4, 5]
print(find_longest_growing_sequence_mini_opt([1, 1, 2, 2, 2, 3, 3, 3, 3])) # [1, 1, 2, 2, 2, 3, 3, 3, 3]
print(find_longest_growing_sequence_mini_opt([])) # []
print(find_longest_growing_sequence_optimized([7, 2, 7, 1, 2, 5, 7, 1])) # [1, 2, 5, 7]
    print(find_longest_growing_sequence_optimized([7, 2, 7, 1, 2, 3, 8, 1, 2, 3, 4, 5]))  # [1, 2, 3, 4, 5]
print(find_longest_growing_sequence_optimized([1, 1, 2, 2, 2, 3, 3, 3, 3])) # [1, 1, 2, 2, 2, 3, 3, 3, 3]
print(find_longest_growing_sequence_optimized([])) # []
if __name__ == "__main__":
main()
avg_line_length: 34.5 | max_line_length: 110 | alphanum_fraction: 0.644188
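All three variants should agree on the length of the longest non-decreasing run (the >= vs. > tie-breaking only changes which run is returned, not its length). A quick randomized cross-check, a sketch assuming the three functions above are in scope:

import random

# sketch: cross-check that all three variants report runs of equal length
for _ in range(1000):
    values = [random.randint(0, 9) for _ in range(random.randint(0, 20))]
    a = find_longest_growing_sequence(values)
    b = find_longest_growing_sequence_mini_opt(values)
    c = find_longest_growing_sequence_optimized(values)
    assert len(a) == len(b) == len(c), (values, a, b, c)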
hexsha: 4872f274f6d34c0d04ca5b3c33bd445dfe444e3c | size: 840 | ext: py | lang: Python
path: exercises/de/test_02_05_03.py | repo: tuanducdesign/spacy-course @ f8d092c5fa2997fccb3f367d174dce8667932b3d | licenses: ["MIT"]
stars: 2 (2020-07-07T01:46:37.000Z to 2021-04-20T03:19:43.000Z) | issues: null | forks: null
def test():
    assert (
        "from spacy.tokens import Doc" in __solution__
    ), "Are you importing the Doc class?"
    assert (
        len(words) == 5
    ), "It looks like you have the wrong number of words."
    assert (
        len(spaces) == 5
    ), "It looks like you have the wrong number of spaces."
    assert words == ["Was", ",", "echt", "?", "!"], "Take another look at the words!"
    assert all(
        isinstance(s, bool) for s in spaces
    ), "The values in spaces must be booleans."
    assert [int(s) for s in spaces] == [0, 1, 0, 0, 0], "Are the spaces correct?"
    assert (
        doc.text == "Was, echt?!"
    ), "Are you sure you created the Doc correctly?"
    __msg__.good("Well done! Next, let's create some entities.")
avg_line_length: 42 | max_line_length: 87 | alphanum_fraction: 0.611905
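For context, the exercise this test validates builds a spaCy Doc by hand. A sketch consistent with the asserted values (assuming a blank German pipeline; the real exercise may provide nlp differently):

import spacy
from spacy.tokens import Doc

nlp = spacy.blank("de")
words = ["Was", ",", "echt", "?", "!"]
spaces = [False, True, False, False, False]
doc = Doc(nlp.vocab, words=words, spaces=spaces)
print(doc.text)  # "Was, echt?!"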
hexsha: 6f9beefb73314116a00ca03fca54e682bf861c63 | size: 259 | ext: py | lang: Python
path: pypubsub-demo/sub.py | repo: gregjhansell97/sandbox @ d565da5db2c10af404ce62aa747d5e682bc02a86 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import time
from pubsub import pub
# create a listener
def on_publish(arg1, arg2=None):
print(f"received: {arg1}, arg2={arg2})")
if __name__ == "__main__":
pub.subscribe(on_publish, "rootTopic")
    while True:
        time.sleep(10)
avg_line_length: 17.266667 | max_line_length: 44 | alphanum_fraction: 0.648649
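The subscriber above only listens. A matching publisher is a one-liner; note that pypubsub dispatches in-process, so this only reaches on_publish when it runs in the same interpreter (a sketch, not part of the demo):

from pubsub import pub

# publish keyword arguments matching on_publish(arg1, arg2=None)
pub.sendMessage("rootTopic", arg1="hello", arg2=42)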
hexsha: d22379deb1a05a7e3608c1b862bc46f8c8682e21 | size: 7,358 | ext: py | lang: Python
path: src/main/python/client/clientView.py | repo: mfentler-tgm/sew5-simple-user-database-mfentler-tgm @ 98fba2cdca4243c3b2f25c45ceb043c258a5db53 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'clientView.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore,QtGui,QtWidgets
from PyQt5.QtWidgets import QHeaderView
from functools import partial
class Ui_Client(object):
def setupUi(self, MainWindow, Controller):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1025, 786)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.allStudentsTable = QtWidgets.QTableWidget(self.centralwidget)
self.allStudentsTable.setGeometry(QtCore.QRect(20, 80, 1000, 381))
self.allStudentsTable.setMaximumSize(QtCore.QSize(981, 16777215))
self.allStudentsTable.setAutoFillBackground(False)
self.allStudentsTable.setAlternatingRowColors(True)
self.allStudentsTable.setShowGrid(True)
self.allStudentsTable.setWordWrap(True)
self.allStudentsTable.setCornerButtonEnabled(True)
self.allStudentsTable.setRowCount(0)
self.allStudentsTable.setColumnCount(6)
# Source: https://stackoverflow.com/a/31641703
self.allStudentsTable.horizontalHeader().setStretchLastSection(True)
self.allStudentsTable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.allStudentsTable.setObjectName("allStudentsTable")
item = QtWidgets.QTableWidgetItem()
self.allStudentsTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.allStudentsTable.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.allStudentsTable.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.allStudentsTable.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.allStudentsTable.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.allStudentsTable.setHorizontalHeaderItem(5, item)
self.loadStudent_button = QtWidgets.QPushButton(self.centralwidget)
self.loadStudent_button.setGeometry(QtCore.QRect(20, 490, 93, 28))
self.loadStudent_button.setObjectName("loadStudent_button")
self.loadStudent_button.clicked.connect(Controller.getAllStudents)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(250, 30, 191, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(False)
font.setWeight(50)
self.label.setFont(font)
self.label.setObjectName("label")
self.formLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(20, 520, 411, 181))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.label_2 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.addStudent_username = QtWidgets.QLineEdit(self.formLayoutWidget)
self.addStudent_username.setObjectName("addStudent_username")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.addStudent_username)
self.label_3 = QtWidgets.QLabel(self.formLayoutWidget)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(False)
font.setWeight(50)
self.label_3.setFont(font)
self.label_3.setTextFormat(QtCore.Qt.AutoText)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.label_3)
self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_5.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.addStudent_email = QtWidgets.QLineEdit(self.formLayoutWidget)
self.addStudent_email.setObjectName("addStudent_email")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.addStudent_email)
self.label_4 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.addStudent_picture = QtWidgets.QLineEdit(self.formLayoutWidget)
self.addStudent_picture.setObjectName("addStudent_picture")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.addStudent_picture)
self.addStudent_button = QtWidgets.QPushButton(self.formLayoutWidget)
self.addStudent_button.setMaximumSize(QtCore.QSize(93, 16777215))
self.addStudent_button.setObjectName("addStudent_button")
self.addStudent_button.clicked.connect(partial(Controller.addNewStudent, username=self.addStudent_username, email=self.addStudent_email, picture=self.addStudent_picture))
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.addStudent_button)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.allStudentsTable.setSortingEnabled(False)
item = self.allStudentsTable.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "ID"))
item = self.allStudentsTable.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Username"))
item = self.allStudentsTable.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Email"))
item = self.allStudentsTable.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Picture"))
item = self.allStudentsTable.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "Edit"))
item = self.allStudentsTable.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "Delete"))
self.loadStudent_button.setText(_translate("MainWindow", "Load"))
self.label.setText(_translate("MainWindow", "All Students"))
self.label_2.setText(_translate("MainWindow", "Username"))
self.label_3.setText(_translate("MainWindow", "Add new student"))
self.label_5.setText(_translate("MainWindow", "Email"))
self.label_4.setText(_translate("MainWindow", "Picture"))
self.addStudent_button.setText(_translate("MainWindow", "Add"))
if __name__ == "__main__":
    import sys

    class StubController:
        # minimal stand-in: setupUi expects a controller providing these slots
        def getAllStudents(self, *args, **kwargs):
            pass
        def addNewStudent(self, *args, **kwargs):
            pass

    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_Client()
    ui.setupUi(MainWindow, StubController())
    MainWindow.show()
    sys.exit(app.exec_())
avg_line_length: 52.184397 | max_line_length: 178 | alphanum_fraction: 0.728323
hexsha: 963113e4741d616c6eb5adbc6d3b69e8df7bbaae | size: 1,584 | ext: py | lang: Python
path: interface/getIP/getip.py | repo: hanyanze/FS_AILPB @ 7756551cf926aa6296ec851dd696c97d56e06bca | licenses: ["Apache-2.0"]
stars: 1 (2020-07-16T02:52:47.000Z to 2020-07-16T02:52:47.000Z) | issues: null | forks: null
import socket
import platform
def getip():
    # UDP "connect" sends no packets but makes the OS pick the outbound
    # interface, whose address getsockname() then reports
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('www.baidu.com', 0))
        ip = s.getsockname()[0]
    except OSError:
        ip = "x.x.x.x"
    finally:
        s.close()
    return ip
class GetIP:
def __init__(self):
self.module = 'GetIP'
def Getip(self):
ip_address = "0.0.0.0"
sysstr = platform.system()
if sysstr == "Windows":
ip_address = socket.gethostbyname(socket.gethostname())
print ("Windows @ " + ip_address)
elif sysstr == "Linux":
ip_address = getip()
# print ("Linux @ " + ip_address)
elif sysstr == "Darwin":
ip_address = socket.gethostbyname(socket.gethostname())
print ("Mac @ " + ip_address)
else:
print ("Other System @ some ip")
return ip_address
    def ip2hexstr(self, ip):  # encode the whole string character by character
        hexstr = ""
        len_str = len(ip)
        for i in range(len_str):
            hex_str = str(hex(ord(ip[i])))[2:]
            hexstr = hexstr + hex_str
        # print("hex ip :", hexstr)
        return hexstr
    def ip2hexstr_(self, ip):  # encode the address as four two-digit hex octets
        hexstr = ""
        parting_ip = ip.split(".", -1)
        # print(parting_ip)
        for i in range(4):
            hex_str = str(hex(int(parting_ip[i], 10)))[2:]
            if len(hex_str) < 2:
                hex_str = "0" + hex_str
            hexstr = hexstr + hex_str
        # print("hex ip :", hexstr)
        return hexstr
get = GetIP()
avg_line_length: 26.847458 | max_line_length: 67 | alphanum_fraction: 0.510101
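A quick worked example of the two encoders on the module-level get instance (the Getip() output depends on the host; the hex values are worked out by hand):

print(get.Getip())                    # e.g. "192.168.0.23" on Linux
print(get.ip2hexstr("1.2"))           # "312e32"  ('1'=0x31, '.'=0x2e, '2'=0x32)
print(get.ip2hexstr_("192.168.0.1"))  # "c0a80001" (192=0xc0, 168=0xa8, 0=0x00, 1=0x01)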
hexsha: 73afa277b4b76ad7e00a732d9cbf9c11056f1c1e | size: 457 | ext: py | lang: Python
path: frappe-bench/apps/erpnext/erpnext/patches/v8_4/make_scorecard_records.py | repo: Semicheche/foa_frappe_docker @ a186b65d5e807dd4caf049e8aeb3620a799c1225 | licenses: ["MIT"]
stars: 1 (2021-04-29T14:55:29.000Z to 2021-04-29T14:55:29.000Z) | issues: null | forks: 1 (2021-04-29T14:39:01.000Z to 2021-04-29T14:39:01.000Z)
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.buying.doctype.supplier_scorecard.supplier_scorecard import make_default_records
def execute():
frappe.reload_doc('buying', 'doctype', 'supplier_scorecard_variable')
frappe.reload_doc('buying', 'doctype', 'supplier_scorecard_standing')
make_default_records()
avg_line_length: 41.545455 | max_line_length: 93 | alphanum_fraction: 0.818381
hexsha: 793300304239a7ab0af9211f9ff48aa44dbd550d | size: 707 | ext: py | lang: Python
path: Packs/ApiModules/Scripts/CrowdStrikeApiModule/TestsInput/http_responses.py | repo: diCagri/content @ c532c50b213e6dddb8ae6a378d6d09198e08fc9f | licenses: ["MIT"]
stars: 799 (2016-08-02T06:43:14.000Z to 2022-03-31T11:10:11.000Z) | issues: 9,317 (2016-08-07T19:00:51.000Z to 2022-03-31T21:56:04.000Z) | forks: 1,297 (2016-08-04T13:59:00.000Z to 2022-03-31T23:43:06.000Z)
MULTI_ERRORS_HTTP_RESPONSE = {
"errors": [
{
"code": 403,
"message": "access denied, authorization failed"
},
{
"code": 401,
"message": "test error #1"
},
{
"code": 402,
"message": "test error #2"
}
],
"meta": {
"powered_by": "crowdstrike-api-gateway",
"query_time": 0.000654734,
"trace_id": "39f1573c-7a51-4b1a-abaa-92d29f704afd"
}
}
NO_ERRORS_HTTP_RESPONSE = {
"errors": [],
"meta": {
"powered_by": "crowdstrike-api-gateway",
"query_time": 0.000654734,
"trace_id": "39f1573c-7a51-4b1a-abaa-92d29f704afd"
}
}
avg_line_length: 22.806452 | max_line_length: 60 | alphanum_fraction: 0.486563
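A sketch of how such fixtures might be consumed in a test (the import path and test name are assumptions, not taken from the pack's real tests):

from http_responses import MULTI_ERRORS_HTTP_RESPONSE, NO_ERRORS_HTTP_RESPONSE

def test_error_code_extraction():
    codes = [err["code"] for err in MULTI_ERRORS_HTTP_RESPONSE["errors"]]
    assert codes == [403, 401, 402]
    assert NO_ERRORS_HTTP_RESPONSE["errors"] == []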
hexsha: 541915f012eb1076af797db4c87a7635be85bf27 | size: 6,282 | ext: py | lang: Python
path: Mock/Api.py | repo: jonaes/ds100bot @ 79a646114400c5c8d21ff21376276a8d380b031f | licenses: ["Apache-2.0"]
stars: 15 (2019-12-20T08:24:31.000Z to 2022-03-18T09:24:25.000Z) | issues: 124 (2020-04-20T04:36:49.000Z to 2022-01-29T11:08:09.000Z) | forks: 12 (2020-07-08T22:19:39.000Z to 2022-03-19T09:13:11.000Z)
# pylint: disable=C0114
import tweepy # for exceptions
from Externals import Twitter
from Externals.Measure import Measure
from AnswerMachine.tweet import Tweet
import Persistence.log as log
from .Tweet import User, mocked_source, mocked_tweets
log_ = log.getLogger(__name__)
class Count: # pylint: disable=too-few-public-methods
def __init__(self):
self.correct = 0
self.missed = 0
self.bad_content = 0
class Result: # pylint: disable=too-few-public-methods
def __init__(self):
self.tweet = Count()
self.follow = Count()
class MockApi(Twitter): # pylint: disable=too-many-instance-attributes
def __init__(self, **kwargs):
log_.setLevel(log_.getEffectiveLevel() - 10)
self.running_id = 10001
self.myself = User.theBot
self.mode = kwargs.get('mode', 'testcases')
mocked_t = mocked_tweets()
if self.mode == 'external':
self.mock = mocked_source()
elif self.mode == 'testcases':
self.mock = mocked_t
elif self.mode == 'id':
self.mock = [t for t in mocked_t if t.id in kwargs.get('id_list', [])]
else:
raise ValueError("Invalid mode in {}: {}".format(__name__, self.mode))
self.replies = {}
self.double_replies = []
self.measure = Measure()
self.readonly = True
def get_tweet(self, tweet_id):
for t in self.mock:
if t.id == tweet_id:
return t
        raise tweepy.TweepError("No such tweet available")
def tweet_single(self, text, **kwargs):
super().tweet_single(text, **kwargs)
if 'in_reply_to_status_id' in kwargs:
reply_id = kwargs['in_reply_to_status_id']
# don't track thread answers:
if reply_id != self.running_id:
if reply_id in self.replies:
log_.warning("Tweet %d was replied to twice!", reply_id)
self.double_replies.append(reply_id)
else:
self.replies[reply_id] = text.strip()
self.running_id += 1
return self.running_id
def mentions(self, highest_id):
mention_list = []
for t in self.mock:
for um in t.raw['entities']['user_mentions']:
if um['screen_name'] == self.myself.screen_name:
mention_list.append(t)
break
return mention_list
def timeline(self, highest_id):
return [t for t in self.mock if t.author.follows]
def hashtag(self, tag, highest_id):
return [t for t in self.mock if Tweet(t).has_hashtag(tag)]
def is_followed(self, user):
return user.follows
def follow(self, user):
super().follow(user)
user.follows = True
def defollow(self, user):
super().defollow(user)
user.follows = False
def statistics(self, output='descriptive'):
stat_log = log.getLogger('statistics', '{message}')
res_count = Result()
stat_log.debug(" RESULTS")
for t in self.mock:
was_replied_to = t.id in self.replies
if t.expected_answer is None:
if was_replied_to:
stat_log.error("Tweet %d falsely answered", t.id)
res_count.tweet.missed += 1
else:
res_count.tweet.correct += 1
stat_log.info("Tweet %d correctly unanswered", t.id)
continue
# expected answer is not None:
if not was_replied_to:
res_count.tweet.missed += 1
stat_log.error("Tweet %d falsely unanswered", t.id)
continue
# correctly answered: is it the correct answer?
if t.expected_answer == self.replies[t.id]:
res_count.tweet.correct += 1
stat_log.info("Tweet %d correctly answered with correct answer", t.id)
continue
res_count.tweet.bad_content += 1
stat_log.error("Tweet %d correctly answered, but with wrong answer", t.id)
stat_log.warning(t.expected_answer)
stat_log.warning("↑↑↑↑EXPECTED↑↑↑↑ ↓↓↓↓GOT THIS↓↓↓↓")
stat_log.warning(self.replies[t.id])
for l in User.followers, User.nonfollowers:
for u in l:
if u.follows == u.follow_after:
stat_log.info("User @%s has correct following behaviour %s",
u.screen_name, u.follows)
res_count.follow.correct += 1
else:
stat_log.error("User @%s doesn't follow correctly (should %s, does %s)",
u.screen_name, u.follow_after, u.follows)
res_count.follow.missed += 1
        self.report_statistics(stat_log, output, res_count)
        return res_count.tweet.missed + res_count.tweet.bad_content + res_count.follow.missed
    def report_statistics(self, stat_log, output, res_count): # pylint: disable=R0201
denominator = (res_count.tweet.correct + res_count.tweet.missed +
res_count.tweet.bad_content + res_count.follow.correct +
res_count.follow.missed)
if denominator == 0:
stat_log.log(51, "No testcases found")
elif output == 'descriptive':
stat_log.log(51, "ALL GOOD: %2d", res_count.tweet.correct)
stat_log.log(51, "INCORRECT TEXT: %2d", res_count.tweet.bad_content)
stat_log.log(51, "WRONG ANSWER/NOT ANSWER:%2d", res_count.tweet.missed)
stat_log.log(51, "CORRECT FOLLOWING: %2d", res_count.follow.correct)
stat_log.log(51, "WRONG FOLLOWING: %2d", res_count.follow.missed)
elif output == 'summary':
ratio = (res_count.tweet.correct + res_count.follow.correct) / (0.0 + denominator)
stat_log.log(51, "A %d/%d F %d/%d R %.1f%%",
res_count.tweet.correct,
res_count.tweet.bad_content + res_count.tweet.missed,
res_count.follow.correct, res_count.follow.missed,
100.0 * ratio)
avg_line_length: 42.161074 | max_line_length: 94 | alphanum_fraction: 0.572588
hexsha: 583d81504f5bea2a2272f13856141acbff1b368f | size: 13,371 | ext: py | lang: Python
path: exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cloudengine/ce_mdn_interface.py | repo: tr3ck3r/linklight @ 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_mdn_interface
short_description: Manages MDN configuration on HUAWEI CloudEngine switches.
description:
- Manages MDN configuration on HUAWEI CloudEngine switches.
author: xuxiaowei0512 (@CloudEngine-Ansible)
options:
lldpenable:
description:
- Set global LLDP enable state.
type: str
choices: ['enabled', 'disabled']
mdnstatus:
description:
- Set interface MDN enable state.
type: str
choices: ['rxOnly', 'disabled']
ifname:
description:
- Interface name.
type: str
state:
description:
- Manage the state of the resource.
default: present
type: str
choices: ['present','absent']
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- This module works with connection C(netconf).
'''
EXAMPLES = '''
- name: "Configure global LLDP enable state"
ce_mdn_interface:
lldpenable: enabled
- name: "Configure interface MDN enable state"
ce_mdn_interface:
ifname: 10GE1/0/1
mdnstatus: rxOnly
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"ifname": "10GE1/0/1",
"mdnstatus": "rxOnly",
"state":"present"
}
existing:
    description: k/v pairs of existing global LLDP configuration
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"ifname": "10GE1/0/1",
"mdnstatus": "disabled"
}
end_state:
    description: k/v pairs of global LLDP configuration after module execution
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"ifname": "10GE1/0/1",
"mdnstatus": "rxOnly"
}
updates:
description: command sent to the device
returned: always
type: list
sample: [
"interface 10ge 1/0/1",
"lldp mdn enable",
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import copy
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config, execute_nc_action
CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys>
<lldpEnable></lldpEnable>
</lldpSys>
</lldp>
</filter>
"""
CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys operation="merge">
<lldpEnable>%s</lldpEnable>
</lldpSys>
</lldp>
</config>
"""
CE_NC_GET_INTERFACE_MDNENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mdnInterfaces>
<mdnInterface>
<ifName></ifName>
<mdnStatus></mdnStatus>
</mdnInterface>
</mdnInterfaces>
</lldp>
</filter>
"""
CE_NC_MERGE_INTERFACE_MDNENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mdnInterfaces>
<mdnInterface operation="merge">
<ifName>%s</ifName>
<mdnStatus>%s</mdnStatus>
</mdnInterface>
</mdnInterfaces>
</lldp>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('PORT-GROUP'):
iftype = 'stack-Port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
class Interface_mdn(object):
"""Manage global lldp enable configration"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
        # LLDP global configuration info
self.lldpenable = self.module.params['lldpenable'] or None
self.ifname = self.module.params['ifname']
self.mdnstatus = self.module.params['mdnstatus'] or None
self.state = self.module.params['state']
self.lldp_conf = dict()
self.conf_exsit = False
self.enable_flag = 0
self.check_params()
# state
self.changed = False
self.proposed_changed = dict()
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def check_params(self):
"""Check all input params"""
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
                self.module.fail_json(
                    msg='Error: interface name %s is invalid.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
                    msg='Error: ifname length is between 1 and 63.')
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed"""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def config_interface_mdn(self):
"""Configure lldp enabled and interface mdn enabled parameters"""
if self.state == 'present':
if self.enable_flag == 0 and self.lldpenable == 'enabled':
xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "LLDP_ENABLE_CONFIG")
self.changed = True
elif self.enable_flag == 1 and self.lldpenable == 'disabled':
xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "LLDP_ENABLE_CONFIG")
self.changed = True
elif self.enable_flag == 1 and self.conf_exsit:
xml_str = CE_NC_MERGE_INTERFACE_MDNENABLE_CONFIG % (self.ifname, self.mdnstatus)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "INTERFACE_MDN_ENABLE_CONFIG")
self.changed = True
def show_result(self):
"""Show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def get_interface_mdn_exist_config(self):
"""Get lldp existed configure"""
lldp_config = list()
lldp_dict = dict()
conf_enable_str = CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG
conf_enable_obj = get_nc_config(self.module, conf_enable_str)
xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get lldp enable config info
root_enable = ElementTree.fromstring(xml_enable_str)
ntpsite_enable = root_enable.findall("lldp/lldpSys")
for nexthop_enable in ntpsite_enable:
for ele_enable in nexthop_enable:
if ele_enable.tag in ["lldpEnable"]:
lldp_dict[ele_enable.tag] = ele_enable.text
if self.state == "present":
if lldp_dict['lldpEnable'] == 'enabled':
self.enable_flag = 1
lldp_config.append(dict(lldpenable=lldp_dict['lldpEnable']))
if self.enable_flag == 1:
conf_str = CE_NC_GET_INTERFACE_MDNENABLE_CONFIG
conf_obj = get_nc_config(self.module, conf_str)
if "<data/>" in conf_obj:
return lldp_config
xml_str = conf_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
            # get all interface mdn config info
root = ElementTree.fromstring(xml_str)
ntpsite = root.findall("lldp/mdnInterfaces/mdnInterface")
for nexthop in ntpsite:
for ele in nexthop:
if ele.tag in ["ifName", "mdnStatus"]:
lldp_dict[ele.tag] = ele.text
if self.state == "present":
cur_interface_mdn_cfg = dict(ifname=lldp_dict['ifName'], mdnstatus=lldp_dict['mdnStatus'])
exp_interface_mdn_cfg = dict(ifname=self.ifname, mdnstatus=self.mdnstatus)
if self.ifname == lldp_dict['ifName']:
if cur_interface_mdn_cfg != exp_interface_mdn_cfg:
self.conf_exsit = True
lldp_config.append(dict(ifname=lldp_dict['ifName'], mdnstatus=lldp_dict['mdnStatus']))
return lldp_config
lldp_config.append(dict(ifname=lldp_dict['ifName'], mdnstatus=lldp_dict['mdnStatus']))
return lldp_config
def get_existing(self):
"""Get existing info"""
self.existing = self.get_interface_mdn_exist_config()
def get_proposed(self):
"""Get proposed info"""
if self.lldpenable:
self.proposed = dict(lldpenable=self.lldpenable)
if self.enable_flag == 1:
if self.ifname:
self.proposed = dict(ifname=self.ifname, mdnstatus=self.mdnstatus)
def get_end_state(self):
"""Get end state info"""
self.end_state = self.get_interface_mdn_exist_config()
def get_update_cmd(self):
"""Get updated commands"""
update_list = list()
if self.state == "present":
if self.lldpenable == "enabled":
cli_str = "lldp enable"
update_list.append(cli_str)
if self.ifname:
cli_str = "%s %s" % ("interface", self.ifname)
update_list.append(cli_str)
if self.mdnstatus:
if self.mdnstatus == "rxOnly":
cli_str = "lldp mdn enable"
update_list.append(cli_str)
else:
cli_str = "undo lldp mdn enable"
update_list.append(cli_str)
elif self.lldpenable == "disabled":
cli_str = "undo lldp enable"
update_list.append(cli_str)
else:
if self.enable_flag == 1:
if self.ifname:
cli_str = "%s %s" % ("interface", self.ifname)
update_list.append(cli_str)
if self.mdnstatus:
if self.mdnstatus == "rxOnly":
cli_str = "lldp mdn enable"
update_list.append(cli_str)
else:
cli_str = "undo lldp mdn enable"
update_list.append(cli_str)
self.updates_cmd.append(update_list)
def work(self):
"""Excute task"""
self.check_params()
self.get_existing()
self.get_proposed()
self.config_interface_mdn()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
"""Main function entry"""
argument_spec = dict(
lldpenable=dict(type='str', choices=['enabled', 'disabled']),
mdnstatus=dict(type='str', choices=['rxOnly', 'disabled']),
ifname=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
lldp_obj = Interface_mdn(argument_spec)
lldp_obj.work()
if __name__ == '__main__':
main()
avg_line_length: 33.17866 | max_line_length: 141 | alphanum_fraction: 0.586344
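get_interface_type() only recognizes the prefixes listed in its if-chain; a few illustrative calls (a sketch of its behaviour, not part of the module):

print(get_interface_type("10GE1/0/1"))    # '10ge'
print(get_interface_type("Port-Group1"))  # 'stack-port' (lowered by the final return)
print(get_interface_type("Eth-Trunk1"))   # None, unsupported prefix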
hexsha: 545dbe96f79203847c2c7a187c1a6bed76d8e9fe | size: 671 | ext: py | lang: Python
path: Hackerrank_problems/Reverse Game/solution.py | repo: gbrls/CompetitiveCode @ b6f1b817a655635c3c843d40bd05793406fea9c6 | licenses: ["MIT"]
stars: 165 (2020-10-03T08:01:11.000Z to 2022-03-31T02:42:08.000Z) | issues: 383 (2020-10-03T07:39:11.000Z to 2021-11-20T07:06:35.000Z) | forks: 380 (2020-10-03T08:05:04.000Z to 2022-03-19T06:56:59.000Z)
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # no function stub is provided, so read the number of test cases
    # yourself and loop over them, asking for input each time
t = int(input())
for i in range(t):
nk = input().split()
n = int(nk[0])
k = int(nk[1])
        # the pattern in the sample cases shows the ball at index k ends up
        # at position 2*k + 1 when k < n-k-1,
        # and at position 2*(n-k-1) otherwise
if (k < n-k-1 ):
print ((k*2)+1)
else :
print(2*(n-k-1))
avg_line_length: 23.964286 | max_line_length: 92 | alphanum_fraction: 0.555887
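The closed form can be sanity-checked against a direct simulation of the game, which reverses the suffix starting at each index in turn (a sketch, not part of the submitted solution):

# brute-force the game and compare with the closed-form answer
def brute(n, k):
    balls = list(range(n))
    for i in range(n):
        balls[i:] = balls[i:][::-1]
    return balls.index(k)

for n in range(1, 8):
    for k in range(n):
        expected = (2 * k + 1) if k < n - k - 1 else 2 * (n - k - 1)
        assert brute(n, k) == expected, (n, k)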
hexsha: 4de1e359ad04c75efb70bca64f836467fdb303f9 | size: 2,104 | ext: py | lang: Python
path: evolutionary-algo/Population.py | repo: bjarnege/Portfolioleistung-KI-Entwicklungen @ 27be45e3735421a5dd8441cc76ab69da52678304 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 21:07:55 2021
@author: Bjarne Gerdes
"""
import uuid
def f(x, y):
"""
Function that will be optimized
Parameters
----------
x : float
Value for parameter x.
y : float
        Value for parameter y.
Returns
-------
float
Function value for f(x,y).
"""
if (x**2 + y**2) <= 2:
return (1-x)**2 + 100*((y - x**2)**2)
else:
return 10**8
class PopulationInstance:
def __init__(self, x, y, parent_1_uuid, parent_2_uuid, parent_1_share, parent_2_share):
"""
        Represents an individual.
Parameters
----------
x : float
x-Value of the individual x from f(x,y).
y : float
y-Value of the individual y from f(x,y).
parent_1_uuid : str
Identifier of one parent of the individual.
parent_2_uuid : str
Identifier of the other parent of the individual.
parent_1_share : float
Share of the parent 1 on the x and y values.
parent_2_share : float
Share of the parent 2 on the x and y values.
Returns
-------
None.
"""
# parameters of the instance
self.x = x
self.y = y
# store data about parents
self.parent_1_uuid = parent_1_uuid
self.parent_1_share = parent_1_share
self.parent_2_uuid = parent_2_uuid
self.parent_2_share = parent_2_share
# initialize uuid of the instance
self.uuid = str(uuid.uuid4())
# track if instance is alive
self.is_alive = True
def fitnessFunction(self, f):
"""
Function that defines the fitness of the individual.
Parameters
----------
f : function
Function that will be optimized.
Returns
-------
float
fitness of the individual f(x,y)
"""
self.fitness_value = f(self.x, self.y)
return self.fitness_value
avg_line_length: 22.382979 | max_line_length: 91 | alphanum_fraction: 0.527567
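A minimal sketch of creating an individual and scoring it against f (the parent identifiers are placeholders, since no selection logic lives in this file):

root = PopulationInstance(x=0.5, y=0.5,
                          parent_1_uuid=None, parent_2_uuid=None,
                          parent_1_share=0.5, parent_2_share=0.5)
print(root.fitnessFunction(f))  # (1-0.5)**2 + 100*(0.5 - 0.5**2)**2 = 6.5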
hexsha: 12c5e3c6c68acab1454cacbfb112e33a0f981489 | size: 1,218 | ext: py | lang: Python
path: src/python3_learn_video/exception_try_except.py | repo: HuangHuaBingZiGe/GitHub-Demo @ f3710f73b0828ef500343932d46c61d3b1e04ba9 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
"""
file_name = input('请输入需要打开的文件名:')
f = open(file_name)
print('文件的内容是:')
for each_line in f:
print(each_line)
f.close()
"""
print('----------------------------------------------')
my_list = ['小甲鱼是帅哥']
# print(len(my_list))
assert len(my_list) > 0 # False 抛出异常
"""
try-except语句
try:
检测范围
except Exception[as reason]:
出现异常 (Exception) 后的处理代码
"""
try:
f = open('我为什么是一个文件.txt')
print(f.read())
f.close()
except OSError as reason:
print('文件出错了!\n错误的原因是:' + str(reason))
print('----------------------------------------------')
try:
sum = 1 + '1'
f = open('我为什么是一个文件.txt')
print(f.read())
f.close()
except OSError as reason:
print('文件出错了!\n错误的原因是:' + str(reason))
except TypeError as reason:
print('类型出错了!\n错误的原因是:' + str(reason))
print('----------------------------------------------')
try:
int('abc')
sum = 1 + '1'
f = open('我为什么是一个文件.txt')
print(f.read())
f.close()
except:
print('出错了!')
print('----------------------------------------------')
try:
sum = 1 + '1'
f = open('我为什么是一个文件.txt')
print(f.read())
f.close()
except (OSError, TypeError):
print('出错了!')
print('----------------------------------------------')
avg_line_length: 19.645161 | max_line_length: 55 | alphanum_fraction: 0.472906
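The file stops at bare and multi-exception except clauses; for completeness, a small sketch of the related else/finally forms (same statement family, not in the original tutorial):

try:
    f = open('why_am_I_a_file.txt')
except OSError as reason:
    print('File error!\nReason: ' + str(reason))
else:
    print(f.read())  # runs only when no exception occurred
    f.close()
finally:
    print('this line always runs')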
hexsha: 12fea316285aee290dbd09625583fe2ba1617363 | size: 5,562 | ext: py | lang: Python
path: robot/kuaipan.py | repo: East196/hello-py @ a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21 | licenses: ["Apache-2.0"]
stars: 1 (2017-10-23T14:58:47.000Z to 2017-10-23T14:58:47.000Z) | issues: null | forks: 1 (2018-04-06T07:49:18.000Z to 2018-04-06T07:49:18.000Z)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
import json
import re
import wx
def create(parent):
return Frame1(parent)
[wxID_FRAME1, wxID_FRAME1BUTTON1, wxID_FRAME1PANEL1, wxID_FRAME1STATICTEXT1,
wxID_FRAME1STATICTEXT2, wxID_FRAME1STATICTEXT3, wxID_FRAME1TEXTCTRL1,
wxID_FRAME1TEXTCTRL2,
] = [wx.NewId() for _init_ctrls in range(8)]
class Frame1(wx.Frame):
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Frame.__init__(self, id=wxID_FRAME1, name='', parent=prnt,
pos=wx.Point(529, 321), size=wx.Size(400, 250),
style=wx.SYSTEM_MENU | wx.MINIMIZE_BOX | wx.CLOSE_BOX | wx.CAPTION,
                          title='Kingsoft Kuaipan Auto Check-in V1.0')
self.SetClientSize(wx.Size(392, 216))
self.panel1 = wx.Panel(id=wxID_FRAME1PANEL1, name='panel1', parent=self,
pos=wx.Point(0, 0), size=wx.Size(392, 216),
style=wx.TAB_TRAVERSAL)
self.staticText1 = wx.StaticText(id=wxID_FRAME1STATICTEXT1,
            label='Username:', name='staticText1', parent=self.panel1,
pos=wx.Point(8, 16), size=wx.Size(95, 23), style=0)
self.staticText1.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD,
False, 'Tahoma'))
self.staticText2 = wx.StaticText(id=wxID_FRAME1STATICTEXT2,
            label='Password:', name='staticText2', parent=self.panel1,
pos=wx.Point(8, 56), size=wx.Size(92, 23), style=0)
self.staticText2.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD,
False, 'Tahoma'))
self.textCtrl1 = wx.TextCtrl(id=wxID_FRAME1TEXTCTRL1, name='textCtrl1',
parent=self.panel1, pos=wx.Point(112, 16), size=wx.Size(176, 24),
style=0, value='')
self.textCtrl2 = wx.TextCtrl(id=wxID_FRAME1TEXTCTRL2, name='textCtrl2',
parent=self.panel1, pos=wx.Point(112, 56), size=wx.Size(176, 22),
style=wx.TE_PASSWORD, value='')
        self.button1 = wx.Button(id=wxID_FRAME1BUTTON1, label='Check in',
name='button1', parent=self.panel1, pos=wx.Point(304, 56),
size=wx.Size(75, 24), style=0)
self.button1.Bind(wx.EVT_BUTTON, self.OnButton1Button,
id=wxID_FRAME1BUTTON1)
self.staticText3 = wx.StaticText(id=wxID_FRAME1STATICTEXT3,
            label='Check-in status ......', name='staticText3', parent=self.panel1,
pos=wx.Point(16, 104), size=wx.Size(352, 96), style=0)
self.staticText3.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD,
False, 'Tahoma'))
self.button1.Bind(wx.EVT_BUTTON, self.OnButton1Button,
id=wxID_FRAME1BUTTON1)
cj = http.cookiejar.CookieJar()
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
urllib.request.install_opener(self.opener)
self.opener.addheaders = [('User-agent', 'IE')]
def __init__(self, parent):
self._init_ctrls(parent)
    def login(self, username, password):
        url = 'https://www.kuaipan.cn/index.php?ac=account&op=login'
        # urlencode() returns str; Request needs bytes under Python 3
        data = urllib.parse.urlencode({'username': username, 'userpwd': password}).encode()
        req = urllib.request.Request(url, data)
        try:
            fd = self.opener.open(req)
        except Exception:
            self.staticText3.SetLabel('Network connection error!')
            return False
        if fd.url != "http://www.kuaipan.cn/home.htm":
            self.staticText3.SetLabel("Username and password do not match!")
            return False
        self.staticText3.SetLabel('%s logged in successfully' % username)
        return True
def logout(self):
url = 'http://www.kuaipan.cn/index.php?ac=account&op=logout'
req = urllib.request.Request(url)
fd = self.opener.open(req)
fd.close()
    def sign(self):
        url = 'http://www.kuaipan.cn/index.php?ac=common&op=usersign'
        req = urllib.request.Request(url)
        fd = self.opener.open(req)
        sign_js = json.loads(fd.read())
        # print(sign_js)
        tri = self.staticText3.GetLabel()
        if sign_js['state'] == -102:
            self.staticText3.SetLabel(tri + '\n' + "Already checked in today!")
        elif sign_js['state'] == 1:
            self.staticText3.SetLabel(tri + '\n' + "Check-in successful!\nPoints earned: %d, total points: %d;\nSpace earned: %dM\n" % (
                sign_js['increase'], sign_js['status']['points'], sign_js['rewardsize']))
        else:
            self.staticText3.SetLabel(tri + '\n' + "Check-in failed!")
        fd.close()
def OnButton1Button(self, event):
self.staticText3.SetLabel('')
namew = self.textCtrl1.GetValue()
passw = self.textCtrl2.GetValue()
        if self.login(namew, passw):
self.sign()
self.logout()
# event.Skip()
class App(wx.App):
def OnInit(self):
self.main = create(None)
self.main.Show()
self.SetTopWindow(self.main)
return True
def main():
application = App(0)
application.MainLoop()
if __name__ == '__main__':
main()
avg_line_length: 38.09589 | max_line_length: 102 | alphanum_fraction: 0.556095
hexsha: 97ea3bb3774de114b5fdf8db56f5629bc4bf3d1f | size: 1,066 | ext: py | lang: Python
path: tests/api/test_image.py | repo: DanielGrams/gsevp @ e94034f7b64de76f38754b56455e83092378261f | licenses: ["MIT"]
stars: 1 (2021-06-01T14:49:18.000Z to 2021-06-01T14:49:18.000Z) | issues: 286 (2020-12-04T14:13:00.000Z to 2022-03-09T19:05:16.000Z) | forks: null (forks record points at DanielGrams/gsevpt @ a92f71694388e227e65ed1b24446246ee688d00e)
import pytest
def test_validate_image():
from marshmallow import ValidationError
from project.api.image.schemas import ImagePostRequestSchema
data = {
"copyright_text": "Horst",
}
schema = ImagePostRequestSchema()
with pytest.raises(ValidationError) as e:
schema.load(data)
assert "Either image_url or image_base64 has to be defined." in str(e.value)
def test_post_load_image_data(seeder):
from project.api.image.schemas import ImagePostRequestSchema
data = {
"image_base64": seeder.get_default_image_upload_base64(),
}
item = dict()
schema = ImagePostRequestSchema()
schema.post_load_image_data(item, data)
schema.load(data)
assert item.get("encoding_format") is not None
assert item.get("data") is not None
def test_load_image_data():
from project.api.image.schemas import ImagePostRequestSchema
schema = ImagePostRequestSchema()
encoding_format, data = schema.load_image_data(None, None)
assert encoding_format is None
assert data is None
avg_line_length: 23.688889 | max_line_length: 80 | alphanum_fraction: 0.719512