max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
wagtail/users/models.py | originell/wagtail | 0 | 11600 | import os
import uuid
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
def upload_avatar_to(instance, filename):
filename, ext = os.path.splitext(filename)
return os.path.join(
'avatar_images',
'avatar_{uuid}_{filename}{ext}'.format(
uuid=uuid.uuid4(), filename=filename, ext=ext)
)
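# upload_avatar_to example (illustrative, not part of the original file): an
# upload named "me.png" is stored under a path like
# "avatar_images/avatar_<random-uuid>_me.png", so repeated uploads never collide.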
class UserProfile(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='wagtail_userprofile'
)
submitted_notifications = models.BooleanField(
verbose_name=_('submitted notifications'),
default=True,
help_text=_("Receive notification when a page is submitted for moderation")
)
approved_notifications = models.BooleanField(
verbose_name=_('approved notifications'),
default=True,
help_text=_("Receive notification when your page edit is approved")
)
rejected_notifications = models.BooleanField(
verbose_name=_('rejected notifications'),
default=True,
help_text=_("Receive notification when your page edit is rejected")
)
preferred_language = models.CharField(
verbose_name=_('preferred language'),
max_length=10,
help_text=_("Select language for the admin"),
default=''
)
current_time_zone = models.CharField(
verbose_name=_('current time zone'),
max_length=40,
help_text=_("Select your current time zone"),
default=''
)
avatar = models.ImageField(
verbose_name=_('profile picture'),
upload_to=upload_avatar_to,
blank=True,
)
@classmethod
def get_for_user(cls, user):
return cls.objects.get_or_create(user=user)[0]
def get_preferred_language(self):
return self.preferred_language or settings.LANGUAGE_CODE
def get_current_time_zone(self):
return self.current_time_zone or settings.TIME_ZONE
def __str__(self):
return self.user.get_username()
class Meta:
verbose_name = _('user profile')
verbose_name_plural = _('user profiles')
| 2.109375 | 2 |
python/simulator.py | chongdashu/puzzlescript-analyze | 1 | 11601 | __author__ = '<NAME>, <EMAIL>'
import uinput
class Simulator:
    def __init__(self):
        pass

    def test1(self):
        device = uinput.Device([uinput.KEY_E, uinput.KEY_H, uinput.KEY_L, uinput.KEY_O])
        device.emit_click(uinput.KEY_H)
| 1.546875 | 2 |
PRESUBMIT.py | oneumyvakin/catapult | 0 | 11602 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
import sys
_EXCLUDED_PATHS = (
r'(.*[\\/])?\.git[\\/].*',
r'.+\.png$',
r'.+\.svg$',
r'.+\.skp$',
r'.+\.gypi$',
r'.+\.gyp$',
r'.+\.gn$',
r'.*\.gitignore$',
r'.*codereview.settings$',
r'.*AUTHOR$',
r'^CONTRIBUTORS\.md$',
r'.*LICENSE$',
r'.*OWNERS$',
r'.*README\.md$',
r'^dashboard[\\/]dashboard[\\/]api[\\/]examples[\\/].*.js',
r'^dashboard[\\/]dashboard[\\/]templates[\\/].*',
r'^experimental[\\/]heatmap[\\/].*',
r'^experimental[\\/]trace_on_tap[\\/]third_party[\\/].*',
r'^perf_insights[\\/]test_data[\\/].*',
r'^perf_insights[\\/]third_party[\\/].*',
r'^telemetry[\\/]third_party[\\/].*',
r'^third_party[\\/].*',
r'^tracing[\\/]\.allow-devtools-save$',
r'^tracing[\\/]bower\.json$',
r'^tracing[\\/]\.bowerrc$',
r'^tracing[\\/]tracing_examples[\\/]string_convert\.js$',
r'^tracing[\\/]test_data[\\/].*',
r'^tracing[\\/]third_party[\\/].*',
r'^py_vulcanize[\\/]third_party[\\/].*',
r'^common/py_vulcanize[\\/].*', # TODO(hjd): Remove after fixing long lines.
)
_GITHUB_BUG_ID_RE = re.compile(r'#[1-9]\d*')
_MONORAIL_BUG_ID_RE = re.compile(r'[1-9]\d*')
_MONORAIL_PROJECT_NAMES = frozenset({'chromium', 'v8', 'angleproject'})
def CheckChangeLogBug(input_api, output_api):
if not input_api.change.issue:
# If there is no change issue, there won't be a bug yet. Skip the check.
return []
# Show a presubmit message if there is no Bug line or an empty Bug line.
if not input_api.change.BugsFromDescription():
return [output_api.PresubmitNotifyResult(
'If this change has associated bugs on GitHub or Monorail, add a '
'"Bug: <bug>(, <bug>)*" line to the patch description where <bug> can '
'be one of the following: catapult:#NNNN, ' +
', '.join('%s:NNNNNN' % n for n in _MONORAIL_PROJECT_NAMES) + '.')]
# Check that each bug in the BUG= line has the correct format.
error_messages = []
catapult_bug_provided = False
for index, bug in enumerate(input_api.change.BugsFromDescription()):
# Check if the bug can be split into a repository name and a bug ID (e.g.
# 'catapult:#1234' -> 'catapult' and '#1234').
bug_parts = bug.split(':')
if len(bug_parts) != 2:
error_messages.append('Invalid bug "%s". Bugs should be provided in the '
'"<project-name>:<bug-id>" format.' % bug)
continue
project_name, bug_id = bug_parts
if project_name == 'catapult':
if not _GITHUB_BUG_ID_RE.match(bug_id):
error_messages.append('Invalid bug "%s". Bugs in the Catapult '
'repository should be provided in the '
'"catapult:#NNNN" format.' % bug)
catapult_bug_provided = True
elif project_name in _MONORAIL_PROJECT_NAMES:
if not _MONORAIL_BUG_ID_RE.match(bug_id):
error_messages.append('Invalid bug "%s". Bugs in the Monorail %s '
'project should be provided in the '
'"%s:NNNNNN" format.' % (bug, project_name,
project_name))
else:
error_messages.append('Invalid bug "%s". Unknown repository "%s".' % (
bug, project_name))
return map(output_api.PresubmitError, error_messages)
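# Illustrative sketch (not part of the original presubmit script): how the
# regexes above classify a few hypothetical bug references. The function name
# and sample strings are made up for this example; it is never called by the
# presubmit hooks. Only 'catapult:#1234' and 'chromium:567890' would pass.
def _ExampleBugClassification():
  samples = ['catapult:#1234', 'chromium:567890', 'catapult:1234', 'foo:1']
  results = {}
  for sample in samples:
    project_name, _, bug_id = sample.partition(':')
    if project_name == 'catapult':
      results[sample] = bool(_GITHUB_BUG_ID_RE.match(bug_id))
    elif project_name in _MONORAIL_PROJECT_NAMES:
      results[sample] = bool(_MONORAIL_BUG_ID_RE.match(bug_id))
    else:
      results[sample] = False
  return results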
def CheckChange(input_api, output_api):
results = []
try:
sys.path += [input_api.PresubmitLocalPath()]
from catapult_build import bin_checks
from catapult_build import html_checks
from catapult_build import js_checks
from catapult_build import repo_checks
results += input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
results += input_api.RunTests(
input_api.canned_checks.CheckVPythonSpec(input_api, output_api))
results += CheckChangeLogBug(input_api, output_api)
results += js_checks.RunChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
results += html_checks.RunChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
results += repo_checks.RunChecks(input_api, output_api)
results += bin_checks.RunChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
finally:
sys.path.remove(input_api.PresubmitLocalPath())
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| 1.453125 | 1 |
exam_at_home/2/boolean_expression.py | jamie-jjd/110_spring_IDS | 2 | 11603 | <gh_stars>1-10
# author: jamie
# email: <EMAIL>
def Priority (c):
if c == '&': return 3
elif c == '|': return 2
elif c == '^': return 1
elif c == '(': return 0
def InfixToPostfix (infix, postfix):
stack = []
for c in infix:
if c == '(':
stack.append('(')
elif c == ')':
while stack[-1] != '(':
postfix.append(stack.pop())
stack.pop()
elif c == '&' or c == '|' or c == '^':
while len(stack) and Priority(c) <= Priority(stack[-1]):
postfix.append(stack.pop())
stack.append(c)
else:
postfix.append(c)
while len(stack):
postfix.append(stack.pop())
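# Worked example for InfixToPostfix (illustrative, not in the original): for
# infix "A&(B|C)" the loop above emits 'A', pushes '&' and '(', emits 'B',
# pushes '|', emits 'C', then ')' pops '|' back off the stack past '(', and the
# final drain pops '&', yielding the postfix sequence ['A', 'B', 'C', '|', '&'].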
def Evaluate (postfix, value):
stack = []
for c in postfix:
if c == '&' or c == '|' or c == '^':
rhs = stack.pop()
lhs = stack.pop()
if c == '&': stack.append(lhs & rhs)
elif c == '|': stack.append(lhs | rhs)
elif c == '^': stack.append(lhs ^ rhs)
elif c == '1' or c == '0':
stack.append(ord(c) - ord('0'))
else:
stack.append(value[ord(c) - ord('A')])
return stack.pop()
if __name__ == "__main__":
infix = input()
T = int(input())
for _ in range(T):
value = list(map(int, input().split()))
postfix = []
InfixToPostfix(infix, postfix)
print(Evaluate(postfix, value)) | 3.8125 | 4 |
workoutlog/workout/admin.py | michaelrodgers/itc172_final | 0 | 11604 | <reponame>michaelrodgers/itc172_final
from django.contrib import admin
from .models import Target, Exercise, Workout
# Register your models here.
admin.site.register(Target)
admin.site.register(Exercise)
admin.site.register(Workout)
| 1.46875 | 1 |
ciclo.py | BeltetonJosue96/Ejercicio3Python | 0 | 11605 | <reponame>BeltetonJosue96/Ejercicio3Python
class Ciclo:
def __init__(self):
self.cicloNew = ()
self.respu = ()
self.a = ()
self.b = ()
self.c = ()
def nuevoCiclo(self):
cicloNew = []
print(" ")
print("Formulario de ingreso de ciclos")
print("-----------------------------------")
        respu = input("¿Quiere registrar un ciclo? (S/F): ")
while respu == "S" or respu == "s":
print("Ingrese el numero de semestre (1 o 2): ")
a = int(input())
print("Ingrese año: ")
b = int(input())
cicloNew.append((a, b))
            respu = input("¿Quiere registrar otro ciclo? (S/F): ")
print(" ")
print("Datos guardados")
print("-----------------------------------")
for x in range(len(cicloNew)):
print("[Numero de semestre: ", cicloNew[x][0], "] [año: ", cicloNew[x][1],"]")
print(" ")
print(" ")
print(" ")
return None
Ciclo().nuevoCiclo() | 4.1875 | 4 |
timeglass.py | mountwebs/timeglass | 110 | 11606 | <filename>timeglass.py
import rumps
import sys
import icon_manager
from datetime import timedelta
import timekeeper
import os
# pyinstaller --onefile -w --add-data "Icons/:Icons" --icon="Icons/timeglass.png" --clean timeglass.spec
# rumps.debug_mode(True)
class TimerApp(rumps.App):
def __init__(self, initial_seconds):
super(TimerApp, self).__init__("")
self.mode = "hourglass"
self.timekeeper = timekeeper.Timer(initial_seconds)
self.template = True
self.im = icon_manager.Icon_manager(initial_seconds)
self.change_icon()
self.remaining_sec = rumps.MenuItem(self.timekeeper.get_remaining_string())
self.menu = [self.remaining_sec]
self.next_icon_change = self.im.icon_interval
self.rumps_timer = rumps.Timer(self.tick,0.5)
self.rumps_timer.callback(self.tick)
self.invert_counter = 0
self.notified = False
self.sound = True
def change_icon(self):
print("frame:", self.im.icon_counter)
self.icon = self.im.get_icon_path()
def change_remaining(self):
self.remaining_sec.title = self.timekeeper.get_remaining_string()
def tick(self, _):
if self.timekeeper.tick():
self.notDone = True
self.invert_counter = 0
self.change_remaining()
if self.timekeeper.elapsed >= self.next_icon_change:
self.im.icon_counter = int(self.timekeeper.elapsed/self.im.icon_interval) + 1 #1-89
self.change_icon()
self.next_icon_change += self.im.icon_interval
if self.timekeeper.done:
self.im.active = False
self.change_icon()
if not self.notified:
self.notify()
self.notified = True
if self.notDone:
self.icon = self.im.invert()
self.invert_counter += 1
if self.invert_counter > 5:
self.notDone = False
self.rumps_timer.stop()
self.reset()
def notify(self):
title = "Time is up!"
text = ""
sound = "Glass"
try:
if self.sound:
os.system("""osascript -e 'display notification "{}" with title "{}" sound name "{}"'""".format(text, title, sound))
else:
os.system("""osascript -e 'display notification "{}" with title "{}"'""".format(text, title, sound))
except:
print("Could not send notification")
@rumps.clicked("Start", key="s")
def pause(self, sender):
if sender.title == "Pause":
self.timekeeper.pause_timer()
self.rumps_timer.stop()
sender.title = "Start"
elif sender.title == "Start":
self.timekeeper.start()
self.im.active = True
self.change_icon()
self.rumps_timer.start()
sender.title = "Pause"
@rumps.clicked("Reset", key="r")
def reset_button(self, sender):
self.reset()
self.menu["Start"].title = "Start"
def reset(self):
self.timekeeper.reset()
self.rumps_timer.stop()
self.im.active = False
self.im.reset()
self.change_icon()
self.change_remaining()
self.next_icon_change = self.im.icon_interval
self.menu["Start"].title = "Start"
self.notified = False
def string_to_sec(self, text):
nums = text.split(":")
nums.reverse()
seconds = 0
for i,n in enumerate(nums):
if i == 0:
seconds += int(n)
else:
seconds += (60**i) * int(n)
                print((60 ** i) * int(n))
return seconds
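        # Worked example (illustrative): "1:02:30" reverses to ['30', '02', '1'],
        # so the loop above accumulates 30 + 60*2 + 3600*1 = 3750 seconds.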
def validate_input(self, text):
texts = text.split(":")
if len(texts)>3: return False
for s in texts:
try:
int(s)
except:
return False
return True
@rumps.clicked("Set time", key="t")
def set_time(self, _):
self.timekeeper.pause_timer()
response = rumps.Window("Enter time: (hours:minutes:seconds)").run()
if response.clicked:
if not self.validate_input(response.text):
skip = True
rumps.alert("Does not compute! Please try again.")
else:
seconds = self.string_to_sec(response.text)
print(seconds)
skip = False
if not skip:
self.rumps_timer.stop()
self.timekeeper.set_time(seconds)
self.im.set_icon_interval(seconds)
self.im.reset()
self.im.active = False
self.next_icon_change = self.im.icon_interval
self.change_icon()
self.change_remaining()
self.menu["Start"].title = "Start"
if __name__ == "__main__":
    default_seconds = 60 * 60
    TimerApp(default_seconds).run()
| 2.421875 | 2 |
example_bot/bot.py | JakeCover/Flare-DiscordPy | 1 | 11607 | import os
from discord.ext.commands import Bot
from Flare import Flare
bot = Bot("~~")
bot.add_cog(Flare(bot))
@bot.command("ping")
async def ping_pong(ctx):
ctx.send("pong")
bot.run(os.environ.get("BOT_TOKEN"))
| 2.3125 | 2 |
utils/logger.py | huangxd-/BTC-ISMIR19 | 82 | 11608 | <reponame>huangxd-/BTC-ISMIR19
import logging
import os
import sys
import time
project_name = os.getcwd().split('/')[-1]
_logger = logging.getLogger(project_name)
_logger.addHandler(logging.StreamHandler())
def _log_prefix():
# Returns (filename, line number) for the stack frame.
def _get_file_line():
# pylint: disable=protected-access
# noinspection PyProtectedMember
f = sys._getframe()
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return code.co_filename, f.f_lineno
f = f.f_back
return '<unknown>', 0
# current time
now = time.time()
now_tuple = time.localtime(now)
now_millisecond = int(1e3 * (now % 1.0))
# current filename and line
filename, line = _get_file_line()
basename = os.path.basename(filename)
s = '%02d-%02d %02d:%02d:%02d.%03d %s:%d] ' % (
now_tuple[1], # month
now_tuple[2], # day
now_tuple[3], # hour
now_tuple[4], # min
now_tuple[5], # sec
now_millisecond,
basename,
line)
return s
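# _log_prefix example (illustrative): a call from line 42 of a hypothetical
# train.py at 09:30:05.123 on June 3rd would yield a prefix like
# "06-03 09:30:05.123 train.py:42] ".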
def logging_verbosity(verbosity=0):
_logger.setLevel(verbosity)
def debug(msg, *args, **kwargs):
_logger.debug('D ' + project_name + ' ' + _log_prefix() + msg, *args, **kwargs)
def info(msg, *args, **kwargs):
_logger.info('I ' + project_name + ' ' + _log_prefix() + msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
_logger.warning('W ' + project_name + ' ' + _log_prefix() + msg, *args, **kwargs)
def error(msg, *args, **kwargs):
_logger.error('E ' + project_name + ' ' + _log_prefix() + msg, *args, **kwargs)
def fatal(msg, *args, **kwargs):
_logger.fatal('F ' + project_name + ' ' + _log_prefix() + msg, *args, **kwargs)
| 2.21875 | 2 |
setup.py | VNOpenAI/OpenControl | 5 | 11609 | import setuptools
ver = {}
with open('OpenControl/_version.py') as fd:
exec(fd.read(), ver)
version = ver.get('__version__')
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="OpenControl",
version=version,
author="VNOpenAI",
author_email="<EMAIL>",
description="A python control systems package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://opencontrol.readthedocs.io/en/latest/",
project_urls={
"Bug Tracker": "https://github.com/VNOpenAI/OpenControl/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.7",
) | 1.390625 | 1 |
ismore/plants.py | DerekYJC/bmi_python | 0 | 11610 | '''See the shared Google Drive documentation for an inheritance diagram that
shows the relationships between the classes defined in this file.
'''
import numpy as np
import socket
import time
from riglib import source
from ismore import settings, udp_feedback_client
import ismore_bmi_lib
from utils.constants import *
#import armassist
#import rehand
from riglib.filter import Filter
from riglib.plants import Plant
import os
class BasePlantUDP(Plant):
'''
Common UDP interface for the ArmAssist/ReHand
'''
debug = 0
sensor_data_timeout = 1 # seconds. if this number of seconds has passed since sensor data was received, velocity commands will not be sent
lpf_vel = 0
# define in subclasses!
ssm_cls = None
addr = None
feedback_data_cls = None
data_source_name = None
n_dof = None
blocking_joints = None
safety_grid = None
feedback_str = ''
def __init__(self, *args, **kwargs):
self.source = source.DataSource(self.feedback_data_cls, bufferlen=5, name=self.data_source_name)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # used only for sending
ssm = self.ssm_cls()
self.pos_state_names = [s.name for s in ssm.states if s.order == 0]
self.vel_state_names = [s.name for s in ssm.states if s.order == 1]
self.aa_xy_ix = [i for i, j in enumerate(ssm.states) if j.name in ['aa_px', 'aa_py']]
self.aa_psi_ix = [i for i, j in enumerate(ssm.states) if j.name == 'aa_ppsi']
self.rh_pron_ix = [i for i, j in enumerate(ssm.states) if j.name == 'rh_pprono']
self.rh_pfings = [(i, j.name) for i, j in enumerate(ssm.states) if j.name in ['rh_pthumb', 'rh_pindex', 'rh_pfing3']]
self.drive_velocity_raw = np.zeros((len(self.vel_state_names),))
self.drive_velocity_raw_fb_gain = np.zeros((len(self.vel_state_names),))
self.drive_velocity_sent = np.zeros((len(self.vel_state_names),))
self.drive_velocity_sent_pre_safety = np.zeros((len(self.vel_state_names),))
self.pre_drive_state = np.zeros((len(self.vel_state_names), ))
# low-pass filters to smooth out command velocities
# from scipy.signal import butter
# b, a = butter(5, 0.1) # fifth order, 2 Hz bandpass (assuming 10 Hz update rate)
#omega, H = signal.freqz(b, a)
#plt.figure()
#plt.plot(omega/np.pi, np.abs(H))
# self.vel_command_lpfs = [None] * self.n_dof
# for k in range(self.n_dof):
# self.vel_command_lpfs[k] = Filter(b=b, a=a)
# self.last_sent_vel = np.ones(self.n_dof) * np.nan
# calculate coefficients for a 4th-order Butterworth LPF at 1.5 Hz for kinematic data received from the exo
# fs_synch = 20 #Frequency at which emg and kin data are synchronized
# nyq = 0.5 * fs_synch
# cuttoff_freq = 1.5 / nyq
# bpf_kin_coeffs = butter(4, cuttoff_freq, btype='low')
# self.pos_filt = [None] * self.n_dof
# for k in range(self.n_dof):
# self.pos_filt[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1])
def init(self):
from riglib import sink
sink.sinks.register(self.source)
def start(self):
# only start this DataSource after it has been registered with
# the SinkManager singleton (sink.sinks) in the call to init()
self.source.start()
self.ts_start_data = time.time()
def stop(self):
# send a zero-velocity command
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(np.zeros(self.n_dof))))
self.source.stop()
self.feedback_file.close()
def last_data_ts_arrival(self):
return self.source.read(n_pts=1)['ts_arrival'][0]
def _send_command(self, command):
self.sock.sendto(command, self.addr)
def pack_vel(self, vel):
format_str = "%f " * self.n_dof
return format_str % tuple(vel)
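        # Example (illustrative): for a 3-DOF plant, pack_vel([1.0, 2.0, 0.5])
        # returns "1.000000 2.000000 0.500000 " (note the trailing space).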
def send_vel(self, vel):
assert len(vel) == self.n_dof
vel = vel.copy()
vel *= self.vel_gain # change the units of the velocity, if necessary
self.last_sent_vel = vel
        #command_vel is already filtered at the task level, no need to filter it again.
#self.last_sent_vel = filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel()
if all(v <= 0.00000001 for v in abs(self.last_sent_vel)):
print ('last sent vel')
print (self.last_sent_vel)
if (self.last_data_ts_arrival() == 0) or ((self.last_data_ts_arrival() - time.time()) > self.sensor_data_timeout):
print ("sensor data not received for %s recently enough, not sending velocity command!" % self.plant_type)
return
# squash any velocities which would take joints outside of the rectangular bounding box
current_pos = self.get_pos() * self.vel_gain
projected_pos = current_pos + vel * 0.1
max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0))
min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0))
vel[max_reached] = 0
vel[min_reached] = 0
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
# set max speed limts
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
#if we wanna define some limit values for the rehand use the filt_vel. Otherwise use vel
#self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(filt_vel)))
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
# set max speed limts
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
if self.debug:
print ("input vel")
print (vel)
print ("vel sent to %s" % self.plant_type)
print (vel)
print ("current_pos")
print (current_pos)
print ("projected_pos")
print (projected_pos)
print ("actual velocity")
print (self.get_vel())
if self.lpf_vel:
# squash any velocities which would take joints outside of the rectangular bounding box
current_pos = self.get_pos() * self.vel_gain
projected_pos = current_pos + vel * (1.0/20)
max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0))
min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0))
vel[max_reached] = 0
vel[min_reached] = 0
# set max speed limts
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
            if faster_than_max_speed.size > 0:
print ('faster_than_max_speed')
print (faster_than_max_speed)
if self.debug:
print ("input vel")
print (vel)
print ("vel sent to %s" % self.plant_type)
print (vel)
#print "current_pos"
#print current_pos
#print "projected_pos"
#print projected_pos
#print "actual velocity"
#print self.get_vel()
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
else:
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
# def get_pos(self):
# # udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
# return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
def drive(self, decoder):
vel = decoder['qdot']
vel_bl = vel.copy()
feedback_str = ''
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
if self.safety_grid is not None:
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = decoder['q'] + 0.1*vel_bl
#Make sure predicted AA PX, AA PY within bounds:
xy_change = True
if len(self.aa_xy_ix) > 0:
if self.safety_grid.is_valid_pos(pos_pred[self.aa_xy_ix]) is False:
#If not, make their velocity zero:
vel_bl[self.aa_xy_ix] = 0
xy_change = False
feedback_str = feedback_str+ ' stopping xy from moving'
else:
xy_change = False
# Make sure AA Psi within bounds:
if len(self.aa_psi_ix) > 0:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred[self.aa_xy_ix])
# If x/y not ok:
else:
mn, mx = self.safety_grid.get_minmax_psi(decoder['q'][self.aa_xy_ix])
# Set psi velocity :
if np.logical_and(pos_pred[self.aa_psi_ix] >= mn, pos_pred[self.aa_psi_ix] <= mx):
pass
else:
vel_bl[self.aa_psi_ix] = 0
feedback_str = feedback_str+ 'stopping psi'
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_pron_ix) > 0:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred[self.aa_xy_ix])
# If x/y not ok or not moving bc not part of state pace :
else:
if len(self.aa_xy_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(decoder['q'][self.aa_xy_ix])
else:
mn, mx = self.safety_grid.get_minmax_prono(settings.starting_pos['aa_px'], settings.starting_pos['aa_py'])
# Set prono velocity :
if np.logical_and(pos_pred[self.rh_pron_ix] >= mn, pos_pred[self.rh_pron_ix] <= mx):
pass
else:
vel_bl[self.rh_pron_ix] = 0
feedback_str = feedback_str+ 'stopping prono'
# Assure RH fingers are within range:
if len(self.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred[ix] >= mn, pos_pred[ix] <= mx):
pass
else:
vel_bl[ix] = 0
feedback_str = feedback_str+ 'stopping rh fings'
self.feedback_str = feedback_str
self.drive_velocity = vel_bl
self.send_vel(vel_bl)
decoder['q'] = self.get_pos()
def write_feedback(self):
pos_vel = [str(i) for i in np.hstack(( self.get_pos(), self.get_vel() )) ]
#self.feedback_file.write(','.join(pos_vel)+'\n')
if self.feedback_str != '':
self.feedback_file.write(self.feedback_str+ time.ctime() + '\n')
class ArmAssistPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ArmAssist.
'''
ssm_cls = ismore_bmi_lib.StateSpaceArmAssist
addr = settings.ARMASSIST_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ArmAssistData
data_source_name = 'armassist'
n_dof = 3
plant_type = 'ArmAssist'
vel_gain = np.array([cm_to_mm, cm_to_mm, rad_to_deg]) # convert units to: [mm/s, mm/s, deg/s]
max_pos_vals = np.array([np.inf, np.inf, np.inf])
min_pos_vals = np.array([-np.inf, -np.inf, -np.inf])
max_speed = np.array([np.inf, np.inf, np.inf])
feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'w')
#max_speed = np.array([40, 60, 20]) # in mm/s and deg/s
#max_speed = np.array([60, 80, 50]) # in mm/s and deg/s
#parameters for kinematics low-pass filtering
from scipy.signal import butter, lfilter
from ismore.filter import Filter
fs_synch = 25 #Frequency at which emg and kin data are synchronized
nyq = 0.5 * fs_synch
cuttoff_freq = 1.5 / nyq
bpf_kin_coeffs = butter(2, cuttoff_freq, btype='low')
n_dof = 3
vel_filter = [None] * n_dof
for k in range(n_dof):
vel_filter[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1])
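    # The cutoff above is normalized by the Nyquist rate (0.5 * 25 Hz = 12.5 Hz),
    # so butter(2, 1.5 / nyq, btype='low') gives a 1.5 Hz low-pass that is
    # applied independently to each of the three velocity components.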
n_getpos_iter= 0
def __init__(self, *args, **kwargs):
super(ArmAssistPlantUDP, self).__init__(*args, **kwargs)
def set_pos_control(self): # position control with global reference system
self._send_command('SetControlMode ArmAssist Position')
def set_global_control(self): #velocity control with global reference system
self._send_command('SetControlMode ArmAssist Global')
def set_trajectory_control(self): #trajectory control with global reference system
self._send_command('SetControlMode ArmAssist Trajectory')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [mm/s, mm/s, deg/s] to send them through UDP to the ArmAssist application
vel[0] *= cm_to_mm
vel[1] *= cm_to_mm
vel[2] *= rad_to_deg
# set max speed limts
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print "vel sent to armassist"
# print vel
if faster_than_max_speed.any() > 0:
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
self._send_command('SetSpeed ArmAssist %f %f %f\r' % tuple(vel))
# get raw position
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
        #get the last points of data of the armassist and low-pass filter
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
# get filtered position
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
# calculate vel from raw position
def get_vel_raw(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() #nerea --> to test!
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
return vel
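        # i.e. a first-order finite difference: the two most recent position
        # samples from the feedback source give v ~ (p[t1] - p[t0]) / (t1 - t0).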
#calculate vel from raw position and filter
def get_vel(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
# the first value of the pos because it is always NaN and if a NaN is introduced in the filter, all the following filtered values will be also NaNs
if np.any(np.isnan(vel)):
self.n_getpos_iter = self.n_getpos_iter +1
vel_filt = vel
else:
vel_filt = np.array([self.vel_filter[k](vel[k]) for k in range(self.n_dof)]).ravel()
return vel_filt
def send_pos(self, pos, time):
pos = pos.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(pos) == 3
# convert units to: [mm/s, mm/s, deg/s]
pos[0] *= cm_to_mm
pos[1] *= cm_to_mm
pos[2] *= rad_to_deg
# mode 1: the forearm angle (psi) stays the same as it is. mode 2: psi will move according to the determined value
mode = 2
pos_command = np.zeros(5)
pos_command[0] = pos[0]
pos_command[1] = pos[1]
pos_command[2] = pos[2]
pos_command[3] = time
pos_command[4] = mode
print ("pos")
print (pos)
print ("time")
print (time)
self._send_command('SetPosition ArmAssist %f %f %f %f %f\r' % tuple(pos_command))
def enable(self):
self._send_command('SetControlMode ArmAssist Global\r')
def disable(self):
self._send_command('SetControlMode ArmAssist Disable\r')
def enable_watchdog(self, timeout_ms):
print ('ArmAssist watchdog not enabled, doing nothing')
def send_traj(self, pos_vel):
pos_vel = pos_vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(pos_vel) == 6
# units to are alread in [mm/s, mm/s, rad/s]
# convert values to integers to reduce noise
#pos_vel_int = np.rint(pos_vel)
pos_vel_int = pos_vel
print ("trajectory sent to AA")
print ("x y psi vx vy vpsi")
print (pos_vel_int)
traj_command = np.zeros(6)
traj_command[0] = pos_vel_int[0]
traj_command[1] = pos_vel_int[1]
traj_command[2] = pos_vel_int[2]
traj_command[3] = pos_vel_int[3]
traj_command[4] = pos_vel_int[4]
traj_command[5] = pos_vel_int[5]
self._send_command('SetTrajectory ArmAssist %d %d %d %d %d %d\r' % tuple(traj_command))
class DummyPlantUDP(object):
drive_velocity_raw = np.array([0,0,0])
drive_velocity_sent = np.array([0,0,0])
drive_velocity_sent_pre_safety = np.array([0,0,0])
pre_drive_state = np.array([0, 0, 0])
def init(self):
pass
def enable(self):
pass
def start(self):
pass
def stop(self):
pass
def write_feedback(self):
pass
def get_pos_raw(self):
return np.array([0,0,0])
def get_pos(self):
return np.array([0,0,0])
def get_vel_raw(self):
return np.array([0,0,0])
def get_vel(self):
return np.array([0,0,0])
class ReHandPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ReHand.
'''
ssm_cls = ismore_bmi_lib.StateSpaceReHand
addr = settings.REHAND_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ReHandData
data_source_name = 'rehand'
n_dof = 4
plant_type = 'ReHand'
vel_gain = np.array([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg])
max_pos_vals = np.array([60, 60, 60, 90], dtype=np.float64) # degrees
min_pos_vals = np.array([25, 25, 25, 25], dtype=np.float64) # degrees
max_speed = np.array([np.inf, np.inf, np.inf, np.inf], dtype=np.float64) # degrees/sec
#max_speed = np.array([15., 15., 15., 15.], dtype=np.float64) # degrees/sec
feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'w')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [rad/s, rad/s, rad/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [deg/s, deg/s, deg/s, deg/s]
vel *= rad_to_deg
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel()
# set max speed limts
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print 'filt_vel in plants in degrees'
# print filt_vel #*np.array([deg_to_rad, deg_to_rad, deg_to_rad, deg_to_rad])
if faster_than_max_speed.any() > 0:
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
# self.plant.enable() #when we send vel commands always enable the rehand motors
# self._send_command('SystemEnable ReHand\r')
self._send_command('SetSpeed ReHand %f %f %f %f\r' % tuple(vel))
def get_vel_raw(self):
return np.array(tuple(self.source.read(n_pts=1)['data'][self.vel_state_names][0]))
def get_vel(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.vel_state_names][0]))
def enable(self):
self._send_command('SystemEnable ReHand\r')
def disable(self):
self._send_command('SystemDisable ReHand\r')
def diff_enable(self,DoFs):
self._send_command('DiffEnable ReHand %i %i %i %i\r' % tuple(DoFs))
def get_enable_state(self):
self._send_command('GetEnableState ReHand\r')
def enable_watchdog(self, timeout_ms):
self._send_command('WatchDogEnable ReHand %d\r' % timeout_ms)
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
#get pos filtered
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
################################################
class BasePlantIsMore(Plant):
# define in subclasses!
aa_plant_cls = None
rh_plant_cls = None
safety_grid = None
both_feedback_str = ''
def __init__(self, *args, **kwargs):
self.aa_plant = self.aa_plant_cls()
self.rh_plant = self.rh_plant_cls()
self.drive_velocity_raw = np.zeros((7,))
self.drive_velocity_sent= np.zeros((7,))
self.drive_velocity_sent_pre_safety = np.zeros((7, ))
self.pre_drive_state = np.zeros((7, ))
self.prev_vel_bl_aa = np.zeros((3, ))*np.NaN
self.prev_vel_bl_rh = np.zeros((4, ))*np.NaN
self.accel_lim_armassist = np.inf #0.8
self.accel_lim_psi = np.inf #0.16
self.accel_lim_rehand = np.inf #0.16
def init(self):
self.aa_plant.init()
self.rh_plant.init()
def start(self):
self.aa_plant.start()
self.rh_plant.start()
self.ts_start_data = time.time()
def stop(self):
self.aa_plant.stop()
self.rh_plant.stop()
def last_data_ts_arrival(self):
return {
'ArmAssist': self.aa_plant.last_data_ts_arrival(),
'ReHand': self.rh_plant.last_data_ts_arrival(),
}
def send_vel(self, vel):
self.aa_plant.send_vel(vel[0:3])
self.rh_plant.send_vel(vel[3:7])
def get_pos_raw(self):
aa_pos = self.aa_plant.get_pos_raw()
rh_pos = self.rh_plant.get_pos_raw()
return np.hstack([aa_pos, rh_pos])
def get_pos(self):
aa_pos = self.aa_plant.get_pos()
rh_pos = self.rh_plant.get_pos()
return np.hstack([aa_pos, rh_pos])
def get_vel_raw(self):
aa_vel = self.aa_plant.get_vel_raw()
rh_vel = self.rh_plant.get_vel_raw()
return np.hstack([aa_vel, rh_vel])
def get_vel(self):
aa_vel = self.aa_plant.get_vel()
rh_vel = self.rh_plant.get_vel()
return np.hstack([aa_vel, rh_vel])
def enable(self):
self.aa_plant.enable()
self.rh_plant.enable()
def disable(self):
self.aa_plant.disable()
self.rh_plant.disable()
def drive(self, decoder):
# print self.aa_plant.aa_xy_ix: [0, 1]
# print self.aa_plant.aa_psi_ix: [2]
# print self.rh_plant.rh_pfings: [0, 1, 2]
# print self.rh_plant.rh_pron_ix: [3]
vel = decoder['qdot']
vel_bl = vel.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
self.drive_velocity_raw = vel_bl.copy()
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl[0:3].copy()
vel_bl_rh0 = vel_bl[3:7].copy()
        ### Accel Limit Velocities ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
if self.safety_grid is not None:
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
else:
vel_bl_aa = vel_bl_aa0
vel_bl_rh = vel_bl_rh0
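        # The pull terms above turn the positional error (attractor - current)
        # into a velocity by dividing by 0.05, i.e. "cover the gap in one step"
        # assuming a 50 ms (20 Hz) control update; attractor_speed_const then
        # scales how strongly the plant is drawn back toward the attractor point.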
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
current_pos = current_state[self.aa_plant.aa_xy_ix]
pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix]
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
if global_ok:
psi_ok = True
if psi_ok == False:
# Move psi back to attractor pos:
psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix]
vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix]
prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix]
vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
tmp_ = pos_pred_rh[ix]
neutral = attractor_point_rh[ix]
vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep', 'baseline_check']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
decoder['q'] = self.get_pos()
class IsMorePlantUDP(BasePlantIsMore):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ArmAssist+ReHand.
'''
aa_plant_cls = ArmAssistPlantUDP
rh_plant_cls = ReHandPlantUDP
def write_feedback(self):
self.aa_plant.feedback_str = self.both_feedback_str
self.aa_plant.write_feedback()
#self.rh_plant.write_feedback()
class IsMorePlantEMGControl(IsMorePlantUDP): # Plant used for the pure EMG control task
def drive(self):
vel_bl = self.drive_velocity_raw
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl[0:3].copy()
vel_bl_rh0 = vel_bl[3:7].copy()
        ### Accel Limit Velocities ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
current_pos = current_state[self.aa_plant.aa_xy_ix]
pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix]
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
if global_ok:
psi_ok = True
if psi_ok == False:
# Move psi back to attractor pos:
psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix]
vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix]
prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix]
vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
tmp_ = pos_pred_rh[ix]
neutral = attractor_point_rh[ix]
vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
class IsMorePlantHybridBMI(IsMorePlantUDP): # Plant used for the hybrid (EMG + brain) BMI task.
def __init__(self, *args, **kwargs):
self.drive_velocity_raw_brain = np.zeros((7,))
self.emg_vel_raw_scaled = np.zeros((7,))
super(IsMorePlantHybridBMI, self).__init__(*args, **kwargs)
def drive(self, decoder):
vel = decoder['qdot']
vel_brain = vel.copy()
vel_brain_aa = vel_brain[[0, 1, 2]]
vel_brain_fingers = vel_brain[[3, 4, 5]]
vel_brain_prono = vel_brain[[6]]
self.drive_velocity_raw_brain = vel_brain.copy()
# Use EMG scaled array to scale the output:
vel_emg = self.emg_vel.copy()
vel_emg_scaled = []
for i in range(7):
vel_emg_scaled.append(vel_emg[i]*self.scale_emg_pred_arr[i])
vel_emg_scaled = np.hstack((vel_emg_scaled))
self.emg_vel_raw_scaled = vel_emg_scaled.copy()
vel_emg_aa = vel_emg_scaled[[0, 1, 2]]
vel_emg_fingers = vel_emg_scaled[[3, 4, 5]]
vel_emg_prono = vel_emg_scaled[[6]]
vel_bl_aa = vel_emg_aa*self.emg_weight_aa + vel_brain_aa*(1-self.emg_weight_aa)
vel_bl_fingers = vel_emg_fingers*self.emg_weight_fingers + vel_brain_fingers*(1-self.emg_weight_fingers)
vel_bl_prono = vel_emg_prono*self.emg_weight_prono + vel_brain_prono*(1-self.emg_weight_prono)
vel_bl = np.hstack((vel_bl_aa, vel_bl_fingers, vel_bl_prono))
# Fuse velocities from EMG and neural decoders
#vel_bl = vel_emg*self.emg_weight + vel_brain*(1-self.emg_weight)
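        # Each joint group is a convex combination of the two sources: with
        # emg_weight_aa = 0.4, for example, the ArmAssist command would be
        # 0.4 * EMG velocity + 0.6 * brain-decoder velocity.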
self.drive_velocity_raw = vel_bl.copy()
vel_bl_fb_gain = []
for i in range(7):
vel_bl_fb_gain.append(vel_bl[i]*self.fb_vel_gain[i])
vel_bl_fb_gain = np.hstack((vel_bl_fb_gain))
self.drive_velocity_raw_fb_gain = vel_bl_fb_gain.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
if self.blocking_joints is not None:
print ('self.blocking_joints --> ', self.blocking_joints)
vel_bl_fb_gain[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl_fb_gain[0:3].copy()
vel_bl_rh0 = vel_bl_fb_gain[3:7].copy()
        ### Accel Limit Velocities ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
print ('false position')
current_pos = current_state[self.aa_plant.aa_xy_ix]
pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix]
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
#global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
global_ok = True
if global_ok:
psi_ok = True
if psi_ok == False:
# Move psi back to attractor pos:
psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix]
vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix]
prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix]
vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
tmp_ = pos_pred_rh[ix]
neutral = attractor_point_rh[ix]
vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05
# print 'safely adjusting fingers! ', nm, 'min: ', mn, ' max: ', mx, ' pred: ', pos_pred_rh[ix]
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep', 'baseline_check']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
elif self.task_state == 'rest_back':
vel_bl_aa = vel_bl_aa_pull/self.attractor_speed_const*self.rest_back_attractor_speed
vel_bl_rh = vel_bl_rh_pull/self.attractor_speed_const*self.rest_back_attractor_speed
elif self.task_state in ['drive_to_start', 'drive_to_rest']:
vel_bl_aa = self.back_to_target_speed*(self.drive_to_start_target[:3] - current_state[:3])/0.05
vel_bl_rh = self.back_to_target_speed*(self.drive_to_start_target[3:] - current_state[3:])/0.05
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
decoder['q'] = self.get_pos()
class IsMorePlantHybridBMISoftSafety(IsMorePlantHybridBMI):
def drive(self, decoder):
vel = decoder['qdot']
vel_brain = vel.copy()
vel_brain_aa = vel_brain[[0, 1, 2]]
vel_brain_fingers = vel_brain[[3, 4, 5]]
vel_brain_prono = vel_brain[[6]]
self.drive_velocity_raw_brain = vel_brain.copy()
# Use EMG scaled array to scale the output:
vel_emg = self.emg_vel.copy()
vel_emg_scaled = []
for i in range(7):
vel_emg_scaled.append(vel_emg[i]*self.scale_emg_pred_arr[i])
vel_emg_scaled = np.hstack((vel_emg_scaled))
self.emg_vel_raw_scaled = vel_emg_scaled.copy()
vel_emg_aa = vel_emg_scaled[[0, 1, 2]]
vel_emg_fingers = vel_emg_scaled[[3, 4, 5]]
vel_emg_prono = vel_emg_scaled[[6]]
vel_bl_aa = vel_emg_aa*self.emg_weight_aa + vel_brain_aa*(1-self.emg_weight_aa)
vel_bl_fingers = vel_emg_fingers*self.emg_weight_fingers + vel_brain_fingers*(1-self.emg_weight_fingers)
vel_bl_prono = vel_emg_prono*self.emg_weight_prono + vel_brain_prono*(1-self.emg_weight_prono)
vel_bl = np.hstack((vel_bl_aa, vel_bl_fingers, vel_bl_prono))
# Fuse velocities from EMG and neural decoders
#vel_bl = vel_emg*self.emg_weight + vel_brain*(1-self.emg_weight)
self.drive_velocity_raw = vel_bl.copy()
vel_bl_fb_gain = []
for i in range(7):
vel_bl_fb_gain.append(vel_bl[i]*self.fb_vel_gain[i])
vel_bl_fb_gain = np.hstack((vel_bl_fb_gain))
self.drive_velocity_raw_fb_gain = vel_bl_fb_gain.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
if self.blocking_joints is not None:
vel_bl_fb_gain[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl_fb_gain[0:3].copy()
vel_bl_rh0 = vel_bl_fb_gain[3:7].copy()
### Accel Limit Velocitites ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
# Find the closest point on the boundary of the safety grid and set velocity in same
# direction, but at 90% of way to get to the edge of the safety grid:
current_pos = current_state[self.aa_plant.aa_xy_ix]
### loop through percentages of velocity and check validity of point:
valid_scale = False
scale = 1.0
while valid_scale is False:
scale -= 0.05
pos_pred_xy = current_pos + 0.05*(vel_bl_aa[self.aa_plant.aa_xy_ix]*scale)
valid_scale = self.safety_grid.is_valid_pos(pos_pred_xy)
if scale < -1.0:
scale = 0.0
break
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = vel_bl_aa[self.aa_plant.aa_xy_ix]*scale
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
#global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
global_ok = True
if global_ok:
psi_ok = True
if psi_ok == False:
valid_scale_psi = False
scale = 1.0
while valid_scale_psi is False:
scale -= 0.05
psi_pred = current_state[self.aa_plant.aa_psi_ix] + 0.05*(scale*vel_bl_aa[self.aa_plant.aa_psi_ix])
if np.logical_and(psi_pred >= mn, psi_pred <= mx):
valid_scale_psi = True
if scale < -1.0:
scale = 0.0
break
vel_bl_aa[self.aa_plant.aa_psi_ix] = scale*vel_bl_aa[self.aa_plant.aa_psi_ix]
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
valid_scale_prono = False
scale = 1.0
while valid_scale_prono is False:
scale -= 0.05
pron_pred = pos_pred_rh[self.rh_plant.rh_pron_ix] + 0.05*(scale*vel_bl_rh[self.rh_plant.rh_pron_ix])
if np.logical_and(pron_pred >= mn, pron_pred <= mx):
valid_scale_prono = True
if scale < -1.0:
scale = 0.
break
vel_bl_rh[self.rh_plant.rh_pron_ix] = scale*vel_bl_rh[self.rh_plant.rh_pron_ix]
# Ensure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
finger_scale = False
scale = 1.0
while finger_scale is False:
scale -= 0.05
fing_pred = pos_pred_rh[ix] + 0.05*(scale*vel_bl_rh[ix])
if np.logical_and(fing_pred >= mn, fing_pred<= mx):
finger_scale = True
if scale < -1.0:
scale = 0.0
break
vel_bl_rh[ix] = scale*vel_bl_rh[ix]
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep', 'baseline_check', 'wait']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
elif self.task_state == 'rest_back':
vel_bl_aa = vel_bl_aa_pull/self.attractor_speed_const*self.rest_back_attractor_speed
vel_bl_rh = vel_bl_rh_pull/self.attractor_speed_const*self.rest_back_attractor_speed
elif self.task_state in ['drive_to_start', 'drive_to_rest']:
vel_bl_aa = self.back_to_target_speed*(self.drive_to_start_target[:3] - current_state[:3])/0.05
vel_bl_rh = self.back_to_target_speed*(self.drive_to_start_target[3:] - current_state[3:])/0.05
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
decoder['q'] = self.get_pos()
UDP_PLANT_CLS_DICT = {
'ArmAssist': ArmAssistPlantUDP,
'ReHand': ReHandPlantUDP,
'IsMore': IsMorePlantUDP,
'IsMoreEMGControl': IsMorePlantEMGControl,
'IsMoreHybridControl': IsMorePlantHybridBMI,
'IsMorePlantHybridBMISoftSafety': IsMorePlantHybridBMISoftSafety,
'DummyPlant': DummyPlantUDP,
}
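# Hedged usage sketch (not from the original code): UDP_PLANT_CLS_DICT maps a
# plant name, e.g. one stored in task settings, to the UDP plant class that
# should be instantiated. The 'IsMore' key is taken from the dict above, but
# the argument-free construction shown here is an assumption only.
#
#     plant_cls = UDP_PLANT_CLS_DICT['IsMore']
#     plant = plant_cls()   # constructor arguments, if any, are not shown here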
###########################
##### Deprecated code #####
###########################
class BasePlant(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError('Implement in subclasses!')
def init(self):
raise NotImplementedError('Implement in subclasses!')
def start(self):
raise NotImplementedError('Implement in subclasses!')
def stop(self):
raise NotImplementedError('Implement in subclasses!')
def last_data_ts_arrival(self):
raise NotImplementedError('Implement in subclasses!')
def send_vel(self, vel):
raise NotImplementedError('Implement in subclasses!')
def get_pos(self):
raise NotImplementedError('Implement in subclasses!')
def get_vel(self):
raise NotImplementedError('Implement in subclasses!')
def enable(self):
'''Enable the device's motor drivers.'''
raise NotImplementedError('Implement in subclasses!')
def disable(self):
'''Disable the device's motor drivers.'''
raise NotImplementedError('Implement in subclasses!')
def enable_watchdog(self, timeout_ms):
raise NotImplementedError('Implement in subclasses!')
def get_intrinsic_coordinates(self):
return self.get_pos()
| 2.53125 | 3 |
catalog/client/services/catalog.py | eoss-cloud/madxxx_catalog_api | 0 | 11611 | <gh_stars>0
#-*- coding: utf-8 -*-
""" EOSS catalog system
functionality for the catalog endpoint
"""
from utilities.web_utils import remote_file_exists
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import datetime
import ujson
import time
import dateparser
import falcon
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import csv
from xlsxwriter import Workbook
from dateutil.parser import parse
import numpy
from sqlalchemy import and_
import logging
from collections import defaultdict
from model.orm import Catalog_Dataset, Spatial_Reference
from api import General_Structure
from .db_calls import Persistance
from . import getKeysFromDict
from .tools import get_base_url, can_zip_response, compress_body, serialize, make_GeoJson
from api_logging import logger
def date_handler(obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError
GRID_SYSTEMS = {'Sentinel - 2A': 10,
'LANDSAT_ETM': 11,
'LANDSAT_ETM_SLC_OFF': 11,
'OLI_TIRS': 11,
'TIRS': 11}
class Catalog(object):
"""
EOSS catalog class from web API
"""
def __init__(self):
self.logger = logging.getLogger('eoss.' + __name__)
self.aggregations = defaultdict(list)
for agg in Persistance().get_all_sensor_aggregations():
self.aggregations[agg.aggregation_name.lower()].append(agg)
def _query_(self, areas, dates, sensors, clouds):
sensors_filter = list()
grid_list = defaultdict(set)
for sensor_grid in set(GRID_SYSTEMS.values()):
if 'ref_group' in areas[0].keys():
ref_type_id, ref_id = areas[0]['ref_group'], areas[0]['ref_id']
spatial_query = Persistance().get_reference_by_sensorgrid(ref_id, ref_type_id, sensor_grid)
elif 'aoi' in areas[0].keys():
aoi = areas[0]['aoi']
spatial_query = Persistance().get_referencebyaoi(aoi, sensor_grid)
for grid in spatial_query.all():
grid_list[sensor_grid].add(grid)
if len(grid_list) == 0:
description = 'Please specify valid reference object for data. (type:%s, id:%s)' \
% (ref_type_id, ref_id)
raise falcon.HTTPBadRequest('SensorGrid', description,
href='http://docs.example.com/auth')
joint_gridset = grid_list[10] | grid_list[11] # TODO: better grid system handling from extra table?
for item in sensors:
sensor, level = item['sensor_name'], item['level']
if len(sensor) > 0 and len(level) > 0:
sensors_filter.append(and_(Catalog_Dataset.level == level, Catalog_Dataset.sensor == sensor))
elif len(sensor) == 0 and len(level) > 0:
sensors_filter.append(Catalog_Dataset.level == level)
elif len(sensor) > 0 and len(level) == 0:
sensors_filter.append(Catalog_Dataset.sensor == sensor)
dates_filter = list()
for item in dates:
# ExtJS POST requests provide a unicode body
if type(item["start_date"]) is unicode:
item["start_date"] = parse(item["start_date"])
if type(item["end_date"]) is unicode:
item["end_date"] = parse(item["end_date"])
dates_filter.append(
and_(Catalog_Dataset.acq_time >= item["start_date"].isoformat(), Catalog_Dataset.acq_time <= item["end_date"].isoformat()))
query = Persistance().find_dataset(dates_filter, sensors_filter, grid_list, joint_gridset, clouds)
return query
def _get_datasets(self, query):
query_result = list()
for ds in query:
values = dict()
types = dict()
for k, v in ds.__dict__.iteritems():
if '_' != k[0]:
values[k] = v
types[k] = type(v)
x = General_Structure(values, types)
x.__class__.__name__ = 'Catalog_Dataset'
query_result.append(serialize(x, as_json=False)['data'])
return query_result
# TODO: tiles list as input - only first will be returned or exception thrown !
def _query_tile_geom(self, tiles):
tile_objs = Persistance().get_tile_geom(tiles)
return tile_objs.all()
def _export_query(self, found_dataset):
row_keys = ['tile_identifier', 'entity_id', 'acq_time', 'clouds']
resources = [('resources', 'metadata'), ('resources', 'quicklook')]
row = list()
rows = list()
for k in row_keys:
row.append(k)
for k in resources:
row.append(' '.join(k))
row.append('data')
rows.append(row)
for ds in found_dataset:
row = list()
for k in row_keys:
row.append(ds.get(k))
for k in resources:
row.append(getKeysFromDict(ds, k))
if ds.get('sensor') in ['LANDSAT_TM', 'LANDSAT_ETM', 'LANDSAT_ETM_SLC_OFF']:
if 'google' in ds.get('resources').keys():
row.append(getKeysFromDict(ds, ('resources', 'google', 'link')))
elif 'usgs' in ds.get('resources').keys():
row.append(getKeysFromDict(ds, ('resources', 'usgs', 'link')))
else:
row.append('?')
elif ds.get('sensor') in ['OLI_TIRS', 'OLI', 'TIRS']:
if 's3public' in ds.get('resources').keys():
row.append(getKeysFromDict(ds, ('resources', 's3public', 'zip')))
elif 'google' in ds.get('resources').keys():
row.append(getKeysFromDict(ds, ('resources', 'google', 'link')))
elif ds.get('sensor') in ['Sentinel-2A']:
if 's3public' in ds.get('resources').keys():
if getKeysFromDict(ds, ('resources', 's3public')) != None:
row.append(getKeysFromDict(ds, ('resources', 's3public', 'zip')))
else:
row.append('?')
else:
row.append('?')
rows.append(row)
return rows
class CatalogApi(Catalog):
def __init__(self, my_router):
Catalog.__init__(self)
self.router = my_router
def on_get(self, req, resp, format, check_resources=False):
"""Handles GET requests
http://localhost:8000/catalog/search/result.json?from_date=2016-05-01&to_date=2016-06-02&sensor=sentinel2&ref_group=9&ref_id=73&clouds=50
"""
BASE_URL = get_base_url(req.url)
start_time = time.time()
query_filter = req.params
results = dict()
results['action'] = 'catalog search'
results['action-time'] = str(datetime.datetime.now())
results.update({'query': query_filter})
dates = list()
sensor_list = list()
try:
for date_string in ['from_date', 'to_date']:
date = dateparser.parse(req.params[date_string])
if date is None:
description = 'Please format the date properly, used %s:%s.' % (date_string, date)
raise falcon.HTTPBadRequest('DateFormat', description,
href='http://docs.example.com/auth')
else:
dates.append(date)
if dates[0] == dates[1]:
description = 'Given dates do not cover a date range. Please correct the date span. (%s-%s)' \
% (req.params['from_date'], req.params['to_date'])
raise falcon.HTTPBadRequest('DateFormat', description,
href='http://docs.example.com/auth')
elif dates[0] > dates[1]:
description = 'Given end date is before start date. Please reverse dates. (%s-%s)' \
% (req.params['from_date'], req.params['to_date'])
raise falcon.HTTPBadRequest('DateFormat', description,
href='http://docs.example.com/auth')
if not req.params['sensor'].lower() in self.aggregations.keys():
description = 'Sensor label is unknown in aggregation table, use %s' % str(map(str, self.aggregations.keys()))
raise falcon.HTTPBadRequest('DateFormat', description,
href='http://docs.example.com/auth')
for agg in self.aggregations[req.params['sensor'].lower()]:
sensor_list.append({"sensor_name": agg.sensor, "level": agg.level})
ref_group, ref_id, clouds = int(req.params['ref_group']), int(req.params['ref_id']), int(req.params['clouds'])
except KeyError, e:
description = 'Search key: %s missing in query.' % e
raise falcon.HTTPBadRequest('KeyError', description,
href='http://docs.example.com/auth')
except ValueError, e:
description = 'Given parameters contain bad values: %s'% str(e)
raise falcon.HTTPBadRequest('KeyError', description,
href='http://docs.example.com/auth')
query = self._query_([{"ref_group": ref_group, "ref_id": ref_id}],
[{"start_date": dates[0], "end_date": dates[1]}],
sensor_list, clouds)
query_struct = {'area':[{"ref_group": ref_group, "ref_id": ref_id}],
'dates':[{"start_date": dates[0], "end_date": dates[1]}],
'sensors':sensor_list, 'clouds':clouds
}
found_dataset = self._get_datasets(query)
logger.info('[GET] /catalog/search/result.%s' % format, extra={x:str(y) for x,y in query_struct.iteritems()})
if check_resources:
for ds in found_dataset:
if 's3public' in ds['resources'].keys():
if 'zip' in ds['resources']['s3public'].keys():
if not remote_file_exists( ds['resources']['s3public']['zip']):
print '%s missing' % ds['resources']['s3public']['zip']
if format.lower() == 'json':
if 'search/count' in req.url:
results['count'] = query.count()
else:
results['count'] = query.count()
results['found_dataset'] = found_dataset
results['found_tiles'] = sorted(list(set([x['tile_identifier'] for x in found_dataset])))
results['found_resources'] = [BASE_URL + self.router.reverse('dataset_entity', entity_id=x['entity_id'])
for x in results['found_dataset']]
results['processing_time'] = time.time() - start_time
elif format.lower() == 'geojson':
tilegrids = defaultdict(lambda: defaultdict(list))
geoms, attrs = list(), list()
for x in found_dataset:
tilegrids[x['tile_identifier']]['acq_time'].append(x['acq_time'])
# tilegrids[x['tile_identifier']]['acq_time_js'].append(
# int(time.mktime(dateparser.parse(x['acq_time']).timetuple())) * 1000)
tilegrids[x['tile_identifier']]['tile_identifier'].append(x['tile_identifier'])
tilegrids[x['tile_identifier']]['clouds'].append(x['clouds'])
for tile_id in tilegrids.keys():
tilegrids[tile_id]['count'] = len(tilegrids[tile_id]['clouds'])
tilegrids[tile_id]['tile_identifier'] = tilegrids[tile_id]['tile_identifier'][0]
tiles_dict = dict()
if len(tilegrids.keys()) > 0:
for ref_name, geom in self._query_tile_geom(tilegrids.keys()):
tiles_dict[ref_name] = geom
for tile_id in tilegrids.keys():
geoms.append(ujson.loads(tiles_dict[tile_id]))
attrs.append(tilegrids[tile_id])
results = make_GeoJson(geoms, attrs)
elif format.lower() == 'csv':
rows = self._export_query(found_dataset)
si = StringIO.StringIO()
cw = csv.writer(si, delimiter='\t')
for row in rows:
cw.writerow(row)
results = si.getvalue().strip('\r\n')
elif format.lower() == 'xlsx':
rows = self._export_query(found_dataset)
strIO = StringIO.StringIO()
workbook = Workbook(strIO, {'in_memory': True, 'constant_memory': True})
bold = workbook.add_format({'bold': True})
big_bold = workbook.add_format({'bold': True, 'size': 20})
italic = workbook.add_format({'italic': True})
worksheet = workbook.add_worksheet(name='EOSS analysis')
worksheet.write(0, 0, 'EOSS data analysis', big_bold)
ref_obj = Persistance().get_reference(query_filter.get('ref_id'), query_filter.get('ref_group')).one()
query_filter['reference_name'] = ref_obj.ref_name
query_filter['reference_type'] = ref_obj.referencetype.name
# {'clouds': '60', 'ref_id': '5502', 'from_date': '09/07/2016', 'to_date': '10/07/2016', 'ref_group': '12', 'sensor': 'Sentinel2'}
r = 3
worksheet.write(r - 1, 0, 'query filter:', big_bold)
for c, k in enumerate(['sensor', 'from_date', 'to_date', 'clouds', 'reference_name', 'reference_type']):
worksheet.write(r + c, 0, k, bold)
worksheet.write(r + c, 1, query_filter[k])
r = 13
worksheet.write(r - 2, 0, 'query set:', big_bold)
for c, k in enumerate(rows[0]):
worksheet.write(r - 1, c, k, bold)
for values in rows[1:]:
for c, v in enumerate(values):
worksheet.write(r, c, v)
r += 1
workbook.close()
strIO.seek(0)
results = strIO.read()
elif format.lower() == 'hist':
found_tiles = sorted(list(set([x['tile_identifier'] for x in found_dataset])))
result_list = []
first = dict()
first['tile_identifier'] = 'percentagelabel'
first['span'] = 100
result_list.append(first)
data = numpy.zeros((len(found_dataset)))
tileslist = []
i = 0
for x in found_dataset:
tileslist.append(x['tile_identifier'])
data[i] = float(x['clouds'])
i = i + 1
for t in found_tiles:
ix = numpy.array(tileslist) == t
subset_clouds = data[ix]
num_scenes = sum(ix)
hist_abs = numpy.histogram(subset_clouds, bins=[-1] + range(0, 120, 20))
hist_rel = hist_abs[0] * 1.0 / num_scenes
hist_struct = dict()
hist_struct['tile_identifier'] = t
hist_struct['span'] = 100
hist_struct['scenes_perc_-1'] = hist_rel[0]
hist_struct['scenes_perc_20'] = hist_rel[1]
hist_struct['scenes_perc_40'] = hist_rel[2]
hist_struct['scenes_perc_60'] = hist_rel[3]
hist_struct['scenes_perc_80'] = hist_rel[4]
hist_struct['scenes_perc_100'] = hist_rel[5]
hist_struct['scenes_abs_-1'] = hist_abs[0][0]
hist_struct['scenes_abs_20'] = hist_abs[0][1]
hist_struct['scenes_abs_40'] = hist_abs[0][2]
hist_struct['scenes_abs_60'] = hist_abs[0][3]
hist_struct['scenes_abs_80'] = hist_abs[0][4]
hist_struct['scenes_abs_100'] = hist_abs[0][5]
result_list.append(hist_struct)
results['found_tiles'] = result_list
resp.status = falcon.HTTP_200
if can_zip_response(req.headers):
if format.lower() in ['hist', 'json', 'geojson']:
resp.set_header('Content-Type', 'application/json')
resp.set_header('Content-Encoding', 'gzip')
resp.body = compress_body(ujson.dumps(results))
elif format.lower() == 'csv':
resp.set_header('Content-Type', 'text/csv')
resp.set_header('Content-disposition', 'attachment;filename=%s;' % self.create_output_name('csv'))
resp.set_header('Content-Encoding', 'gzip')
resp.body = compress_body(results)
elif format.lower() == 'xlsx':
resp.set_header('Content-Type', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
resp.set_header('Content-disposition', 'attachment;filename=%s;' % self.create_output_name('xlsx'))
resp.set_header('Content-Encoding', 'gzip')
resp.body = compress_body(results)
else:
if format.lower() in ['hist', 'json', 'geojson']:
resp.set_header('Content-Type', 'application/json')
resp.body = ujson.dumps(results)
elif format.lower() == 'csv':
resp.set_header('Content-Type', 'text/csv')
resp.set_header('Content-disposition', 'attachment;filename=%s;' % self.create_output_name('csv'))
resp.body = results
elif format.lower() == 'xlsx':
resp.set_header('Content-Type', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
resp.set_header('Content-disposition', 'attachment;filename=%s;' % self.create_output_name('xlsx'))
resp.body = results
def create_output_name(self, extension):
return 'EOSS_analysis_%s.%s' % (datetime.datetime.now().isoformat(), extension)
def on_post(self, req, resp, format):
"""Handles POST requests
{
"daterange": [{
"start_date": "05/31/2000",
"end_date": "07/02/2003"
}],
"clouds": 1,
"sensors": [
{"sensor_name": "LANDSAT_ETM", "level": "" }],
"areas": [{
"ref_group": 12,
"ref_id": 6208
}]
}
{"clouds":20,"daterange":[{"start_date":"09/02/2015","end_date":"09/14/2016"}],
"sensors":[{"name":"landsat"}],
"areas":[{"ref_id":362,"ref_group":"9"}]}
"""
# TODO: loop over areas
sensor_list = list()
results = dict()
start_time = time.time()
output = StringIO.StringIO()
while True:
chunk = req.stream.read(4096)
if not chunk:
break
output.write(chunk)
body = output.getvalue()
output.close()
try:
struct = ujson.loads(body.decode('utf-8'))
except ValueError, e:
# try to decode x-www-form-urlencoded
query_str = falcon.util.uri.decode(body.decode('utf-8'))
query_str = query_str[query_str.find('{'):query_str.rfind('}') + 1]
try:
struct = ujson.loads(query_str)
except ValueError, e:
description = 'Given request body is neither valid JSON nor a urlencoded POST body.'
raise falcon.HTTPUnsupportedMediaType(description,
href='http://docs.example.com/auth')
try:
for s in struct['sensors']:
if 'sensor_name' in s.keys() and 'level' in s.keys():
sensor_list.append(s)
elif 'name' in s.keys():
if not s['name'].lower() in self.aggregations.keys():
description = 'Sensor label is unknown in aggregation table'
raise falcon.HTTPBadRequest('Catalog', description,
href='http://docs.example.com/auth')
for agg in self.aggregations[s['name'].lower()]:
sensor_list.append({"sensor_name": agg.sensor, "level": agg.level})
else:
description = 'Sensor is not specified in query'
raise falcon.HTTPBadRequest('Catalog', description,
href='http://docs.example.com/auth')
query = self._query_(struct['areas'], struct['daterange'], sensor_list, struct['clouds'])
query_struct = {'area': struct['areas'],
'dates': struct['daterange'],
'sensors': sensor_list, 'clouds': struct['clouds']
}
logger.info('[POST] /catalog/search/result.%s' % format, extra={x:str(y) for x,y in query_struct.iteritems()})
except KeyError, e:
description = 'Search key: %s missing in query.' % e
raise falcon.HTTPBadRequest('KeyError', description,
href='http://docs.example.com/auth')
results['count'] = query.count()
found_dataset = self._get_datasets(query)
results['found_dataset'] = found_dataset
results['found_tiles'] = sorted(list(set([x['tile_identifier'] for x in found_dataset])))
# results.update({'query': struct})
resp.body = ujson.dumps(results)
resp.status = falcon.HTTP_200
results['processing_time'] = time.time() - start_time
if can_zip_response(req.headers):
resp.set_header('Content-Type', 'application/json')
resp.set_header('Content-Encoding', 'gzip')
resp.body = compress_body(ujson.dumps(results))
else:
resp.set_header('Content-Type', 'application/json')
resp.body = ujson.dumps(results)
| 2.03125 | 2 |
tests/ui/test_pvc_ui.py | MeridianExplorer/ocs-ci | 0 | 11612 | import logging
import pytest
from ocs_ci.framework.testlib import tier1, skipif_ui_not_support, ui
from ocs_ci.ocs.ui.pvc_ui import PvcUI
from ocs_ci.framework.testlib import skipif_ocs_version
from ocs_ci.framework.pytest_customization.marks import green_squad
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs
from ocs_ci.ocs import constants
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import wait_for_resource_state, create_unique_resource_name
from ocs_ci.utility.utils import get_ocp_version
from ocs_ci.ocs.ui.views import locators
from ocs_ci.ocs.resources.pod import get_fio_rw_iops
logger = logging.getLogger(__name__)
@ui
@skipif_ocs_version("<4.6")
@skipif_ui_not_support("pvc")
@green_squad
class TestPvcUserInterface(object):
"""
Test PVC User Interface
"""
@tier1
@pytest.mark.parametrize(
argnames=["sc_name", "access_mode", "pvc_size", "vol_mode"],
argvalues=[
pytest.param(
"ocs-storagecluster-cephfs",
"ReadWriteMany",
"2",
"Filesystem",
),
pytest.param(
"ocs-storagecluster-ceph-rbd",
"ReadWriteMany",
"3",
"Block",
),
pytest.param(
"ocs-storagecluster-cephfs",
"ReadWriteOnce",
"10",
"Filesystem",
),
pytest.param(
"ocs-storagecluster-ceph-rbd",
"ReadWriteOnce",
"11",
"Block",
),
pytest.param(
"ocs-storagecluster-ceph-rbd",
"ReadWriteOnce",
"13",
"Filesystem",
),
],
)
def test_create_resize_delete_pvc(
self,
project_factory,
teardown_factory,
setup_ui,
sc_name,
access_mode,
pvc_size,
vol_mode,
):
"""
Test create, resize and delete pvc via UI
"""
# Creating a test project via CLI
pro_obj = project_factory()
project_name = pro_obj.namespace
pvc_ui_obj = PvcUI(setup_ui)
# Creating PVC via UI
pvc_name = create_unique_resource_name("test", "pvc")
pvc_ui_obj.create_pvc_ui(
project_name, sc_name, pvc_name, access_mode, pvc_size, vol_mode
)
pvc_objs = get_all_pvc_objs(namespace=project_name)
pvc = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]
assert pvc[0].size == int(pvc_size), (
f"size error| expected size:{pvc_size} \n "
f"actual size:{str(pvc[0].size)}"
)
assert pvc[0].get_pvc_access_mode == access_mode, (
f"access mode error| expected access mode:{access_mode} "
f"\n actual access mode:{pvc[0].get_pvc_access_mode}"
)
assert pvc[0].backed_sc == sc_name, (
f"storage class error| expected storage class:{sc_name} "
f"\n actual storage class:{pvc[0].backed_sc}"
)
assert pvc[0].get_pvc_vol_mode == vol_mode, (
f"volume mode error| expected volume mode:{vol_mode} "
f"\n actual volume mode:{pvc[0].get_pvc_vol_mode}"
)
# Verifying PVC via UI
logger.info("Verifying PVC Details via UI")
pvc_ui_obj.verify_pvc_ui(
pvc_size=pvc_size,
access_mode=access_mode,
vol_mode=vol_mode,
sc_name=sc_name,
pvc_name=pvc_name,
project_name=project_name,
)
logger.info("PVC Details Verified via UI..!!")
# Creating Pod via CLI
logger.info("Creating Pod")
if sc_name in (constants.DEFAULT_STORAGECLASS_RBD,):
interface_type = constants.CEPHBLOCKPOOL
else:
interface_type = constants.CEPHFILESYSTEM
new_pod = helpers.create_pod(
interface_type=interface_type,
pvc_name=pvc_name,
namespace=project_name,
raw_block_pv=vol_mode == constants.VOLUME_MODE_BLOCK,
)
logger.info(f"Waiting for Pod: state= {constants.STATUS_RUNNING}")
wait_for_resource_state(resource=new_pod, state=constants.STATUS_RUNNING)
# Calling the Teardown Factory Method to make sure Pod is deleted
teardown_factory(new_pod)
# Expanding the PVC
logger.info("Pvc Resizing")
new_size = int(pvc_size) + 3
pvc_ui_obj.pvc_resize_ui(
pvc_name=pvc_name, new_size=new_size, project_name=project_name
)
assert new_size > int(
pvc_size
), f"New size of the PVC cannot be less than existing size: new size is {new_size})"
ocp_version = get_ocp_version()
self.pvc_loc = locators[ocp_version]["pvc"]
# Verifying PVC expansion
logger.info("Verifying PVC resize")
expected_capacity = f"{new_size} GiB"
pvc_resize = pvc_ui_obj.verify_pvc_resize_ui(
project_name=project_name,
pvc_name=pvc_name,
expected_capacity=expected_capacity,
)
assert pvc_resize, "PVC resize failed"
logger.info(
"Pvc resize verified..!!"
f"New Capacity after PVC resize is {expected_capacity}"
)
# Running FIO
logger.info("Execute FIO on a Pod")
if vol_mode == constants.VOLUME_MODE_BLOCK:
storage_type = constants.WORKLOAD_STORAGE_TYPE_BLOCK
else:
storage_type = constants.WORKLOAD_STORAGE_TYPE_FS
new_pod.run_io(storage_type, size=(new_size - 1), invalidate=0, rate="1000m")
get_fio_rw_iops(new_pod)
logger.info("FIO execution on Pod successfully completed..!!")
# Checking if the Pod is deleted or not
new_pod.delete(wait=True)
new_pod.ocp.wait_for_delete(resource_name=new_pod.name)
# Deleting the PVC via UI
logger.info(f"Delete {pvc_name} pvc")
pvc_ui_obj.delete_pvc_ui(pvc_name, project_name)
pvc[0].ocp.wait_for_delete(pvc_name, timeout=120)
pvc_objs = get_all_pvc_objs(namespace=project_name)
pvcs = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]
if len(pvcs) > 0:
assert f"PVC {pvcs[0].name} does not deleted"
@tier1
@pytest.mark.parametrize(
argnames=["sc_name", "access_mode", "clone_access_mode"],
argvalues=[
pytest.param(
"ocs-storagecluster-ceph-rbd",
constants.ACCESS_MODE_RWO,
constants.ACCESS_MODE_RWO,
),
pytest.param(
"ocs-storagecluster-cephfs",
constants.ACCESS_MODE_RWX,
constants.ACCESS_MODE_RWO,
),
],
)
def test_clone_pvc(
self,
project_factory,
teardown_factory,
setup_ui,
sc_name,
access_mode,
clone_access_mode,
):
"""
Test to verify PVC clone from UI
"""
pvc_size = "1"
vol_mode = constants.VOLUME_MODE_FILESYSTEM
# Creating a project from CLI
pro_obj = project_factory()
project_name = pro_obj.namespace
pvc_ui_obj = PvcUI(setup_ui)
# Creating PVC from UI
pvc_name = create_unique_resource_name("test", "pvc")
pvc_ui_obj.create_pvc_ui(
project_name, sc_name, pvc_name, access_mode, pvc_size, vol_mode
)
teardown_factory(get_pvc_objs(pvc_names=[pvc_name], namespace=project_name)[0])
# Verifying PVC details in UI
logger.info("Verifying PVC details in UI")
pvc_ui_obj.verify_pvc_ui(
pvc_size=pvc_size,
access_mode=access_mode,
vol_mode=vol_mode,
sc_name=sc_name,
pvc_name=pvc_name,
project_name=project_name,
)
logger.info("Verified PVC details in UI")
# Clone PVC from UI
clone_pvc_name = f"{pvc_name}-clone"
pvc_ui_obj.pvc_clone_ui(
project_name=project_name,
pvc_name=pvc_name,
cloned_pvc_access_mode=clone_access_mode,
cloned_pvc_name=clone_pvc_name,
)
teardown_factory(
get_pvc_objs(pvc_names=[clone_pvc_name], namespace=project_name)[0]
)
# Verifying cloned PVC details in UI
logger.info("Verifying cloned PVC details in UI")
pvc_ui_obj.verify_pvc_ui(
pvc_size=pvc_size,
access_mode=clone_access_mode,
vol_mode=vol_mode,
sc_name=sc_name,
pvc_name=clone_pvc_name,
project_name=project_name,
)
logger.info("Verified cloned PVC details in UI")
| 1.890625 | 2 |
feature_generation/datasets/CSCW.py | s0lvang/ideal-pancake | 6 | 11613 | <reponame>s0lvang/ideal-pancake
import pandas as pd
from feature_generation.datasets.Timeseries import Timeseries
from os.path import basename
class CSCW(Timeseries):
def __init__(self):
super().__init__("cscw")
self.column_name_mapping = {
"id": self.column_names["subject_id"],
"Fixation Start [ms]": self.column_names["time"],
"Position X": self.column_names["x"],
"Position Y": self.column_names["y"],
"Average Pupil Size [px] X": self.column_names["pupil_diameter"],
"Fixation Duration [ms]": self.column_names["duration"],
"Fixation End [ms]": self.column_names["fixation_end"],
}
self.label = "Posttest.Score"
def prepare_files(self, file_references, metadata_references):
labels = pd.DataFrame()
dataset = []
with metadata_references[0].open("r") as f:
metadata_file = pd.read_csv(f, sep=";")
for file_reference in file_references:
dataset, labels = self.prepare_file(
file_reference, metadata_file, dataset, labels
)
labels = labels.T
return dataset, labels
def prepare_file(self, file_reference, metadata_file, dataset, labels):
participant_name_array = basename(file_reference.reference).split("_")
participant_name = "_".join(participant_name_array[0:3])
participant_name_with_type = "_".join(participant_name_array[0:4])
with file_reference.open("r") as f:
csv = pd.read_csv(f)
csv = csv.rename(columns=self.column_name_mapping)
csv[self.column_names["subject_id"]] = participant_name_with_type
dataset.append(csv)
print(participant_name)
labels[participant_name_with_type] = metadata_file[
metadata_file["Participant"] == participant_name
].iloc[0]
return dataset, labels
def __str__(self):
return super().__str__()
| 2.671875 | 3 |
reactics-smt/rs/reaction_system_with_concentrations.py | arturmeski/reactics | 2 | 11614 | from sys import exit
from colour import *
from rs.reaction_system import ReactionSystem
class ReactionSystemWithConcentrations(ReactionSystem):
def __init__(self):
self.reactions = []
self.meta_reactions = dict()
self.permanent_entities = dict()
self.background_set = []
self.context_entities = [] # legacy. to be removed
self.reactions_by_prod = None
self.max_concentration = 0
self.max_conc_per_ent = dict()
def add_bg_set_entity(self, e):
name = ""
def_max_conc = -1
if type(e) is tuple and len(e) == 2:
name, def_max_conc = e
elif type(e) is str:
name = e
print("\nWARNING: no maximal concentration level specified for:", e, "\n")
else:
raise RuntimeError(
"Bad entity type when adding background set element")
self.assume_not_in_bgset(name)
self.background_set.append(name)
if def_max_conc != -1:
ent_id = self.get_entity_id(name)
self.max_conc_per_ent.setdefault(ent_id, 0)
if self.max_conc_per_ent[ent_id] < def_max_conc:
self.max_conc_per_ent[ent_id] = def_max_conc
if self.max_concentration < def_max_conc:
self.max_concentration = def_max_conc
def get_max_concentration_level(self, e):
if e in self.max_conc_per_ent:
return self.max_conc_per_ent[e]
else:
return self.max_concentration
def is_valid_entity_with_concentration(self, e):
"""Sanity check for entities with concentration"""
if type(e) is tuple:
if len(e) == 2 and type(e[1]) is int:
return True
if type(e) is list:
if len(e) == 2 and type(e[1]) is int:
return True
print("FATAL. Invalid entity+concentration: {:s}".format(e))
exit(1)
return False
def get_state_ids(self, state):
"""Returns entities of the given state without levels"""
return [e for e, c in state]
def has_non_zero_concentration(self, elem):
if elem[1] < 1:
raise RuntimeError(
"Unexpected concentration level in state: " + str(elem))
def process_rip(self, R, I, P, ignore_empty_R=False):
"""Chcecks concentration levels and converts entities names into their ids"""
if R == [] and not ignore_empty_R:
raise RuntimeError("No reactants defined")
reactants = []
for r in R:
self.is_valid_entity_with_concentration(r)
self.has_non_zero_concentration(r)
entity, level = r
reactants.append((self.get_entity_id(entity), level))
if self.max_concentration < level:
self.max_concentration = level
inhibitors = []
for i in I:
self.is_valid_entity_with_concentration(i)
self.has_non_zero_concentration(i)
entity, level = i
inhibitors.append((self.get_entity_id(entity), level))
if self.max_concentration < level:
self.max_concentration = level
products = []
for p in P:
self.is_valid_entity_with_concentration(p)
self.has_non_zero_concentration(p)
entity, level = p
products.append((self.get_entity_id(entity), level))
return reactants, inhibitors, products
def add_reaction(self, R, I, P):
"""Adds a reaction"""
if P == []:
raise RuntimeError("No products defined")
reaction = self.process_rip(R, I, P)
self.reactions.append(reaction)
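# Hedged usage sketch (not from the original code): reactants, inhibitors and
# products are given as (entity_name, concentration_level) pairs; the entity
# names below are purely illustrative.
#
#     rsc = ReactionSystemWithConcentrations()
#     rsc.add_bg_set_entity(("a", 3))
#     rsc.add_bg_set_entity(("b", 3))
#     rsc.add_bg_set_entity(("c", 3))
#     rsc.add_reaction(R=[("a", 1)], I=[("b", 2)], P=[("c", 1)])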
def add_reaction_without_reactants(self, R, I, P):
"""Adds a reaction"""
if P == []:
raise RuntimeError("No products defined")
reaction = self.process_rip(R, I, P, ignore_empty_R=True)
self.reactions.append(reaction)
def add_reaction_inc(self, incr_entity, incrementer, R, I):
"""Adds a macro/meta reaction for increasing the value of incr_entity"""
reactants, inhibitors, products = self.process_rip(
R, I, [], ignore_empty_R=True)
incr_entity_id = self.get_entity_id(incr_entity)
self.meta_reactions.setdefault(incr_entity_id, [])
self.meta_reactions[incr_entity_id].append(
("inc", self.get_entity_id(incrementer), reactants, inhibitors))
def add_reaction_dec(self, decr_entity, decrementer, R, I):
"""Adds a macro/meta reaction for decreasing the value of incr_entity"""
reactants, inhibitors, products = self.process_rip(
R, I, [], ignore_empty_R=True)
decr_entity_id = self.get_entity_id(decr_entity)
self.meta_reactions.setdefault(decr_entity_id, [])
self.meta_reactions[decr_entity_id].append(
("dec", self.get_entity_id(decrementer), reactants, inhibitors))
def add_permanency(self, ent, I):
"""Sets entity to be permanent unless it is inhibited"""
ent_id = self.get_entity_id(ent)
if ent_id in self.permanent_entities:
raise RuntimeError(
"Permanency for {0} already defined.".format(ent))
inhibitors = self.process_rip([], I, [], ignore_empty_R=True)[1]
self.permanent_entities[ent_id] = inhibitors
def set_context_entities(self, entities):
raise NotImplementedError
def entities_names_set_to_str(self, entities):
s = ""
for entity in entities:
s += entity + ", "
s = s[:-2]
return s
def entities_ids_set_to_str(self, entities):
s = ""
for entity in entities:
s += self.get_entity_name(entity) + ", "
s = s[:-2]
return s
def state_to_str(self, state):
s = ""
for ent, level in state:
s += self.get_entity_name(ent) + "=" + str(level) + ", "
s = s[:-2]
return s
def show_background_set(self):
print(
C_MARK_INFO + " Background set: {" + self.entities_names_set_to_str(self.background_set) + "}")
def show_meta_reactions(self):
print(C_MARK_INFO + " Meta reactions:")
for param_ent, reactions in self.meta_reactions.items():
for r_type, command, reactants, inhibitors in reactions:
if r_type == "inc" or r_type == "dec":
print(" - [ Type=" + repr(r_type) + " Operand=( " + self.get_entity_name(param_ent) + " ) Command=( " + self.get_entity_name(
command) + " ) ] -- ( R={" + self.state_to_str(reactants) + "}, I={" + self.state_to_str(inhibitors) + "} )")
else:
raise RuntimeError(
"Unknown meta-reaction type: " + repr(r_type))
def show_max_concentrations(self):
print(
C_MARK_INFO +
" Maximal allowed concentration levels (for optimized translation to RS):")
for e, max_conc in self.max_conc_per_ent.items():
print(" - {0:^20} = {1:<6}".format(self.get_entity_name(e), max_conc))
def show_permanent_entities(self):
print(C_MARK_INFO + " Permanent entities:")
for e, inhibitors in self.permanent_entities.items():
print(" - {0:^20}{1:<6}".format(self.get_entity_name(e) + ": ",
"I={" + self.state_to_str(inhibitors) + "}"))
def show(self, soft=False):
self.show_background_set()
self.show_reactions(soft)
self.show_permanent_entities()
self.show_meta_reactions()
self.show_max_concentrations()
def get_reactions_by_product(self):
"""Sorts reactions by their products and returns a dictionary of products"""
if self.reactions_by_prod is not None:
return self.reactions_by_prod
producible_entities = set()
for reaction in self.reactions:
product_entities = [e for e, c in reaction[2]]
producible_entities = producible_entities.union(
set(product_entities))
reactions_by_prod = {}
for p_e in producible_entities:
reactions_by_prod[p_e] = []
rcts_for_p_e = reactions_by_prod[p_e]
for r in self.reactions:
product_entities = [e for e, c in r[2]]
if p_e in product_entities:
reactants = r[0]
inhibitors = r[1]
products = [(e, c) for e, c in r[2] if e == p_e]
prod_conc = products[0][1]
insert_place = None
# we need to order the reactions w.r.t. the concentration levels produced (increasing order)
for i in range(0, len(rcts_for_p_e)):
checked_conc = rcts_for_p_e[i][2][0][1]
if prod_conc <= checked_conc:
insert_place = i
break
if insert_place is None: # list is empty, or every existing entry produces a lower concentration than the one being added
# we append (to the end)
rcts_for_p_e.append((reactants, inhibitors, products))
else:
rcts_for_p_e.insert(
insert_place, (reactants, inhibitors, products))
# save in cache
self.reactions_by_prod = reactions_by_prod
return reactions_by_prod
def get_reaction_system(self):
rs = ReactionSystem()
for reactants, inhibitors, products in self.reactions:
new_reactants = []
new_inhibitors = []
new_products = []
for ent, conc in reactants:
n = self.get_entity_name(ent) + "#" + str(conc)
rs.ensure_bg_set_entity(n)
new_reactants.append(n)
for ent, conc in inhibitors:
n = self.get_entity_name(ent) + "#" + str(conc)
rs.ensure_bg_set_entity(n)
new_inhibitors.append(n)
for ent, conc in products:
for i in range(1, conc+1):
n = self.get_entity_name(ent) + "#" + str(i)
rs.ensure_bg_set_entity(n)
new_products.append(n)
rs.add_reaction(new_reactants, new_inhibitors, new_products)
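# Translate inc/dec meta-reactions into ordinary reactions: for every current
# concentration level i of the parameter entity and every level l of the
# command entity, produce all levels up to i+l (inc) or up to i-l (dec),
# capped by the maximal concentration.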
for param_ent, reactions in self.meta_reactions.items():
for r_type, command, reactants, inhibitors in reactions:
param_ent_name = self.get_entity_name(param_ent)
new_reactants = []
new_inhibitors = []
for ent, conc in reactants:
n = self.get_entity_name(ent) + "#" + str(conc)
rs.ensure_bg_set_entity(n)
new_reactants.append(n)
for ent, conc in inhibitors:
n = self.get_entity_name(ent) + "#" + str(conc)
rs.ensure_bg_set_entity(n)
new_inhibitors.append(n)
max_cmd_c = self.max_concentration
if command in self.max_conc_per_ent:
max_cmd_c = self.max_conc_per_ent[command]
else:
print(
"WARNING:\n\tThere is no maximal concentration level defined for "
+ self.get_entity_name(command))
print("\tThis is a very bad idea -- expect degraded performance\n")
for l in range(1, max_cmd_c+1):
cmd_ent = self.get_entity_name(command) + "#" + str(l)
rs.ensure_bg_set_entity(cmd_ent)
if r_type == "inc":
# pre_conc -- predecessor concentration
# succ_conc -- successor concentration concentration
for i in range(1, self.max_concentration):
pre_conc = param_ent_name + "#" + str(i)
rs.ensure_bg_set_entity(pre_conc)
new_products = []
succ_value = i+l
for j in range(1, succ_value+1):
if j > self.max_concentration:
break
new_p = param_ent_name + "#" + str(j)
rs.ensure_bg_set_entity(new_p)
new_products.append(new_p)
if new_products != []:
rs.add_reaction(
set(new_reactants + [pre_conc, cmd_ent]),
set(new_inhibitors),
set(new_products))
elif r_type == "dec":
for i in range(1, self.max_concentration+1):
pre_conc = param_ent_name + "#" + str(i)
rs.ensure_bg_set_entity(pre_conc)
new_products = []
succ_value = i-l
for j in range(1, succ_value+1):
if j > self.max_concentration:
break
new_p = param_ent_name + "#" + str(j)
rs.ensure_bg_set_entity(new_p)
new_products.append(new_p)
if new_products != []:
rs.add_reaction(
set(new_reactants + [pre_conc, cmd_ent]),
set(new_inhibitors),
set(new_products))
else:
raise RuntimeError(
"Unknown meta-reaction type: " + repr(r_type))
for ent, inhibitors in self.permanent_entities.items():
max_c = self.max_concentration
if ent in self.max_conc_per_ent:
max_c = self.max_conc_per_ent[ent]
else:
print(
"WARNING:\n\tThere is no maximal concentration level defined for "
+ self.get_entity_name(ent))
print("\tThis is a very bad idea -- expect degraded performance\n")
def e_value(i):
return self.get_entity_name(ent) + "#" + str(i)
for value in range(1, max_c+1):
new_reactants = []
new_inhibitors = []
new_products = []
new_reactants = [e_value(value)]
for e_inh, conc in inhibitors:
n = self.get_entity_name(e_inh) + "#" + str(conc)
rs.ensure_bg_set_entity(n)
new_inhibitors.append(n)
for i in range(1, value+1):
new_products.append(e_value(i))
rs.add_reaction(new_reactants, new_inhibitors, new_products)
return rs
class ReactionSystemWithAutomaton(object):
def __init__(self, reaction_system, context_automaton):
self.rs = reaction_system
self.ca = context_automaton
def show(self, soft=False):
self.rs.show(soft)
self.ca.show()
def is_with_concentrations(self):
if not isinstance(self.rs, ReactionSystemWithConcentrations):
return False
if not isinstance(self.ca, ContextAutomatonWithConcentrations):
return False
return True
def sanity_check(self):
pass
def get_ordinary_reaction_system_with_automaton(self):
if not self.is_with_concentrations():
raise RuntimeError("Not RS/CA with concentrations")
ors = self.rs.get_reaction_system()
oca = self.ca.get_automaton_with_flat_contexts(ors)
return ReactionSystemWithAutomaton(ors, oca)
# EOF
| 2.53125 | 3 |
src/models/cnn_train.py | zh272/AIGOGO | 0 | 11615 | <filename>src/models/cnn_train.py
import os
import time
import fire
import torch
import random
import numpy as np
import pandas as pd
import torch.nn.functional as F
## to detach from monitor
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from trainer import Trainer
from model import ConvNet1D
from helpers import get_dataset, test_epoch, ready, save_obj, load_obj
def get_submission(
X_train, X_valid, y_train, y_valid, X_test, model=ConvNet1D, max_epoch=200, base_lr=0.1,
momentum=0.9, weight_decay=0.0001, batch_size = 128, train_params={}, plot=True,
test_along=False, optimizer='sgd', hyper={}, save=False, load=False, mdl_name='cnn.pt'
):
train_set, valid_set, X_test_np, X_train_np, X_valid_np, _ = get_dataset(
X_train.values, y_train.values, X_test.values, X_valid.values, y_valid.values
)
PATH = './saved_model'
if not os.path.isdir(PATH): os.makedirs(PATH)
start_time = time.time()
end_time = start_time
if load:
trainer = Trainer(
torch.load(os.path.join(PATH, mdl_name)), train_set=train_set, loss_fn=F.l1_loss, hyper=hyper,
valid_set=valid_set, batch_size=batch_size, epochs=max_epoch, optimizer=optimizer
)
else:
trainer = Trainer(
model(**train_params), train_set=train_set, loss_fn=F.l1_loss, hyper=hyper,
valid_set=valid_set, batch_size=batch_size, epochs=max_epoch, optimizer=optimizer
)
valid_hist = []
for epochs in range(max_epoch):
trainer.train_epoch()
temp_lr = trainer.optimizer.param_groups[0]['lr']
if test_along:
temp_valid = trainer.loss_epoch()
valid_hist.append(temp_valid)
print('Epoch {:3}: Training MAE={:8.2f}, Valid MAE={:8.2f}, lr={}'.format(epochs, trainer.eval(), temp_valid, temp_lr))
else:
print('Epoch {:3}: Training MAE={:8.2f}, lr={}'.format(epochs, trainer.eval(), temp_lr))
end_time = time.time()
if plot:
t_step = np.arange(0, max_epoch, 1)
train_hist = trainer.evaluator.hist
fig_path = 'figures'
if not os.path.isdir(fig_path): os.makedirs(fig_path)
plt.figure()
plt.plot(t_step, train_hist, 'r', ls='-', label='training MAE')
if test_along:
plt.plot(t_step, valid_hist, 'b', ls='--', label='validation MAE')
plt.legend(loc='best')
plt.xlabel('steps')
plt.title('Training and Validation MAE')
plt.grid()
plt.savefig(os.path.join(fig_path, 'training_plot.png'))
plt.close()
if save:
torch.save(trainer.model, os.path.join(PATH, mdl_name))
train_loss = trainer.loss_epoch(load='train')
valid_loss = trainer.loss_epoch(load='valid')
state_dict = trainer.model.state_dict()
if torch.cuda.device_count() > 1:
input_weights = state_dict['module.regressor.fc0.weight'].cpu().numpy()
else:
input_weights = state_dict['regressor.fc0.weight'].cpu().numpy()
# assume std deviation of each feature is 1
avg_w = np.mean(np.abs(input_weights), axis=0)
feature_importances = avg_w
feature_names = X_train.columns.values
sorted_idx = np.argsort(feature_importances*-1) # descending order
summary = '====== ConvNet1D Training Summary ======\n'
summary += '>>> epochs={}, lr={}, momentum={}, weight_decay={}\n'.format(max_epoch,base_lr,momentum,weight_decay)
summary += '>>> schedule={}\n'.format(hyper['lr_schedule'])
summary += '>>> fc_neurons={}, optimizer="{}", batch_size={}\n'.format(train_params.get('num_fc_neuron'),optimizer,batch_size)
for idx in sorted_idx:
summary += '[{:<25s}] {:<10.4f}\n'.format(feature_names[idx], feature_importances[idx])
summary += '>>> training_time={:10.2f}min\n'.format((end_time-start_time)/60)
summary += '>>> Final MAE: {:10.4f}(Training), {:10.4f}(Validation)\n'.format(train_loss,valid_loss)
# Generate submission
test_output = trainer.predict(torch.FloatTensor(X_test_np)).cpu().data.numpy()
submission = pd.DataFrame(data=test_output,index=X_test.index, columns=['Next_Premium'])
train_output = trainer.predict(torch.FloatTensor(X_train_np)).cpu().data.numpy()
submission_train = pd.DataFrame(data=train_output,index=X_train.index, columns=['Next_Premium'])
valid_output = trainer.predict(torch.FloatTensor(X_valid_np)).cpu().data.numpy()
submission_valid = pd.DataFrame(data=valid_output,index=X_valid.index, columns=['Next_Premium'])
return {
'model': trainer, 'submission': submission,
'submission_train':submission_train, 'submission_valid':submission_valid,
'valid_loss':valid_loss, 'summary':summary
}
def read_interim_data(file_name, index_col='Policy_Number'):
'''
In: file_name
Out: interim_data
Description: read data from directory /data/interim
'''
# set the path of raw data
interim_data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir, 'data', 'interim'
)
file_path = os.path.join(interim_data_path, file_name)
interim_data = pd.read_csv(file_path, index_col=index_col)
return(interim_data)
def write_precessed_data(df, suffix=None):
'''
In:
DataFrame(df),
str(file_name),
Out:
None
Description:
Write sample data to directory /data/interim
'''
precessed_data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir, 'data', 'processed'
)
if suffix is None:
file_name = 'testing-set.csv'
else:
file_name = 'testing-set_{}.csv'.format(suffix)
write_sample_path = os.path.join(precessed_data_path, file_name)
df.to_csv(write_sample_path)
# empirical scale: weight_decay=0.0001
def demo(
epochs=100, base_lr=0.0001, momentum=0.9, weight_decay=0,
batch_size=128, optimizer='sgd', dropout=False, seed=None,
get_train=False, get_test=False, save=False, load=False
):
if seed is not None:
rand_reset(seed)
# X_train = read_interim_data('X_train_prefs.csv')
# y_train = read_interim_data('y_train_prefs.csv')
# X_valid = read_interim_data('X_valid_prefs.csv')
# y_valid = read_interim_data('y_valid_prefs.csv')
# X_test = read_interim_data('X_test_prefs.csv')
X_train = read_interim_data('X_train_new.csv')
y_train = read_interim_data('y_train_new.csv')
X_valid = read_interim_data('X_valid_new.csv')
y_valid = read_interim_data('y_valid_new.csv')
X_test = read_interim_data('X_test_new.csv')
feature_list = [feature for feature in X_train.columns.values if 'cat_' not in feature]
num_features = len(feature_list)
print('Number of features: {}'.format(num_features))
# Filter features
X_train = X_train[feature_list]
X_valid = X_valid[feature_list]
X_test = X_test[feature_list]
### Fill Missing Values
X_train = X_train.apply(lambda x:x.fillna(-1))
X_valid = X_valid.apply(lambda x:x.fillna(-1))
X_test = X_test.apply(lambda x:x.fillna(-1))
# begin training
# n_input = X_train.shape[1]
train_params = {
'num_cv_filter': [1,40,80],
'num_fc_neuron': [20,5,1],
'dropout': dropout
}
optim_hyper = {
'lr':base_lr,
'momentum':momentum,
'weight_decay':weight_decay,
'lr_schedule':{
epochs//4:base_lr,
epochs//2:base_lr/5,
epochs//4*3:base_lr/50,
epochs: base_lr/200
}
}
model_output = get_submission(
X_train, X_valid, y_train, y_valid, X_test,
model=ConvNet1D, max_epoch=epochs, base_lr=base_lr,
momentum=momentum, weight_decay=weight_decay,
batch_size = batch_size, train_params=train_params,
test_along=True, optimizer=optimizer, hyper=optim_hyper,
save=save, load=load
)
summary = model_output['summary']
summary += '>>> random seed: {}\n'.format(seed)
print(summary)
# generate submission
if get_test:
write_precessed_data(model_output['submission'], suffix='mlptest{}'.format(int(model_output['valid_loss'])))
with open('summary_mlp{}.txt'.format(int(model_output['valid_loss'])), 'w') as f:
f.write(summary)
if get_train:
write_precessed_data(model_output['submission_train'], suffix='mlptrain')
write_precessed_data(model_output['submission_valid'], suffix='mlpvalid')
def rand_reset(seed):
random.seed(seed)
torch.manual_seed(random.randint(0,1000))
torch.cuda.manual_seed_all(random.randint(0,1000))
np.random.seed(random.randint(0,1000))
if __name__ == '__main__':
# Example usage: "python nn_train.py --epochs 100"
fire.Fire(demo) | 2.296875 | 2 |
tests/fetchers/test_hvdcLineCktOwnersFetcher.py | rohit98077/mis_outages_ingest | 0 | 11616 | <filename>tests/fetchers/test_hvdcLineCktOwnersFetcher.py
import unittest
from src.fetchers.hvdcLineCktOwnersFetcher import getOwnersForHvdcLineCktIds
import datetime as dt
from src.appConfig import getConfig
class TestHvdcLineCktOwnersFetcher(unittest.TestCase):
appConfig: dict = {}
def setUp(self):
self.appConfig = getConfig()
def test_run(self) -> None:
"""tests the function that fetches the owners of
HvdcLineCkts from reporting software
"""
elemIds = [12, 13]
ownersDict = getOwnersForHvdcLineCktIds(
self.appConfig['reportsConStr'], elemIds)
expectedDict = {
12: "POWERGRID-WR1 (PGCIL)", 13: "POWERGRID-SR,POWERGRID-WR1 (PGCIL)"}
self.assertTrue(ownersDict == expectedDict)
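# Hedged note (not part of the original test): assuming a configured reports
# connection string in the app config, this case could be run on its own with e.g.
#     python -m unittest tests.fetchers.test_hvdcLineCktOwnersFetcher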
| 2.859375 | 3 |
3rdParty/boost/1.71.0/libs/python/test/iterator.py | rajeev02101987/arangodb | 12,278 | 11617 | # Copyright <NAME> 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from __future__ import print_function
'''
>>> from iterator_ext import *
>>> from input_iterator import *
>>> x = list_int()
>>> x.push_back(1)
>>> x.back()
1
>>> x.push_back(3)
>>> x.push_back(5)
>>> for y in x:
... print(y)
1
3
5
>>> z = range(x)
>>> for y in z:
... print(y)
1
3
5
Range2 wraps a transform_iterator which doubles the elements it
traverses. This proves we can wrap input iterators
>>> z2 = range2(x)
>>> for y in z2:
... print(y)
2
6
10
>>> l2 = two_lists()
>>> for y in l2.primes:
... print(y)
2
3
5
7
11
13
>>> for y in l2.evens:
... print(y)
2
4
6
8
10
12
>>> ll = list_list()
>>> ll.push_back(x)
>>> x.push_back(7)
>>> ll.push_back(x)
>>> for a in ll: #doctest: +NORMALIZE_WHITESPACE
... for b in a:
... print(b, end='')
... print('')
...
1 3 5
1 3 5 7
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print("running...")
import sys
status = run()[0]
if (status == 0): print("Done.")
sys.exit(status)
| 3.109375 | 3 |
py/locationdb/geonames.py | acorg/locationdb | 0 | 11618 | # -*- Python -*-
# license
# license.
# ======================================================================
"""Looks name up in the [geonames database](http://www.geonames.org/).
[GeoNames Search Webservice API](http://www.geonames.org/export/geonames-search.html)
"""
import sys, os, urllib.request, json, time
from pathlib import Path
import logging; module_logger = logging.getLogger(__name__)
from .utilities import is_chinese
# ======================================================================
def geonames(name):
if not name:
return name
if is_chinese(name):
r = _lookup_chinese(name=name)
else:
r = _lookup("search", isNameRequired="true", name=name)
return r
# ----------------------------------------------------------------------
def _lookup(feature, **args):
def make(entry):
if entry.get("fcl") in ["A", "P"]:
return {
# "local_name": entry[],
"name": entry["toponymName"],
"province": entry["adminName1"],
"country": entry["countryName"],
"latitude": entry["lat"],
"longitude": entry["lng"],
}
else:
return None
return _get(feature, make, args)
# ----------------------------------------------------------------------
def _get(feature, result_maker, args):
args.update({"username": "acorg", "type": "json"})
url = "http://api.geonames.org/{}?{}".format(feature, urllib.parse.urlencode(args))
# module_logger.debug('_lookup {!r}'.format(url))
while True:
rj = json.loads(urllib.request.urlopen(url=url).read().decode("utf-8"))
try:
return [e2 for e2 in (result_maker(e1) for e1 in rj["geonames"]) if e2]
except KeyError:
if "the hourly limit of" in rj.get("status", {}).get("message"):
print(f"WARNING: {rj['status']['message']}", file=sys.stderr)
seconds_to_wait = 120
print(f"WARNING: about to wait {seconds_to_wait} seconds", file=sys.stderr)
time.sleep(seconds_to_wait)
else:
print(f"ERROR: {rj}", file=sys.stderr)
raise RuntimeError(str(rj))
except Exception as err:
print(f"ERROR: {rj}: {err}", file=sys.stderr)
raise RuntimeError(f"{rj}: {err}")
# ----------------------------------------------------------------------
def _lookup_chinese(name):
if len(name) > 3:
r = []
if provinces := _find_chinese_province(name):
province = provinces[0]
            county = _find_chinese_county(name, province)
if county:
r = [{
"local_name": name,
"name": _make_chinese_name(province, county),
"province": _make_province_name(province),
"country": province["countryName"],
"latitude": county["lat"],
"longitude": county["lng"],
}]
else:
def make(entry):
province_name = _make_province_name(entry)
return {
"local_name": name,
"name": province_name,
"province": province_name,
"country": entry["countryName"],
"latitude": entry["lat"],
"longitude": entry["lng"],
}
r = [make(e) for e in _find_chinese_province(name)]
return r
# ----------------------------------------------------------------------
def _find_chinese_province(name):
r = _get("search", lambda e: e if e["name"] == name[:2] else None, {"isNameRequired": "true", "name_startsWith": name[:2], "fclass": "A", "fcode": "ADM1", "lang": "cn"})
# module_logger.debug('name: {!r} : {!r}'.format(name[:2], r))
if not r: # Inner Mongolia is written using 3 Hanzi
r = _get("search", lambda e: e if e["name"] == name[:3] else None, {"isNameRequired": "true", "name_startsWith": name[:3], "fclass": "A", "fcode": "ADM1", "lang": "cn"})
return r
# ----------------------------------------------------------------------
def _make_province_name(entry):
r = entry["toponymName"].upper()
space_pos = r.find(' ', 6 if r[:6] == "INNER " else 0)
if space_pos >= 0:
r = r[:space_pos]
    return r
# ----------------------------------------------------------------------
def _find_chinese_county(full_name, province):
name = full_name[len(province["name"]):]
r = _get("search", lambda e: e, {"isNameRequired": "true", "name_startsWith": name, "fclass": "A", "fcode": "ADM3", "adminCode1": province["adminCode1"], "lang": "cn"})
if not r:
r = _get("search", lambda e: e, {"isNameRequired": "true", "name_startsWith": name, "adminCode1": province["adminCode1"], "lang": "cn"})
# module_logger.debug('_find_chinese_county {}'.format(r))
return r[0] if r else None
# ----------------------------------------------------------------------
def _make_chinese_name(province, county):
return _make_province_name(province) + " " + _make_county_name(county)
# ----------------------------------------------------------------------
def _make_county_name(county):
def remove_suffix(source, suffix):
if source[-len(suffix):] == suffix:
source = source[:-len(suffix)]
return source
def remove_apostrophe(source):
return source.replace("’", "")
r = county["toponymName"].upper()
r1 = remove_suffix(r, " ZIZHIXIAN")
if r1 != r:
r = remove_suffix(r1, "ZU")
else:
for s in [" QU", " XIAN", " SHI"]:
r2 = remove_suffix(r, s)
if r2 != r:
r = r2
break
r = remove_apostrophe(r)
return r
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
| 2.515625 | 3 |
python/testData/completion/notImportedQualifiedName/UseImportPriorityWhenAddingImport/main.py | 06needhamt/intellij-community | 0 | 11619 |
import subprocess
import sys
import django.conf
import django.utils.encoding
subprocess.Popen
sys.argv
plt.<caret> | 1.296875 | 1 |
Array/271EncodeDecodeStrings.py | john-the-dev/leetcode | 0 | 11620 |
# 271. Encode and Decode Strings
'''
Design an algorithm to encode a list of strings to a string. The encoded string is then sent over the network and is decoded back to the original list of strings.
Machine 1 (sender) has the function:
string encode(vector<string> strs) {
// ... your code
return encoded_string;
}
Machine 2 (receiver) has the function:
vector<string> decode(string s) {
//... your code
return strs;
}
So Machine 1 does:
string encoded_string = encode(strs);
and Machine 2 does:
vector<string> strs2 = decode(encoded_string);
strs2 in Machine 2 should be the same as strs in Machine 1.
Implement the encode and decode methods.
Note:
The string may contain any possible characters out of 256 valid ascii characters. Your algorithm should be generalized enough to work on any possible characters.
Do not use class member/global/static variables to store states. Your encode and decode algorithms should be stateless.
Do not rely on any library method such as eval or serialize methods. You should implement your own encode/decode algorithm.
'''
from common import *
'''
Encode to numbers and decode from numbers.
O(N) runtime for both encode and decode, in which N is total # of characters in strs. O(N) storage.
Beat 5% runtime, 29% storage of all Leetcode submissions.
'''
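# Illustrative walk-through of the scheme implemented below (values derived from
# toNum/toStr): "Nice" has no leading chr(0) characters, so zero = 0 and
#   num = ((78 * 256 + 105) * 256 + 99) * 256 + 101 = 1315529573
# (78, 105, 99, 101 are the byte values of 'N', 'i', 'c', 'e').
# The encoded element is "0:1315529573"; elements are joined with ',' which is
# safe because each element contains only digits and ':'.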
class Codec:
def toNum(self, s):
num,zero,i,n = 0,0,0,len(s)
while i < n and ord(s[i]) == 0:
zero += 1
i += 1
while i < n:
num = num << 8
num += ord(s[i])
i += 1
return [zero,num]
def toStr(self, zero, num):
s = []
while num > 0:
s.append(chr(num % 256))
num = num >> 8
s.extend([chr(0)]*zero)
return ''.join(s[::-1])
def encode(self, strs: [str]) -> str:
"""Encodes a list of strings to a single string.
"""
out = []
for s in strs:
zero,num = self.toNum(s)
out.append('{}:{}'.format(zero,num))
return ','.join(out)
def decode(self, s: str) -> [str]:
"""Decodes a single string to a list of strings.
"""
out = []
strs = s.split(',') if len(s) > 0 else []
for s in strs:
zero,num = s.split(':')
out.append(self.toStr(int(zero),int(num)))
return out
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
# Tests.
codec = Codec()
strs = ['Great','Nice']
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = ['{}leading'.format(chr(0)),'Nice']
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = ['{}l:eadi.ng'.format(chr(0)),'{}leading,{}'.format(chr(0),chr(1))]
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = []
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
| 3.625 | 4 |
ois_api_client/v2_0/deserialization/deserialize_invoice_number_query.py | peterkulik/ois_api_client | 7 | 11621 |
from typing import Optional
import xml.etree.ElementTree as ET
from ...xml.XmlReader import XmlReader as XR
from ..namespaces import API
from ..namespaces import DATA
from ...deserialization.create_enum import create_enum
from ..dto.InvoiceNumberQuery import InvoiceNumberQuery
from ..dto.InvoiceDirection import InvoiceDirection
def deserialize_invoice_number_query(element: ET.Element) -> Optional[InvoiceNumberQuery]:
if element is None:
return None
result = InvoiceNumberQuery(
invoice_number=XR.get_child_text(element, 'invoiceNumber', API),
invoice_direction=create_enum(InvoiceDirection, XR.get_child_text(element, 'invoiceDirection', API)),
batch_index=XR.get_child_int(element, 'batchIndex', API),
supplier_tax_number=XR.get_child_text(element, 'supplierTaxNumber', API),
)
return result
| 2.3125 | 2 |
scripts/markov_rulesets.py | takuyakanbr/covfefe | 0 | 11622 | # Script to generate the necessary grammar rules for the
# markov generator output type
# Dataset:
# http://www.drmaciver.com/2009/12/i-want-one-meelyun-sentences/
import re
ALPHA = ' abcdefghijklmnopqrstuvwxyz'
# read data from file
with open('sentences', 'r', encoding="utf8") as f:
content = f.read().splitlines()
n = len(content)
freq = {}
# process sentences
for i in range(n):
content[i] = re.sub('[^a-z]+', ' ', content[i].lower())
for word in content[i].split(' '):
if len(word) < 1: continue
word = ' ' + word + ' '
# sum up next-letter frequencies
pc = ''
for j in range(len(word) - 1):
c = word[j]
if pc != ' ': c = pc + c
nc = word[j+1]
if c not in freq:
freq[c] = {}
for a in ALPHA:
freq[c][a] = 0
freq[c][nc] += 1
pc = word[j]
# normalize frequencies
for c, d in freq.items():
sum_ = sum(d.values())
for nc in d:
d[nc] /= sum_
# helper functions for printing rulesets
def make_name(c):
if c == ' ': return '@mstart'
return '@m' + c
def make_option(pc, c, nc):
if nc == ' ': return pc + c + '|'
if c == ' ': return '@m' + nc + '|'
if len(pc) == 0: return '@m' + c + nc + '|'
return pc + ',@m' + c + nc + '|'
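# Illustrative output (derived from make_name/make_option above): for the
# single-letter state 'q' whose dominant successor is 'u', the printed ruleset
# line would look like
#   @mq=@mqu|@mqu|...
# with each option repeated roughly once per 0.01 of probability (at least once),
# while a successor of ' ' (end of word) contributes the literal option 'q|'.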
# print rulesets
for c, d in freq.items():
rule = make_name(c) + '='
pc = c[:-1]
c = c[-1]
for nc in d:
if d[nc] <= 0.0055: continue
mult = max(1, int(d[nc] / 0.01))
rule += make_option(pc, c, nc) * mult
print(rule[:-1])
| 3.09375 | 3 |
tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py | ryanloney/openvino-1 | 1,127 | 11623 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.extractor import FrontExtractorOp
class ProposalFrontExtractor(FrontExtractorOp):
op = 'Proposal'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.proposal_param
update_attrs = {
'feat_stride': param.feat_stride,
'base_size': param.base_size,
'min_size': param.min_size,
'ratio': mo_array(param.ratio),
'scale': mo_array(param.scale),
'pre_nms_topn': param.pre_nms_topn,
'post_nms_topn': param.post_nms_topn,
'nms_thresh': param.nms_thresh
}
mapping_rule = merge_attrs(param, update_attrs)
# update the attributes of the node
ProposalOp.update_node_stat(node, mapping_rule)
return cls.enabled
| 1.804688 | 2 |
mk42/apps/users/migrations/0003_auto_20170614_0038.py | vint21h/mk42 | 5 | 11624 | # -*- coding: utf-8 -*-
# mk42
# mk42/apps/users/migrations/0003_auto_20170614_0038.py
# Generated by Django 1.11.2 on 2017-06-14 00:38
from __future__ import unicode_literals
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
("users", "0002_auto_20170613_2124"),
]
operations = [
migrations.AlterField(
model_name="user",
name="language",
field=models.CharField(choices=[("en", "English"), ("uk", "\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")], default="en", max_length=5, verbose_name="language"),
),
]
| 1.65625 | 2 |
utils/datasets.py | LukasStruppek/Plug-and-Play-Attacks | 0 | 11625 | import pickle
import pandas as pd
import torch
import torch.nn as nn
import torchvision.transforms as T
from torch.utils import data
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from datasets.celeba import CelebA1000
from datasets.facescrub import FaceScrub
from datasets.stanford_dogs import StanfordDogs
def get_normalization():
normalization = T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
return normalization
def get_train_val_split(data, split_ratio, seed=0):
validation_set_length = int(split_ratio * len(data))
training_set_length = len(data) - validation_set_length
torch.manual_seed(seed)
training_set, validation_set = random_split(
data, [training_set_length, validation_set_length])
return training_set, validation_set
def get_subsampled_dataset(dataset,
dataset_size=None,
proportion=None,
seed=0):
    if dataset_size is None:
        if proportion is None:
            raise ValueError('Neither dataset_size nor proportion specified')
        dataset_size = int(proportion * len(dataset))
    if dataset_size > len(dataset):
        raise ValueError(
            'Dataset size is smaller than specified subsample size')
torch.manual_seed(seed)
subsample, _ = random_split(
dataset, [dataset_size, len(dataset) - dataset_size])
return subsample
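# Illustrative usage of the split helpers above (``full_set`` is hypothetical and
# stands for any torch.utils.data.Dataset):
#   train_set, val_set = get_train_val_split(full_set, split_ratio=0.1, seed=0)
#   small_set = get_subsampled_dataset(full_set, dataset_size=1000, seed=0)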
def get_facescrub_idx_to_class():
with open('utils/files/facescrub_idx_to_class.pkl', 'rb') as f:
idx_to_class = pickle.load(f)
return idx_to_class
def get_facescrub_class_to_idx():
with open('utils/files/facescrub_class_to_idx.pkl', 'rb') as f:
class_to_idx = pickle.load(f)
return class_to_idx
def get_celeba_idx_to_attr(list_attr_file='data/celeba/list_attr_celeba.txt'):
file = pd.read_csv(list_attr_file)
attributes = file.iloc[0].tolist()[0].split(' ')[:-1]
attr_dict = {idx: attributes[idx] for idx in range(len(attributes))}
return attr_dict
def get_celeba_attr_to_idx(list_attr_file='data/celeba/list_attr_celeba.txt'):
file = pd.read_csv(list_attr_file)
attributes = file.iloc[0].tolist()[0].split(' ')[:-1]
attr_dict = {attributes[idx]: idx for idx in range(len(attributes))}
return attr_dict
def get_stanford_dogs_idx_to_class():
with open('utils/files/stanford_dogs_idx_to_class.pkl', 'rb') as f:
idx_to_class = pickle.load(f)
return idx_to_class
def get_stanford_dogs_class_to_idx():
with open('utils/files/stanford_dogs_class_to_idx.pkl', 'rb') as f:
class_to_idx = pickle.load(f)
return class_to_idx
def create_target_dataset(dataset_name, transform):
if dataset_name.lower() == 'facescrub':
return FaceScrub(group='all',
train=True,
transform=transform)
elif dataset_name.lower() == 'celeba_identities':
return CelebA1000(train=True, transform=transform)
elif 'stanford_dogs' in dataset_name.lower():
return StanfordDogs(train=True, cropped=True, transform=transform)
else:
print(f'{dataset_name} is no valid dataset.')
| 2.34375 | 2 |
applications/MappingApplication/test_examples/Fluid_SubModelling/MainKratos.py | AndreaVoltan/MyKratos7.0 | 2 | 11626 | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.FluidDynamicsApplication import *
from KratosMultiphysics.ExternalSolversApplication import *
from KratosMultiphysics.MeshingApplication import *
import KratosMultiphysics.MappingApplication as KratosMapping
# In this example two domains are solved, a coarse background mesh and a fine mesh around
# an obstacle. The fine domain receives the values from the coarse domain as input on its boundary
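# Concretely, each time step below solves the coarse background model first, then
# transfers VELOCITY onto the body-fitted model's inlet/side/outlet boundaries via
# the three MappingApplication mappers, and finally solves the fine body-fitted model.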
######################################################################################
######################################################################################
######################################################################################
##PARSING THE PARAMETERS
#import define_output
parameter_file_background = open("ProjectParameters_Background.json",'r')
Projectparameters_BG = Parameters( parameter_file_background.read())
parameter_file_bodyfitted = open("ProjectParameters_BodyFitted.json",'r')
Projectparameters_BF = Parameters( parameter_file_bodyfitted.read())
## Fluid model part definition
main_model_part_bg = ModelPart(Projectparameters_BG["problem_data"]["model_part_name"].GetString())
main_model_part_bg.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BG["problem_data"]["domain_size"].GetInt())
main_model_part_bf = ModelPart(Projectparameters_BF["problem_data"]["model_part_name"].GetString())
main_model_part_bf.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BF["problem_data"]["domain_size"].GetInt())
###TODO replace this "model" for real one once available
Model_BG = {Projectparameters_BG["problem_data"]["model_part_name"].GetString() : main_model_part_bg}
Model_BF = {Projectparameters_BF["problem_data"]["model_part_name"].GetString() : main_model_part_bf}
## Solver construction
solver_module = __import__(Projectparameters_BG["solver_settings"]["solver_type"].GetString())
solver_bg = solver_module.CreateSolver(main_model_part_bg, Projectparameters_BG["solver_settings"])
solver_bg.AddVariables()
solver_module = __import__(Projectparameters_BF["solver_settings"]["solver_type"].GetString())
solver_bf = solver_module.CreateSolver(main_model_part_bf, Projectparameters_BF["solver_settings"])
solver_bf.AddVariables()
## Read the model - note that SetBufferSize is done here
solver_bg.ImportModelPart()
solver_bf.ImportModelPart()
## Add AddDofs
solver_bg.AddDofs()
solver_bf.AddDofs()
## Initialize GiD I/O
from gid_output_process import GiDOutputProcess
gid_output_bg = GiDOutputProcess(solver_bg.GetComputingModelPart(),
Projectparameters_BG["problem_data"]["problem_name"].GetString() ,
Projectparameters_BG["output_configuration"])
gid_output_bg.ExecuteInitialize()
gid_output_bf = GiDOutputProcess(solver_bf.GetComputingModelPart(),
Projectparameters_BF["problem_data"]["problem_name"].GetString() ,
Projectparameters_BF["output_configuration"])
gid_output_bf.ExecuteInitialize()
##here all of the allocation of the strategies etc is done
solver_bg.Initialize()
solver_bf.Initialize()
##TODO: replace MODEL for the Kratos one ASAP
## Get the list of the skin submodel parts in the object Model
for i in range(Projectparameters_BG["solver_settings"]["skin_parts"].size()):
skin_part_name = Projectparameters_BG["solver_settings"]["skin_parts"][i].GetString()
Model_BG.update({skin_part_name: main_model_part_bg.GetSubModelPart(skin_part_name)})
for i in range(Projectparameters_BF["solver_settings"]["skin_parts"].size()):
skin_part_name = Projectparameters_BF["solver_settings"]["skin_parts"][i].GetString()
Model_BF.update({skin_part_name: main_model_part_bf.GetSubModelPart(skin_part_name)})
## Get the list of the initial conditions submodel parts in the object Model
for i in range(Projectparameters_BF["initial_conditions_process_list"].size()):
initial_cond_part_name = Projectparameters_BF["initial_conditions_process_list"][i]["Parameters"]["model_part_name"].GetString()
Model_BF.update({initial_cond_part_name: main_model_part_bf.GetSubModelPart(initial_cond_part_name)})
## Processes construction
import process_factory
# "list_of_processes_bg" contains all the processes already constructed (boundary conditions, initial conditions and gravity)
# Note that the conditions are firstly constructed. Otherwise, they may overwrite the BCs information.
list_of_processes_bg = process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["initial_conditions_process_list"] )
list_of_processes_bg += process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["boundary_conditions_process_list"] )
list_of_processes_bf = process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["initial_conditions_process_list"] )
list_of_processes_bf += process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["boundary_conditions_process_list"] )
## Processes initialization
for process in list_of_processes_bg:
process.ExecuteInitialize()
for process in list_of_processes_bf:
process.ExecuteInitialize()
# Mapper initialization
mapper_settings_file = open("MapperSettings.json",'r')
Projectparameters_Mapper = Parameters( mapper_settings_file.read())["mapper_settings"]
inlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
main_model_part_bf,
Projectparameters_Mapper[0])
sides_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
main_model_part_bf,
Projectparameters_Mapper[1])
outlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
main_model_part_bf,
Projectparameters_Mapper[2])
## Stepping and time settings
Dt = Projectparameters_BG["problem_data"]["time_step"].GetDouble()
end_time = Projectparameters_BG["problem_data"]["end_time"].GetDouble()
time = 0.0
step = 0
out = 0.0
gid_output_bg.ExecuteBeforeSolutionLoop()
gid_output_bf.ExecuteBeforeSolutionLoop()
for process in list_of_processes_bg:
process.ExecuteBeforeSolutionLoop()
for process in list_of_processes_bf:
process.ExecuteBeforeSolutionLoop()
while(time <= end_time):
time = time + Dt
step = step + 1
main_model_part_bg.CloneTimeStep(time)
main_model_part_bf.CloneTimeStep(time)
print("STEP = ", step)
print("TIME = ", time)
if(step >= 3):
for process in list_of_processes_bg:
process.ExecuteInitializeSolutionStep()
for process in list_of_processes_bf:
process.ExecuteInitializeSolutionStep()
gid_output_bg.ExecuteInitializeSolutionStep()
gid_output_bf.ExecuteInitializeSolutionStep()
solver_bg.Solve()
inlet_mapper.Map(VELOCITY, VELOCITY)
sides_mapper.Map(VELOCITY, VELOCITY)
outlet_mapper.Map(VELOCITY, VELOCITY)
solver_bf.Solve()
for process in list_of_processes_bg:
process.ExecuteFinalizeSolutionStep()
for process in list_of_processes_bf:
process.ExecuteFinalizeSolutionStep()
gid_output_bg.ExecuteFinalizeSolutionStep()
gid_output_bf.ExecuteFinalizeSolutionStep()
#TODO: decide if it shall be done only when output is processed or not
for process in list_of_processes_bg:
process.ExecuteBeforeOutputStep()
for process in list_of_processes_bf:
process.ExecuteBeforeOutputStep()
if gid_output_bg.IsOutputStep():
gid_output_bg.PrintOutput()
gid_output_bf.PrintOutput()
for process in list_of_processes_bg:
process.ExecuteAfterOutputStep()
for process in list_of_processes_bf:
process.ExecuteAfterOutputStep()
out = out + Dt
for process in list_of_processes_bg:
process.ExecuteFinalize()
for process in list_of_processes_bf:
process.ExecuteFinalize()
gid_output_bg.ExecuteFinalize()
gid_output_bf.ExecuteFinalize()
| 1.976563 | 2 |
src/ga4gh/vrs/extras/vcf_annotation.py | reece/vmc-python | 1 | 11627 |
"""
Annotate VCF files with VRS
Input Format: VCF
Output Format: VCF
The user should pass arguments for the VCF input, VCF output, &
the vrs object file name.
ex. python3 src/ga4gh/vrs/extras/vcf_annotation.py input.vcf.gz --out
./output.vcf.gz --vrs-file ./vrs_objects.pkl
"""
import argparse
import sys
import pickle
import time
from biocommons.seqrepo import SeqRepo
import pysam
from ga4gh.vrs.dataproxy import SeqRepoDataProxy
from ga4gh.vrs.extras.translator import Translator
class VCFAnnotator:
"""
    This class provides utility for annotating VCFs with VRS allele IDs.
    VCFs are read using pysam and stored as pysam objects.
    Alleles are translated into VRS allele IDs using the VRS-Python Translator.
"""
def __init__(self, tlr) -> None:
"""
param: Translator tlr Valid translator object with a specified data proxy
"""
self.tlr = tlr
def annotate(self, inputfile, outputfile, vrsfile):
"""
Annotates an input VCF file with VRS allele ids & creates a
pickle file containing the vrs object information.
param: str inputfile The path and filename for the input VCF file
param: str outputfile The path and filename for the output VCF file
param: str vrsfile The path and filename for the output VRS object file
"""
INFO_FIELD_ID = "VRS_Allele"
vrs_data = {}
vcf_in = pysam.VariantFile(filename=inputfile)
vcf_in.header.info.add(INFO_FIELD_ID, "1", "String", "vrs")
vcf_out = pysam.VariantFile(outputfile, "w", header=vcf_in.header)
vrs_out = open(vrsfile, "wb") # For sending VRS data to the pickle file
for record in vcf_in:
ld = self._record_digests(record, vrs_data)
record.info[INFO_FIELD_ID] = ",".join(ld)
vcf_out.write(record)
pickle.dump(vrs_data, vrs_out)
vrs_out.close()
vcf_in.close()
vcf_out.close()
def _record_digests(self, record, vrs_data):
"""
        Mutate vrs_data with the VRS object information and return a list of VRS allele ids
param: pysam.VariantRecord record A row in the vcf file
param: dict vrs_data Dictionary containing the VRS object information for the VCF
return: list vrs_allele_ids List containing the vrs allele id information
"""
gnomad_loc = f"{record.chrom}-{record.pos}"
alts = record.alts if record.alts else []
data = f"{record.chrom}\t{record.pos}\t{record.id}\t{record.ref}\t{record.alts}"
        # gnomad-style payloads look like '20-14369-G-G' (reference) or '20-14369-G-A' (alternate)
reference_allele = f"{gnomad_loc}-{record.ref}-{record.ref}"
vrs_ref_object = self.tlr.translate_from(reference_allele, "gnomad")
vrs_data[reference_allele] = str(vrs_ref_object.as_dict())
alleles = [f"{gnomad_loc}-{record.ref}-{a}" for a in [*alts]] # using gnomad format
vrs_allele_ids = [vrs_ref_object._id._value]
for allele in alleles:
if "*" in allele:
vrs_allele_ids.append("")
else:
vrs_object = self.tlr.translate_from(allele, "gnomad")
vrs_allele_ids.append(vrs_object._id._value)
vrs_data[data] = str(vrs_object.as_dict())
return vrs_allele_ids
def parse_args(argv):
"""
Parses arguments passed in by the user
param: list[str] argv Arguments passed by the user to specify file locations and names
return: argparse.Namespace Returns the options passed by the user to be assigned to proper variables
"""
ap = argparse.ArgumentParser()
ap.add_argument("VCF_IN")
ap.add_argument("--out", "-o", default="-")
ap.add_argument("--vrs-file", default="-")
opts = ap.parse_args(argv)
return opts
if __name__ == "__main__":
start_time = time.time()
options = parse_args(sys.argv[1:])
print(f"These are the options that you have selected: {options}\n")
data_proxy = SeqRepoDataProxy(SeqRepo("/usr/local/share/seqrepo/latest"))
tlr = Translator(data_proxy)
vcf_annotator = VCFAnnotator(tlr)
vcf_annotator.annotate(options.VCF_IN, options.out, options.vrs_file)
end_time = time.time()
total_time = (float(end_time) - float(start_time))
total_time_minutes = (total_time / 60)
print(f"This program took {total_time} seconds to run.")
print(f"This program took {total_time_minutes} minutes to run.")
| 2.796875 | 3 |
ci/infra/testrunner/utils/utils.py | butsoleg/skuba | 0 | 11628 |
import glob
import hashlib
import logging
import os
import shutil
import subprocess
from functools import wraps
from tempfile import gettempdir
from threading import Thread
import requests
from timeout_decorator import timeout
from utils.constants import Constant
from utils.format import Format
logger = logging.getLogger('testrunner')
_stepdepth = 0
def step(f):
@wraps(f)
def wrapped(*args, **kwargs):
global _stepdepth
_stepdepth += 1
logger.debug("{} entering {} {}".format(Format.DOT * _stepdepth, f.__name__,
f.__doc__ or ""))
r = f(*args, **kwargs)
logger.debug("{} exiting {}".format(
Format.DOT_EXIT * _stepdepth, f.__name__))
_stepdepth -= 1
return r
return wrapped
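# Illustrative usage (hypothetical method): decorating a step with @step logs an
# indented "entering"/"exiting" pair around each call:
#   @step
#   def provision_nodes(self):
#       """provision the test nodes"""
#       ...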
class Utils:
def __init__(self, conf):
self.conf = conf
@staticmethod
def chmod_recursive(directory, permissions):
os.chmod(directory, permissions)
for file in glob.glob(os.path.join(directory, "**/*"), recursive=True):
try:
os.chmod(file, permissions)
except Exception as ex:
logger.debug(ex)
@staticmethod
def cleanup_file(file):
if os.path.exists(file):
logger.debug(f"Cleaning up {file}")
try:
try:
# Attempt to remove the file first, because a socket (e.g.
# ssh-agent) is not a file but has to be removed like one.
os.remove(file)
except IsADirectoryError:
shutil.rmtree(file)
except Exception as ex:
logger.debug(ex)
else:
logger.debug(f"Nothing to clean up for {file}")
@staticmethod
def cleanup_files(files):
"""Remove any files or dirs in a list if they exist"""
for file in files:
Utils.cleanup_file(file)
def ssh_cleanup(self):
"""Remove ssh sock files"""
# TODO: also kill ssh agent here? maybe move pkill to kill_ssh_agent()?
sock_file = self.ssh_sock_fn()
sock_dir = os.path.dirname(sock_file)
try:
Utils.cleanup_file(sock_file)
# also remove tempdir if it's empty afterwards
if 0 == len(os.listdir(sock_dir)):
os.rmdir(sock_dir)
else:
logger.warning(f"Dir {sock_dir} not empty; leaving it")
except FileNotFoundError:
pass
except OSError as ex:
logger.debug(ex)
def collect_remote_logs(self, ip_address, logs, store_path):
"""
Collect logs from a remote machine
:param ip_address: (str) IP of the machine to collect the logs from
:param logs: (dict: list) The different logs to collect {"files": [], "dirs": [], ""services": []}
:param store_path: (str) Path to copy the logs to
:return: (bool) True if there was an error while collecting the logs
"""
logging_errors = False
for log in logs.get("files", []):
try:
self.scp_file(ip_address, log, store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {log} from {ip_address}\n {ex}")
logging_errors = True
for log in logs.get("dirs", []):
try:
self.rsync(ip_address, log, store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {log} from {ip_address}\n {ex}")
logging_errors = True
for service in logs.get("services", []):
try:
self.ssh_run(
ip_address, f"sudo journalctl -xeu {service} > {service}.log")
self.scp_file(ip_address, f"{service}.log", store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {service}.log from {ip_address}\n {ex}")
logging_errors = True
return logging_errors
def authorized_keys(self):
public_key_path = self.conf.terraform.ssh_key + ".pub"
os.chmod(self.conf.terraform.ssh_key, 0o400)
with open(public_key_path) as f:
pubkey = f.read().strip()
return pubkey
def ssh_run(self, ipaddr, cmd):
key_fn = self.conf.terraform.ssh_key
cmd = "ssh " + Constant.SSH_OPTS + " -i {key_fn} {username}@{ip} -- '{cmd}'".format(
key_fn=key_fn, ip=ipaddr, cmd=cmd, username=self.conf.terraform.nodeuser)
return self.runshellcommand(cmd)
def scp_file(self, ip_address, remote_file_path, local_file_path):
"""
Copies a remote file from the given ip to the give path
:param ip_address: (str) IP address of the node to copy from
:param remote_file_path: (str) Path of the file to be copied
:param local_file_path: (str) Path where to store the log
:return:
"""
cmd = (f"scp {Constant.SSH_OPTS} -i {self.conf.terraform.ssh_key}"
f" {self.conf.terraform.nodeuser}@{ip_address}:{remote_file_path} {local_file_path}")
self.runshellcommand(cmd)
def rsync(self, ip_address, remote_dir_path, local_dir_path):
"""
Copies a remote dir from the given ip to the give path
:param ip_address: (str) IP address of the node to copy from
:param remote_dir_path: (str) Path of the dir to be copied
:param local_dir_path: (str) Path where to store the dir
:return:
"""
cmd = (f'rsync -avz --no-owner --no-perms -e "ssh {Constant.SSH_OPTS} -i {self.conf.terraform.ssh_key}" '
f'--rsync-path="sudo rsync" --ignore-missing-args {self.conf.terraform.nodeuser}@{ip_address}:{remote_dir_path} '
f'{local_dir_path}')
self.runshellcommand(cmd)
def runshellcommand(self, cmd, cwd=None, env={}, ignore_errors=False, stdin=None):
"""Running shell command in {workspace} if cwd == None
Eg) cwd is "skuba", cmd will run shell in {workspace}/skuba/
cwd is None, cmd will run in {workspace}
cwd is abs path, cmd will run in cwd
Keyword arguments:
cmd -- command to run
cwd -- dir to run the cmd
env -- environment variables
ignore_errors -- don't raise exception if command fails
stdin -- standard input for the command in bytes
"""
if not cwd:
cwd = self.conf.workspace
if not os.path.isabs(cwd):
cwd = os.path.join(self.conf.workspace, cwd)
if not os.path.exists(cwd):
            raise FileNotFoundError(Format.alert("Directory {} does not exist".format(cwd)))
if logging.DEBUG >= logger.level:
logger.debug("Executing command\n"
" cwd: {} \n"
" env: {}\n"
" cmd: {}".format(cwd, str(env) if env else "{}", cmd))
else:
logger.info("Executing command {}".format(cmd))
stdout, stderr = [], []
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd,
stdin=subprocess.PIPE if stdin else None, shell=True, env=env
)
if stdin:
p.stdin.write(stdin)
p.stdin.close()
stdoutStreamer = Thread(target = self.read_fd, args = (p, p.stdout, logger.debug, stdout))
stderrStreamer = Thread(target = self.read_fd, args = (p, p.stderr, logger.error, stderr))
stdoutStreamer.start()
stderrStreamer.start()
stdoutStreamer.join()
stderrStreamer.join()
# this is redundant, at this point threads were joined and they waited for the subprocess
# to exit, however it should not hurt to explicitly wait for it again (no-op).
p.wait()
stdout, stderr = "".join(stdout), "".join(stderr)
if p.returncode != 0:
if not ignore_errors:
raise RuntimeError("Error executing command {}".format(cmd))
else:
return stderr
return stdout
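    # Illustrative calls (paths are hypothetical): a relative cwd is resolved
    # against the configured workspace, an absolute cwd is used as given:
    #   utils.runshellcommand("terraform init", cwd="skuba")   # runs in {workspace}/skuba
    #   utils.runshellcommand("ls", cwd="/tmp", ignore_errors=True)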
def ssh_sock_fn(self):
"""generate path to ssh socket
A socket path can't be over 107 chars on Linux, so generate a short
hash of the workspace and use that in $TMPDIR (usually /tmp) so we have
a predictable, test-unique, fixed-length path.
"""
path = os.path.join(
gettempdir(),
hashlib.md5(self.conf.workspace.encode()).hexdigest(),
"ssh-agent-sock"
)
maxl = 107
if len(path) > maxl:
raise Exception(f"Socket path '{path}' len {len(path)} > {maxl}")
return path
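    # Illustrative result (hypothetical workspace): for workspace "/home/user/ws"
    # this yields something like "/tmp/<32-hex-md5-digest>/ssh-agent-sock", which
    # stays well under the 107-character limit checked above.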
def read_fd(self, proc, fd, logger_func, output):
"""Read from fd, logging using logger_func
Read from fd, until proc is finished. All contents will
also be appended onto output."""
while True:
contents = fd.readline().decode()
if contents == '' and proc.poll() is not None:
return
if contents:
output.append(contents)
logger_func(contents.strip())
@timeout(60)
@step
def setup_ssh(self):
os.chmod(self.conf.terraform.ssh_key, 0o400)
# use a dedicated agent to minimize stateful components
sock_fn = self.ssh_sock_fn()
# be sure directory containing socket exists and socket doesn't exist
if os.path.exists(sock_fn):
try:
if os.path.isdir(sock_fn):
                    os.rmdir(sock_fn)  # rmdir only removes an empty dir
else:
os.remove(sock_fn)
except FileNotFoundError:
pass
try:
os.mkdir(os.path.dirname(sock_fn), mode=0o700)
except FileExistsError:
if os.path.isdir(os.path.dirname(sock_fn)):
pass
else:
raise
# clean up old ssh agent process(es)
try:
self.runshellcommand("pkill -f 'ssh-agent -a {}'".format(sock_fn))
logger.warning("Killed previous instance of ssh-agent")
except:
pass
self.runshellcommand("ssh-agent -a {}".format(sock_fn))
self.runshellcommand(
"ssh-add " + self.conf.terraform.ssh_key, env={"SSH_AUTH_SOCK": sock_fn})
@timeout(30)
@step
def info(self):
"""Node info"""
info_lines = "Env vars: {}\n".format(sorted(os.environ))
info_lines += self.runshellcommand('ip a')
info_lines += self.runshellcommand('ip r')
info_lines += self.runshellcommand('cat /etc/resolv.conf')
        # TODO: the logic for retrieving the external IP is platform dependent and should be
# moved to the corresponding platform
try:
r = requests.get(
'http://169.254.169.254/2009-04-04/meta-data/public-ipv4', timeout=2)
r.raise_for_status()
except (requests.HTTPError, requests.Timeout) as err:
logger.warning(
                f'Metadata service unavailable, could not get external IP addr: {err}')
else:
info_lines += 'External IP addr: {}'.format(r.text)
return info_lines
| 2.140625 | 2 |
nonebot/command/argfilter/controllers.py | EVAyo/nonebot | 676 | 11629 | """
Provides several commonly used controllers.

These validators usually need to be called once with some arguments; the result
returned by that call is the actual validator. The trick is to use a closure so
that the inner function can access the object being controlled.

Version: 1.3.0+
"""
import re
from nonebot import CommandSession
from nonebot.helpers import render_expression
def handle_cancellation(session: CommandSession):
"""
    When the user sends something like `算了`, `不用了`, `取消吧` or `停`, finish the
    current command session (by calling `session.finish()`) and send the content
    configured in `SESSION_CANCEL_EXPRESSION`.

    If the input is not such a cancellation phrase, it is returned unchanged.

    Args:
        session: the command session to control
"""
def control(value):
if _is_cancellation(value) is True:
session.finish(
render_expression(session.bot.config.SESSION_CANCEL_EXPRESSION))
return value
return control
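# Illustrative usage inside a command handler (argument names follow the
# nonebot 1.x session API; treat them as an assumption here):
#   session.get('city', prompt='Which city?',
#               arg_filters=[handle_cancellation(session)])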
def _is_cancellation(sentence: str) -> bool:
for kw in ('算', '别', '不', '停', '取消'):
if kw in sentence:
# a keyword matches
break
else:
# no keyword matches
return False
if re.match(r'^那?[算别不停]\w{0,3}了?吧?$', sentence) or \
re.match(r'^那?(?:[给帮]我)?取消了?吧?$', sentence):
return True
return False
__all__ = [
'handle_cancellation',
]
| 2.734375 | 3 |
hnn_core/dipole.py | chenghuzi/hnn-core | 0 | 11630 |
"""Class to handle the dipoles."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import warnings
import numpy as np
from copy import deepcopy
from .viz import plot_dipole, plot_psd, plot_tfr_morlet
def simulate_dipole(net, tstop, dt=0.025, n_trials=None, record_vsoma=False,
record_isoma=False, postproc=False):
"""Simulate a dipole given the experiment parameters.
Parameters
----------
net : Network object
The Network object specifying how cells are
connected.
tstop : float
The simulation stop time (ms).
dt : float
The integration time step of h.CVode (ms)
n_trials : int | None
The number of trials to simulate. If None, the 'N_trials' value
of the ``params`` used to create ``net`` is used (must be >0)
record_vsoma : bool
Option to record somatic voltages from cells
record_isoma : bool
Option to record somatic currents from cells
postproc : bool
If True, smoothing (``dipole_smooth_win``) and scaling
(``dipole_scalefctr``) values are read from the parameter file, and
applied to the dipole objects before returning. Note that this setting
only affects the dipole waveforms, and not somatic voltages, possible
extracellular recordings etc. The preferred way is to use the
:meth:`~hnn_core.dipole.Dipole.smooth` and
:meth:`~hnn_core.dipole.Dipole.scale` methods instead. Default: False.
Returns
-------
dpls: list
List of dipole objects for each trials
"""
from .parallel_backends import _BACKEND, JoblibBackend
if _BACKEND is None:
_BACKEND = JoblibBackend(n_jobs=1)
if n_trials is None:
n_trials = net._params['N_trials']
if n_trials < 1:
raise ValueError("Invalid number of simulations: %d" % n_trials)
if not net.connectivity:
warnings.warn('No connections instantiated in network. Consider using '
'net = jones_2009_model() or net = law_2021_model() to '
'create a predefined network from published models.',
UserWarning)
for drive_name, drive in net.external_drives.items():
if 'tstop' in drive['dynamics']:
if drive['dynamics']['tstop'] is None:
drive['dynamics']['tstop'] = tstop
for bias_name, bias in net.external_biases.items():
for cell_type, bias_cell_type in bias.items():
if bias_cell_type['tstop'] is None:
bias_cell_type['tstop'] = tstop
if bias_cell_type['tstop'] < 0.:
raise ValueError('End time of tonic input cannot be negative')
duration = bias_cell_type['tstop'] - bias_cell_type['t0']
if duration < 0.:
raise ValueError('Duration of tonic input cannot be negative')
net._instantiate_drives(n_trials=n_trials, tstop=tstop)
net._reset_rec_arrays()
if isinstance(record_vsoma, bool):
net._params['record_vsoma'] = record_vsoma
else:
raise TypeError("record_vsoma must be bool, got %s"
% type(record_vsoma).__name__)
if isinstance(record_isoma, bool):
net._params['record_isoma'] = record_isoma
else:
raise TypeError("record_isoma must be bool, got %s"
% type(record_isoma).__name__)
if postproc:
warnings.warn('The postproc-argument is deprecated and will be removed'
' in a future release of hnn-core. Please define '
'smoothing and scaling explicitly using Dipole methods.',
DeprecationWarning)
dpls = _BACKEND.simulate(net, tstop, dt, n_trials, postproc)
return dpls
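# Illustrative usage (drive setup omitted; ``jones_2009_model`` is the predefined
# network referred to in the warning above):
#   from hnn_core import jones_2009_model
#   net = jones_2009_model()
#   dpls = simulate_dipole(net, tstop=170., n_trials=1)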
def read_dipole(fname):
"""Read dipole values from a file and create a Dipole instance.
Parameters
----------
fname : str
Full path to the input file (.txt)
Returns
-------
dpl : Dipole
The instance of Dipole class
"""
dpl_data = np.loadtxt(fname, dtype=float)
dpl = Dipole(dpl_data[:, 0], dpl_data[:, 1:])
return dpl
def average_dipoles(dpls):
"""Compute dipole averages over a list of Dipole objects.
Parameters
----------
dpls: list of Dipole objects
Contains list of dipole objects, each with a `data` member containing
'L2', 'L5' and 'agg' components
Returns
-------
dpl: instance of Dipole
A new dipole object with each component of `dpl.data` representing the
average over the same components in the input list
"""
scale_applied = dpls[0].scale_applied
for dpl_idx, dpl in enumerate(dpls):
if dpl.scale_applied != scale_applied:
raise RuntimeError('All dipoles must be scaled equally!')
if not isinstance(dpl, Dipole):
raise ValueError(
f"All elements in the list should be instances of "
f"Dipole. Got {type(dpl)}")
if dpl.nave > 1:
raise ValueError("Dipole at index %d was already an average of %d"
" trials. Cannot reaverage" %
(dpl_idx, dpl.nave))
avg_data = list()
layers = dpl.data.keys()
for layer in layers:
avg_data.append(
np.mean(np.array([dpl.data[layer] for dpl in dpls]), axis=0)
)
avg_data = np.c_[avg_data].T
avg_dpl = Dipole(dpls[0].times, avg_data)
    # The averaged scale should equal all scales in the input dpl list.
avg_dpl.scale_applied = scale_applied
# set nave to the number of trials averaged in this dipole
avg_dpl.nave = len(dpls)
return avg_dpl
def _rmse(dpl, exp_dpl, tstart=0.0, tstop=0.0, weights=None):
""" Calculates RMSE between data in dpl and exp_dpl
Parameters
----------
dpl: instance of Dipole
A dipole object with simulated data
exp_dpl: instance of Dipole
A dipole object with experimental data
tstart | None: float
Time at beginning of range over which to calculate RMSE
tstop | None: float
Time at end of range over which to calculate RMSE
weights | None: array
An array of weights to be applied to each point in
simulated dpl. Must have length >= dpl.data
If None, weights will be replaced with 1's for typical RMSE
calculation.
Returns
-------
err: float
Weighted RMSE between data in dpl and exp_dpl
"""
from scipy import signal
exp_times = exp_dpl.times
sim_times = dpl.times
# do tstart and tstop fall within both datasets?
# if not, use the closest data point as the new tstop/tstart
for tseries in [exp_times, sim_times]:
if tstart < tseries[0]:
tstart = tseries[0]
if tstop > tseries[-1]:
tstop = tseries[-1]
# make sure start and end times are valid for both dipoles
exp_start_index = (np.abs(exp_times - tstart)).argmin()
exp_end_index = (np.abs(exp_times - tstop)).argmin()
exp_length = exp_end_index - exp_start_index
sim_start_index = (np.abs(sim_times - tstart)).argmin()
sim_end_index = (np.abs(sim_times - tstop)).argmin()
sim_length = sim_end_index - sim_start_index
if weights is None:
# weighted RMSE with weights of all 1's is equivalent to
# normal RMSE
weights = np.ones(len(sim_times[0:sim_end_index]))
weights = weights[sim_start_index:sim_end_index]
dpl1 = dpl.data['agg'][sim_start_index:sim_end_index]
dpl2 = exp_dpl.data['agg'][exp_start_index:exp_end_index]
if (sim_length > exp_length):
# downsample simulation timeseries to match exp data
dpl1 = signal.resample(dpl1, exp_length)
weights = signal.resample(weights, exp_length)
indices = np.where(weights < 1e-4)
weights[indices] = 0
elif (sim_length < exp_length):
# downsample exp timeseries to match simulation data
dpl2 = signal.resample(dpl2, sim_length)
return np.sqrt((weights * ((dpl1 - dpl2) ** 2)).sum() / weights.sum())
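# In symbols: err = sqrt( sum_i w_i * (dpl1_i - dpl2_i)**2 / sum_i w_i ),
# which reduces to the ordinary RMSE when every weight w_i is 1.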
class Dipole(object):
"""Dipole class.
An instance of the ``Dipole``-class contains the simulated dipole moment
timecourses for L2 and L5 pyramidal cells, as well as their aggregate
(``'agg'``). The units of the dipole moment are in ``nAm``
(1e-9 Ampere-meters).
Parameters
----------
times : array (n_times,)
The time vector (in ms)
data : array, shape (n_times x n_layers)
The data. The first column represents 'agg' (the total diple),
the second 'L2' layer and the last one 'L5' layer. For experimental
data, it can contain only one column.
nave : int
Number of trials that were averaged to produce this Dipole. Defaults
to 1
Attributes
----------
times : array-like
The time vector (in ms)
sfreq : float
The sampling frequency (in Hz)
data : dict of array
Dipole moment timecourse arrays with keys 'agg', 'L2' and 'L5'
nave : int
Number of trials that were averaged to produce this Dipole
scale_applied : int or float
The total factor by which the dipole has been scaled (using
:meth:`~hnn_core.dipole.Dipole.scale`).
"""
def __init__(self, times, data, nave=1): # noqa: D102
self.times = np.array(times)
if data.ndim == 1:
data = data[:, None]
if data.shape[1] == 3:
self.data = {'agg': data[:, 0], 'L2': data[:, 1], 'L5': data[:, 2]}
elif data.shape[1] == 1:
self.data = {'agg': data[:, 0]}
self.nave = nave
self.sfreq = 1000. / (times[1] - times[0]) # NB assumes len > 1
self.scale_applied = 1 # for visualisation
def copy(self):
"""Return a copy of the Dipole instance
Returns
-------
dpl_copy : instance of Dipole
A copy of the Dipole instance.
"""
return deepcopy(self)
def _post_proc(self, window_len, fctr):
"""Apply scaling and smoothing from param-files (DEPRECATE)
Parameters
----------
window_len : int
Smoothing window in ms
fctr : int
Scaling factor
"""
self.scale(fctr)
if window_len > 0: # this is to allow param-files with len==0
self.smooth(window_len)
def _convert_fAm_to_nAm(self):
"""The NEURON simulator output is in fAm, convert to nAm
        NB! Must be run `after` :meth:`Dipole._baseline_renormalize`
"""
for key in self.data.keys():
self.data[key] *= 1e-6
def scale(self, factor):
"""Scale (multiply) the dipole moment by a fixed factor
The attribute ``Dipole.scale_applied`` is updated to reflect factors
applied and displayed in plots.
Parameters
----------
factor : int
Scaling factor, applied to the data in-place.
"""
for key in self.data.keys():
self.data[key] *= factor
self.scale_applied *= factor
return self
def smooth(self, window_len):
"""Smooth the dipole waveform using Hamming-windowed convolution
Note that this method operates in-place, i.e., it will alter the data.
If you prefer a filtered copy, consider using the
:meth:`~hnn_core.dipole.Dipole.copy`-method.
Parameters
----------
window_len : float
The length (in ms) of a `~numpy.hamming` window to convolve the
data with.
Returns
-------
dpl_copy : instance of Dipole
A copy of the modified Dipole instance.
"""
from .utils import smooth_waveform
for key in self.data.keys():
self.data[key] = smooth_waveform(self.data[key], window_len,
self.sfreq)
return self
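    # Illustrative chaining (factor and window length are hypothetical): because
    # scale() and smooth() return self, post-processing can be written as
    #   dpl.copy().scale(3000).smooth(window_len=30)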
def savgol_filter(self, h_freq):
"""Smooth the dipole waveform using Savitzky-Golay filtering
Note that this method operates in-place, i.e., it will alter the data.
If you prefer a filtered copy, consider using the
:meth:`~hnn_core.dipole.Dipole.copy`-method. The high-frequency cutoff
value of a Savitzky-Golay filter is approximate; see the SciPy
reference: :func:`~scipy.signal.savgol_filter`.
Parameters
----------
h_freq : float or None
Approximate high cutoff frequency in Hz. Note that this
is not an exact cutoff, since Savitzky-Golay filtering
is done using polynomial fits
instead of FIR/IIR filtering. This parameter is thus used to
determine the length of the window over which a 5th-order
polynomial smoothing is applied.
Returns
-------
dpl_copy : instance of Dipole
A copy of the modified Dipole instance.
"""
from .utils import _savgol_filter
if h_freq < 0:
raise ValueError('h_freq cannot be negative')
elif h_freq > 0.5 * self.sfreq:
raise ValueError(
'h_freq must be less than half the sample rate')
for key in self.data.keys():
self.data[key] = _savgol_filter(self.data[key],
h_freq,
self.sfreq)
return self
def plot(self, tmin=None, tmax=None, layer='agg', decim=None, ax=None,
color='k', show=True):
"""Simple layer-specific plot function.
Parameters
----------
tmin : float or None
Start time of plot (in ms). If None, plot entire simulation.
tmax : float or None
End time of plot (in ms). If None, plot entire simulation.
layer : str
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
        decim : int
Factor by which to decimate the raw dipole traces (optional)
ax : instance of matplotlib figure | None
The matplotlib axis
color : tuple of float
RGBA value to use for plotting. By default, 'k' (black)
show : bool
If True, show the figure
Returns
-------
fig : instance of plt.fig
The matplotlib figure handle.
"""
return plot_dipole(self, tmin=tmin, tmax=tmax, ax=ax, layer=layer,
decim=decim, color=color, show=show)
def plot_psd(self, fmin=0, fmax=None, tmin=None, tmax=None, layer='agg',
ax=None, show=True):
"""Plot power spectral density (PSD) of dipole time course
Applies `~scipy.signal.periodogram` from SciPy with
``window='hamming'``.
Note that no spectral averaging is applied across time, as most
``hnn_core`` simulations are short-duration. However, passing a list of
`Dipole` instances will plot their average (Hamming-windowed) power,
which resembles the `Welch`-method applied over time.
Parameters
----------
dpl : instance of Dipole | list of Dipole instances
The Dipole object.
fmin : float
Minimum frequency to plot (in Hz). Default: 0 Hz
fmax : float
Maximum frequency to plot (in Hz). Default: None (plot up to
Nyquist)
tmin : float or None
Start time of data to include (in ms). If None, use entire
simulation.
tmax : float or None
End time of data to include (in ms). If None, use entire
simulation.
layer : str, default 'agg'
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
ax : instance of matplotlib figure | None
The matplotlib axis.
show : bool
If True, show the figure
Returns
-------
fig : instance of matplotlib Figure
The matplotlib figure handle.
"""
return plot_psd(self, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax,
layer=layer, ax=ax, show=show)
def plot_tfr_morlet(self, freqs, n_cycles=7., tmin=None, tmax=None,
layer='agg', decim=None, padding='zeros', ax=None,
colormap='inferno', colorbar=True, show=True):
"""Plot Morlet time-frequency representation of dipole time course
NB: Calls `~mne.time_frequency.tfr_array_morlet`, so ``mne`` must be
installed.
Parameters
----------
dpl : instance of Dipole | list of Dipole instances
The Dipole object. If a list of dipoles is given, the power is
calculated separately for each trial, then averaged.
freqs : array
Frequency range of interest.
n_cycles : float or array of float, default 7.0
Number of cycles. Fixed number or one per frequency.
tmin : float or None
Start time of plot in milliseconds. If None, plot entire
simulation.
tmax : float or None
End time of plot in milliseconds. If None, plot entire simulation.
layer : str, default 'agg'
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
decim : int or list of int or None (default)
Optional (integer) factor by which to decimate the raw dipole
traces. The SciPy function :func:`~scipy.signal.decimate` is used,
which recommends values <13. To achieve higher decimation factors,
a list of ints can be provided. These are applied successively.
padding : str or None
Optional padding of the dipole time course beyond the plotting
limits. Possible values are: 'zeros' for padding with 0's
(default), 'mirror' for mirror-image padding.
ax : instance of matplotlib figure | None
The matplotlib axis
colormap : str
The name of a matplotlib colormap, e.g., 'viridis'. Default:
'inferno'
colorbar : bool
If True (default), adjust figure to include colorbar.
show : bool
If True, show the figure
Returns
-------
fig : instance of matplotlib Figure
The matplotlib figure handle.
"""
return plot_tfr_morlet(
self, freqs, n_cycles=n_cycles, tmin=tmin, tmax=tmax,
layer=layer, decim=decim, padding=padding, ax=ax,
colormap=colormap, colorbar=colorbar, show=show)
def _baseline_renormalize(self, N_pyr_x, N_pyr_y):
"""Only baseline renormalize if the units are fAm.
Parameters
----------
N_pyr_x : int
Nr of cells (x)
N_pyr_y : int
Nr of cells (y)
"""
# N_pyr cells in grid. This is PER LAYER
N_pyr = N_pyr_x * N_pyr_y
# dipole offset calculation: increasing number of pyr
# cells (L2 and L5, simultaneously)
# with no inputs resulted in an aggregate dipole over the
# interval [50., 1000.] ms that
# eventually plateaus at -48 fAm. The range over this interval
# is something like 3 fAm
# so the resultant correction is here, per dipole
# dpl_offset = N_pyr * 50.207
dpl_offset = {
# these values will be subtracted
'L2': N_pyr * 0.0443,
'L5': N_pyr * -49.0502
# 'L5': N_pyr * -48.3642,
# will be calculated next, this is a placeholder
# 'agg': None,
}
# L2 dipole offset can be roughly baseline shifted over
# the entire range of t
self.data['L2'] -= dpl_offset['L2']
# L5 dipole offset should be different for interval [50., 500.]
# and then it can be offset
# slope (m) and intercept (b) params for L5 dipole offset
# uncorrected for N_cells
# these values were fit over the range [37., 750.)
m = 3.4770508e-3
b = -51.231085
# these values were fit over the range [750., 5000]
t1 = 750.
m1 = 1.01e-4
b1 = -48.412078
# piecewise normalization
self.data['L5'][self.times <= 37.] -= dpl_offset['L5']
self.data['L5'][(self.times > 37.) & (self.times < t1)] -= N_pyr * \
(m * self.times[(self.times > 37.) & (self.times < t1)] + b)
self.data['L5'][self.times >= t1] -= N_pyr * \
(m1 * self.times[self.times >= t1] + b1)
# recalculate the aggregate dipole based on the baseline
# normalized ones
self.data['agg'] = self.data['L2'] + self.data['L5']
def write(self, fname):
"""Write dipole values to a file.
Parameters
----------
fname : str
Full path to the output file (.txt)
Outputs
-------
        A tab separated txt file where rows correspond
to samples and columns correspond to
1) time (s),
2) aggregate current dipole (scaled nAm),
3) L2/3 current dipole (scaled nAm), and
4) L5 current dipole (scaled nAm)
"""
if self.nave > 1:
warnings.warn("Saving Dipole to file that is an average of %d"
" trials" % self.nave)
X = [self.times]
fmt = ['%3.3f']
for data in self.data.values():
X.append(data)
fmt.append('%5.4f')
X = np.r_[X].T
np.savetxt(fname, X, fmt=fmt, delimiter='\t')
| 2.515625 | 3 |
openstates/openstates-master/openstates/ga/bills.py | Jgorsick/Advocacy_Angular | 0 | 11631 |
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from collections import defaultdict
from .util import get_client, get_url, backoff
# Methods (7):
# GetLegislationDetail(xs:int LegislationId, )
#
# GetLegislationDetailByDescription(ns2:DocumentType DocumentType,
# xs:int Number, xs:int SessionId)
#
# GetLegislationForSession(xs:int SessionId, )
#
# GetLegislationRange(ns2:LegislationIndexRangeSet Range, )
#
# GetLegislationRanges(xs:int SessionId,
# ns2:DocumentType DocumentType, xs:int RangeSize, )
#
# GetLegislationSearchResultsPaged(ns2:LegislationSearchConstraints
# Constraints, xs:int PageSize,
# xs:int StartIndex, )
# GetTitles()
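# Illustrative call pattern (grounded in the get_client/backoff usage below; the
# retry semantics of backoff() are an assumption based on its name):
#   lservice = get_client("Legislation").service
#   detail = backoff(lservice.GetLegislationDetail, legislation_id)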
member_cache = {}
SOURCE_URL = "http://www.legis.ga.gov/Legislation/en-US/display/{session}/{bid}"
class GABillScraper(BillScraper):
jurisdiction = 'ga'
lservice = get_client("Legislation").service
vservice = get_client("Votes").service
mservice = get_client("Members").service
lsource = get_url("Legislation")
msource = get_url("Members")
vsource = get_url("Votes")
def get_member(self, member_id):
if member_id in member_cache:
return member_cache[member_id]
mem = backoff(self.mservice.GetMember, member_id)
member_cache[member_id] = mem
return mem
def scrape(self, session, chambers):
sid = self.metadata['session_details'][session]['_guid']
legislation = backoff(
self.lservice.GetLegislationForSession,
sid
)['LegislationIndex']
for leg in legislation:
lid = leg['Id']
instrument = backoff(self.lservice.GetLegislationDetail, lid)
history = [x for x in instrument['StatusHistory'][0]]
actions = reversed([{
"code": x['Code'],
"action": x['Description'],
"_guid": x['Id'],
"date": x['Date']
} for x in history])
guid = instrument['Id']
bill_type = instrument['DocumentType']
chamber = {
"H": "lower",
"S": "upper",
"J": "joint"
}[bill_type[0]] # XXX: This is a bit of a hack.
bill_id = "%s %s" % (
bill_type,
instrument['Number'],
)
if instrument['Suffix']:
bill_id += instrument['Suffix']
title = instrument['Caption']
description = instrument['Summary']
if title is None:
continue
bill = Bill(
session,
chamber,
bill_id,
title,
description=description,
_guid=guid
)
if instrument['Votes']:
for vote_ in instrument['Votes']:
_, vote_ = vote_
vote_ = backoff(self.vservice.GetVote, vote_[0]['VoteId'])
vote = Vote(
{"House": "lower", "Senate": "upper"}[vote_['Branch']],
vote_['Date'],
vote_['Caption'] or "Vote on Bill",
(vote_['Yeas'] > vote_['Nays']),
vote_['Yeas'],
vote_['Nays'],
(vote_['Excused'] + vote_['NotVoting']),
session=session,
bill_id=bill_id,
bill_chamber=chamber)
vote.add_source(self.vsource)
methods = {"Yea": vote.yes, "Nay": vote.no,}
for vdetail in vote_['Votes'][0]:
whom = vdetail['Member']
how = vdetail['MemberVoted']
try:
m = methods[how]
except KeyError:
m = vote.other
m(whom['Name'])
bill.add_vote(vote)
types = {
"HI": ["other"],
"SI": ["other"],
"HH": ["other"],
"SH": ["other"],
"HPF": ["bill:introduced"],
"HDSAS": ["other"],
"SPF": ["bill:introduced"],
"HSR": ["bill:reading:2"],
"SSR": ["bill:reading:2"],
"HFR": ["bill:reading:1"],
"SFR": ["bill:reading:1"],
"HRECM": ["bill:withdrawn", "committee:referred"],
"SRECM": ["bill:withdrawn", "committee:referred"],
"SW&C": ["bill:withdrawn", "committee:referred"],
"HW&C": ["bill:withdrawn", "committee:referred"],
"HRA": ["bill:passed"],
"SRA": ["bill:passed"],
"HPA": ["bill:passed"],
"HRECO": ["other"],
"SPA": ["bill:passed"],
"HTABL": ["other"], # "House Tabled" - what is this?
"SDHAS": ["other"],
"HCFR": ["committee:passed:favorable"],
"SCFR": ["committee:passed:favorable"],
"HRAR": ["committee:referred"],
"SRAR": ["committee:referred"],
"STR": ["bill:reading:3"],
"SAHAS": ["other"],
"SE": ["bill:passed"],
"SR": ["committee:referred"],
"HTRL": ["bill:reading:3", "bill:failed"],
"HTR": ["bill:reading:3"],
"S3RLT": ["bill:reading:3", "bill:failed"],
"HASAS": ["other"],
"S3RPP": ["other"],
"STAB": ["other"],
"SRECO": ["other"],
"SAPPT": ["other"],
"HCA": ["other"],
"HNOM": ["other"],
"HTT": ["other"],
"STT": ["other"],
"SRECP": ["other"],
"SCRA": ["other"],
"SNOM": ["other"],
"S2R": ["bill:reading:2"],
"H2R": ["bill:reading:2"],
"SENG": ["bill:passed"],
"HENG": ["bill:passed"],
"HPOST": ["other"],
"HCAP": ["other"],
"SDSG": ["governor:signed"],
"SSG": ["governor:received"],
"Signed Gov": ["governor:signed"],
"HDSG": ["governor:signed"],
"HSG": ["governor:received"],
"EFF": ["other"],
"HRP": ["other"],
"STH": ["other"],
"HTS": ["other"],
}
ccommittees = defaultdict(list)
committees = instrument['Committees']
if committees:
for committee in committees[0]:
ccommittees[{
"House": "lower",
"Senate": "upper",
}[committee['Type']]].append(committee['Name'])
for action in actions:
chamber = {
"H": "lower",
"S": "upper",
"E": "other", # Effective Date
}[action['code'][0]]
try:
_types = types[action['code']]
except KeyError:
self.debug(action)
_types = ["other"]
committees = []
if any(('committee' in x for x in _types)):
committees = [str(x) for x in ccommittees.get(chamber, [])]
bill.add_action(chamber, action['action'], action['date'], _types,
committees=committees,
_code=action['code'],
_code_id=action['_guid'])
sponsors = []
if instrument['Authors']:
sponsors = instrument['Authors']['Sponsorship']
if 'Sponsors' in instrument and instrument['Sponsors']:
sponsors += instrument['Sponsors']['Sponsorship']
sponsors = [
(x['Type'], self.get_member(x['MemberId'])) for x in sponsors
]
for typ, sponsor in sponsors:
name = "{First} {Last}".format(**dict(sponsor['Name']))
bill.add_sponsor(
                'primary' if 'Author' in typ else 'secondary',
name
)
for version in instrument['Versions']['DocumentDescription']:
name, url, doc_id, version_id = [
version[x] for x in [
'Description',
'Url',
'Id',
'Version'
]
]
bill.add_version(
name,
url,
mimetype='application/pdf',
_internal_document_id=doc_id,
_version_id=version_id
)
versions = sorted(
bill['versions'],
key=lambda x: x['_internal_document_id']
)
bill['versions'] = versions
bill.add_source(self.msource)
bill.add_source(self.lsource)
bill.add_source(SOURCE_URL.format(**{
"session": session,
"bid": guid,
}))
self.save_bill(bill)
| 2.40625 | 2 |
data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 0 | 11632 | import argparse
import numpy as np
import glob
import re
from log import print_to_file
from scipy.fftpack import fftn, ifftn
from skimage.feature import peak_local_max, canny
from skimage.transform import hough_circle
import pickle as pickle
from paths import TRAIN_DATA_PATH, LOGS_PATH, PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH
from paths import TEST_DATA_PATH
def orthogonal_projection_on_slice(percentual_coordinate, source_metadata, target_metadata):
point = np.array([[percentual_coordinate[0]],
[percentual_coordinate[1]],
[0],
[1]])
image_size = [source_metadata["Rows"], source_metadata["Columns"]]
point = np.dot(np.array( [[image_size[0],0,0,0],
[0,image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = source_metadata["PixelSpacing"]
point = np.dot(np.array( [[pixel_spacing[0],0,0,0],
[0,pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
Fa = np.array(source_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
posa = source_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[Fa[0,0],Fa[1,0],0,posa[0]],
[Fa[0,1],Fa[1,1],0,posa[1]],
[Fa[0,2],Fa[1,2],0,posa[2]],
[0,0,0,1]]), point)
posb = target_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[1,0,0,-posb[0]],
[0,1,0,-posb[1]],
[0,0,1,-posb[2]],
[0,0,0,1]]), point)
Fb = np.array(target_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
ff0 = np.sqrt(np.sum(Fb[0,:]*Fb[0,:]))
ff1 = np.sqrt(np.sum(Fb[1,:]*Fb[1,:]))
point = np.dot(np.array( [[Fb[0,0]/ff0,Fb[0,1]/ff0,Fb[0,2]/ff0,0],
[Fb[1,0]/ff1,Fb[1,1]/ff1,Fb[1,2]/ff1,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = target_metadata["PixelSpacing"]
point = np.dot(np.array( [[1./pixel_spacing[0],0,0,0],
[0,1./pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
image_size = [target_metadata["Rows"], target_metadata["Columns"]]
point = np.dot(np.array( [[1./image_size[0],0,0,0],
[0,1./image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
return point[:2,0] # percentual coordinate as well
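def _example_project_roi_center(sax_slice, ch4_slice, roi_center):
    # Illustrative sketch (hypothetical helper): project an ROI centre found on a
    # short-axis slice onto a 4-chamber slice, mirroring how get_slice2roi() below
    # uses orthogonal_projection_on_slice(). Inputs are slice dicts as produced by
    # get_patient_data(); roi_center is in pixel (row, column) coordinates.
    src_meta, dst_meta = sax_slice['metadata'], ch4_slice['metadata']
    # convert to percentual coordinates of the source image
    frac = (float(roi_center[0]) / src_meta['Rows'],
            float(roi_center[1]) / src_meta['Columns'])
    proj = orthogonal_projection_on_slice(frac, src_meta, dst_meta)
    # back to pixel coordinates of the target image
    return float(proj[0]) * dst_meta['Rows'], float(proj[1]) * dst_meta['Columns']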
#joni
minradius = 15
maxradius = 65
kernel_width = 5
center_margin = 8
num_peaks = 10
num_circles = 10 # 20
radstep = 2
#ira
minradius_mm=25
maxradius_mm=45
kernel_width=5
center_margin=8
num_peaks=10
num_circles=20
radstep=2
def extract_roi(data, pixel_spacing, minradius_mm=15, maxradius_mm=65, kernel_width=5, center_margin=8, num_peaks=10,
num_circles=10, radstep=2):
"""
Returns center and radii of ROI region in (i,j) format
"""
# radius of the smallest and largest circles in mm estimated from the train set
# convert to pixel counts
minradius = int(minradius_mm / pixel_spacing)
maxradius = int(maxradius_mm / pixel_spacing)
ximagesize = data[0]['data'].shape[1]
yimagesize = data[0]['data'].shape[2]
xsurface = np.tile(list(range(ximagesize)), (yimagesize, 1)).T
ysurface = np.tile(list(range(yimagesize)), (ximagesize, 1))
lsurface = np.zeros((ximagesize, yimagesize))
allcenters = []
allaccums = []
allradii = []
for dslice in data:
ff1 = fftn(dslice['data'])
fh = np.absolute(ifftn(ff1[1, :, :]))
fh[fh < 0.1 * np.max(fh)] = 0.0
image = 1. * fh / np.max(fh)
# find hough circles and detect two radii
edges = canny(image, sigma=3)
hough_radii = np.arange(minradius, maxradius, radstep)
hough_res = hough_circle(edges, hough_radii)
if hough_res.any():
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
# For each radius, extract num_peaks circles
peaks = peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
# Keep the most prominent num_circles circles
sorted_circles_idxs = np.argsort(accums)[::-1][:num_circles]
for idx in sorted_circles_idxs:
center_x, center_y = centers[idx]
allcenters.append(centers[idx])
allradii.append(radii[idx])
allaccums.append(accums[idx])
brightness = accums[idx]
lsurface = lsurface + brightness * np.exp(
-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2)
lsurface = lsurface / lsurface.max()
# select most likely ROI center
roi_center = np.unravel_index(lsurface.argmax(), lsurface.shape)
# determine ROI radius
roi_x_radius = 0
roi_y_radius = 0
for idx in range(len(allcenters)):
xshift = np.abs(allcenters[idx][0] - roi_center[0])
yshift = np.abs(allcenters[idx][1] - roi_center[1])
if (xshift <= center_margin) & (yshift <= center_margin):
roi_x_radius = np.max((roi_x_radius, allradii[idx] + xshift))
roi_y_radius = np.max((roi_y_radius, allradii[idx] + yshift))
if roi_x_radius > 0 and roi_y_radius > 0:
roi_radii = roi_x_radius, roi_y_radius
else:
roi_radii = None
return roi_center, roi_radii
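def _example_extract_patient_roi(study_path):
    # Illustrative sketch (hypothetical helper/path): extract_roi() expects the
    # slice dicts produced by get_patient_data() (defined below) plus the in-plane
    # pixel spacing in mm, and returns the ROI centre and radii in pixel (i, j)
    # coordinates -- the same call that get_slice2roi() makes per slice group.
    patient_data = get_patient_data(study_path)
    pixel_spacing = patient_data[0]['metadata']['PixelSpacing'][0]
    return extract_roi(patient_data, pixel_spacing)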
def read_slice(path):
    return pickle.load(open(path, 'rb'))['data']
def read_metadata(path):
    d = pickle.load(open(path, 'rb'))['metadata'][0]
metadata = {k: d[k] for k in ['PixelSpacing', 'ImageOrientationPatient', 'ImagePositionPatient', 'SliceLocation',
'PatientSex', 'PatientAge', 'Rows', 'Columns']}
metadata['PixelSpacing'] = np.float32(metadata['PixelSpacing'])
metadata['ImageOrientationPatient'] = np.float32(metadata['ImageOrientationPatient'])
metadata['SliceLocation'] = np.float32(metadata['SliceLocation'])
metadata['ImagePositionPatient'] = np.float32(metadata['ImagePositionPatient'])
metadata['PatientSex'] = 1 if metadata['PatientSex'] == 'F' else 0
metadata['PatientAge'] = int(metadata['PatientAge'][1:3])
metadata['Rows'] = int(metadata['Rows'])
metadata['Columns'] = int(metadata['Columns'])
return metadata
def get_patient_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + '/sax_*.pkl'),
key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(sax_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
d = read_slice(s)
patient_data.append({'data': d, 'metadata': metadata,
'slice_id': slice_id, 'patient_id': pid})
return patient_data
def get_patient_ch_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + '/*ch_*.pkl'),
key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(\d+ch_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
d = read_slice(s)
patient_data.append({'data': d, 'metadata': metadata,
'slice_id': slice_id, 'patient_id': pid})
return patient_data
def sort_slices(slices):
nslices = len(slices)
positions = np.zeros((nslices,))
for i in range(nslices):
positions[i] = slices[i]['metadata']['SliceLocation']
sorted_slices = [s for pos, s in sorted(zip(positions.tolist(), slices),
key=lambda x: x[0], reverse=True)]
return sorted_slices
def group_slices(slice_stack):
"""
Groups slices into stacks with the same image orientation
:param slice_stack:
:return: list of slice stacks
"""
img_orientations = []
for s in slice_stack:
img_orientations.append(tuple(s['metadata']['ImageOrientationPatient']))
img_orientations = list(set(img_orientations))
if len(img_orientations) == 1:
return [slice_stack]
else:
slice_groups = [[] for _ in range(len(img_orientations))]
for s in slice_stack:
group = img_orientations.index(tuple(s['metadata']['ImageOrientationPatient']))
slice_groups[group].append(s)
return slice_groups
def plot_roi(slice_group, roi_center, roi_radii):
    # matplotlib is only needed for this optional debug visualisation
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation
    x_roi_center, y_roi_center = roi_center[0], roi_center[1]
    x_roi_radius, y_roi_radius = roi_radii[0], roi_radii[1]
print('nslices', len(slice_group))
    for dslice in [slice_group[len(slice_group) // 2]]:
outdata = dslice['data']
# print dslice['slice_id']
# print dslice['metadata']['SliceLocation']
# print dslice['metadata']['ImageOrientationPatient']
# print dslice['metadata']['PixelSpacing']
# print dslice['data'].shape
# print '--------------------------------------'
roi_mask = np.zeros_like(outdata[0])
roi_mask[x_roi_center - x_roi_radius:x_roi_center + x_roi_radius,
y_roi_center - y_roi_radius:y_roi_center + y_roi_radius] = 1
outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
fig = plt.figure(1)
fig.canvas.set_window_title(dslice['patient_id'] + dslice['slice_id'])
def init_out():
im.set_data(outdata[0])
def animate_out(i):
im.set_data(outdata[i])
return im
im = fig.gca().imshow(outdata[0], cmap='gist_gray_r', vmin=0, vmax=255)
anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=30, interval=50)
plt.show()
def get_slice2roi(data_path, plot=False):
patient_paths = sorted(glob.glob(data_path + '*/study'))
slice2roi = {}
for p in patient_paths:
patient_data = get_patient_data(p)
sorted_slices = sort_slices(patient_data)
grouped_slices = group_slices(sorted_slices)
ch_data = get_patient_ch_data(p)
ch4, ch2 = None,None
for data in ch_data:
if data['slice_id'].startswith("4"):
ch4 = data
elif data['slice_id'].startswith("2"):
ch2 = data
# init patient dict
pid = sorted_slices[0]['patient_id']
print("processing patient %s" % pid)
# print pid
slice2roi[pid] = {}
# pixel spacing doesn't change within one patient
pixel_spacing = sorted_slices[0]['metadata']['PixelSpacing'][0]
for slice_group in grouped_slices:
try:
roi_center, roi_radii = extract_roi(slice_group, pixel_spacing)
            except Exception:
print('Could not find ROI')
roi_center, roi_radii = None, None
print(roi_center, roi_radii)
if plot and roi_center and roi_radii:
pass
#plot_roi(slice_group, roi_center, roi_radii)
for s in slice_group:
sid = s['slice_id']
slice2roi[pid][sid] = {'roi_center': roi_center, 'roi_radii': roi_radii}
# project found roi_centers on the 4ch and 2ch slice
ch4_centers = []
ch2_centers = []
for slice in sorted_slices:
sid = slice['slice_id']
roi_center = slice2roi[pid][sid]['roi_center']
metadata_source = slice['metadata']
hough_roi_center = (float(roi_center[0]) / metadata_source['Rows'],
float(roi_center[1]) / metadata_source['Columns'])
if ch4 is not None:
metadata_target = ch4['metadata']
result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
ch_roi_center = [float(result[0]) * metadata_target['Rows'],
float(result[1]) * metadata_target['Columns']]
ch4_centers.append(ch_roi_center)
if ch2 is not None:
metadata_target = ch2['metadata']
result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
ch_roi_center = [float(result[0]) * metadata_target['Rows'],
float(result[1]) * metadata_target['Columns']]
ch2_centers.append(ch_roi_center)
if ch4 is not None:
centers = np.array(ch4_centers)
ch4_result_center = np.mean(centers, axis=0)
ch4_result_radius = np.max(np.sqrt((centers - ch4_result_center)**2))
sid = ch4['slice_id']
slice2roi[pid][sid] = {'roi_center': tuple(ch4_result_center), 'roi_radii': (ch4_result_radius, ch4_result_radius)}
if ch2 is not None:
centers = np.array(ch2_centers)
ch2_result_center = np.mean(centers, axis=0)
ch2_result_radius = np.max(np.sqrt((centers - ch2_result_center)**2))
sid = ch2['slice_id']
slice2roi[pid][sid] = {'roi_center': tuple(ch2_result_center), 'roi_radii': (ch2_result_radius, ch2_result_radius)}
filename = data_path.split('/')[-1] + '_slice2roi_joni.pkl'
    with open(filename, 'wb') as f:
pickle.dump(slice2roi, f)
print('saved to ', filename)
return slice2roi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
#required.add_argument('-c', '--config',
# help='configuration to run',
# required=True)
args = parser.parse_args()
data_paths = [PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH]
log_path = LOGS_PATH + "generate_roi.log"
with print_to_file(log_path):
for d in data_paths:
get_slice2roi(d, plot=True)
print("log saved to '%s'" % log_path)
| 1.921875 | 2 |
scrapers/scrapsfbos.py | ndd365/showup | 48 | 11633 | <gh_stars>10-100
import feedparser
from bs4 import BeautifulSoup
from dateutil.parser import parse
from datetime import timedelta
import pytz
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from oauth2client.service_account import ServiceAccountCredentials
scopes = 'https://www.googleapis.com/auth/calendar'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'client_secret.json', scopes)
http_auth = credentials.authorize(Http())
CAL = build('calendar', 'v3', http=credentials.authorize(Http()))
class Event(object):
def __init__(self, name, start_date, end_date):
self.name = name
self.start_date = start_date
self.end_date = end_date
def __repr__(self):
return self.name
def get_calendar_data():
events = []
url = "http://sfbos.org/events/feed"
feed = feedparser.parse(url)
for item in feed["items"]:
event_name = item["title"]
event_details=item["summary_detail"]["value"]
soup = BeautifulSoup(event_details, 'html.parser')
start_date_unaware = parse(soup.span.string)
start_date = start_date_unaware.replace(tzinfo=pytz.UTC)
end_date = start_date + timedelta(hours=1)
event = Event(event_name, start_date, end_date)
print event
events.append(event)
return events
def sync_to_google_calendar(events):
for event in events:
GMT_OFF = '-07:00' # PDT/MST/GMT-7
start_date = event.start_date.isoformat()
end_date = event.end_date.isoformat()
gcal_event = {
'summary': event.name,
'start': {'dateTime': start_date},
'end': {'dateTime': end_date},
'attendees': [
# {'email': '<EMAIL>'},
# {'email': '<EMAIL>'},
],
}
print gcal_event
e = CAL.events().insert(calendarId='<EMAIL>',
sendNotifications=True, body=gcal_event).execute()
print e
def print_calendars():
page_token = None
while True:
calendar_list = CAL.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
print calendar_list_entry
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
events = get_calendar_data()
sync_to_google_calendar(events)
| 2.875 | 3 |
app/api/v2/models/product.py | danuluma/dannstore | 0 | 11634 | <reponame>danuluma/dannstore<gh_stars>0
import os
import sys
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../../')
from app.api.v2.db import Db
def format_book(book):
"""Formats the results to a dictionary"""
book = {
"id": book[0],
"title": book[1],
"description": book[2],
"category": book[3],
"price": book[4],
"quantity": book[5],
"minimum": book[6],
"image_url": book[7],
"created_by": book[8],
"updated_by": book[9],
"created_at": str(book[10])
}
return book
class ProductModel(Db):
"""Product Model. Books stuff here"""
def get_all_books(self):
"""Gets all books from the db"""
booklist = []
for book in Db().get_query('books'):
details = format_book(book)
booklist.append(details)
return booklist
def get_single_book(self, param, this_row):
"""Gets a single book"""
books = [row for row in Db().get_query(
'books') if row[this_row] == param]
if books:
book = books[0]
return format_book(book)
def add_new_book(self, book):
"""Adds a new book to the db"""
try:
Db().db_query(f"""
INSERT INTO books (title, description, category, price, quantity, minimum, image_url, created_by)
VALUES ('{book[0]}', '{book[1]}', '{book[2]}', {book[3]}, {book[4]}, {book[5]}, '{book[6]}', {book[7]});
""")
        except Exception:
return "Failed to add", 500
def edit_book(self, book_id, book):
"""Updates a book's details"""
Db().db_query(f"""UPDATE books SET title = '{book[0]}', description = '{book[1]}', category = '{book[2]}', price = {book[3]}, quantity = {book[4]}, minimum = {book[5]}, image_url = '{book[6]}', updated_by = {book[7]} WHERE id = {book_id};""")
def sell_book(self, book_id, quantity):
"""Updates a book's quantity"""
Db().db_query(f"""UPDATE books SET quantity = quantity - {quantity} WHERE id = {book_id};""")
def delete_book(self, book_id):
"""Deletes a book"""
try:
Db().db_query(f"""DELETE FROM books WHERE id = {book_id};""")
        except Exception:
return "Failed", 500
| 3.09375 | 3 |
pbq/pbq.py | amirdor/pbq | 0 | 11635 | # -*- coding: utf-8 -*-
"""Main module."""
import os
from google.cloud import bigquery
from pbq.query import Query
from google.cloud import bigquery_storage_v1beta1
from google.cloud.exceptions import NotFound
from google.api_core.exceptions import BadRequest
import pandas as pd
import datetime
class PBQ(object):
"""
bigquery driver using the google official API
Attributes
------
query : str
the query
query_obj : Query
pbq.Query object
client : Client
the client object for bigquery
bqstorage_client : BigQueryStorageClient
the google storage client object
Methods
------
to_dataframe(save_query=False, **params)
return the query results as data frame
to_csv(filename, sep=',', save_query=False, **params)
save the query results to a csv file
save_to_table(table, dataset, project=None, replace=True, partition=None)
save query to table
run_query()
simply execute your query
table_details(table, dataset, project)
get the information about the table
Static Methods
------
save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0,
replace=True, partition=None)
save file to table, it can be partitioned and it can append to existing table.
the supported formats are CSV or PARQUET
save_dataframe_to_table(df: pd.DataFrame, table, dataset, project, max_bad_records=0, replace=True,
partition=None)
        same as save_file_to_table, but takes a pandas DataFrame instead of a file
table_exists(client: bigquery.Client, table_ref: bigquery.table.TableReference)
check if table exists - if True - table exists else not exists
Examples
------
getting query to dataframe
>>> from pbq import Query, PBQ
>>> query = Query("select * from table")
>>> print("the query price:", query.price)
>>> if not query.validate():
>>> raise RuntimeError("table not valid")
>>> pbq = PBQ(query)
>>> pbq.to_dataframe()
saving query to csv
>>> from pbq import Query, PBQ
>>> query = Query("select * from table")
>>> pbq = PBQ(query)
    >>> pbq.to_csv('results.csv')
saving dataframe to table
>>> import pandas as pd
>>> from pbq import Query, PBQ
>>> df = pd.DataFrame()
>>> PBQ.save_dataframe_to_table(df, 'table', 'dataset', 'project_id', partition='20191013', replace=False)
"""
def __init__(self, query: Query, project=None):
"""
bigquery driver using the google official API
:param query: Query object
:param project: str
the BQ project
"""
self.query = query.query
self.query_obj = query
self.project = project
if project:
self.client = bigquery.Client(project=project)
else:
self.client = bigquery.Client()
self.bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient()
def to_dataframe(self, save_query=False, **params):
"""
return the query results as data frame
in order to save the query to a table as well as getting the dataframe, send a dict as params with:
- table
- dataset
it will save to the same project
:param save_query: boolean
if to save the query to a table also
:param params: dict
when `save_query` flag is on you need to give the relevant params
:return: pd.DataFrame
the query results
"""
job_config = bigquery.QueryJobConfig()
if save_query:
table_ref = self.client.dataset(params['dataset']).table(params['table'])
job_config.destination = table_ref
query_job = self.client.query(query=self.query, job_config=job_config)
query_job_res = query_job.result()
df = query_job_res.to_dataframe(bqstorage_client=self.bqstorage_client)
return df
def to_csv(self, filename, sep=',', save_query=False, **params):
"""
save the query results to a csv file
in order to save the query to a table as well as getting the dataframe, send a dict as params with:
- table
- dataset
it will save to the same project
:param filename: str
with the path to save the file
:param sep: str
separator to the csv file
:param save_query: boolean
if to save the query to a table also
:param params: dict
when `save_query` flag is on you need to give the relevant params
"""
df = self.to_dataframe(save_query, **params)
df.to_csv(filename, sep=sep, index=False)
def run_query(self):
"""
execute your query
"""
# Set the destination table
client = self.client
query_job = client.query(self.query)
query_job.result()
print('Done running your amazing query')
def save_to_table(self, table, dataset, project=None, replace=True, partition=None):
"""
save query to table
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:param replace: boolean
if set as true - it will replace the table, else append to table (default: True)
:param partition: str
            partition in YYYYMMDD format (default: None)
"""
job_config = bigquery.QueryJobConfig()
# Set the destination table
client = self.client
if partition:
table = '{0}${1}'.format(table, partition)
table_ref = client.dataset(dataset).table(table.split('$')[0])
exists_ok = PBQ._writing_disposition(job_config, replace)
if project:
table_ref = client.dataset(dataset, project=project).table(table)
PBQ._create_table(client, exists_ok, partition, replace, table_ref)
job_config.destination = table_ref
query_job = client.query(self.query, job_config=job_config)
query_job.result()
print('Query results loaded to table {}'.format(table_ref.path))
@staticmethod
def _writing_disposition(job_config: bigquery.QueryJobConfig, replace):
exists_ok = False
if replace:
exists_ok = True
job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
else:
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
return exists_ok
@staticmethod
def _create_table(client: bigquery.Client, exists_ok, partition, replace, table_ref):
if (partition and not PBQ.table_exists(client, table_ref)) or (not partition and replace):
bq_table = bigquery.Table(table_ref)
if partition:
time_partitioning = bigquery.TimePartitioning()
bq_table.time_partitioning = time_partitioning
client.create_table(bq_table, exists_ok=exists_ok)
@staticmethod
def save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0,
replace=True, partition=None):
"""
save file to table, it can be partitioned and it can append to existing table.
the supported formats are CSV or PARQUET
:param filename: str
with the path to save the file
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:param file_format: str
possible file format (CSV, PARQUET) (default: CSV)
:param max_bad_records: int
number of bad records allowed in file (default: 0)
:param replace: boolean
            if set as true, it will replace the table, else append to it (default: True)
:param partition: str
            partition in YYYYMMDD format (default: None)
"""
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset)
table_ref = dataset_ref.table(table)
job_config = bigquery.LoadJobConfig()
job_config.max_bad_records = max_bad_records
job_config.source_format = file_format
exists_ok = PBQ._writing_disposition(job_config, replace)
if file_format == bigquery.SourceFormat.CSV:
job_config.skip_leading_rows = 1
job_config.autodetect = True
PBQ._create_table(client, exists_ok, partition, replace, table_ref)
if not partition:
with open(filename, "rb") as source_file:
job = client.load_table_from_file(source_file, table_ref, job_config=job_config)
job.result() # Waits for table load to complete.
print("Loaded {} rows into {}:{}.".format(job.output_rows, dataset, table))
else:
            print('falling back to the bq load CLI because this API load path does not target a specific partition')
table = '{0}${1}'.format(table, partition)
cmd = "bq load"
if replace:
cmd = "{} --replace".format(cmd)
cmd = "{cmd} --source_format={file_format} '{project}:{dataset}.{tbl_name}' {filename}". \
format(cmd=cmd, tbl_name=table, filename=filename, project=project, dataset=dataset,
file_format=file_format)
os.system(cmd)
@staticmethod
def save_dataframe_to_table(df: pd.DataFrame, table, dataset, project, max_bad_records=0, replace=True,
partition=None, validate_params=False):
"""
save pd.DataFrame object to table
:param df: pd.DataFrame
the dataframe you want to save
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:param max_bad_records: int
number of bad records allowed in file (default: 0)
:param replace: boolean
if set as true - it will replace the table, else append to table (default: True)
:param partition: str
            partition in YYYYMMDD format (default: None)
:param validate_params: boolean
validate the schema of the table to the dataframe object (default: False)
"""
now = datetime.datetime.now()
random_string = '{}'.format(now.strftime('%y%m%d%H%M%S'))
input_path = "/tmp/tmp-{}.parquet".format(random_string)
schema = None
        if validate_params:  # with the CLI fallback, the dataframe columns must be coerced to match the table schema
table_details = PBQ.table_details(table, dataset, project)
if 'schema' in table_details:
schema = table_details['schema']
PBQ._save_df_to_parquet(df, input_path, schema=schema)
PBQ.save_file_to_table(input_path, table, dataset, project, file_format=bigquery.SourceFormat.PARQUET,
max_bad_records=max_bad_records, replace=replace, partition=partition)
@staticmethod
def _save_df_to_parquet(df, input_path, index=False, schema=None):
if schema:
for s in schema:
if s['field_type'] == 'STRING':
s['field_type'] = 'str'
if s['field_type'] == 'INTEGER':
s['field_type'] = 'int'
if s['field_type'] == 'TIMESTAMP':
df[s['column']] = pd.to_datetime(df[s['column']], errors='coerce')
continue
if s['field_type'] == 'DATE':
df[s['column']] = pd.to_datetime(df[s['column']], errors='coerce')
df[s['column']] = df[s['column']].dt.date
continue
df.columns = ["{}".format(col) for col in df.columns]
df.to_parquet(input_path, index=index)
@staticmethod
def table_details(table, dataset, project):
"""
return a dict object with some details about the table
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:return: dict
with some table information like, last_modified_time, num_bytes, num_rows, and creation_time
"""
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset, project=project)
table_ref = dataset_ref.table(table)
try:
table = client.get_table(table_ref)
except NotFound as error:
return {}
schema = []
for s in table.schema:
schema.append({'column': s.name, 'field_type': s.field_type})
res = {'last_modified_time': table.modified, 'num_bytes': table.num_bytes, 'num_rows': table.num_rows,
'creation_time': table.created, 'schema': schema}
return res
@staticmethod
def table_exists(client: bigquery.Client, table_ref: bigquery.table.TableReference):
"""
check if table exists - if True - table exists else not exists
:param client: bigquery.Client object
:param table_ref: bigquery.table.TableReference object
with the table name and dataset
:return: boolean
True if table exists
False if table not exists
"""
try:
table = client.get_table(table_ref)
if table:
return True
except NotFound as error:
return False
except BadRequest as error:
return True
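if __name__ == '__main__':
    # Minimal sketch, not part of the library: assumes a reachable BigQuery
    # project 'my-project' with a dataset 'analytics' and configured credentials.
    q = Query("SELECT 1 AS one")
    if q.validate():
        pbq = PBQ(q, project='my-project')
        # materialise the query into a date-partitioned table, appending to it
        pbq.save_to_table('daily_ones', 'analytics', replace=False,
                          partition=datetime.datetime.now().strftime('%Y%m%d'))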
| 3.328125 | 3 |
appvalidator/specprocessor.py | mstriemer/app-validator | 20 | 11636 | import re
import types
from functools import partial
LITERAL_TYPE = types.StringTypes + (int, float, long, bool, )
class Spec(object):
"""
This object, when overridden with an object that implements a file format
specification, will perform validation on a given parsed version of the
format input.
SPEC Node Documentation:
========================
expected_type:
A type object whose type the object should match.
required_nodes:
A list of nodes that are required for the current node.
required_nodes_when:
A dict of node name/lambda pairs. If the lambda evaluates to True, a
node whose name corresponds to the node name is required.
The current node is passed as a parameter to the lambda as the only
argument.
disallowed_nodes:
A list of nodes that explicitly are disallowed in the current node.
allowed_once_nodes:
A list of nodes that are allowed only once.
allowed_nodes:
A list of nodes that are allowed multiple times.
unknown_node_level:
The message type to return when an unknown node is encountered.
child_nodes:
A dict of node definitions for nodes that can exist within this node.
max_length:
For sequence values only. An integer describing the maximum length of
the string.
not_empty:
A boolean value describing whether the string/list/dict can be empty.
values:
A list of possible values for the node. Only applies to lists and
literal nodes.
value_matches:
If `values` is not set, the value must match this regex. Only applies
to string nodes.
process:
A lambda function that returns a function to process the node. The
lambda accepts one parameter (self) and should return a function that
accepts two parameters (self, node).
child_process:
A lambda function (similar to `process` that returns a function to
process a child node. The lambda accepts one parameter (self) and
should return a function that accepts three parameters (self, node_name,
node).
If this is set, no further testing will take place on child nodes.
"""
SPEC_NAME = "Specification"
MORE_INFO = "You can find more info online."
SPEC = None
def __init__(self, data, err):
self.data = self.parse(data)
self.err = err
self.error = partial(self._err_message, self.err.error)
self.warning = partial(self._err_message, self.err.warning)
self.notice = partial(self._err_message, self.err.notice)
self.err_map = {"error": self.error,
"warning": self.warning,
"notice": self.notice}
self.path = []
def _err_message(self, func, *args, **kwargs):
if self.path:
nodepath = "Node: %s" % self._get_path()
if isinstance(kwargs["description"], list):
kwargs["description"].append(nodepath)
else:
kwargs["description"] = [
kwargs["description"], nodepath]
func(*args, **kwargs)
def _message(self, type_, *args, **kwargs):
kwargs[type_] = kwargs.pop("message")
self.err_map[type_](*args, **kwargs)
def validate(self):
# Validate the root node.
root_name, root_node = self.get_root_node(self.data)
root_val_result = self.validate_root_node(root_node)
if root_val_result == False:
return
# Iterate the tree and validate as we go.
self.iterate(root_name, root_node, self.SPEC)
def parse(self, data): pass
def validate_root_node(self, node): pass
def get_root_node(self, data):
"""
We expect this function to return a tuple:
("Root Node Name", root_node)
"""
def has_attribute(self, node, key): pass
def get_attribute(self, node, key): pass
def has_child(self, node, child_name): pass
def get_children(self, node):
"""
This function should return a list of (child_name, child)-form tuples.
"""
def iterate(self, branch_name, branch, spec_branch):
self.path.append(branch_name)
self._iterate(branch_name, branch, spec_branch)
self.path.pop()
def _get_path(self):
return ' > '.join(self.path)
def _iterate(self, branch_name, branch, spec_branch):
"""Iterate the tree of nodes and validate as we go."""
# Check that the node is of the proper type. If it isn't, then we need
# to stop iterating at this point.
exp_type = spec_branch.get("expected_type")
if (exp_type and
not isinstance(branch, exp_type) or
# Handle `isinstance(True, int) == True` :(
(isinstance(branch, bool) and
(exp_type == int if isinstance(exp_type, type) else
bool not in exp_type))):
self.error(
err_id=("spec", "iterate", "bad_type"),
error="%s's `%s` was of an unexpected type." %
(self.SPEC_NAME, branch_name),
description=["While validating a %s, a `%s` was encountered "
"which is of an improper type." %
(self.SPEC_NAME, branch_name),
"Found: %s" % repr(branch),
self.MORE_INFO])
return
# Handle any generic processing.
if "process" in spec_branch:
# Let the spec processor resolve the processor and then run the
# processor.
spec_branch["process"](self)(branch)
if "not_empty" in spec_branch and not branch:
self.error(
err_id=("spec", "iterate", "empty"),
error="`%s` is empty." % branch_name,
description=["A value was expected for `%s`, but one wasn't "
"found." % branch_name,
self.MORE_INFO])
# If the node isn't an object...
if not isinstance(branch, dict):
if "values" in spec_branch and branch not in spec_branch["values"]:
self.error(
err_id=("spec", "iterate", "bad_value"),
error="`%s` contains an invalid value in %s" %
(branch_name, self.SPEC_NAME),
description=["A `%s` was encountered while validating a "
"`%s` containing the value '%s'. This value "
"is not appropriate for this type of "
"element." %
(branch_name, self.SPEC_NAME, branch),
self.MORE_INFO])
elif ("value_matches" in spec_branch and
isinstance(branch, types.StringTypes)):
raw_pattern = spec_branch["value_matches"]
if not re.match(raw_pattern, branch):
self.error(
err_id=("spec", "iterate", "value_pattern_fail"),
error="`%s` contains an invalid value in %s" %
(branch_name, self.SPEC_NAME),
description=["A `%s` was encountered while validating "
"a `%s`. Its value does not match the "
"pattern required for `%s`s." %
(branch_name, self.SPEC_NAME,
branch_name),
"Found value: %s" % branch,
"Pattern: %s" % raw_pattern,
self.MORE_INFO])
if ("max_length" in spec_branch and
len(branch) > spec_branch["max_length"]):
self.error(
err_id=("spec", "iterate", "max_length"),
error="`%s` has exceeded its maximum length." % branch_name,
description=["`%s` has a maximum length (%d), which has "
"been exceeded (%d)." %
(branch_name, spec_branch["max_length"],
len(branch)),
self.MORE_INFO])
# The rest of the tests are for child items.
if not isinstance(branch, (list, tuple)):
return
if "child_nodes" in spec_branch:
for child in branch:
self.iterate(branch_name + " descendant", child,
spec_branch["child_nodes"])
# We've got nothing else to do with lists.
return
# If we need to process the child nodes individually, do that now.
if "child_process" in spec_branch:
processor = spec_branch["child_process"](self)
for child_name, child in self.get_children(branch):
processor(child_name, child)
# If there's nothing else to do, don't go down that path.
if ("required_nodes" not in spec_branch and
"required_nodes_when" not in spec_branch and
"disallowed_nodes" not in spec_branch):
return
considered_nodes = set()
# Check that all required node as present.
if "required_nodes" in spec_branch:
considered_nodes.update(spec_branch["required_nodes"])
for req_node in [n for n in spec_branch["required_nodes"] if
not self.has_child(branch, n)]:
self.error(
err_id=("spec", "iterate", "missing_req"),
error="%s expecting `%s`" % (self.SPEC_NAME, req_node),
description=["The '%s' node of the %s expects a `%s` "
"element, which was not found." %
(branch_name, self.SPEC_NAME, req_node),
self.MORE_INFO])
# Check that conditionally required nodes are present.
if "required_nodes_when" in spec_branch:
considered_nodes.update(spec_branch["required_nodes_when"].keys())
for req_node in [name for name, cond in
spec_branch["required_nodes_when"].items() if
cond(branch) and not self.has_child(branch, name)]:
self.error(
err_id=("spec", "iterate", "missing_req_cond"),
error="%s expecting `%s`" % (self.SPEC_NAME, req_node),
description=["The '%s' node, under the current "
"circumstances, is missing a `%s` element. "
"This is a required condition of a %s." %
(branch_name, req_node, self.SPEC_NAME),
self.MORE_INFO])
# Check that there are no disallowed nodes.
if "disallowed_nodes" in spec_branch:
disallowed_nodes = spec_branch["disallowed_nodes"]
considered_nodes.update(disallowed_nodes)
for dnode in [n for n in disallowed_nodes if
self.has_child(branch, n)]:
self.error(
err_id=("spec", "iterate", "disallowed"),
error="%s found `%s`, which is not allowed." %
(self.SPEC_NAME, dnode),
description=["The '%s' node contains `%s`, which is a "
"disallowed element. It should be removed." %
(branch_name, dnode),
self.MORE_INFO])
if ("allowed_nodes" not in spec_branch and
"allowed_once_nodes" not in spec_branch):
return
# Check that allowed nodes are obeyed.
allowed_nodes = set(spec_branch.setdefault("allowed_nodes", []))
allowed_once_nodes = spec_branch.setdefault("allowed_once_nodes", [])
allowed_nodes.update(allowed_once_nodes)
child_node_specs = spec_branch.setdefault("child_nodes", {})
seen_nodes = set()
warned_nodes = set()
for child_name, child in self.get_children(branch):
cspec_branch = None
# Process the node first.
if child_name in child_node_specs:
cspec_branch = child_node_specs[child_name]
elif "*" in child_node_specs:
cspec_branch = child_node_specs["*"]
if cspec_branch is not None:
# If it's a lazily evaluated branch, evaluate it now.
if isinstance(cspec_branch, types.LambdaType):
cspec_branch = cspec_branch(self)
# Iterate the node.
self.iterate(child_name, child, cspec_branch)
# If we've seen a node before that's only supposed to be seen a
# single time, warn about it.
if child_name in allowed_once_nodes and child_name in seen_nodes:
# Don't warn about the same node multiple times.
if child_name in warned_nodes:
continue
self.error(
err_id=("spec", "iterate", "allow_once_multiple"),
error="%s found `%s` more than once." %
(self.SPEC_NAME, child_name),
description=["%ss may only contain a single `%s` element, "
"however, it was encountered multiple times." %
(self.SPEC_NAME, child_name),
self.MORE_INFO])
continue
# Remember that we've seen this node.
seen_nodes.add(child_name)
if child_name in considered_nodes:
continue
# If the child isn't allowed, throw an error.
if child_name not in allowed_nodes and "*" not in allowed_nodes:
self._message(
spec_branch.get("unknown_node_level", "warning"),
err_id=("spec", "iterate", "not_allowed"),
message="`%s` is not a recognized element within a %s" %
(child_name, self.SPEC_NAME),
description=["While iterating a %s, a `%s` was found "
"within a %s, which is not valid." %
(self.SPEC_NAME, child_name, branch_name),
self.MORE_INFO])
| 3.265625 | 3 |
head_first_v2/ch4/modules/setup.py | alex-d-bondarev/learn-python | 0 | 11637 | from setuptools import setup
setup(
name='lsearch',
version='1.0',
    description='The Head First Python Search Tools',
    author='<NAME>',
    author_email='<EMAIL>',
url='headfirstlabs.com',
py_modules=['lsearch'],
)
| 1.054688 | 1 |
desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py | UAEKondaya1/expressvpn_leak_testing | 219 | 11638 | <filename>desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py
from desktop_local_tests.public_ip_during_disruption import PublicIPDuringDisruptionTestCase
from desktop_local_tests.windows.windows_reorder_adapters_disrupter import WindowsReorderAdaptersDisrupter
class TestWindowsPublicIPDisruptReorderAdapters(PublicIPDuringDisruptionTestCase):
'''Summary:
Tests whether traffic leaving the user's device has the public IP hidden when the adapter order
is changed.
Details:
    This test will connect to the VPN, then swap the priority of the primary and secondary network
    adapters. The test then queries a webpage to detect its public IP.
    Discussion:
    It's not 100% clear if, in the real world, adapters can change their order without user
    involvement. It is, however, still a good stress test of the application.
    On Windows, adapter order is determined by the interface metric. It can be set manually, but
    otherwise the system assigns it based on how "good" it judges an adapter to be, e.g. its
throughput. In theory that means metrics can change dynamically.
Weaknesses:
The time taken to perform each IP request is relatively long. Tests using IPResponder should be
preferred over these tests.
Scenarios:
Requires two active adapters.
TODO:
Consider a variant which changes the network "Location". This is much more likely to be
something a user might do.
'''
def __init__(self, devices, parameters):
super().__init__(WindowsReorderAdaptersDisrupter, devices, parameters)
| 2.515625 | 3 |
rotkehlchen/api/server.py | rotkehlchenio/rotkehlchen | 137 | 11639 | import json
import logging
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import werkzeug
from flask import Blueprint, Flask, Response, abort, jsonify
from flask.views import MethodView
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket import Resource as WebsocketResource, WebSocketServer
from marshmallow import Schema
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import parser
from werkzeug.exceptions import NotFound
from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result
from rotkehlchen.api.v1.parser import ignore_kwarg_parser, resource_parser
from rotkehlchen.api.v1.resources import (
AaveBalancesResource,
AaveHistoryResource,
AccountingReportDataResource,
AccountingReportsResource,
AdexBalancesResource,
AdexHistoryResource,
AllAssetsResource,
AllBalancesResource,
AssetIconsResource,
AssetMovementsResource,
AssetsReplaceResource,
AssetsTypesResource,
AssetUpdatesResource,
AssociatedLocations,
AsyncTasksResource,
AvalancheTransactionsResource,
BalancerBalancesResource,
BalancerEventsHistoryResource,
BalancerTradesHistoryResource,
BinanceAvailableMarkets,
BinanceUserMarkets,
BlockchainBalancesResource,
BlockchainsAccountsResource,
BTCXpubResource,
CompoundBalancesResource,
CompoundHistoryResource,
CounterpartiesResource,
CurrentAssetsPriceResource,
DatabaseBackupsResource,
DatabaseInfoResource,
DataImportResource,
DBSnapshotDeletingResource,
DBSnapshotDownloadingResource,
DBSnapshotExportingResource,
DBSnapshotImportingResource,
DefiBalancesResource,
ERC20TokenInfo,
ERC20TokenInfoAVAX,
Eth2DailyStatsResource,
Eth2StakeDepositsResource,
Eth2StakeDetailsResource,
Eth2ValidatorsResource,
EthereumAirdropsResource,
EthereumAssetsResource,
EthereumModuleDataResource,
EthereumModuleResource,
EthereumTransactionsResource,
ExchangeBalancesResource,
ExchangeRatesResource,
ExchangesDataResource,
ExchangesResource,
ExternalServicesResource,
HistoricalAssetsPriceResource,
HistoryActionableItemsResource,
HistoryBaseEntryResource,
HistoryDownloadingResource,
HistoryExportingResource,
HistoryProcessingResource,
HistoryStatusResource,
IgnoredActionsResource,
IgnoredAssetsResource,
InfoResource,
LedgerActionsResource,
LiquityStakingHistoryResource,
LiquityStakingResource,
LiquityTrovesHistoryResource,
LiquityTrovesResource,
LoopringBalancesResource,
MakerdaoDSRBalanceResource,
MakerdaoDSRHistoryResource,
MakerdaoVaultDetailsResource,
MakerdaoVaultsResource,
ManuallyTrackedBalancesResource,
MessagesResource,
NamedEthereumModuleDataResource,
NamedOracleCacheResource,
NFTSBalanceResource,
NFTSResource,
OraclesResource,
OwnedAssetsResource,
PeriodicDataResource,
PickleDillResource,
PingResource,
QueriedAddressesResource,
ReverseEnsResource,
SettingsResource,
StakingResource,
StatisticsAssetBalanceResource,
StatisticsNetvalueResource,
StatisticsRendererResource,
StatisticsValueDistributionResource,
SushiswapBalancesResource,
SushiswapEventsHistoryResource,
SushiswapTradesHistoryResource,
TagsResource,
TradesResource,
UniswapBalancesResource,
UniswapEventsHistoryResource,
UniswapTradesHistoryResource,
UserAssetsResource,
UserPasswordChangeResource,
UserPremiumKeyResource,
UserPremiumSyncResource,
UsersByNameResource,
UsersResource,
WatchersResource,
YearnVaultsBalancesResource,
YearnVaultsHistoryResource,
YearnVaultsV2BalancesResource,
YearnVaultsV2HistoryResource,
create_blueprint,
)
from rotkehlchen.api.websockets.notifier import RotkiNotifier, RotkiWSApp
from rotkehlchen.logging import RotkehlchenLogsAdapter
URLS = List[
Union[
Tuple[str, Type[MethodView]],
Tuple[str, Type[MethodView], str],
]
]
URLS_V1: URLS = [
('/users', UsersResource),
('/watchers', WatchersResource),
('/users/<string:name>', UsersByNameResource),
('/users/<string:name>/password', UserPasswordChangeResource),
('/premium', UserPremiumKeyResource),
('/premium/sync', UserPremiumSyncResource),
('/settings', SettingsResource),
('/tasks/', AsyncTasksResource),
('/tasks/<int:task_id>', AsyncTasksResource, 'specific_async_tasks_resource'),
('/exchange_rates', ExchangeRatesResource),
('/external_services/', ExternalServicesResource),
('/oracles', OraclesResource),
('/oracles/<string:oracle>/cache', NamedOracleCacheResource),
('/exchanges', ExchangesResource),
('/exchanges/balances', ExchangeBalancesResource),
(
'/exchanges/balances/<string:location>',
ExchangeBalancesResource,
'named_exchanges_balances_resource',
),
('/assets/<string:asset>/icon', AssetIconsResource),
('/trades', TradesResource),
('/ledgeractions', LedgerActionsResource),
('/asset_movements', AssetMovementsResource),
('/tags', TagsResource),
('/exchanges/binance/pairs', BinanceAvailableMarkets),
('/exchanges/binance/pairs/<string:name>', BinanceUserMarkets),
('/exchanges/data/', ExchangesDataResource),
('/exchanges/data/<string:location>', ExchangesDataResource, 'named_exchanges_data_resource'),
('/balances/blockchains', BlockchainBalancesResource),
(
'/balances/blockchains/<string:blockchain>',
BlockchainBalancesResource,
'named_blockchain_balances_resource',
),
('/balances/', AllBalancesResource),
('/balances/manual', ManuallyTrackedBalancesResource),
('/statistics/netvalue', StatisticsNetvalueResource),
('/statistics/balance/<string:asset>', StatisticsAssetBalanceResource),
('/statistics/value_distribution', StatisticsValueDistributionResource),
('/statistics/renderer', StatisticsRendererResource),
('/messages/', MessagesResource),
('/periodic/', PeriodicDataResource),
('/history/', HistoryProcessingResource),
('/history/status', HistoryStatusResource),
('/history/export/', HistoryExportingResource),
('/history/download/', HistoryDownloadingResource),
('/history/events', HistoryBaseEntryResource),
('/history/actionable_items', HistoryActionableItemsResource),
('/reports/', AccountingReportsResource),
(
'/reports/<int:report_id>',
AccountingReportsResource,
'per_report_resource',
),
(
'/reports/<int:report_id>/data',
AccountingReportDataResource,
'per_report_data_resource',
),
('/queried_addresses', QueriedAddressesResource),
('/blockchains/ETH/transactions', EthereumTransactionsResource),
(
'/blockchains/ETH/transactions/<string:address>',
EthereumTransactionsResource,
'per_address_ethereum_transactions_resource',
),
('/blockchains/ETH2/validators', Eth2ValidatorsResource),
('/blockchains/ETH2/stake/deposits', Eth2StakeDepositsResource),
('/blockchains/ETH2/stake/details', Eth2StakeDetailsResource),
('/blockchains/ETH2/stake/dailystats', Eth2DailyStatsResource),
('/blockchains/ETH/defi', DefiBalancesResource),
('/blockchains/ETH/airdrops', EthereumAirdropsResource),
('/blockchains/ETH/erc20details/', ERC20TokenInfo),
('/blockchains/ETH/modules/<string:module_name>/data', NamedEthereumModuleDataResource),
('/blockchains/ETH/modules/data', EthereumModuleDataResource),
('/blockchains/ETH/modules/data/counterparties', CounterpartiesResource),
('/blockchains/ETH/modules/', EthereumModuleResource),
('/blockchains/ETH/modules/makerdao/dsrbalance', MakerdaoDSRBalanceResource),
('/blockchains/ETH/modules/makerdao/dsrhistory', MakerdaoDSRHistoryResource),
('/blockchains/ETH/modules/makerdao/vaults', MakerdaoVaultsResource),
('/blockchains/ETH/modules/makerdao/vaultdetails', MakerdaoVaultDetailsResource),
('/blockchains/ETH/modules/aave/balances', AaveBalancesResource),
('/blockchains/ETH/modules/aave/history', AaveHistoryResource),
('/blockchains/ETH/modules/adex/balances', AdexBalancesResource),
('/blockchains/ETH/modules/adex/history', AdexHistoryResource),
('/blockchains/ETH/modules/balancer/balances', BalancerBalancesResource),
('/blockchains/ETH/modules/balancer/history/trades', BalancerTradesHistoryResource),
('/blockchains/ETH/modules/balancer/history/events', BalancerEventsHistoryResource),
('/blockchains/ETH/modules/compound/balances', CompoundBalancesResource),
('/blockchains/ETH/modules/compound/history', CompoundHistoryResource),
('/blockchains/ETH/modules/uniswap/balances', UniswapBalancesResource),
('/blockchains/ETH/modules/uniswap/history/events', UniswapEventsHistoryResource),
('/blockchains/ETH/modules/uniswap/history/trades', UniswapTradesHistoryResource),
('/blockchains/ETH/modules/sushiswap/balances', SushiswapBalancesResource),
('/blockchains/ETH/modules/sushiswap/history/events', SushiswapEventsHistoryResource),
('/blockchains/ETH/modules/sushiswap/history/trades', SushiswapTradesHistoryResource),
('/blockchains/ETH/modules/yearn/vaults/balances', YearnVaultsBalancesResource),
('/blockchains/ETH/modules/yearn/vaults/history', YearnVaultsHistoryResource),
('/blockchains/ETH/modules/yearn/vaultsv2/balances', YearnVaultsV2BalancesResource),
('/blockchains/ETH/modules/yearn/vaultsv2/history', YearnVaultsV2HistoryResource),
('/blockchains/ETH/modules/liquity/balances', LiquityTrovesResource),
('/blockchains/ETH/modules/liquity/events/trove', LiquityTrovesHistoryResource),
('/blockchains/ETH/modules/liquity/events/staking', LiquityStakingHistoryResource),
('/blockchains/ETH/modules/liquity/staking', LiquityStakingResource),
('/blockchains/ETH/modules/pickle/dill', PickleDillResource),
('/blockchains/ETH/modules/loopring/balances', LoopringBalancesResource),
('/blockchains/<string:blockchain>', BlockchainsAccountsResource),
('/blockchains/BTC/xpub', BTCXpubResource),
('/blockchains/AVAX/transactions', AvalancheTransactionsResource),
(
'/blockchains/AVAX/transactions/<string:address>',
AvalancheTransactionsResource,
'per_address_avalanche_transactions_resource',
),
('/blockchains/AVAX/erc20details/', ERC20TokenInfoAVAX),
('/assets', OwnedAssetsResource),
('/assets/types', AssetsTypesResource),
('/assets/replace', AssetsReplaceResource),
('/assets/all', AllAssetsResource),
('/assets/ethereum', EthereumAssetsResource),
('/assets/prices/current', CurrentAssetsPriceResource),
('/assets/prices/historical', HistoricalAssetsPriceResource),
('/assets/ignored', IgnoredAssetsResource),
('/assets/updates', AssetUpdatesResource),
('/assets/user', UserAssetsResource),
('/actions/ignored', IgnoredActionsResource),
('/info', InfoResource),
('/ping', PingResource),
('/import', DataImportResource),
('/nfts', NFTSResource),
('/nfts/balances', NFTSBalanceResource),
('/database/info', DatabaseInfoResource),
('/database/backups', DatabaseBackupsResource),
('/locations/associated', AssociatedLocations),
('/staking/kraken', StakingResource),
('/snapshot/download', DBSnapshotDownloadingResource),
('/snapshot/export', DBSnapshotExportingResource),
('/snapshot/import', DBSnapshotImportingResource),
('/snapshot/delete', DBSnapshotDeletingResource),
('/ens/reverse', ReverseEnsResource),
]
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def setup_urls(
rest_api: RestAPI,
blueprint: Blueprint,
urls: URLS,
) -> None:
for url_tuple in urls:
if len(url_tuple) == 2:
route, resource_cls = url_tuple # type: ignore
endpoint = resource_cls.__name__.lower()
elif len(url_tuple) == 3:
route, resource_cls, endpoint = url_tuple # type: ignore
else:
raise ValueError(f"Invalid URL format: {url_tuple!r}")
blueprint.add_url_rule(
route,
view_func=resource_cls.as_view(endpoint, rest_api_object=rest_api),
)
def endpoint_not_found(e: NotFound) -> Response:
msg = 'invalid endpoint'
# The isinstance check is because I am not sure if `e` is always going to
# be a "NotFound" error here
if isinstance(e, NotFound):
msg = e.description
return api_response(wrap_in_fail_result(msg), HTTPStatus.NOT_FOUND)
@parser.error_handler # type: ignore
@resource_parser.error_handler
@ignore_kwarg_parser.error_handler
def handle_request_parsing_error(
err: ValidationError,
_request: werkzeug.local.LocalProxy,
_schema: Schema,
error_status_code: Optional[int], # pylint: disable=unused-argument
error_headers: Optional[Dict], # pylint: disable=unused-argument
) -> None:
""" This handles request parsing errors generated for example by schema
field validation failing."""
msg = str(err)
if isinstance(err.messages, dict):
# first key is just the location. Ignore
key = list(err.messages.keys())[0]
msg = json.dumps(err.messages[key])
elif isinstance(err.messages, list):
msg = ','.join(err.messages)
err_response = jsonify(result=None, message=msg)
err_response.status_code = HTTPStatus.BAD_REQUEST
abort(err_response)
class APIServer():
_api_prefix = '/api/1'
def __init__(
self,
rest_api: RestAPI,
ws_notifier: RotkiNotifier,
cors_domain_list: List[str] = None,
) -> None:
flask_app = Flask(__name__)
if cors_domain_list:
CORS(flask_app, origins=cors_domain_list)
blueprint = create_blueprint(self._api_prefix)
setup_urls(
blueprint=blueprint,
rest_api=rest_api,
urls=URLS_V1,
)
self.rest_api = rest_api
self.rotki_notifier = ws_notifier
self.flask_app = flask_app
self.blueprint = blueprint
self.wsgiserver: Optional[WSGIServer] = None
self.flask_app.register_blueprint(self.blueprint)
self.ws_server: Optional[WebSocketServer] = None
self.flask_app.errorhandler(HTTPStatus.NOT_FOUND)(endpoint_not_found)
self.flask_app.register_error_handler(Exception, self.unhandled_exception)
@staticmethod
def unhandled_exception(exception: Exception) -> Response:
""" Flask.errorhandler when an exception wasn't correctly handled """
log.critical(
'Unhandled exception when processing endpoint request',
exc_info=True,
exception=str(exception),
)
return api_response(wrap_in_fail_result(str(exception)), HTTPStatus.INTERNAL_SERVER_ERROR)
def run(self, host: str = '127.0.0.1', port: int = 5042, **kwargs: Any) -> None:
"""This is only used for the data faker and not used in production"""
self.flask_app.run(host=host, port=port, **kwargs)
def start(
self,
host: str = '127.0.0.1',
rest_port: int = 5042,
websockets_port: int = 5043,
) -> None:
"""This is used to start the API server in production"""
wsgi_logger = logging.getLogger(__name__ + '.pywsgi')
self.wsgiserver = WSGIServer(
listener=(host, rest_port),
application=self.flask_app,
log=wsgi_logger,
error_log=wsgi_logger,
)
msg = f'rotki REST API server is running at: {host}:{rest_port}'
print(msg)
log.info(msg)
self.wsgiserver.start()
self.ws_server = WebSocketServer(
listener=(host, websockets_port),
application=WebsocketResource([
('^/', RotkiWSApp),
]),
debug=False,
environ={'rotki_notifier': self.rotki_notifier},
)
msg = f'rotki Websockets API server is running at: {host}:{websockets_port}'
print(msg)
log.info(msg)
self.ws_server.start()
def stop(self, timeout: int = 5) -> None:
"""Stops the API server. If handlers are running after timeout they are killed"""
if self.wsgiserver is not None:
self.wsgiserver.stop(timeout)
self.wsgiserver = None
if self.ws_server is not None:
self.ws_server.stop(timeout)
            self.ws_server = None
self.rest_api.stop()
| 1.390625 | 1 |
stability/stairs_contacts.py | haudren/stability-polygon | 0 | 11640 | import numpy as np
pos = []
normals = []
p = [[-0.4722227, -0.24517583, -0.6370031]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.2549828, -0.24587737, -0.63704705]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.25787751, -0.38255749, -0.63705089]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.47206733, -0.38317576, -0.6370076]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
#Contact lgripper/handrail
#Left
p = [[0.3651077, 0.33419711, 0.63609439]]
n = [[-3.39491173e-05, 9.99999875e-01, 4.99472000e-04]]
pos.append(p)
normals.append(n)
#Right
#p = [[0.36510907, 0.29419711, 0.63607441]]
#p = [[0.3651077, 0.33419711, 0.63609439]]
#n = [[3.44761855e-05, -9.99999874e-01, -5.00077386e-04]]
#pos.append(p)
#normals.append(n)
#Bottom
#p = [[0.34212609, 0.31418314, 0.66248165]]
#n = [[-6.56636734e-01, -3.99160434e-04, 7.54206895e-01]]
#pos.append(p)
#normals.append(n)
#
##Top
p = [[0.38480749, 0.31420908, 0.61345819]]
n = [[6.56636734e-01, 4.00439950e-04, -7.54206894e-01]]
pos.append(p)
normals.append(n)
pos = [np.array(px).T for px in pos]
#for p in pos:
# p[2, 0] = 0.0
normals = [np.array(nx).T for nx in normals]
| 1.90625 | 2 |
generator/framework/analyser/analyser.py | sinsay/ds_generator | 0 | 11641 | <reponame>sinsay/ds_generator
import inspect
import re
import types
from collections import namedtuple
from typing import List, Union, Dict
from flask_restplus import fields
from ...common import MetaData, Entry, Arg, ArgSource, RpcType,\
type_def, rpc_doc_args_key, rpc_doc_resp_key, rpc_impl_rename
from ...common.web.namespace import get_namespace, NamespaceInfo
function_type = frozenset([staticmethod, classmethod, types.FunctionType])
func_obj_types = frozenset([staticmethod, classmethod])
method_reg = re.compile(r"^[\s\S]+?(?=:param|:return:|$)")
class Analyser(object):
"""
    Analyser that extracts the documentation metadata bound to the given types.
"""
@staticmethod
def analyse(service_classes, service_impl_classes, need_impl: bool = True) -> List[MetaData]:
"""
        Analyse service_classes and return their metadata; classes that carry no
        metadata are left out of the returned list. service_impl_classes are the
        implementations of the corresponding services and should map to them
        one-to-one. need_impl indicates whether a concrete implementation has to be
        resolved for each service class.
        :param service_classes:
        :param service_impl_classes:
:param need_impl:
:return:
"""
meta_data = []
for c in service_classes:
methods = extract_methods(c)
if not methods:
continue
            # Check whether a custom implementation name was provided
impl_name = getattr(c, rpc_impl_rename, c.__name__)
            # Find the matching implementation class
impl = list(filter(lambda i: i.__name__ ==
impl_name, service_impl_classes))
if not impl and need_impl:
raise Exception(
"found service %s definition without implement code" % c.__name__)
meta = MetaData(c.__name__, c, methods,
impl_type=impl and impl[0] or None)
meta_data.append(meta)
return sorted(meta_data, key=lambda m: m.name.lower())
def extract_methods(cls):
"""
    Analyse a class and return all of its methods that define an api doc.
:param cls:
:return:
"""
# process cls' s apidoc if exists
base_entries_arg: List[Arg] = process_cls_args(cls)
entries = []
for (attr_name, attr) in cls.__dict__.items():
attr_type = type(attr)
if attr_name.startswith('_') or attr_type not in function_type:
continue
rpc_doc_args: type_def.Dict = getattr(attr, rpc_doc_args_key, None)
rpc_doc_resp: type_def.RpcType = getattr(attr, rpc_doc_resp_key, None)
if attr_type in func_obj_types:
# extract real method from method object
attr = getattr(attr, "__func__", None)
api_doc = getattr(attr, '__apidoc__', None)
        # TODO: merging multiple configuration styles is not supported for now; consider adding it later
entry = None
if api_doc:
entry = analyse_doc(cls, attr, attr_name, api_doc, base_entries_arg)
entry.args = base_entries_arg + entry.args
args = list(base_entries_arg)
result = type_def.Void()
if rpc_doc_args:
            # args is always the model's RpcType.Dict type.
            # Choose the ArgSource based on attr_name; for non-HTTP methods the
            # chosen source type has no effect anyway.
for name, value in rpc_doc_args.get_elem_info().items():
source = get_source_type(attr_name, value)
args.append(Arg(name, value, value.default_value,
value.description, value.required, source))
if rpc_doc_resp:
result = rpc_doc_resp
if not entry:
raw_doc = inspect.getdoc(attr) or ""
method_doc = method_reg.search(raw_doc)
if method_doc:
method_doc = method_doc.group(0)
args = sorted(args, key=lambda a: a.name.lower())
entry = Entry(attr_name, args, result, method_doc)
entries.append(entry)
return sorted(entries, key=lambda e: e.name.lower())
def get_source_type(method_name: str, field: RpcType) -> ArgSource:
"""
    Determine where the field's value comes from. The method name is checked first:
    for HTTP methods, "get" maps to query params and "post" maps to the request body.
    If the field explicitly sets a source, that source is used instead.
"""
source = ArgSource.UNKNOWN
if method_name == "get":
source = ArgSource.PARAMS
elif method_name == "post":
source = ArgSource.BODY
if field.source != ArgSource.UNKNOWN:
source = field.get_source()
return source
def process_cls_args(cls) -> List[Arg]:
"""
    Extract argument information from the api_doc of cls.
"""
cls_api_doc = getattr(cls, "__apidoc__", {})
params: dict = cls_api_doc.get("params", {})
args: List[Arg] = []
for key, value in params.items():
field_type = switch_type(value.get("type", "str"), value)
source_in = value.get("in", "path")
source = ArgSource.PARAMS
if source_in == "path":
source = ArgSource.PATH
elif source_in == "body":
source = ArgSource.BODY
elif source_in == "header":
source = ArgSource.HEADER
args.append(Arg(key, field_type, field_type.default_value, source=source, description=field_type.description))
    # After the Flask definitions are handled, the CommonBase definitions also need processing
ns_info: Union[NamespaceInfo, None] = get_namespace(cls)
if ns_info is not None:
for arg_name, arg_value in ns_info.params.items():
args.append(
Arg(
arg_name,
arg_value,
arg_value.default_value,
description=arg_value.description,
required=arg_value.required,
source=ArgSource.PATH
)
)
return args
def analyse_doc(cls, method, name, api_doc, class_args: List[Arg]) -> Entry:
"""
    Analyse the api_doc information of the given method on cls and convert it into
    the local format so it can be analysed further.
    :param cls:
    :param method:
:param name:
:param api_doc:
:param class_args:
:return:
"""
    # First look at the parameters named in the function signature.
    # Unwrap the function's decorators to get the actual function body being called.
while hasattr(method, "__wrapped__"):
method = getattr(method, "__wrapped__")
    # Try to fetch the entry's docstring
method_doc_raw = inspect.getdoc(method) or ""
method_doc = method_reg.search(method_doc_raw) or ""
if method_doc:
method_doc = method_doc.group(0)
args = analyse_args(cls, method, method_doc_raw, api_doc, class_args)
entry = Entry(name, args, type_def.Void(), method_doc)
status_codes = api_doc.get("responses", {}).keys()
for status_code in status_codes:
result = analyse_result(api_doc, status_code)
if result:
entry.set_result(status_code, result)
return entry
def analyse_result(api_doc, status_code: int) -> type_def.RpcType:
"""
    Extract the response (return value) information from api_doc.
:param api_doc:
:param status_code:
:return:
"""
(desc, data_meta) = api_doc.get("responses", {}).get(status_code, (None, None))
if not desc and not data_meta:
        return type_def.Void()  # this endpoint returns nothing
if isinstance(data_meta, dict):
        # composite type described by a dict
result = type_def.fields.Dict(required=True)
for (key, type_info) in data_meta.items():
result.add_field(key, switch_type(type_info))
else:
        # plain primitive type
result = switch_type(data_meta)
return result
def analyse_args(cls, method, method_doc_raw, api_doc, class_args: List[Arg]) -> List[Arg]:
"""
    cls is the class being analysed, method is the corresponding method on it and
    api_doc is that method's documentation; use them to derive the function's
    argument information.
:param cls:
:param method:
:param method_doc_raw:
:param api_doc:
:param class_args:
:return:
"""
frame_info = inspect.getfullargspec(method)
method_args = frame_info.args
if len(frame_info.args) > 0 and frame_info.args[0] == "self":
method_args = method_args[1:]
if len(method_args) > len(frame_info.annotations):
        # required parameter type annotations are missing
raise Exception(
"模块 %s 的函数 %s 有 %s 个参数,但具有类型描述的参数个数只有 %s 个. \n"
"请为缺少类型描述的参数 %s 添加类型信息, eg: 为 id 添加参数说明\n\t"
"def hello(id: int): pass" %
(
cls.__name__,
method.__name__,
len(frame_info.args),
len(frame_info.annotations),
frame_info.args
)
)
params: List[Arg] = []
params_dict = api_doc.get("params", {})
params.extend(analyse_flask_args(method, params_dict, False) or [])
# post
expect_list = api_doc.get("expect", [])
for expect in expect_list:
params.extend(analyse_flask_args(method, expect, True) or [])
    # Finally handle the parameters declared in the function signature itself
func_params: List[Arg] = []
for (index, arg) in enumerate(method_args):
arg_type = switch_type(frame_info.annotations[arg])
if isinstance(arg_type, (type_def.Void, )):
continue
# try to extract documentation from doc
arg_doc = re.search(
r":param %s:(?P<doc>[\s\S]+?)(?=:param|:return|$)" % arg, method_doc_raw)
if arg_doc:
arg_doc = arg_doc.group("doc")
arg_info = Arg(arg, arg_type, None, arg_doc or "")
func_params.append(arg_info)
args_len = len(func_params) - 1
    for (index, default) in enumerate(reversed(frame_info.defaults or [])):
        # defaults apply to the trailing parameters, so assign them from the end backwards
        func_params[args_len - index].default = default
    # Remove duplicated parameters
for p in func_params:
is_dup: bool = False
for pp in params:
if p.name == pp.name:
is_dup = True
break
if not is_dup:
for pp in class_args:
if p.name == pp.name:
is_dup = True
break
if not is_dup:
params.append(p)
return params
def analyse_flask_args(method, type_dict, in_body: bool) -> List[Arg]:
"""
    Analyse the information in type_dict and convert it into type information with no
    dependency on the flask module.
    :param method:
    :param type_dict:
    :param in_body: if True the parameters come from the request body; otherwise the
    "in" field decides: "header" means they come from the headers, anything else
    means they are query (GET) parameters.
:return:
"""
params = []
for (key, value) in type_dict.items():
if isinstance(value, dict):
            # may be a param definition or a flask doc description
attr_type = value.get("type", None)
attr_type = switch_type(attr_type, value)
if not attr_type or isinstance(attr_type, type_def.Void):
                raise Exception("%s has no type definition for its parameter %s" % (method, key))
if in_body:
source = ArgSource.BODY
else:
if value.get("in", "params") == "params":
source = ArgSource.PARAMS
else:
source = ArgSource.HEADER
arg = Arg(key, attr_type, default=value.get("default", None),
description=value.get("description", ""), source=source,
required=attr_type.required)
params.append(arg)
else:
attr_type = switch_type(value)
if attr_type:
if in_body:
source = ArgSource.BODY
else:
source = ArgSource.PARAMS
required = True
if value.required is not None:
required = not not value.required
arg = Arg(key, attr_type, attr_type.default_value, value.description,
required=required, source=source)
params.append(arg)
return params
str_literal = ["str", "string"]
number_literal = ["int", "integer"]
base_mapping_fields = {
"default": None,
"required": True,
"default_value": None,
"maximum": None,
"minimal": None,
}
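# Named (attribute, default) pairs that can be copied from a flask field or plain
# dict onto the locally defined RpcType fields.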
sm = namedtuple(
"DefaultMapping",
[
"description", "required", "min_length",
"max_length", "min_items", "max_items",
"default_value", "must_true",
"must_false", "minimum", "maximum"
]
)(
("description", ""),
("required", True),
("min_length", None),
("max_length", None),
("min_items", None),
("max_items", None),
("default_value", None),
("must_true", None),
("must_false", None),
("minimum", None),
("maximum", None)
)
field_adaptor = {
"minimum": "min",
"maximum": "max",
"default_value": "default"
}
base_sm = [sm.description, sm.default_value, sm.required]
def build_sm(*args, need_base: bool = True):
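    """Build an extractor that reads the given (name, default) attribute pairs from a
    dict or object, falling back through the field_adaptor aliases and the defaults."""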
sm_list = (need_base and base_sm or []) + list(args)
def wrap(addition: Union[Dict, object, None]):
def get_method(_key, _default):
pass
if isinstance(addition, dict):
def get_method(key, default_value):
return addition.get(key, default_value)
elif isinstance(addition, object):
def get_method(key, default_value):
return getattr(addition, key, default_value)
d = {}
for (k, v) in sm_list:
d[k] = v
if not addition:
continue
v = get_method(k, None)
if v is None:
adapt_key = field_adaptor.get(k, None)
if adapt_key is not None:
v = get_method(adapt_key, None)
if v is not None:
d[k] = v
return d
return wrap
number_sm = build_sm(sm.minimum, sm.maximum)
str_sm = build_sm(sm.min_length, sm.max_length)
bool_sm = build_sm(sm.must_true, sm.must_false)
list_sm = build_sm(sm.description, sm.min_items, sm.max_items, need_base=False)
type_switch_mapping = {
"int": number_sm,
int: number_sm,
"integer": number_sm,
fields.Integer: number_sm,
"float": number_sm,
float: number_sm,
fields.Float: number_sm,
"str": str_sm,
"string": str_sm,
str: str_sm,
fields.String: str_sm,
"bool": bool_sm,
bool: bool_sm,
fields.Boolean: bool_sm,
"list": list_sm,
fields.List: list_sm,
}
type_convert_mapping = {
int: type_def.fields.Integer,
str: type_def.fields.String,
float: type_def.fields.Float,
bool: type_def.fields.Bool,
fields.Integer: type_def.fields.Integer,
fields.String: type_def.fields.String,
fields.Boolean: type_def.fields.Boolean,
fields.Float: type_def.fields.Float
}
def switch_type(from_type, addition: Union[dict, None] = None) -> type_def.RpcType:
"""
    Convert a third-party (flask) type definition into a local type. addition carries
    extra information about the flask field, such as maximum, default or max_items,
    which is attached to the resulting type.
    :param from_type:
    :param addition:
:return:
"""
map_func = type_switch_mapping.get(from_type, None)
if map_func is None:
map_func = type_switch_mapping.get(type(from_type), lambda _: {})
kwargs = map_func(addition)
if isinstance(from_type, str):
if from_type in str_literal:
return type_def.fields.String(**kwargs)
elif from_type in number_literal:
return type_def.fields.Integer(**kwargs)
    # Primitive types, and flask's primitive field types, can be constructed directly
from_type_constructor = type_convert_mapping.get(from_type, None)
if not from_type_constructor:
from_type_constructor = type_convert_mapping.get(type(from_type), None)
if from_type_constructor:
return from_type_constructor(**kwargs)
if isinstance(from_type, fields.List):
elem_type = switch_type(from_type.container)
return type_def.fields.List(
elem_type, **kwargs)
elif isinstance(from_type, fields.Nested):
field_dict = {}
for (field, field_value) in from_type.model.items():
field_dict[field] = switch_type(field_value, field_value)
return type_def.fields.Dict(field_dict, from_type.description, from_type.required)
else:
return type_def.Void()
| 1.945313 | 2 |
bot/helper/mirror_utils/download_utils/telegram_downloader.py | vincreator/Eunha | 0 | 11642 | <filename>bot/helper/mirror_utils/download_utils/telegram_downloader.py
import logging
import random
from time import time
from threading import RLock, Lock, Thread
from bot import LOGGER, download_dict, download_dict_lock, app, STOP_DUPLICATE, STORAGE_THRESHOLD
from bot.helper.ext_utils.bot_utils import get_readable_file_size
from ..status_utils.telegram_download_status import TelegramDownloadStatus
from bot.helper.telegram_helper.message_utils import sendMarkup, sendMessage, sendStatusMessage
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.fs_utils import check_storage_threshold
global_lock = Lock()
GLOBAL_GID = set()
logging.getLogger("pyrogram").setLevel(logging.WARNING)
class TelegramDownloadHelper:
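    """Downloads Telegram media via pyrogram and tracks progress in the bot's download status list."""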
def __init__(self, listener):
self.name = ""
self.size = 0
self.progress = 0
self.downloaded_bytes = 0
self.__start_time = time()
self.__listener = listener
self.__id = ""
self.__is_cancelled = False
self.__resource_lock = RLock()
@property
def download_speed(self):
with self.__resource_lock:
return self.downloaded_bytes / (time() - self.__start_time)
def __onDownloadStart(self, name, size, file_id):
with global_lock:
GLOBAL_GID.add(file_id)
with self.__resource_lock:
self.name = name
self.size = size
self.__id = file_id
gid = ''.join(random.choices(file_id, k=12))
with download_dict_lock:
download_dict[self.__listener.uid] = TelegramDownloadStatus(self, self.__listener, gid)
sendStatusMessage(self.__listener.message, self.__listener.bot)
def __onDownloadProgress(self, current, total):
if self.__is_cancelled:
self.__onDownloadError('Cancelled by user!')
app.stop_transmission()
return
with self.__resource_lock:
self.downloaded_bytes = current
try:
self.progress = current / self.size * 100
except ZeroDivisionError:
pass
def __onDownloadError(self, error):
with global_lock:
try:
GLOBAL_GID.remove(self.__id)
except KeyError:
pass
self.__listener.onDownloadError(error)
def __onDownloadComplete(self):
with global_lock:
GLOBAL_GID.remove(self.__id)
self.__listener.onDownloadComplete()
def __download(self, message, path):
try:
download = app.download_media(message,
progress = self.__onDownloadProgress,
file_name = path
)
except Exception as e:
LOGGER.error(str(e))
return self.__onDownloadError(str(e))
if download is not None:
self.__onDownloadComplete()
elif not self.__is_cancelled:
self.__onDownloadError('Internal error occurred')
def add_download(self, message, path, filename):
_dmsg = app.get_messages(message.chat.id, reply_to_message_ids=message.message_id)
media = None
media_array = [_dmsg.document, _dmsg.video, _dmsg.audio]
for i in media_array:
if i is not None:
media = i
break
if media is not None:
with global_lock:
# For avoiding locking the thread lock for long time unnecessarily
download = media.file_id not in GLOBAL_GID
if filename == "":
name = media.file_name
else:
name = filename
path = path + name
if download:
size = media.file_size
if STOP_DUPLICATE and not self.__listener.isLeech:
LOGGER.info('Checking File/Folder if already in Drive...')
smsg, button = GoogleDriveHelper().drive_list(name, True, True)
if smsg:
msg = "File/Folder is already available in Drive.\nHere are the search results:"
return sendMarkup(msg, self.__listener.bot, self.__listener.message, button)
if STORAGE_THRESHOLD is not None:
arch = any([self.__listener.isZip, self.__listener.extract])
acpt = check_storage_threshold(size, arch)
if not acpt:
msg = f'You must leave {STORAGE_THRESHOLD}GB free storage.'
msg += f'\nYour File/Folder size is {get_readable_file_size(size)}'
return sendMessage(msg, self.__listener.bot, self.__listener.message)
self.__onDownloadStart(name, size, media.file_id)
LOGGER.info(f'Downloading Telegram file with id: {media.file_id}')
Thread(target=self.__download, args=(_dmsg, path)).start()
else:
self.__onDownloadError('File already being downloaded!')
else:
self.__onDownloadError('No document in the replied message')
def cancel_download(self):
LOGGER.info(f'Cancelling download on user request: {self.__id}')
self.__is_cancelled = True
| 2.171875 | 2 |
setup.py | goofmint/qualityforward-py | 0 | 11643 | import setuptools
setuptools.setup(
name="qualityforward",
version="1.1",
author="<NAME>",
author_email="<EMAIL>",
description="Python library for QualityForward API",
long_description="This is python library for QualityForward API. QualityForward is cloud based test management service.",
long_description_content_type="text/markdown",
url="https://cloud.veriserve.co.jp/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| 1.117188 | 1 |
backend/trips/models.py | repeating/PoputchikiInno | 20 | 11644 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, AbstractUser
from django.utils import timezone
from django.utils.translation import gettext as _
from django import forms
from django.contrib.auth.hashers import make_password
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
from datetime import datetime, timedelta
class CarTrip(models.Model):
class Meta:
verbose_name = _('carTrip')
verbose_name_plural = _('cartrips')
def __str__(self):
return f'{self.driver_name} Car Trip'
driver_name = models.CharField(max_length=200)
destination = models.CharField(max_length=200)
number_of_seats = models.IntegerField('number of seats')
trip_date = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
@classmethod
def create(cls , driver_name, destination, number_of_seats, trip_date):
trip = cls(driver_name= driver_name,
destination=destination,
number_of_seats=number_of_seats,
trip_date=trip_date,
pub_date=datetime.now()
)
return trip
def was_published_recently(self):
now = timezone.now()
        return now - timedelta(days=1) <= self.pub_date <= now
class Relation(models.Model):
class Meta:
verbose_name = _('relation')
verbose_name_plural = _('relation')
trip_number = models.IntegerField('trip_number')
hiker_name = models.CharField(max_length=200)
def __str__(self ):
return f'{self.hiker_name} going on trip id = {self.trip_number}'
@classmethod
def create(cls , trip_number, hiker_name):
rel = cls(trip_number=trip_number,
hiker_name=hiker_name,
)
return rel
| 2.21875 | 2 |
jcs/jcs_main.py | orenmel/lexsub | 26 | 11645 | <reponame>orenmel/lexsub
'''
Run lexical substitution experiments
'''
import sys
import time
import argparse
import re
import numpy
from jcs.jcs_io import extract_word_weight
from jcs.data.context_instance import ContextInstance
from jcs.jcs_io import vec_to_str
from jcs.jcs_io import vec_to_str_generated
from jcs.cs_embedding_inferrer import CsEmbeddingInferrer
from jcs.context2vec_inferrer import Context2vecInferrer
target_re = re.compile(".*__(.*)__.*")
def read_candidates(candidates_file):
target2candidates = {}
# finally.r::eventually;ultimately
with open(candidates_file, 'r') as f:
for line in f:
segments = line.split('::')
target = segments[0]
candidates = set(segments[1].strip().split(';'))
target2candidates[target] = candidates
return target2candidates
def run_test(inferrer):
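    """Run the lexical substitution test set: for every context line, write the
    generated substitutes and the ranked (filtered) candidates to the result files."""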
if args.candidatesfile != None:
target2candidates = read_candidates(args.candidatesfile)
else:
target2candidates = None
tfi = open(args.testfile, 'r')
tfo = open(args.resultsfile, 'w')
tfo_ranked = open(args.resultsfile+'.ranked', 'w')
tfo_generated_oot = open(args.resultsfile+'.generated.oot', 'w')
tfo_generated_best = open(args.resultsfile+'.generated.best', 'w')
lines = 0
while True:
context_line = tfi.readline()
if not context_line:
            break
lst_instance = ContextInstance(context_line, args.no_pos)
lines += 1
if (args.debug == True):
tfo.write("\nTest context:\n")
tfo.write("***************\n")
tfo.write(lst_instance.decorate_context())
result_vec = inferrer.find_inferred(lst_instance, tfo)
generated_results = inferrer.generate_inferred(result_vec, lst_instance.target, lst_instance.target_lemma, lst_instance.pos)
tfo.write("\nGenerated lemmatized results\n")
tfo.write("***************\n")
tfo.write("GENERATED\t" + ' '.join([lst_instance.full_target_key, lst_instance.target_id]) + " ::: " + vec_to_str_generated(generated_results.iteritems(), args.topgenerated)+"\n")
tfo_generated_oot.write(' '.join([lst_instance.full_target_key, lst_instance.target_id]) + " ::: " + vec_to_str_generated(generated_results.iteritems(), args.topgenerated)+"\n")
tfo_generated_best.write(' '.join([lst_instance.full_target_key, lst_instance.target_id]) + " :: " + vec_to_str_generated(generated_results.iteritems(), 1)+"\n")
filtered_results = inferrer.filter_inferred(result_vec, target2candidates[lst_instance.target_key], lst_instance.pos)
tfo.write("\nFiltered results\n")
tfo.write("***************\n")
tfo.write("RANKED\t" + ' '.join([lst_instance.full_target_key, lst_instance.target_id]) + "\t" + vec_to_str(filtered_results.iteritems(), len(filtered_results))+"\n")
tfo_ranked.write("RANKED\t" + ' '.join([lst_instance.full_target_key, lst_instance.target_id]) + "\t" + vec_to_str(filtered_results.iteritems(), len(filtered_results))+"\n")
# print "end %f" % time.time()
if lines % 10 == 0:
print "Read %d lines" % lines
print "Read %d lines in total" % lines
print "Time per word: %f msec" % inferrer.msec_per_word()
tfi.close()
tfo.close()
tfo_ranked.close()
tfo_generated_oot.close()
tfo_generated_best.close()
def run(args):
print time.asctime(time.localtime(time.time()))
if args.inferrer == 'emb':
inferrer = CsEmbeddingInferrer(args.vocabfile, args.ignoretarget, args.contextmath, args.embeddingpath, args.embeddingpathc, args.testfileconll, args.bow_size, 10)
print "Using CsEmbeddingInferrer"
elif args.inferrer == 'lstm':
inferrer = Context2vecInferrer(args.lstm_config, args.ignoretarget, args.contextmath, 10)
print "Using Context2vecInferrer"
else:
raise Exception("Unknown inferrer type: " + args.inferrer)
print time.asctime(time.localtime(time.time()))
run_test(inferrer)
print "Finished"
print time.asctime(time.localtime(time.time()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='JCS utility')
parser.add_argument('--inferrer', choices=['lstm', 'emb'],
default='lstm',
help='context type ("lstm", "emb")')
# Only for Context2vecInferrer
parser.add_argument('-lstm_config', action="store", dest="lstm_config", default=None, help="config file of lstm context model and respective word embeddings")
# Only for CsEmbeddingInferrer
parser.add_argument('-embeddingpath', action="store", dest="embeddingpath", default=None, help="prefix to files containing word embeddings")
parser.add_argument('-embeddingpathc', action="store", dest="embeddingpathc", default=None, help="prefix to files containing context word embeddings")
parser.add_argument('-vocabfile', action="store", dest="vocabfile")
parser.add_argument('-bow',action='store',dest='bow_size', default=-1, type=int, help="context bag-of-words window size. 0 means entire sentence. -1 means syntactic dependency contexts.")
# Common
parser.add_argument('-targetsfile', action="store", dest="targetsfile", default=None)
parser.add_argument('-testfile', action="store", dest="testfile")
parser.add_argument('-testfileconll', action="store", dest="testfileconll", default=None, help="test file with sentences parsed in conll format")
parser.add_argument('-candidatesfile', action="store", dest="candidatesfile", default=None)
parser.add_argument('-resultsfile', action="store", dest="resultsfile")
parser.add_argument('-contextmath', action="store", dest="contextmath", default=None, help="arithmetics used to consider context [add|mult|geomean|none]")
parser.add_argument('--ignoretarget', action="store_true", dest="ignoretarget", default=False, help="ignore lhs target. compute only context compatibility.")
parser.add_argument('--nopos',action='store_true',dest='no_pos', default=False, help="ignore part-of-speech of target word")
parser.add_argument('-topgenerated', action="store", dest="topgenerated", type=int, default=10, help="top entries to print in generated parvecs")
parser.add_argument('--debug',action='store_true',dest='debug')
args = parser.parse_args(sys.argv[1:])
config_file_name = args.resultsfile + ".CONFIG"
cf = open(config_file_name, 'w')
cf.write(' '.join(sys.argv)+'\n')
cf.close()
numpy.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
run(args)
| 2.4375 | 2 |
src/django_richenum/__init__.py | adepue/django-richenum | 0 | 11646 | <filename>src/django_richenum/__init__.py
__version__ = 'unknown'
try:
__version__ = __import__('pkg_resources').get_distribution('django_richenum').version
except Exception as e:
pass
| 1.523438 | 2 |
scripts/v.py | NatashaChopper/stage | 0 | 11647 | import numpy
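# Write dictionary-fragment files mapping integer values (dic.txt) and 0.01-step
# float values up to 1.7 (time.txt) to their bucket id strings.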
with open ("dic.txt", "w", encoding="utf-8") as dic:
for x in range(5, 790, 1):
if 92 < x <= 113:
dic.write('"'+str(x)+'"'+":"+ '"'+'1'+'",')
elif 113 < x <= 133:
dic.write('"'+str(x)+'"'+":"+ '"'+'2'+'",')
elif 133 < x <= 153:
dic.write('"'+str(x)+'"'+":"+ '"'+'3'+'",')
elif 153 < x <= 173:
dic.write('"'+str(x)+'"'+":"+ '"'+'4'+'",')
elif 173 < x <= 193:
dic.write('"'+str(x)+'"'+":"+ '"'+'5'+'",')
elif 193 < x <= 213:
dic.write('"'+str(x)+'"'+":"+ '"'+'6'+'",')
elif 213 < x <= 233:
dic.write('"'+str(x)+'"'+":"+ '"'+'7'+'",')
elif 233 < x <= 253:
dic.write('"'+str(x)+'"'+":"+ '"'+'8'+'",')
elif 253 < x <= 273:
dic.write('"'+str(x)+'"'+":"+ '"'+'9'+'",')
elif 273 < x <= 293:
dic.write('"'+str(x)+'"'+":"+ '"'+'10'+'",')
elif 293 < x <= 313:
dic.write('"'+str(x)+'"'+":"+ '"'+'11'+'",')
elif 313 < x <= 333:
dic.write('"'+str(x)+'"'+":"+ '"'+'12'+'",')
elif 333 < x <= 353:
dic.write('"'+str(x)+'"'+":"+ '"'+'13'+'",')
elif 353 < x <= 373:
dic.write('"'+str(x)+'"'+":"+ '"'+'14'+'",')
elif 373 < x <= 393:
dic.write('"'+str(x)+'"'+":"+ '"'+'15'+'",')
elif 393 < x <= 413:
dic.write('"'+str(x)+'"'+":"+ '"'+'16'+'",')
elif 413 < x <= 433:
dic.write('"'+str(x)+'"'+":"+ '"'+'17'+'",')
elif 433 < x <= 453:
dic.write('"'+str(x)+'"'+":"+ '"'+'18'+'",')
elif 453 < x <= 473:
dic.write('"'+str(x)+'"'+":"+ '"'+'19'+'",')
elif 473 < x <= 493:
dic.write('"'+str(x)+'"'+":"+ '"'+'20'+'",')
elif 493 < x <= 513:
dic.write('"'+str(x)+'"'+":"+ '"'+'21'+'",')
elif 513 < x <= 533:
dic.write('"'+str(x)+'"'+":"+ '"'+'22'+'",')
elif 533 < x <= 553:
dic.write('"'+str(x)+'"'+":"+ '"'+'23'+'",')
elif 553 < x <= 573:
dic.write('"'+str(x)+'"'+":"+ '"'+'24'+'",')
elif 573 < x <= 593:
dic.write('"'+str(x)+'"'+":"+ '"'+'25'+'",')
elif 593 < x <= 613:
dic.write('"'+str(x)+'"'+":"+ '"'+'26'+'",')
elif 613 < x <= 633:
dic.write('"'+str(x)+'"'+":"+ '"'+'27'+'",')
elif 633 < x <= 653:
dic.write('"'+str(x)+'"'+":"+ '"'+'28'+'",')
elif 653 < x <= 673:
dic.write('"'+str(x)+'"'+":"+ '"'+'29'+'",')
elif 673 < x <= 693:
dic.write('"'+str(x)+'"'+":"+ '"'+'30'+'",')
elif 693 < x <= 713:
dic.write('"'+str(x)+'"'+":"+ '"'+'31'+'",')
elif 713 < x <= 733:
dic.write('"'+str(x)+'"'+":"+ '"'+'32'+'",')
elif 733 < x <= 753:
dic.write('"'+str(x)+'"'+":"+ '"'+'33'+'",')
elif 753 < x <= 773:
dic.write('"'+str(x)+'"'+":"+ '"'+'34'+'",')
elif 773 < x <= 793:
dic.write('"'+str(x)+'"'+":"+ '"'+'35'+'",')
elif 4 < x <= 15:
dic.write('"'+str(x)+'"'+":"+ '"'+'36'+'",')
elif 15 < x <= 25:
dic.write('"'+str(x)+'"'+":"+ '"'+'37'+'",')
elif 25 < x <= 35:
dic.write('"'+str(x)+'"'+":"+ '"'+'38'+'",')
elif 35 < x <= 45:
dic.write('"'+str(x)+'"'+":"+ '"'+'39'+'",')
elif 45 < x <= 55:
dic.write('"'+str(x)+'"'+":"+ '"'+'40'+'",')
elif 55 < x <= 65:
dic.write('"'+str(x)+'"'+":"+ '"'+'41'+'",')
elif 65 < x <= 75:
dic.write('"'+str(x)+'"'+":"+ '"'+'42'+'",')
elif 75 < x <= 85:
dic.write('"'+str(x)+'"'+":"+ '"'+'43'+'",')
elif 85 < x <= 92:
dic.write('"'+str(x)+'"'+":"+ '"'+'44'+'",')
with open ("time.txt", "w", encoding="utf-8") as duree:
for y in numpy.arange(0, 1.7, 0.01):
if 0 < y <= 0.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'80'+'",')
elif 0.1 < y <= 0.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'81'+'",')
elif 0.2 < y <= 0.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'82'+'",')
elif 0.3 < y <= 0.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'83'+'",')
elif 0.4 < y <= 0.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'84'+'",')
elif 0.5 < y <= 0.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'85'+'",')
elif 0.6 < y <= 0.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'86'+'",')
elif 0.7 < y <= 0.8:
duree.write('"'+str(y)+'"'+":"+ '"'+'87'+'",')
elif 0.8 < y <= 0.9:
duree.write('"'+str(y)+'"'+":"+ '"'+'88'+'",')
elif 0.9 < y <= 1:
duree.write('"'+str(y)+'"'+":"+ '"'+'89'+'",')
elif 1 < y <= 1.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'90'+'",')
elif 1.1 < y <= 1.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'91'+'",')
elif 1.2 < y <= 1.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'92'+'",')
elif 1.3 < y <= 1.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'93'+'",')
elif 1.4 < y <= 1.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'94'+'",')
elif 1.5 < y <= 1.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'95'+'",')
elif 1.6 < y <= 1.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'96'+'",') | 2.703125 | 3 |
scrim/globals.py | danbradham/scrim | 4 | 11648 | # -*- coding: utf-8 -*-
'''
=============
scrim.globals
=============
Defines variables passed into the python script via Environment Variables by
scrim scripts. If SCRIM_SHELL is None, then the python script was not executed
by a scrim script.
SHELLS (list): list of available shells
SCRIM_SHELL (str): Parent shell, one of the above SHELLS
SCRIM_PATH (str): Path to output shell script
SCRIM_AUTO_WRITE (bool): Write to SCRIM_PATH when python exits?
SCRIM_SCRIPT (str): Path to the scrim script that invoked python
SCRIM_DEBUG (bool): Is scrim script running in debug mode?
'''
from __future__ import absolute_import
import os
__all__ = [
'SHELLS', 'SCRIM_SHELL', 'SCRIM_PATH', 'SCRIM_AUTO_WRITE',
'SCRIM_SCRIPT', 'SCRIM_DEBUG'
]
SHELLS = [
'powershell.exe',
'cmd.exe',
'bash'
]
SCRIM_SHELL = os.environ.get('SCRIM_SHELL', None)
SCRIM_PATH = os.environ.get('SCRIM_PATH', None)
SCRIM_AUTO_WRITE = bool(os.environ.get('SCRIM_AUTO_WRITE', False))
SCRIM_SCRIPT = os.environ.get('SCRIM_SCRIPT', None)
SCRIM_DEBUG = bool(os.environ.get('SCRIM_DEBUG', False))
| 2.453125 | 2 |
examples/Api/channels.py | asheshambasta/csound-expression | 0 | 11649 | import csnd6
class Control:
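    """Runs the osc.csd Csound instrument in a background performance thread and
    exposes its volume and frequency control channels."""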
def __init__(self, volume, frequency):
engine = csnd6.Csound()
engine.SetOption("-odac")
engine.Compile("osc.csd")
thread = csnd6.CsoundPerformanceThread(engine)
thread.Play()
self.engine = engine
self.thread = thread
self.set_volume(volume)
self.set_frequency(frequency)
def set_volume(self, volume):
self.engine.SetChannel("volume", volume)
def set_frequency(self, frequency):
self.engine.SetChannel("frequency", frequency)
def close(self):
self.thread.Stop()
self.thread.Join()
| 2.609375 | 3 |
virtual/lib/python3.6/site-packages/isbnlib/_goob.py | david12-wq/PITCHE_APP | 0 | 11650 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Query the Google Books (JSON API v1) service for metadata."""
import logging
from .dev import stdmeta
from .dev._bouth23 import u
from .dev._exceptions import ISBNNotConsistentError, RecordMappingError
from .dev.webquery import query as wquery
UA = 'isbnlib (gzip)'
SERVICE_URL = (
'https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}'
'&fields=items/volumeInfo(title,subtitle,authors,publisher,publishedDate,'
'language,industryIdentifiers)&maxResults=1')
LOGGER = logging.getLogger(__name__)
# pylint: disable=broad-except
def _mapper(isbn, records):
"""Mapp: canonical <- records."""
# canonical: ISBN-13, Title, Authors, Publisher, Year, Language
try:
canonical = {}
canonical['ISBN-13'] = u(isbn)
title = records.get('title', u('')).replace(' :', ':')
subtitle = records.get('subtitle', u(''))
title = title + ' - ' + subtitle if subtitle else title
canonical['Title'] = title
canonical['Authors'] = records.get('authors', [u('')])
# see issue #64
canonical['Publisher'] = records.get('publisher', u('')).strip('"')
if 'publishedDate' in records and len(records['publishedDate']) >= 4:
canonical['Year'] = records['publishedDate'][0:4]
else: # pragma: no cover
canonical['Year'] = u('')
canonical['Language'] = records.get('language', u(''))
except Exception: # pragma: no cover
LOGGER.debug('RecordMappingError for %s with data %s', isbn, records)
raise RecordMappingError(isbn)
# call stdmeta for extra cleanning and validation
return stdmeta(canonical)
def _records(isbn, data):
"""Classify (canonically) the parsed data."""
# put the selected data in records
try:
recs = data['items'][0]['volumeInfo']
except Exception: # pragma: no cover
# don't raise exception!
LOGGER.debug('No data from "goob" for isbn %s', isbn)
return {}
# consistency check (isbn request = isbn response)
if recs:
ids = recs.get('industryIdentifiers', '')
if u('ISBN_13') in repr(ids) and isbn not in repr(
ids): # pragma: no cover
LOGGER.debug('ISBNNotConsistentError for %s (%s)', isbn, repr(ids))
raise ISBNNotConsistentError('{0} not in {1}'.format(
isbn, repr(ids)))
else:
return {} # pragma: no cover
# map canonical <- records
return _mapper(isbn, recs)
def query(isbn):
"""Query the Google Books (JSON API v1) service for metadata."""
data = wquery(SERVICE_URL.format(isbn=isbn), user_agent=UA)
return _records(isbn, data)
| 2.375 | 2 |
gdxpds/test/conftest.py | cdgaete/gdx-pandas | 42 | 11651 | # [LICENSE]
# Copyright (c) 2020, Alliance for Sustainable Energy.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or
# promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# [/LICENSE]
import pytest
def pytest_addoption(parser):
parser.addoption(
"--no-clean-up", action="store_true", default=False,
help="Pass this option to leave test outputs in place"
)
@pytest.fixture(scope="session",autouse=True)
def clean_up(request):
return (not request.config.getoption('--no-clean-up'))
| 1.5 | 2 |
jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py | Taekyoon/executors | 29 | 11652 | <filename>jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py
import os
import time
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina.logging.profile import TimeContext
from jina_commons.indexers.dump import import_metas, import_vectors
from ..postgres_indexer import PostgreSQLStorage
from ..postgreshandler import doc_without_embedding
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d "
f"--remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down "
f"--remove-orphans"
)
d_embedding = np.array([1, 1, 1, 1, 1, 1, 1])
c_embedding = np.array([2, 2, 2, 2, 2, 2, 2])
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
if _port is not None and _port not in used_ports:
used_ports.add(_port)
return _port
raise Exception('no port available')
mocker.patch('jina.helper.random_port', new_callable=lambda: _random_port)
def get_documents(chunks, same_content, nr=10, index_start=0, same_tag_content=None):
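    """Yield nr Documents (optionally with chunks), with either identical or per-id
    text/tags and random embeddings of the reference shapes."""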
next_chunk_id = nr + index_start
for i in range(index_start, nr + index_start):
d = Document()
d.id = i
if same_content:
d.text = 'hello world'
d.embedding = np.random.random(d_embedding.shape)
else:
d.text = f'hello world {i}'
d.embedding = np.random.random(d_embedding.shape)
if same_tag_content:
d.tags['field'] = 'tag data'
elif same_tag_content is False:
d.tags['field'] = f'tag data {i}'
for j in range(chunks):
c = Document()
c.id = next_chunk_id
if same_content:
c.text = 'hello world from chunk'
c.embedding = np.random.random(c_embedding.shape)
else:
c.text = f'hello world from chunk {j}'
c.embedding = np.random.random(c_embedding.shape)
if same_tag_content:
c.tags['field'] = 'tag data'
elif same_tag_content is False:
c.tags['field'] = f'tag data {next_chunk_id}'
next_chunk_id += 1
d.chunks.append(c)
yield d
def validate_db_side(postgres_indexer, expected_data):
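    """Read rows back from the PostgreSQL table and check ids, embeddings and
    serialized docs against the expected (id, vec, meta) triples, ordered by doc_id."""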
ids, vecs, metas = zip(*expected_data)
with postgres_indexer.handler as handler:
cursor = handler.connection.cursor()
cursor.execute(
f'SELECT doc_id, embedding, doc from {postgres_indexer.table} ORDER BY '
f'doc_id::int'
)
record = cursor.fetchall()
for i in range(len(expected_data)):
np.testing.assert_equal(ids[i], str(record[i][0]))
embedding = np.frombuffer(record[i][1], dtype=postgres_indexer.dump_dtype)
np.testing.assert_equal(vecs[i], embedding)
np.testing.assert_equal(metas[i], bytes(record[i][2]))
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[1] / 'config.yml'), override_with={'dry_run': True}
)
assert ex.username == 'postgres'
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_postgres(tmpdir, docker_compose):
postgres_indexer = PostgreSQLStorage()
NR_DOCS = 10000
original_docs = DocumentArray(
list(get_documents(nr=NR_DOCS, chunks=0, same_content=False))
)
postgres_indexer.delete(original_docs, {})
with TimeContext(f'### indexing {len(original_docs)} docs'):
postgres_indexer.add(original_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS)
info_original_docs = [
(doc.id, doc.embedding, doc_without_embedding(doc)) for doc in original_docs
]
validate_db_side(postgres_indexer, info_original_docs)
new_docs = DocumentArray(
list(get_documents(chunks=False, nr=10, same_content=True))
)
postgres_indexer.update(new_docs, {})
info_new_docs = [
(doc.id, doc.embedding, doc_without_embedding(doc)) for doc in new_docs
]
ids, vecs, metas = zip(*info_new_docs)
expected_info = [(ids[0], vecs[0], metas[0])]
validate_db_side(postgres_indexer, expected_info)
postgres_indexer.delete(new_docs, {})
np.testing.assert_equal(postgres_indexer.size, len(original_docs) - len(new_docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mwu_empty_dump(tmpdir, docker_compose):
f = Flow().add(uses=PostgreSQLStorage)
with f:
resp = f.post(
on='/index', inputs=DocumentArray([Document()]), return_results=True
)
print(f'{resp}')
dump_path = os.path.join(tmpdir, 'dump')
with f:
f.post(
on='/dump',
parameters={'dump_path': os.path.join(tmpdir, 'dump'), 'shards': 1},
)
# assert dump contents
ids, vecs = import_vectors(dump_path, pea_id='0')
assert ids is not None
ids, metas = import_metas(dump_path, pea_id='0')
assert vecs is not None
assert metas is not None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_return_embeddings(docker_compose):
indexer = PostgreSQLStorage()
doc = Document(embedding=np.random.random(10))
da = DocumentArray([doc])
query1 = DocumentArray([Document(id=doc.id)])
indexer.add(da, parameters={})
indexer.search(query1, parameters={})
assert query1[0].embedding is not None
assert query1[0].embedding.shape == (10,)
query2 = DocumentArray([Document(id=doc.id)])
indexer.search(query2, parameters={"return_embeddings": False})
assert query2[0].embedding is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_get_documents(docker_compose):
indexer = PostgreSQLStorage()
NR = 10
docs = DocumentArray(
list(
get_documents(
nr=NR,
chunks=0,
same_content=False,
)
)
)
indexer.add(docs)
assert len(list(indexer.get_document_iterator())) == NR
indexer.delete(docs)
assert len(list(indexer.get_document_iterator())) == 0
assert indexer.size == 0
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_clear(docker_compose):
indexer = PostgreSQLStorage()
NR = 10
docs = DocumentArray(
list(
get_documents(
nr=NR,
chunks=0,
same_content=False,
)
)
)
indexer.add(docs)
assert len(list(indexer.get_document_iterator())) == NR
indexer.clear()
assert indexer.size == 0
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
@pytest.mark.parametrize('psql_virtual_shards', [44, 128])
@pytest.mark.parametrize('real_shards', [1, 5])
def test_snapshot(docker_compose, psql_virtual_shards, real_shards):
postgres_indexer = PostgreSQLStorage(virtual_shards=psql_virtual_shards)
def _assert_snapshot_shard_distribution(func, nr_shards, total_docs_expected):
total_docs = 0
for i in range(nr_shards):
data = func(shard_id=i, total_shards=nr_shards)
docs_this_shard = len(list(data))
assert docs_this_shard >= postgres_indexer.virtual_shards // real_shards
total_docs += docs_this_shard
np.testing.assert_equal(total_docs, total_docs_expected)
NR_SHARDS = real_shards
NR_DOCS = postgres_indexer.virtual_shards * 2 + 3
original_docs = DocumentArray(
list(get_documents(nr=NR_DOCS, chunks=0, same_content=False))
)
NR_NEW_DOCS = 30
new_docs = DocumentArray(
list(
get_documents(
nr=NR_NEW_DOCS, index_start=NR_DOCS, chunks=0, same_content=False
)
)
)
# make sure to cleanup if the PSQL instance is kept running
postgres_indexer.delete(original_docs, {})
postgres_indexer.delete(new_docs, {})
# indexing the documents
postgres_indexer.add(original_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS)
# create a snapshot
postgres_indexer.snapshot()
# data added the snapshot will not be part of the export
postgres_indexer.add(new_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS + NR_NEW_DOCS)
np.testing.assert_equal(postgres_indexer.snapshot_size, NR_DOCS)
_assert_snapshot_shard_distribution(
postgres_indexer.get_snapshot, NR_SHARDS, NR_DOCS
)
# create another snapshot
postgres_indexer.snapshot()
timestamp = postgres_indexer.last_snapshot_timestamp
# docs for the delta resolving
NR_DOCS_DELTA = 33
docs_delta = DocumentArray(
list(
get_documents(
nr=NR_DOCS_DELTA,
index_start=NR_DOCS + NR_NEW_DOCS,
chunks=0,
same_content=False,
)
)
)
time.sleep(3)
postgres_indexer.add(docs_delta, {})
np.testing.assert_equal(
postgres_indexer.size, NR_DOCS + NR_NEW_DOCS + NR_DOCS_DELTA
)
np.testing.assert_equal(postgres_indexer.snapshot_size, NR_DOCS + NR_NEW_DOCS)
NR_DOCS_DELTA_DELETED = 10
docs_delta_deleted = DocumentArray(
list(
get_documents(
nr=NR_DOCS_DELTA_DELETED, index_start=0, chunks=0, same_content=False
)
)
)
postgres_indexer.delete(docs_delta_deleted, {'soft_delete': True})
_assert_snapshot_shard_distribution(
postgres_indexer.get_snapshot,
NR_SHARDS,
NR_DOCS + NR_NEW_DOCS,
)
# we use total_shards=1 in order to guarantee getting all the data in the delta
deltas = postgres_indexer.get_delta_updates(
shard_id=0, total_shards=1, timestamp=timestamp
)
deltas = list(deltas)
np.testing.assert_equal(len(deltas), NR_DOCS_DELTA + NR_DOCS_DELTA_DELETED)
def test_postgres_shard_distribution():
assert ['0'] == PostgreSQLStorage._vshards_to_get(0, 3, 5)
assert ['1'] == PostgreSQLStorage._vshards_to_get(1, 3, 5)
assert ['2', '3', '4'] == PostgreSQLStorage._vshards_to_get(2, 3, 5)
assert [str(s) for s in range(5)] == PostgreSQLStorage._vshards_to_get(0, 1, 5)
with pytest.raises(ValueError):
PostgreSQLStorage._vshards_to_get(1, 1, 5)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_save_get_trained_model(docker_compose):
postgres_indexer = PostgreSQLStorage()
model = np.random.random((100, 5)).tobytes()
postgres_indexer.save_trained_model(model, None)
trained_model, trained_model_checksum = postgres_indexer.get_trained_model()
assert trained_model == model
assert trained_model_checksum is None
postgres_indexer.save_trained_model(model, 'sha256:hello')
trained_model, trained_model_checksum = postgres_indexer.get_trained_model()
assert trained_model == model
assert trained_model_checksum == 'sha256:hello'
| 1.84375 | 2 |
targhe/models.py | luca772005/studio | 0 | 11653 | from django.db import models
# Create your models here.
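# Vehicle registry models: type, make, model, fuel type, vehicle and plate
# (Tipo, Marca, Modello, Alimentazione, Mezzo, Targa).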
class Tipo(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Tipi'
class Marca(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Marche'
class Modello(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
marca = models.ForeignKey(Marca, null=False, blank=False)
tipo = models.ForeignKey(Tipo, null=False, blank=False)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Modelli'
class Alimentazione(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Alimentazioni'
class Mezzo(models.Model):
telaio = models.CharField(blank=False, null=False, max_length=128)
colore = models.CharField(blank=False, null=False, max_length=128)
alimentazione = models.ForeignKey(Alimentazione, null=False, blank=False)
modello = models.ForeignKey(Modello, null=False, blank=False)
def __unicode__(self):
return "{} {}".format(self.telaio, self.modello)
class Meta:
verbose_name_plural = 'Mezzi'
class Targa(models.Model):
numero = models.CharField(null=False, blank=False, max_length=16)
dal = models.DateField()
al = models.DateField()
mezzo = models.ForeignKey(Mezzo, null=False, blank=False)
def __unicode__(self):
return "{}".format(self.numero)
class Meta:
verbose_name_plural = 'Targhe'
| 2.234375 | 2 |
WhatSender/__init__.py | Shauryasamant/whatsender | 1 | 11654 | <filename>WhatSender/__init__.py
from WhatSender.sender import SendMessage
| 1.21875 | 1 |
scenarios/sync_sheets_and_groups.py | Ragnaruk/api_integration | 0 | 11655 | import pickle
from time import sleep
import googleapiclient.errors
from transliterate import translit
from logs.logging import get_logger
from api_google.google_api_sheets import get_sheets_service, get_multiple_ranges
from api_google.google_api_directory import get_directory_service, get_users_for_domain, \
get_groups_for_domain, create_group, add_user_to_group
from api_google.google_api_groupsettings import get_groupsettings_service, \
get_group_settings, update_group_settings
from config.config import sync_sheets_and_groups, path_data_directory
def main():
logger = get_logger('sync_sheets_and_groups', sync_sheets_and_groups['logging_level'])
data_path = path_data_directory / 'sync_sheets_and_groups'
data_path.mkdir(parents=True, exist_ok=True)
synced_users_path = data_path / 'synced_users.pickle'
while True:
# number_of_registered_users = 0
# synced_users_dictionary_creation = False
#
# # Getting a list of users who have already been synced
# if synced_users_path.exists():
# logger.debug('Reading synced users from: %s', synced_users_path)
# with open(synced_users_path, 'rb') as f:
# synced_users = pickle.load(f)
# else:
# logger.info('Creating synced users dictionary')
# synced_users = dict()
# synced_users_dictionary_creation = True
try:
service_directory = get_directory_service()
service_sheets = get_sheets_service()
# ranges = get_multiple_ranges(
# service_sheets,
# sync_sheets_and_groups['spreadsheet_id'],
# sync_sheets_and_groups['range_names']
# )
#
# with open(data_path / 'ranges.pickle', 'wb') as file:
# pickle.dump(ranges, file)
with open(data_path / 'ranges.pickle', 'rb') as file:
ranges = pickle.load(file)
#
# [logger.debug(x) for x in ranges]
# group_results = []
# for group in ranges[0]['values']:
# group_name = group[0].split(" ", 1)[0]
#
# email = (translit(group_name, "ru", reversed=True)).lower() \
# + "@" \
# + sync_sheets_and_groups['google_domain']
#
# try:
# group_results.append(create_group(service_directory, email, group_name, ""))
# except googleapiclient.errors.HttpError as exception:
# # If group already exists among other things
# logger.error(exception, exc_info=False)
#
# logger.debug(group_name, email)
#
# group_results.sort(key=lambda x: x['name'])
# with open(data_path / 'group_results.pickle', 'wb') as file:
# pickle.dump(group_results, file)
with open(data_path / 'group_results.pickle', 'rb') as file:
group_results = pickle.load(file)
#
# [logger.debug(x) for x in group_results]
created_group_names = [x['name'] for x in group_results]
[logger.debug(x) for x in created_group_names]
# # A client should wait 1 minute before adding users or sending messages to a new group
# sleep(60)
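            # Build name -> email lookup tables for students and leaders from the
            # paired sheet columns.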
students = dict(zip(
[i[0] if i else "" for i in ranges[1]['values']],
[i[0] if i else "" for i in ranges[2]['values']]
))
logger.debug(students.items())
leaders = dict(zip(
[i[0] if i else "" for i in ranges[3]['values']],
[i[0] if i else "" for i in ranges[4]['values']]
))
logger.debug(leaders.items())
group_users = {}
for group in ranges[0]['values']:
id = group[0].split(" ", 1)[0]
if id not in created_group_names:
logger.debug("Skipping group: ", id)
continue
else:
logger.debug("Adding users to group: ", id)
group_users[id] = []
# Leader email
group_users[id].append(
[leaders[group[1]], 'MEMBER']
)
# Member emails
for i in range(2, len(group)):
group_users[id].append(
[students[group[i]], 'MEMBER']
)
# Mandatory user
group_users[id] += sync_sheets_and_groups['mandatory_members']
with open(data_path / 'group_users.pickle', 'wb') as file:
pickle.dump(group_users, file)
with open(data_path / 'group_users.pickle', 'rb') as file:
group_users = pickle.load(file)
[logger.debug(x) for x in group_users]
# # Add users to groups
# user_results = []
# for group in group_users:
# for group_user in group_users[group]:
# user_results.append(
# add_user_to_group(service, group, group_user[0], group_user[1])
# )
#
# with open(data_path / 'user_results.pickle', 'wb') as file:
# pickle.dump(user_results, file)
# with open(data_path / 'user_results.pickle', 'rb') as file:
# user_results = pickle.load(file)
#
# [logger.debug(x) for x in user_results]
# students = dict(zip(
# [i[0] if i else "" for i in ranges[1]['values']],
# [i[0] if i else "" for i in ranges[2]['values']]
# ))
#
# leaders = dict(zip(
# [i[0] if i else "" for i in ranges[3]['values']],
# [i[0] if i else "" for i in ranges[4]['values']]
# ))
# if id not in synced_users:
# synced_users[id] = set()
#
# member_emails = set()
#
# # Leader email
# member_emails.add(
# leaders[group[1]]
# )
#
# # Member emails
# for i in range(2, len(group)):
# member_emails.add(
# students[group[i]]
# )
#
# # Mandatory emails
# member_emails |= set(sync_sheets_and_groups['mandatory_members'])
#
# # Synced users
# member_emails -= synced_users[id]
# synced_users[id] |= member_emails
#
# member_emails = list(member_emails)
#
# logger.debug('Name: %s - Description: %s - Users: %s',
# name, description, member_emails)
#
# if not synced_users_dictionary_creation:
# # TODO
# number_of_registered_users += len(member_emails)
#
# logger.debug('Result: %s', result)
# # -----
# # Might need rework
# # -----
#
# service = get_groupsettings_service()
#
# group_emails = []
# for group_name in group_names:
# group_emails.append(
# (translit(group_name, "ru", reversed=True)).lower() \
# + "@" \
# + create_google_groups['google_domain']
# )
#
# with open(data_path / 'group_emails.pickle', 'wb') as file:
# pickle.dump(group_emails, file)
# with open(data_path / 'group_emails.pickle', 'rb') as file:
# group_emails = pickle.load(file)
#
# [logger.debug(x) for x in group_emails]
#
# settings_results = []
# for group_email in group_emails:
# settings_results.append(
# update_group_settings(
# service,
# group_email,
# {
# "whoCanJoin": "INVITED_CAN_JOIN",
# "whoCanViewMembership": "ALL_IN_DOMAIN_CAN_VIEW",
# "whoCanViewGroup": "ALL_IN_DOMAIN_CAN_VIEW",
# "whoCanPostMessage": "ALL_IN_DOMAIN_CAN_POST",
# "isArchived": "true"
# }
# )
# )
#
# with open(data_path / 'settings_results.pickle', 'wb') as file:
# pickle.dump(settings_results, file)
# with open(data_path / 'settings_results.pickle', 'rb') as file:
# settings_results = pickle.load(file)
#
# [logger.debug(x) for x in settings_results]
except Exception as exception:
logger.error(exception, exc_info=True)
# logger.debug('Writing synced users to: %s', synced_users_path)
# with open(synced_users_path, 'wb') as f:
# pickle.dump(synced_users, f)
#
# logger.info('Update finished. Registered %s users. Sleeping for %s seconds.',
# number_of_registered_users, sync_sheets_and_groups['sleep_time'])
sleep(sync_sheets_and_groups['sleep_time'])
if __name__ == '__main__':
main()
| 2.46875 | 2 |
src/GridCal/Gui/TowerBuilder/gui.py | SanPen/GridCal | 284 | 11656 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'gui.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from .matplotlibwidget import MatplotlibWidget
from .icons_rc import *
class Ui_Dialog(object):
def setupUi(self, Dialog):
if not Dialog.objectName():
Dialog.setObjectName(u"Dialog")
Dialog.resize(1183, 675)
self.gridLayout = QGridLayout(Dialog)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setContentsMargins(1, 1, 1, 1)
self.tabWidget = QTabWidget(Dialog)
self.tabWidget.setObjectName(u"tabWidget")
self.tab_2 = QWidget()
self.tab_2.setObjectName(u"tab_2")
self.verticalLayout_6 = QVBoxLayout(self.tab_2)
self.verticalLayout_6.setObjectName(u"verticalLayout_6")
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.main_splitter = QSplitter(self.tab_2)
self.main_splitter.setObjectName(u"main_splitter")
self.main_splitter.setOrientation(Qt.Horizontal)
self.frame_8 = QFrame(self.main_splitter)
self.frame_8.setObjectName(u"frame_8")
self.frame_8.setFrameShape(QFrame.NoFrame)
self.frame_8.setFrameShadow(QFrame.Raised)
self.verticalLayout_5 = QVBoxLayout(self.frame_8)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.frame_5 = QFrame(self.frame_8)
self.frame_5.setObjectName(u"frame_5")
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth())
self.frame_5.setSizePolicy(sizePolicy)
self.frame_5.setFrameShape(QFrame.NoFrame)
self.frame_5.setFrameShadow(QFrame.Raised)
self.horizontalLayout = QHBoxLayout(self.frame_5)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.label_9 = QLabel(self.frame_5)
self.label_9.setObjectName(u"label_9")
self.horizontalLayout.addWidget(self.label_9)
self.name_lineEdit = QLineEdit(self.frame_5)
self.name_lineEdit.setObjectName(u"name_lineEdit")
self.horizontalLayout.addWidget(self.name_lineEdit)
self.verticalLayout_5.addWidget(self.frame_5)
self.frame_6 = QFrame(self.frame_8)
self.frame_6.setObjectName(u"frame_6")
sizePolicy.setHeightForWidth(self.frame_6.sizePolicy().hasHeightForWidth())
self.frame_6.setSizePolicy(sizePolicy)
self.frame_6.setFrameShape(QFrame.NoFrame)
self.frame_6.setFrameShadow(QFrame.Raised)
self.horizontalLayout_3 = QHBoxLayout(self.frame_6)
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(self.horizontalSpacer_2)
self.label_8 = QLabel(self.frame_6)
self.label_8.setObjectName(u"label_8")
self.horizontalLayout_3.addWidget(self.label_8)
self.frequency_doubleSpinBox = QDoubleSpinBox(self.frame_6)
self.frequency_doubleSpinBox.setObjectName(u"frequency_doubleSpinBox")
self.frequency_doubleSpinBox.setDecimals(0)
self.frequency_doubleSpinBox.setValue(50.000000000000000)
self.horizontalLayout_3.addWidget(self.frequency_doubleSpinBox)
self.label_11 = QLabel(self.frame_6)
self.label_11.setObjectName(u"label_11")
self.horizontalLayout_3.addWidget(self.label_11)
self.rho_doubleSpinBox = QDoubleSpinBox(self.frame_6)
self.rho_doubleSpinBox.setObjectName(u"rho_doubleSpinBox")
self.rho_doubleSpinBox.setMaximum(9999999.000000000000000)
self.rho_doubleSpinBox.setValue(100.000000000000000)
self.horizontalLayout_3.addWidget(self.rho_doubleSpinBox)
self.verticalLayout_5.addWidget(self.frame_6)
self.splitter = QSplitter(self.frame_8)
self.splitter.setObjectName(u"splitter")
self.splitter.setMaximumSize(QSize(16777215, 16777215))
self.splitter.setOrientation(Qt.Vertical)
self.frame_3 = QFrame(self.splitter)
self.frame_3.setObjectName(u"frame_3")
self.frame_3.setFrameShape(QFrame.NoFrame)
self.frame_3.setFrameShadow(QFrame.Raised)
self.verticalLayout_8 = QVBoxLayout(self.frame_3)
self.verticalLayout_8.setObjectName(u"verticalLayout_8")
self.label_12 = QLabel(self.frame_3)
self.label_12.setObjectName(u"label_12")
self.verticalLayout_8.addWidget(self.label_12)
self.wires_tableView = QTableView(self.frame_3)
self.wires_tableView.setObjectName(u"wires_tableView")
self.verticalLayout_8.addWidget(self.wires_tableView)
self.frame_7 = QFrame(self.frame_3)
self.frame_7.setObjectName(u"frame_7")
self.frame_7.setFrameShape(QFrame.StyledPanel)
self.frame_7.setFrameShadow(QFrame.Raised)
self.horizontalLayout_4 = QHBoxLayout(self.frame_7)
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.add_to_tower_pushButton = QPushButton(self.frame_7)
self.add_to_tower_pushButton.setObjectName(u"add_to_tower_pushButton")
icon = QIcon()
icon.addFile(u":/Icons/icons/plus.svg", QSize(), QIcon.Normal, QIcon.Off)
self.add_to_tower_pushButton.setIcon(icon)
self.horizontalLayout_4.addWidget(self.add_to_tower_pushButton)
self.horizontalSpacer_3 = QSpacerItem(990, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(self.horizontalSpacer_3)
self.verticalLayout_8.addWidget(self.frame_7)
self.splitter.addWidget(self.frame_3)
self.frame_4 = QFrame(self.splitter)
self.frame_4.setObjectName(u"frame_4")
self.frame_4.setFrameShape(QFrame.NoFrame)
self.frame_4.setFrameShadow(QFrame.Raised)
self.verticalLayout_4 = QVBoxLayout(self.frame_4)
self.verticalLayout_4.setObjectName(u"verticalLayout_4")
self.verticalLayout_4.setContentsMargins(9, 9, 9, 9)
self.label_10 = QLabel(self.frame_4)
self.label_10.setObjectName(u"label_10")
self.verticalLayout_4.addWidget(self.label_10)
self.tower_tableView = QTableView(self.frame_4)
self.tower_tableView.setObjectName(u"tower_tableView")
self.verticalLayout_4.addWidget(self.tower_tableView)
self.frame = QFrame(self.frame_4)
self.frame.setObjectName(u"frame")
self.frame.setFrameShape(QFrame.NoFrame)
self.frame.setFrameShadow(QFrame.Raised)
self.horizontalLayout_2 = QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.delete_from_tower_pushButton = QPushButton(self.frame)
self.delete_from_tower_pushButton.setObjectName(u"delete_from_tower_pushButton")
icon1 = QIcon()
icon1.addFile(u":/Icons/icons/minus.svg", QSize(), QIcon.Normal, QIcon.Off)
self.delete_from_tower_pushButton.setIcon(icon1)
self.horizontalLayout_2.addWidget(self.delete_from_tower_pushButton)
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(self.horizontalSpacer)
self.compute_pushButton = QPushButton(self.frame)
self.compute_pushButton.setObjectName(u"compute_pushButton")
icon2 = QIcon()
icon2.addFile(u":/Icons/icons/calc.svg", QSize(), QIcon.Normal, QIcon.Off)
self.compute_pushButton.setIcon(icon2)
self.compute_pushButton.setIconSize(QSize(16, 16))
self.horizontalLayout_2.addWidget(self.compute_pushButton)
self.verticalLayout_4.addWidget(self.frame)
self.splitter.addWidget(self.frame_4)
self.verticalLayout_5.addWidget(self.splitter)
self.main_splitter.addWidget(self.frame_8)
self.PlotFrame = QFrame(self.main_splitter)
self.PlotFrame.setObjectName(u"PlotFrame")
self.PlotFrame.setFrameShape(QFrame.NoFrame)
self.PlotFrame.setFrameShadow(QFrame.Raised)
self.verticalLayout_7 = QVBoxLayout(self.PlotFrame)
self.verticalLayout_7.setObjectName(u"verticalLayout_7")
self.verticalLayout_7.setContentsMargins(9, 9, 9, 9)
self.label_4 = QLabel(self.PlotFrame)
self.label_4.setObjectName(u"label_4")
self.verticalLayout_7.addWidget(self.label_4)
self.plotwidget = MatplotlibWidget(self.PlotFrame)
self.plotwidget.setObjectName(u"plotwidget")
self.verticalLayout_7.addWidget(self.plotwidget)
self.frame_9 = QFrame(self.PlotFrame)
self.frame_9.setObjectName(u"frame_9")
self.frame_9.setMaximumSize(QSize(16777215, 24))
self.frame_9.setFrameShape(QFrame.StyledPanel)
self.frame_9.setFrameShadow(QFrame.Raised)
self.horizontalLayout_5 = QHBoxLayout(self.frame_9)
self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalSpacer_4 = QSpacerItem(19, 19, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(self.horizontalSpacer_4)
self.acceptButton = QPushButton(self.frame_9)
self.acceptButton.setObjectName(u"acceptButton")
self.horizontalLayout_5.addWidget(self.acceptButton)
self.verticalLayout_7.addWidget(self.frame_9)
self.main_splitter.addWidget(self.PlotFrame)
self.verticalLayout_6.addWidget(self.main_splitter)
self.tabWidget.addTab(self.tab_2, "")
self.tab = QWidget()
self.tab.setObjectName(u"tab")
self.verticalLayout_3 = QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.frame_10 = QFrame(self.tab)
self.frame_10.setObjectName(u"frame_10")
self.frame_10.setFrameShape(QFrame.StyledPanel)
self.frame_10.setFrameShadow(QFrame.Raised)
self.gridLayout_2 = QGridLayout(self.frame_10)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.label_2 = QLabel(self.frame_10)
self.label_2.setObjectName(u"label_2")
self.gridLayout_2.addWidget(self.label_2, 0, 1, 1, 1)
self.label_6 = QLabel(self.frame_10)
self.label_6.setObjectName(u"label_6")
self.gridLayout_2.addWidget(self.label_6, 2, 0, 1, 1)
self.z_tableView_abcn = QTableView(self.frame_10)
self.z_tableView_abcn.setObjectName(u"z_tableView_abcn")
self.gridLayout_2.addWidget(self.z_tableView_abcn, 1, 0, 1, 1)
self.y_tableView_abcn = QTableView(self.frame_10)
self.y_tableView_abcn.setObjectName(u"y_tableView_abcn")
self.gridLayout_2.addWidget(self.y_tableView_abcn, 1, 1, 1, 1)
self.label_7 = QLabel(self.frame_10)
self.label_7.setObjectName(u"label_7")
self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
self.z_tableView_abc = QTableView(self.frame_10)
self.z_tableView_abc.setObjectName(u"z_tableView_abc")
self.gridLayout_2.addWidget(self.z_tableView_abc, 3, 0, 1, 1)
self.label = QLabel(self.frame_10)
self.label.setObjectName(u"label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.z_tableView_seq = QTableView(self.frame_10)
self.z_tableView_seq.setObjectName(u"z_tableView_seq")
self.gridLayout_2.addWidget(self.z_tableView_seq, 5, 0, 1, 1)
self.label_3 = QLabel(self.frame_10)
self.label_3.setObjectName(u"label_3")
self.gridLayout_2.addWidget(self.label_3, 2, 1, 1, 1)
self.y_tableView_abc = QTableView(self.frame_10)
self.y_tableView_abc.setObjectName(u"y_tableView_abc")
self.gridLayout_2.addWidget(self.y_tableView_abc, 3, 1, 1, 1)
self.label_5 = QLabel(self.frame_10)
self.label_5.setObjectName(u"label_5")
self.gridLayout_2.addWidget(self.label_5, 4, 1, 1, 1)
self.y_tableView_seq = QTableView(self.frame_10)
self.y_tableView_seq.setObjectName(u"y_tableView_seq")
self.gridLayout_2.addWidget(self.y_tableView_seq, 5, 1, 1, 1)
self.verticalLayout_3.addWidget(self.frame_10)
self.tabWidget.addTab(self.tab, "")
self.gridLayout.addWidget(self.tabWidget, 4, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QMetaObject.connectSlotsByName(Dialog)
# setupUi
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QCoreApplication.translate("Dialog", u"Tower creation", None))
self.label_9.setText(QCoreApplication.translate("Dialog", u"Name", None))
self.label_8.setText(QCoreApplication.translate("Dialog", u"Frequency (Hz)", None))
self.label_11.setText(QCoreApplication.translate("Dialog", u"Earth resistivity (Ohm/m^3)", None))
self.label_12.setText(QCoreApplication.translate("Dialog", u"Wire catalogue", None))
#if QT_CONFIG(tooltip)
self.add_to_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Add wire", None))
#endif // QT_CONFIG(tooltip)
self.add_to_tower_pushButton.setText("")
        self.label_10.setText(QCoreApplication.translate("Dialog", u"Wire composition", None))
#if QT_CONFIG(tooltip)
self.delete_from_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Delete wire", None))
#endif // QT_CONFIG(tooltip)
self.delete_from_tower_pushButton.setText("")
#if QT_CONFIG(tooltip)
self.compute_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Compute matrices", None))
#endif // QT_CONFIG(tooltip)
self.compute_pushButton.setText("")
self.label_4.setText(QCoreApplication.translate("Dialog", u"Tower", None))
self.acceptButton.setText(QCoreApplication.translate("Dialog", u"Accept", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QCoreApplication.translate("Dialog", u"Tower designer", None))
self.label_2.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for ABCN", None))
self.label_6.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) for ABC", None))
self.label_7.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) in sequence components", None))
self.label.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) for ABCN", None))
self.label_3.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for ABC", None))
self.label_5.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for the sequence components", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QCoreApplication.translate("Dialog", u"Impedance matrices", None))
# retranslateUi
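# Typical use of this generated class (illustrative sketch, not part of the pyside2-uic output):
#   app = QApplication(sys.argv)
#   dialog = QDialog()
#   ui = Ui_Dialog()
#   ui.setupUi(dialog)
#   dialog.show()
#   sys.exit(app.exec_())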
| 1.976563 | 2 |
PythonAPI/util/check_lidar_bb.py | inverted-ai/carla | 0 | 11657 | #!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Lidar/BB check for CARLA
This script obtains the LiDAR's point cloud corresponding to all the vehicles
in the scene and makes sure that the points are inside the bounding box of the
corresponding actor.
This is done on a predefined route in Town03 with high speed and several aggressive
turns.
In a nutshell, the script keeps a queue that is filled in each frame with a lidar point
cloud and a structure for storing the bounding boxes. The latter is emulated as a
sensor filling the queue in the on_tick callback of the carla.world. In this way, we make
sure that we are correctly synchronizing the lidar point cloud and BB/actor transformations.
Then, we select the points corresponding to each actor (car) in the scene and check that they
are inside the bounding boxes of that actor, all in each vehicle's frame of reference.
Important Data structure description:
+ Lidar data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'semlidar'
- [2] Point cloud in the form of a numpy dictionary with all semantic lidar information
- [3] Global transformation of the sensor
+ Bounding box data structure: three element tuple with:
- [0] Frame
- [1] Sensor name: 'bb'
- [2] List of actor information: each a tuple with:
- [0] Actor id
- [1] Actor type (blueprint's name)
    - [2] Actor's global transformation
    - [3] Actor's bounding box
+ ActorTrace class: Takes the Lidar data structure and one actor's information and
  checks whether all the data points related to this actor are inside its BB.
This is done in the local coordinate frame of the actor and should be done like:
trace = ActorTrace(actor_info, lidar_data)
trace.process()
trace.check_lidar_data()
"""
import glob
import os
import sys
import numpy as np
from queue import Queue
from queue import Empty
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
class ActorTrace(object):
"""Class that store and process information about an actor at certain moment."""
def __init__(self, actor, lidar):
self.set_lidar(lidar)
self.set_actor(actor)
self._lidar_pc_local = np.array([])
self._bb_vertices = np.array([])
self._bb_minlimits = [0, 0, 0]
self._bb_maxlimits = [0, 0, 0]
def set_lidar(self, lidar):
self._frame = lidar[0]
self._lidar_data = lidar[2]
self._lidar_transf = lidar[3]
def set_actor(self, actor):
self._actor_id = actor[0]
self._actor_type = actor[1]
self._actor_transf = actor[2]
self._actor_bb = actor[3]
def process(self):
# Filter lidar points that correspond to my actor id
data_actor = self._lidar_data[self._lidar_data['ObjIdx'] == self._actor_id]
# Take the xyz point cloud data and transform it to actor's frame
points = np.array([data_actor['x'], data_actor['y'], data_actor['z']]).T
points = np.append(points, np.ones((points.shape[0], 1)), axis=1)
points = np.dot(self._lidar_transf.get_matrix(), points.T).T # sensor -> world
points = np.dot(self._actor_transf.get_inverse_matrix(), points.T).T # world -> actor
points = points[:, :-1]
# Saving the points in 'local' coordinates
self._lidar_pc_local = points
# We compute the limits in the local frame of reference using the
# vertices of the bounding box
vertices = self._actor_bb.get_local_vertices()
ver_py = []
for v in vertices:
ver_py.append([v.x, v.y, v.z])
ver_np = np.array(ver_py)
self._bb_vertices = ver_np
self._bb_minlimits = ver_np.min(axis=0) - 0.001
self._bb_maxlimits = ver_np.max(axis=0) + 0.001
def print(self, print_if_empty = False):
if self._lidar_pc_local.shape[0] > 0 or print_if_empty:
np.savetxt("veh_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._lidar_pc_local)
np.savetxt("bb_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._bb_vertices)
def lidar_is_outside_bb(self, check_axis = [True, True, True]):
lidar_pc = self._lidar_pc_local
if check_axis[0]:
xmin = self._bb_minlimits[0]
xmax = self._bb_maxlimits[0]
out = np.any((lidar_pc[:,0] > xmax) | (lidar_pc[:,0] < xmin))
if out:
print("Problem with x axis")
return True
if check_axis[1]:
ymin = self._bb_minlimits[1]
ymax = self._bb_maxlimits[1]
out = np.any((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))
if out:
print("Problem with y axis")
return True
if check_axis[2]:
zmin = self._bb_minlimits[2]
zmax = self._bb_maxlimits[2]
out = np.any((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))
if out:
print("Problem with z axis")
return True
return False
def check_lidar_data(self):
if self.lidar_is_outside_bb():
print("Error!!! Points of lidar point cloud are outside its BB for car %d: %s " % (self._actor_id, self._actor_type))
self.print()
return False
else:
return True
def wait(world, frames=100, queue = None, slist = None):
for i in range(0, frames):
world.tick()
if queue != None and slist != None:
try:
for _i in range (0, len(slist)):
s_frame = queue.get(True, 1.0)
except Empty:
print(" Some of the sensor information is missed")
# Sensor callback.
# This is where you receive the sensor data and
# process it as you liked and the important part is that,
# at the end, it should include an element into the sensor queue.
def lidar_callback(sensor_data, sensor_queue, sensor_name):
sensor_pc_local = np.frombuffer(sensor_data.raw_data, dtype=np.dtype([
('x', np.float32), ('y', np.float32), ('z', np.float32),
('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
sensor_transf = sensor_data.transform
sensor_queue.put((sensor_data.frame, sensor_name, sensor_pc_local, sensor_transf))
def bb_callback(snapshot, world, sensor_queue, sensor_name):
data_array = []
vehicles = world.get_actors().filter('vehicle.*')
for actor in vehicles:
data_array.append((actor.id, actor.type_id, actor.get_transform(), actor.bounding_box))
sensor_queue.put((snapshot.frame, sensor_name, data_array))
def move_spectator(world, actor):
actor_tr = actor.get_transform()
spectator_transform = carla.Transform(actor_tr.location, actor_tr.rotation)
spectator_transform.location -= actor_tr.get_forward_vector() * 5
spectator_transform.location -= actor_tr.get_up_vector() * 3
spectator = world.get_spectator()
spectator.set_transform(spectator_transform)
def world_callback(snapshot, world, sensor_queue, sensor_name, actor):
move_spectator(world, actor)
bb_callback(snapshot, world, sensor_queue, sensor_name)
def process_sensors(w_frame, sensor_queue, sensor_number):
if sensor_number != 2:
print("Error!!! Sensor number should be two")
sl_data = None
bb_data = None
try:
for i in range (0, sensor_number):
s_frame = sensor_queue.get(True, 1.0)
while s_frame[0] != w_frame:
print("Warning! Missmatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))
print("This could be due to accumulated data for previous steps")
s_frame = sensor_queue.get(True, 1.0)
if s_frame[1] == "semlidar":
sl_data = s_frame
elif s_frame[1] == "bb":
bb_data = s_frame
#print(" Frame: %d Sensor: %s Len: %d " % (s_frame[0], s_frame[1], len(s_frame[2])))
except Empty:
print("Error!!! The needeinformation is not here!!!")
return
if sl_data == None or bb_data == None:
print("Error!!! Missmatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))
for actor_data in bb_data[2]:
trace_vehicle = ActorTrace(actor_data, sl_data)
trace_vehicle.process()
trace_vehicle.check_lidar_data()
class SpawnCar(object):
def __init__(self, location, rotation, filter="vehicle.*", autopilot = False, velocity = None):
self._filter = filter
self._transform = carla.Transform(location, rotation)
self._autopilot = autopilot
self._velocity = velocity
self._actor = None
self._world = None
def spawn(self, world):
self._world = world
actor_BP = world.get_blueprint_library().filter(self._filter)[0]
self._actor = world.spawn_actor(actor_BP, self._transform)
        self._actor.set_autopilot(self._autopilot)  # honor the autopilot flag given to the constructor
return self._actor
def destroy(self):
if self._actor != None:
self._actor.destroy()
CarPropList = [
SpawnCar(carla.Location(x=83, y= -40, z=5), carla.Rotation(yaw=-90), filter= "*lincoln*", autopilot=True),
SpawnCar(carla.Location(x=83, y= -30, z=3), carla.Rotation(yaw=-90), filter= "*ambulance*", autopilot=True),
SpawnCar(carla.Location(x=83, y= -20, z=3), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=120, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=100, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=140, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*model3*", autopilot=True),
SpawnCar(carla.Location(x=160, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*impala*", autopilot=False),
SpawnCar(carla.Location(x=180, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*a2*", autopilot=True),
SpawnCar(carla.Location(x=60, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*model3*", autopilot=True),
SpawnCar(carla.Location(x=80, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=100, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*mustan*", autopilot=True),
SpawnCar(carla.Location(x=120, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=140, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*impala*", autopilot=True),
SpawnCar(carla.Location(x=160, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*prius*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +20,z=2), carla.Rotation(yaw=+90), filter= "*dodge*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +40,z=2), carla.Rotation(yaw=+90), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +80,z=2), carla.Rotation(yaw=+90), filter= "*tt*", autopilot=True),
SpawnCar(carla.Location(x=243, y= -40,z=2), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=243, y= -20,z=2), carla.Rotation(yaw=-90), filter= "*mkz2017*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +00,z=2), carla.Rotation(yaw=-90), filter= "*mustan*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +20,z=2), carla.Rotation(yaw=-90), filter= "*dodge*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +40,z=2), carla.Rotation(yaw=-90), filter= "*charger2020*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +60,z=2), carla.Rotation(yaw=-90), filter= "*lincoln2020*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +80,z=2), carla.Rotation(yaw=-90), filter= "*tt*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+100,z=2), carla.Rotation(yaw=-90), filter= "*a2*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+120,z=2), carla.Rotation(yaw=-90), filter= "*wrangler_rubicon*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+140,z=2), carla.Rotation(yaw=-90), filter= "*c3*", autopilot=True)
]
def spawn_prop_vehicles(world):
for car in CarPropList:
car.spawn(world)
def destroy_prop_vehicles():
for car in CarPropList:
car.destroy()
def main():
# We start creating the client
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
world = client.get_world()
try:
# We need to save the settings to be able to recover them at the end
# of the script to leave the server in the same state that we found it.
original_settings = world.get_settings()
settings = world.get_settings()
        # We set CARLA synchronous mode
settings.fixed_delta_seconds = 0.05
settings.synchronous_mode = True
world.apply_settings(settings)
traffic_manager = client.get_trafficmanager(8000)
traffic_manager.set_synchronous_mode(True)
# We create the sensor queue in which we keep track of the information
# already received. This structure is thread safe and can be
# accessed by all the sensors callback concurrently without problem.
sensor_queue = Queue()
# Spawning ego vehicle
actor_BP = world.get_blueprint_library().filter("vehicle.lincoln.mkz2017")[0]
car_tr = carla.Transform(carla.Location(x=239, y=125, z=0.9), carla.Rotation(yaw=-88.5))
actor = world.spawn_actor(actor_BP, car_tr)
world.tick()
move_spectator(world, actor)
spawn_prop_vehicles(world)
wait(world, 10)
# We create all the sensors and keep them in a list for convenience.
sensor_list = []
lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic')
lidar_bp.set_attribute('channels', '64')
lidar_bp.set_attribute('points_per_second', '500000')
lidar_bp.set_attribute('range', '300')
lidar_bp.set_attribute('upper_fov', '10.0')
lidar_bp.set_attribute('lower_fov', '-90.0')
lidar_tr = carla.Transform(carla.Location(z=3), carla.Rotation(yaw=0))
lidar = world.spawn_actor(lidar_bp, lidar_tr, attach_to=actor)
lidar.listen(lambda data: lidar_callback(data, sensor_queue, "semlidar"))
world.on_tick(lambda snapshot: world_callback(snapshot, world, sensor_queue, "bb", actor))
sensor_list.append(lidar)
sensor_list.append(actor) # actor acts as a 'sensor' to simplify bb-lidar data comparison
# Set autopilot for main vehicle
actor.enable_constant_velocity(carla.Vector3D(20, 0, 0))
for _i in range(0, 100):
# Tick the server
world.tick()
w_frame = world.get_snapshot().frame
process_sensors(w_frame, sensor_queue, len(sensor_list))
actor.disable_constant_velocity()
finally:
world.apply_settings(original_settings)
# Destroy all the actors
destroy_prop_vehicles()
for sensor in sensor_list:
sensor.destroy()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print(' - Exited by user.')
| 2.640625 | 3 |
claim.py | bukovyn/claim | 0 | 11658 | <gh_stars>0
#!/usr/bin/env python3
""" Text files created on DOS/Windows machines have different line endings than
files created on Unix/Linux. DOS uses carriage return and new line ("\r\n")
as a line ending, while Unix uses just new line ("\n"). The purpose of this
script is to have a quick, on the go, shell friendly solution to convert one
to the other.
"""
import sys
import argparse
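# Example invocations (file names are illustrative):
#   python3 claim.py notes.txt               # convert notes.txt to Unix ("\n") endings
#   python3 claim.py -dos notes.txt logs.txt # convert both files to DOS ("\r\n") endings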
def main():
""" Removes error traceback clutter and converts files specified.
"""
sys.tracebacklimit = 0
args = commands()
for filename in args.filenames:
convert(filename, args.dos)
def commands():
""" Sets up command line arguments and improper argument error handling.
Returns:
parser (object)
"""
parser = argparse.ArgumentParser()
parser.add_argument('-dos', action='store_true',
help="converts file to DOS")
parser.add_argument('filenames', metavar='filename',
type=str, nargs='+', help="file to be converted")
return parser.parse_args()
def convert(filename, flag):
""" Converts the file's line endings appropriately.
Args:
filename (string): the file being converted
flag (bool): defaults to UNIX. If flag is true, converts line endings to DOS
"""
unix, dos = '\n', '\r\n'
style = 'UNIX'
with open(filename, 'rb') as f:
content = f.read().decode('UTF-8')
    if flag:
        style = 'DOS'
        # normalize to Unix first so files that already use "\r\n" are not doubled up
        content = content.replace(dos, unix).replace(unix, dos)
else:
content = content.replace(dos, unix)
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("converting file '{}' to {} ...".format(filename, style))
if __name__ == '__main__':
main()
| 3.40625 | 3 |
reference/Python/media/moviepy/audio/extract_audio.py | steadylearner/code | 4 | 11659 | import sys
from moviepy.editor import *
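# Usage sketch (file names are illustrative): the first argument is the source
# video, the second is the path the extracted audio track is written to, e.g.
#   python extract_audio.py input.mp4 output.mp3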
clip = VideoFileClip(sys.argv[1])
audioclip = clip.audio
audioclip.write_audiofile(sys.argv[2])
| 2.390625 | 2 |
objettoqt/mixins.py | brunonicko/objettoqt | 0 | 11660 | # -*- coding: utf-8 -*-
"""Mix-in classes for `Qt` types."""
from ._mixins import (
OQAbstractItemModelMixin,
OQAbstractItemViewMixin,
OQObjectMixin,
OQWidgetMixin,
)
from ._views import OQListViewMixin
__all__ = [
"OQObjectMixin",
"OQWidgetMixin",
"OQAbstractItemModelMixin",
"OQAbstractItemViewMixin",
"OQListViewMixin",
]
| 1.15625 | 1 |
util/mccLog.py | ccchooko/webControlClient | 0 | 11661 | <gh_stars>0
#-*-coding:utf8-*-
import logging
from datetime import datetime
class mccLog(object):
def __init__(self):
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename= datetime.now().strftime("%Y%m%d%H%M%S") + '.log',
filemode='a')
def mccWriteLog(self, logContent):
logging.info(logContent)
def mccError(self, errorContent):
logging.error(errorContent)
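# Minimal usage sketch of the wrapper above (messages are illustrative):
#   log = mccLog()
#   log.mccWriteLog('processing started')
#   log.mccError('processing failed')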
| 2.796875 | 3 |
Learning-Python/Jumble-Solver/jumble_solver.py | oliverkeen/Sandbox | 0 | 11662 | # <NAME>
# Software Engineering 001
# jumble_solver.py
# 2/17/2021
# Assignment:
# Consider the game "Jumble"
# https://www.sandiegouniontribune.com/sd-jumble-daily-htmlstory.html
# Create a Python program to find the individual words in Jumble puzzles such
# that INJURE prints after entering the following: solve("JNUIER")
from PyDictionary import PyDictionary # Installation: pip install PyDictionary
from math import factorial
from random import shuffle
def solve(jumble):
combos = []
chars = list(jumble.upper())
dict = PyDictionary()
# Maximum possible unique combinations of chars
limit = factorial(len(chars))
while len(combos) < limit:
# Generates random string from chars
shuffle(chars)
tmp = "".join(chars)
# Appends tmp to combos list only if it is unique
if tmp not in combos:
combos.append(tmp)
# Prints tmp only if it returns an English meaning
if dict.meaning(tmp, disable_errors = True):
print(tmp)
break
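# Example from the assignment statement above: solve("JNUIER") should print INJURE.
# Uncomment to try it (PyDictionary needs network access for meaning lookups):
# solve("JNUIER")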
| 4.15625 | 4 |
msm/skill_entry.py | luca-vercelli/mycroft-skills-manager | 0 | 11663 | # Copyright (c) 2018 <NAME>, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MycroftAI/mycroft-skills-manager).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import logging
import os
import shutil
import subprocess
import yaml
from contextlib import contextmanager
from difflib import SequenceMatcher
from functools import wraps
from git import Repo, GitError
from git.exc import GitCommandError
from lazy import lazy
from os.path import exists, join, basename, dirname, isfile
from shutil import rmtree, move
from subprocess import PIPE, Popen
from tempfile import mktemp, gettempdir
from threading import Lock
from typing import Callable
from pako import PakoManager
from msm import SkillRequirementsException, git_to_msm_exceptions
from msm.exceptions import PipRequirementsException, \
SystemRequirementsException, AlreadyInstalled, SkillModified, \
AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException
from msm.util import cached_property, Git
LOG = logging.getLogger(__name__)
# Branches which can be switched from when updating
# TODO Make this configurable
SWITCHABLE_BRANCHES = ['master']
# default constraints to use if no are given
DEFAULT_CONSTRAINTS = '/etc/mycroft/constraints.txt'
FIVE_MINUTES = 300
@contextmanager
def work_dir(directory):
old_dir = os.getcwd()
os.chdir(directory)
try:
yield
finally:
os.chdir(old_dir)
def _backup_previous_version(func: Callable = None):
"""Private decorator to back up previous skill folder"""
@wraps(func)
def wrapper(self, *args, **kwargs):
self.old_path = None
if self.is_local:
self.old_path = join(gettempdir(), self.name)
if exists(self.old_path):
rmtree(self.old_path)
shutil.copytree(self.path, self.old_path)
try:
func(self, *args, **kwargs)
# Modified skill or GitError should not restore working copy
except (SkillModified, GitError, GitException):
raise
except Exception:
LOG.info('Problem performing action. Restoring skill to '
'previous state...')
if exists(self.path):
rmtree(self.path)
if self.old_path and exists(self.old_path):
shutil.copytree(self.old_path, self.path)
self.is_local = exists(self.path)
raise
finally:
# Remove temporary path if needed
if self.old_path and exists(self.old_path):
rmtree(self.old_path)
return wrapper
class SkillEntry(object):
pip_lock = Lock()
manifest_yml_format = {
'dependencies': {
'system': {},
'exes': [],
'skill': [],
'python': []
}
}
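    # Illustrative manifest.yml matching the structure above (package names are made up):
    #   dependencies:
    #     system:
    #       all: libfoo-dev
    #     exes:
    #       - vlc
    #     skill:
    #       - some-other-skill
    #     python:
    #       - requests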
def __init__(self, name, path, url='', sha='', msm=None):
url = url.rstrip('/')
url = url[:-len('.git')] if url.endswith('.git') else url
self.path = path
self.url = url
self.sha = sha
self.msm = msm
if msm:
u = url.lower()
self.meta_info = msm.repo.skills_meta_info.get(u, {})
else:
self.meta_info = {}
if name is not None:
self.name = name
elif 'name' in self.meta_info:
self.name = self.meta_info['name']
else:
self.name = basename(path)
# TODO: Handle git:// urls as well
from_github = False
if url.startswith('https://'):
url_tokens = url.rstrip("/").split("/")
from_github = url_tokens[-3] == 'github.com' if url else False
self.author = self.extract_author(url) if from_github else ''
self.id = self.extract_repo_id(url) if from_github else self.name
self.is_local = exists(path)
self.old_path = None # Path of previous version while upgrading
@property
def is_beta(self):
return not self.sha or self.sha == 'HEAD'
@property
def is_dirty(self):
"""True if different from the version in the mycroft-skills repo.
Considers a skill dirty if
- the checkout sha doesn't match the mycroft-skills repo
- the skill doesn't exist in the mycroft-skills repo
- the skill is not a git repo
- has local modifications
"""
if not exists(self.path):
return False
try:
checkout = Git(self.path)
mod = checkout.status(porcelain=True, untracked_files='no') != ''
current_sha = checkout.rev_parse('HEAD')
except GitCommandError: # Not a git checkout
return True
skill_shas = {d[0]: d[3] for d in self.msm.repo.get_skill_data()}
return (self.name not in skill_shas or
current_sha != skill_shas[self.name] or
mod)
@cached_property(ttl=FIVE_MINUTES)
def skill_gid(self):
"""Format skill gid for the skill.
This property does some Git gymnastics to determine its return value.
When a device boots, each skill accesses this property several times.
To reduce the amount of boot time, cache the value returned by this
property. Cache expires five minutes after it is generated.
"""
LOG.debug('Generating skill_gid for ' + self.name)
gid = ''
if self.is_dirty:
gid += '@|'
if self.meta_info != {}:
gid += self.meta_info['skill_gid']
else:
name = self.name.split('.')[0]
gid += name
return gid
def __str__(self):
return self.name
def attach(self, remote_entry):
"""Attach a remote entry to a local entry"""
self.name = remote_entry.name
self.sha = remote_entry.sha
self.url = remote_entry.url
self.author = remote_entry.author
return self
@classmethod
def from_folder(cls, path, msm=None, use_cache=True):
"""Find or create skill entry from folder path.
Arguments:
path: path of skill folder
msm: msm instance to use for caching and extended information
retrieval.
use_cache: Enable/Disable cache usage. defaults to True
"""
if msm and use_cache:
skills = {skill.path: skill for skill in msm.local_skills.values()}
if path in skills:
return skills[path]
return cls(None, path, cls.find_git_url(path), msm=msm)
@classmethod
def create_path(cls, folder, url, name=''):
return join(folder, '{}.{}'.format(
name or cls.extract_repo_name(url), cls.extract_author(url)
).lower())
@staticmethod
def extract_repo_name(url):
s = url.rstrip('/').split("/")[-1]
a, b, c = s.rpartition('.git')
if not c:
return a
return s
@staticmethod
def extract_author(url):
return url.rstrip('/').split("/")[-2].split(':')[-1]
@classmethod
def extract_repo_id(cls, url):
return '{}:{}'.format(cls.extract_author(url).lower(),
cls.extract_repo_name(url)).lower()
@staticmethod
def _tokenize(x):
return x.replace('-', ' ').split()
@staticmethod
def _extract_tokens(s, tokens):
s = s.lower().replace('-', ' ')
extracted = []
for token in tokens:
extracted += [token] * s.count(token)
s = s.replace(token, '')
s = ' '.join(i for i in s.split(' ') if i)
tokens = [i for i in s.split(' ') if i]
return s, tokens, extracted
@classmethod
def _compare(cls, a, b):
return SequenceMatcher(a=a, b=b).ratio()
def match(self, query, author=None):
search, search_tokens, search_common = self._extract_tokens(
query, ['skill', 'fallback', 'mycroft']
)
name, name_tokens, name_common = self._extract_tokens(
self.name, ['skill', 'fallback', 'mycroft']
)
weights = [
(9, self._compare(name, search)),
(9, self._compare(name.split(' '), search_tokens)),
(2, self._compare(name_common, search_common)),
]
if author:
author_weight = self._compare(self.author, author)
weights.append((5, author_weight))
else:
author_weight = 1.0
return author_weight * (
sum(weight * val for weight, val in weights) /
sum(weight for weight, val in weights)
)
def run_pip(self, constraints=None):
if not self.dependent_python_packages:
return False
# Use constraints to limit the installed versions
if constraints and not exists(constraints):
LOG.error('Couldn\'t find the constraints file')
return False
elif exists(DEFAULT_CONSTRAINTS):
constraints = DEFAULT_CONSTRAINTS
LOG.info('Installing requirements.txt for ' + self.name)
can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK)
pip_args = [sys.executable, '-m', 'pip', 'install']
if constraints:
pip_args += ['-c', constraints]
if not can_pip:
pip_args = ['sudo', '-n'] + pip_args
with self.pip_lock:
"""
Iterate over the individual Python packages and
install them one by one to enforce the order specified
in the manifest.
"""
for dependent_python_package in self.dependent_python_packages:
pip_command = pip_args + [dependent_python_package]
proc = Popen(pip_command, stdout=PIPE, stderr=PIPE)
pip_code = proc.wait()
if pip_code != 0:
stderr = proc.stderr.read().decode()
if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo':
raise PipRequirementsException(
2, '', 'Permission denied while installing pip '
'dependencies. Please run in virtualenv or use sudo'
)
raise PipRequirementsException(
pip_code, proc.stdout.read().decode(), stderr
)
return True
def install_system_deps(self):
self.run_requirements_sh()
system_packages = {
exe: (packages or '').split()
for exe, packages in self.dependent_system_packages.items()
}
LOG.info('Installing system requirements...')
all_deps = system_packages.pop('all', [])
try:
manager = PakoManager()
success = manager.install(all_deps, overrides=system_packages)
except RuntimeError as e:
LOG.warning('Failed to launch package manager: {}'.format(e))
success = False
missing_exes = [
exe for exe in self.dependencies.get('exes') or []
if not shutil.which(exe)
]
if missing_exes:
if not success:
LOG.warning('Failed to install dependencies.')
if all_deps:
LOG.warning('Please install manually: {}'.format(
' '.join(all_deps)
))
raise SkillRequirementsException('Could not find exes: {}'.format(
', '.join(missing_exes)
))
return success
def run_requirements_sh(self):
setup_script = join(self.path, "requirements.sh")
if not exists(setup_script):
return False
with work_dir(self.path):
rc = subprocess.call(["bash", setup_script])
if rc != 0:
LOG.error("Requirements.sh failed with error code: " + str(rc))
raise SystemRequirementsException(rc)
LOG.info("Successfully ran requirements.sh for " + self.name)
return True
def run_skill_requirements(self):
if not self.msm:
raise ValueError('Pass msm to SkillEntry to install skill deps')
try:
for skill_dep in self.dependent_skills:
LOG.info("Installing skill dependency: {}".format(skill_dep))
try:
self.msm.install(skill_dep)
except AlreadyInstalled:
pass
except Exception as e:
raise SkillRequirementsException(e)
def verify_info(self, info, fmt):
if not info:
return
if not isinstance(info, type(fmt)):
LOG.warning('Invalid value type manifest.yml for {}: {}'.format(
self.name, type(info)
))
return
if not isinstance(info, dict) or not fmt:
return
for key in info:
if key not in fmt:
LOG.warning('Unknown key in manifest.yml for {}: {}'.format(
self.name, key
))
continue
self.verify_info(info[key], fmt[key])
@lazy
def skill_info(self):
yml_path = join(self.path, 'manifest.yml')
if exists(yml_path):
LOG.info('Reading from manifest.yml')
with open(yml_path) as f:
info = yaml.safe_load(f)
self.verify_info(info, self.manifest_yml_format)
return info or {}
return {}
@lazy
def dependencies(self):
return self.skill_info.get('dependencies') or {}
@lazy
def dependent_skills(self):
skills = set()
reqs = join(self.path, "skill_requirements.txt")
if exists(reqs):
with open(reqs, "r") as f:
for i in f.readlines():
skill = i.strip()
if skill:
skills.add(skill)
for i in self.dependencies.get('skill') or []:
skills.add(i)
return list(skills)
@lazy
def dependent_python_packages(self):
reqs = join(self.path, "requirements.txt")
req_lines = []
if exists(reqs):
with open(reqs, "r") as f:
req_lines += f.readlines()
req_lines += self.dependencies.get('python') or []
# Strip comments
req_lines = [l.split('#')[0].strip() for l in req_lines]
return [i for i in req_lines if i] # Strip empty lines
@lazy
def dependent_system_packages(self):
return self.dependencies.get('system') or {}
def remove(self):
if not self.is_local:
raise AlreadyRemoved(self.name)
try:
rmtree(self.path)
self.is_local = False
except OSError as e:
raise RemoveException(str(e))
LOG.info('Successfully removed ' + self.name)
@_backup_previous_version
def install(self, constraints=None):
if self.is_local:
raise AlreadyInstalled(self.name)
LOG.info("Downloading skill: " + self.url)
try:
tmp_location = mktemp()
Repo.clone_from(self.url, tmp_location)
self.is_local = True
Git(tmp_location).reset(self.sha or 'HEAD', hard=True)
except GitCommandError as e:
raise CloneException(e.stderr)
if isfile(join(tmp_location, '__init__.py')):
move(join(tmp_location, '__init__.py'),
join(tmp_location, '__init__'))
try:
move(tmp_location, self.path)
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
finally:
if isfile(join(self.path, '__init__')):
move(join(self.path, '__init__'),
join(self.path, '__init__.py'))
LOG.info('Successfully installed ' + self.name)
def update_deps(self, constraints=None):
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
def _find_sha_branch(self):
git = Git(self.path)
sha_branches = git.branch(
contains=self.sha, all=True
).split('\n')
sha_branch = [b for b in sha_branches if ' -> ' not in b][0]
sha_branch = sha_branch.strip('* \n').replace('remotes/', '')
for remote in git.remote().split('\n'):
sha_branch = sha_branch.replace(remote + '/', '')
return sha_branch
@_backup_previous_version
def update(self):
if not self.is_local:
raise NotInstalled('{} is not installed'.format(self.name))
git = Git(self.path)
with git_to_msm_exceptions():
sha_before = git.rev_parse('HEAD')
modified_files = git.status(porcelain=True, untracked='no')
if modified_files != '':
raise SkillModified('Uncommitted changes:\n' + modified_files)
git.fetch()
current_branch = git.rev_parse('--abbrev-ref', 'HEAD').strip()
if self.sha and current_branch in SWITCHABLE_BRANCHES:
# Check out correct branch
git.checkout(self._find_sha_branch())
git.merge(self.sha or 'origin/HEAD', ff_only=True)
sha_after = git.rev_parse('HEAD')
if sha_before != sha_after:
self.update_deps()
LOG.info('Updated ' + self.name)
# Trigger reload by modifying the timestamp
os.utime(join(self.path, '__init__.py'))
return True
else:
LOG.info('Nothing new for ' + self.name)
return False
@staticmethod
def find_git_url(path):
"""Get the git url from a folder"""
try:
LOG.debug(
'Attempting to retrieve the remote origin URL config for '
'skill in path ' + path
)
return Git(path).config('remote.origin.url')
except GitError:
return ''
def __repr__(self):
return '<SkillEntry {}>'.format(' '.join(
'{}={}'.format(attr, self.__dict__[attr])
for attr in ['name', 'author', 'is_local']
))
| 1.492188 | 1 |
sleekxmpp/plugins/xep_0027/stanza.py | elrond79/SleekXMPP | 3 | 11664 | <gh_stars>1-10
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 <NAME>, <NAME>
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase
class Signed(ElementBase):
name = 'x'
namespace = 'jabber:x:signed'
plugin_attrib = 'signed'
interfaces = set(['signed'])
is_extension = True
def set_signed(self, value):
parent = self.parent()
xmpp = parent.stream
data = xmpp['xep_0027'].sign(value, parent['from'])
if data:
self.xml.text = data
else:
del parent['signed']
def get_signed(self):
return self.xml.text
class Encrypted(ElementBase):
name = 'x'
namespace = 'jabber:x:encrypted'
plugin_attrib = 'encrypted'
interfaces = set(['encrypted'])
is_extension = True
def set_encrypted(self, value):
parent = self.parent()
xmpp = parent.stream
data = xmpp['xep_0027'].encrypt(value, parent['to'].bare)
if data:
self.xml.text = data
else:
del parent['encrypted']
def get_encrypted(self):
parent = self.parent()
xmpp = parent.stream
if self.xml.text:
return xmpp['xep_0027'].decrypt(self.xml.text, parent['to'])
return None
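# Usage sketch (assumes these plugins are registered against Message/Presence stanzas):
#   msg['encrypted'] = 'secret text'          # calls xep_0027 encrypt() for the recipient
#   plaintext = msg['encrypted']              # decrypts the stored payload, if any
#   presence['signed'] = presence['status']   # attaches a signature via xep_0027 sign()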
| 2.03125 | 2 |
tcptofpc.py | catenacyber/fuzzpcap | 6 | 11665 | #tshark -r input.pcap -qz "follow,tcp,raw,0"
import struct
import sys
import binascii
import subprocess
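# Rough outline (based on the calls below): parse tshark's "follow,tcp,raw" text
# output for the first TCP stream, write the FPC magic, the two ports taken from
# the stream header lines, then one direction byte per payload chunk (1 when the
# line is tab-indented, i.e. the reverse direction) followed by the raw bytes,
# and finally the closing "FPC0" marker.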
result = subprocess.Popen( ["tshark", "-r", sys.argv[1], "-qz", "follow,tcp,raw,0"],
stdout=subprocess.PIPE)
sys.stdout.buffer.write(b"FPC\x80")
for i in range(4):
result.stdout.readline()
dp=result.stdout.readline().split(b":")[2]
sp=result.stdout.readline().split(b":")[2]
sys.stdout.buffer.write(struct.pack('>H', int(sp)))
sys.stdout.buffer.write(struct.pack('>H', int(dp)))
for l in result.stdout.readlines():
s2c = 0
if l[0] == 9:
l = l[1:]
s2c = 1
try:
r = binascii.unhexlify(l[:-1])
except:
continue
sys.stdout.buffer.write(struct.pack('>B', int(s2c)))
sys.stdout.buffer.write(r)
sys.stdout.buffer.write(b"FPC0")
| 2.25 | 2 |
AIJ Filter Collection/AIJ_Night_Filters.py | kjkoeller/BSU-Code | 1 | 11666 | """
Created: November 11, 2020
Author: <NAME>
Python Version 3.9
This program is meant to make the process of collecting the different filters from AIJ excel spreadsheets faster.
The user enters however many nights they have and the program goes through and checks those text files for the
different columns for,HJD, Amag, and Amag error for the B and V filters.
The program will also calculate the R magnitude from the rel flux of T1.
There are error catching statements within the program so if the user mistypes, the program will not crash and
close on them.
"""
import pandas as pd
from os import path
def main(c):
# warning prompts for the user to read to make sure this program works correctly
if c == 0:
# warning prompts for the user to read to make sure this program works correctly
print()
print("Make sure you have turned the output xls files from AIJ into tab delimited text files. "
"Since these xls files are corrupt for reading directly from.")
print("You will also need to go into each night and filter and "
"make the HJD column 6 decimals instead of the output of 3 within Excel.")
print()
else:
print()
while True:
# checks to see whether you have entered a number and a correct filter letter
try:
num = int(input("Number of nights you have: "))
filter_name = input("Which filter are these nights in (B, V, R): ")
if filter_name.upper() == "B" or filter_name.upper() == "V" or filter_name.upper() == "R":
break
else:
print("Please enter B, V, or R for your filter.")
print()
continue
except ValueError:
print("You have entered an invalid number for your number of nights. Please enter a number.")
print("")
get_filters(num)
def get_filters(n):
"""
    Takes the number of nights for a given filter and extracts the HJD, Source_AMag_T1, and
    Source_AMag_Err_T1 columns from each night's measurement file
    :param n: Number of observation nights
    :return: one combined output text file for all nights in the given filter
"""
total_hjd = []
total_amag = []
total_error = []
    # loop through each night, prompt for its measurement file, and collect the needed columns
for i in range(n):
while True:
# makes sure the file pathway is real and points to some file
# (does not check if that file is the correct one though)
try:
# an example pathway for the files
# E:\Research\Data\NSVS_254037\2018.10.12-reduced\Check\V\2018.10.12.APASS.V_measurements.txt
file = input("Enter night %d file path: " % (i+1))
if path.exists(file):
break
else:
continue
except FileNotFoundError:
print("Please enter a correct file path")
# noinspection PyUnboundLocalVariable
df = pd.read_csv(file, delimiter="\t")
# set parameters to lists from the file by the column header
hjd = []
amag = []
amag_error = []
try:
hjd = list(df["HJD"])
amag = list(df["Source_AMag_T1"])
amag_error = list(df["Source_AMag_Err_T1"])
except KeyError:
print("The file you entered does not have the columns of HJD, Source_AMag_T1, or Source_AMag_Err_T1. "
"Please re-enter the file path and make sure its the correct file.")
c = 1
main(c)
total_hjd.append(hjd)
total_amag.append(amag)
total_error.append(amag_error)
# converts the Dataframe embedded lists into a normal flat list
new_hjd = [item for elem in total_hjd for item in elem]
new_amag = [item for elem in total_amag for item in elem]
new_error = [item for elem in total_error for item in elem]
# outputs the new file to dataframe and then into a text file for use in Peranso or PHOEBE
data = pd.DataFrame({
"HJD": new_hjd,
"AMag": new_amag,
"AMag Error": new_error
})
print("")
output = input("What is the file output name (with file extension .txt): ")
data.to_csv(output, index=False, header=False, sep='\t')
print("")
print("Fished saving the file to the same location as this program.")
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
count = 0
main(count)
| 4 | 4 |
backend/songwriter/migrations/0006_auto_20170902_0723.py | giliam/turbo-songwriter | 0 | 11667 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-02 05:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('songwriter', '0005_auto_20170824_1726'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={'ordering': ['lastname', 'firstname']},
),
migrations.AlterModelOptions(
name='chord',
options={'ordering': ['note']},
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='theme',
options={'ordering': ['name']},
),
]
| 1.53125 | 2 |
util/textbox_utils.py | yannl35133/sslib | 0 | 11668 | CHARACTERS_PER_LINE = 39
def break_lines(text):
chars_in_line = 1
final_text = ''
skip = False
for char in text:
if chars_in_line >= CHARACTERS_PER_LINE:
if char == ' ':
# we happen to be on a space, se we can just break here
final_text += '\n'
skip = True
else:
# work backwards to find the space to break on
for i in range(len(final_text) - 1, 0, -1):
if final_text[i] == ' ':
final_text = final_text[:i] + '\n' + final_text[i + 1:]
break
chars_in_line = 0
chars_in_line += 1
if not skip:
final_text += char
skip = False
return final_text
if __name__ == '__main__':
print(break_lines('The <y<Spirit of the Sword>> guides the goddess\' chosen hero to <r<Skyloft Village>>'))
print(break_lines('Hey, you look like you have a Questions?'))
print(break_lines('Skyloft Peater/Peatrice\'s Crystals has Bug Net'))
| 3.765625 | 4 |
tools/isolate/data/isolate/with_flag.py | Scopetta197/chromium | 212 | 11669 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def main():
print 'with_flag: Verify the test data files were mapped properly'
assert len(sys.argv) == 2
mode = sys.argv[1]
assert mode in ('run', 'trace')
files = sorted(os.listdir('files1'))
tree = {
'test_file1.txt': 'Foo\n',
'test_file2.txt': 'Bar\n',
}
# Ignore .svn directory which happens to be there with --mode=trace
# from a svn checkout. The file shouldn't be there when --mode=run is used.
if mode == 'trace' and '.svn' in files:
files.remove('.svn')
if files != sorted(tree):
print '%s != %s' % (files, sorted(tree))
return 2
for k, v in tree.iteritems():
content = open(os.path.join('files1', k), 'rb').read()
if v != content:
print '%s: %r != %r' % (k, v, content)
return 3
root_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir, base = os.path.split(root_dir)
if mode == 'trace':
# Verify the parent directory.
parent_dir, base2 = os.path.split(parent_dir)
if base != 'isolate' or base2 != 'data':
print 'mode trace: Invalid root dir %s' % root_dir
return 4
else:
# Verify that we are not inside a checkout.
if base == 'data':
print 'mode run: Invalid root dir %s' % root_dir
return 5
return 0
if __name__ == '__main__':
sys.exit(main())
| 2.15625 | 2 |
tensorflow/load_mnist.py | stone-zeng/ising | 1 | 11670 | """Loading MNIST dataset.
"""
import struct
import numpy as np
class MNIST:
"""
Loading MNIST dataset.
In the directory of MNIST dataset, there should be the following files:
- Training set:
- train-images-idx3-ubyte
- train-labels-idx1-ubyte
- Test set:
- t10k-images-idx3-ubyte
- t10k-labels-idx1-ubyte
Functions
---------
next_batch()
image_pair(index: int)
sample_batch(batch_index: int)
to_ndarray()
Attributes
----------
data_type: Can be either `"test"` or `"train"`.
path: Path for MNIST data.
data_size: Size of the dataset. Default value `None` means using all data in MNIST.
batch_size: Size of the mini-batch. Default value `None` means using the whole dataset as
a mini-batch.
binarize: Whether to binarize the images (using 0 and 1 values). Default value is True.
reshape: Whether to reshape the images into 2D arrays. Default value is False.
one_hot: whether to use one-hot encoding for labels (e.g. using vector
`[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]` for 0). Default value is False.
"""
IMAGE_SIZE = 784
LABEL_SIZE = 1
_IMAGE_SIZE_FMT = ">784B"
_LABEL_SIZE_FMT = ">B"
IMAGE_SHAPE = (28, 28)
batch_index = 0
def __init__(self, data_type: str, path: str,
data_size: int = None,
batch_size: int = None,
binarize=True,
reshape=False,
one_hot=False):
self.data_type = data_type
self.path = path
# Options
self.binarize = binarize
self.reshape = reshape
self.one_hot = one_hot
# Data buffer
# `data_size` will be updated according to the actual data
image_buf, label_buf = self._read_file()
# Size
if data_size is None:
# `len(image_buf)` may not be exactly divided by 784
self.data_size = len(image_buf) // self.IMAGE_SIZE
else:
self.data_size = data_size
if batch_size is None:
self.batch_size = self.data_size
else:
if batch_size <= self.data_size:
self.batch_size = batch_size
else:
raise ValueError("batch size larger than data size")
self.batch_num = self.data_size // self.batch_size
# Data
self._images = self._get_image(image_buf)
self._labels = self._get_label(label_buf)
def _read_file(self):
if self.data_type == "test":
image_file_name = self.path + "t10k-images-idx3-ubyte"
label_file_name = self.path + "t10k-labels-idx1-ubyte"
elif self.data_type == "train":
image_file_name = self.path + "train-images-idx3-ubyte"
label_file_name = self.path + "train-labels-idx1-ubyte"
else:
raise ValueError("only type \"test\" and \"train\" are available")
# "rb" means reading + binary mode
with open(image_file_name, "rb") as image_file:
image_buf = image_file.read()
with open(label_file_name, "rb") as label_file:
label_buf = label_file.read()
return image_buf, label_buf
def _get_image(self, image_buf):
"""Get an image array from `image_buf`.
This is the structure of the image file (training set):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
"""
image_buf_len = self.data_size * self.IMAGE_SIZE + 16
image_offset = 16
image_arr = []
while image_offset < image_buf_len:
temp = struct.unpack_from(self._IMAGE_SIZE_FMT, image_buf, image_offset)
if self.binarize:
temp = np.vectorize(lambda x: 0 if x <= 127 else 1)(temp)
if self.reshape:
temp = np.reshape(temp, self.IMAGE_SHAPE)
image_arr.append(temp)
image_offset += self.IMAGE_SIZE
return image_arr
def _get_label(self, label_buf):
"""Get an label array from `label_buf`.
This is the structure of the label file (training set):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 60000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
"""
label_buf_len = self.data_size * self.LABEL_SIZE + 8
label_offset = 8
label_arr = []
while label_offset < label_buf_len:
temp = struct.unpack_from(self._LABEL_SIZE_FMT, label_buf, label_offset)[0]
if self.one_hot:
vec = np.zeros(10)
vec[temp] = 1
label_arr.append(vec)
else:
label_arr.append(temp)
label_offset += self.LABEL_SIZE
return label_arr
    def next_batch(self):
        """Return the mini-batch of (images, labels) at the current `batch_index`, then advance the index."""
this_batch = self.batch(self.batch_index)
self.batch_index = (self.batch_index + 1) % self.batch_num
return this_batch
def image_pair(self, index: int):
"""Return a (image, label) tuple at `index`."""
if index < self.data_size:
return self._images[index], self._labels[index]
raise IndexError("image index out of range")
def batch(self, batch_index: int):
"""Return a mini-batch of (image, label) tuples at `batch_index`."""
if batch_index < self.batch_num:
begin = batch_index * self.batch_size
end = (batch_index + 1) * self.batch_size
return self._images[begin:end], self._labels[begin:end]
raise IndexError("batch index out of range")
def to_ndarray(self):
"""Return the raw data tuple `(images, labels)` as `np.ndarray`.
"""
images = []
labels = []
for i in range(self.batch_num):
image, label = self.batch(i)
images.append(image)
labels.append(label)
return np.asarray(images), np.asarray(labels)
def _test():
data = MNIST("train", MNIST_PATH,
data_size=200, batch_size=8,
reshape=True, one_hot=False, binarize=False)
print("Meta-data:")
print("\tDataset size:", data.data_size)
print("\tBatch size:", data.batch_size)
col_num = 4
row_num = data.batch_size // col_num + 1
_test_random_images(data, col_num, row_num)
_test_random_batch(data, col_num, row_num)
_test_next_batch(data, col_num, row_num)
def _test_random_images(data, col_num, row_num):
images = []
labels = []
for _ in range(10):
index = random.randrange(data.data_size)
image, label = data.image_pair(index)
images.append(image)
labels.append(label)
_plot(images, labels, col_num=col_num, row_num=row_num)
def _test_random_batch(data, col_num, row_num):
index = random.randrange(data.batch_num)
images, labels = data.batch(index)
_plot(images, labels, col_num=col_num, row_num=row_num)
def _test_next_batch(data, col_num, row_num):
for _ in range(3):
images, labels = data.next_batch()
_plot(images, labels, col_num=col_num, row_num=row_num)
def _plot(images, labels, col_num, row_num):
for i, (image, label) in enumerate(zip(images, labels)):
plt.subplot(row_num, col_num, i + 1)
plt.imshow(image, cmap="gray")
plt.axis('off')
plt.title(str(label))
plt.show()
def _test_numpy():
images, labels = MNIST("train", MNIST_PATH,
data_size=200, batch_size=8,
reshape=False, one_hot=False, binarize=False).to_ndarray()
print(images.shape) # shape = (num_batches, batch_size, num_visible)
print(np.moveaxis(images, 0, -1).shape) # shape = (batch_size, num_visible, num_batches)
print(labels.shape) # shape = (num_batches, batch_size)
if __name__ == "__main__":
import random
import matplotlib.pyplot as plt
# Local MNIST data
MNIST_PATH = "../../machine-learning/data/mnist/"
_test()
_test_numpy()
| 3.8125 | 4 |
sarnet_td3/common/gpu_multithread.py | JingdiC/SARNet | 16 | 11671 | import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
                    if action == "None":  # Sending the string "None" makes the thread exit.
                        return
                    elif action == "get_action":
                        out = self.get_action(data, p_index)
                        self.output_queue.put(out)
                    elif action == "get_qdebug":
                        out = self.get_qdebug(data, p_index)
                        self.output_queue.put(out)
                    elif action == "get_loss":
                        out = self.get_loss(data, p_index)
                        self.output_queue.put(out)
                    elif action == "write_tboard":
                        self.write_tboard(data)
                    elif action == "add_to_buffer":
                        self.buffer_op.collect_exp(data)
                    elif action == "save_rew_info":
                        self.save_rew_info(data)
                    elif action == "save_benchmark":
                        out = self.save_benchmark(data)
                        self.output_queue.put(out)
                    elif action == "reset_rew_info":
                        self.reset_rew_info()
                    elif action == "save_model_rew":
                        if not (self.args.benchmark or self.args.display):
                            self.save_model(data)
                            self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
# Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
                    if action == "None":  # Sending the string "None" makes the thread exit.
                        return
                    elif action == "get_action":
                        out = self.get_action(data, p_index)
                        self.output_queue.put(out)
                    elif action == "get_loss":
                        out = self.get_loss(data, p_index)
                        self.output_queue.put(out)
                    elif action == "write_tboard":
                        self.write_tboard(data)
                    elif action == "add_to_buffer":
                        self.buffer_op.collect_exp(data)
                    elif action == "add_to_buffer_reinforce":
                        self.buffer_op.collect_exp(data)
                    elif action == "save_rew_info":
                        self.save_rew_info(data)
                    elif action == "save_benchmark":
                        out = self.save_benchmark(data)
                        self.output_queue.put(out)
                    elif action == "reset_rew_info":
                        self.reset_rew_info()
                    elif action == "save_model_rew":
                        if not (self.args.benchmark or self.args.display):
                            self.save_model(data)
                            self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2)
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train)
act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
train_step, buffer_data = data
agent = self.trainers[p_index]
loss = agent.update(self.trainers, buffer_data, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, terminal = data
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[-1][i].append(info_n[0]['n'])
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
self.agent_info[j].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(self.ep_success, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
episode_b_success = []
for j in range(self.num_env):
episode_b_success.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len
print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
self.final_ep_rewards.append(episode_b_success)
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv):
threads = []
sess = tf.compat.v1.get_default_session()
for t in range(args.num_gpu_threads):
input_q = queue.Queue()
output_q = queue.Queue()
if args.policy_grad == "maddpg":
threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
elif args.policy_grad == "reinforce":
threads.append(
MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
threads[t].start()
time.sleep(1)
return threads
def close_gputhreads(threads):
for t in threads:
t.input_queue.put(("None", None, None))
for t in threads:
t.join()
print('GPU trainers cancelled')
return
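# Minimal usage sketch (illustrative only; `trainers`, `args`, `buffer_op` and the
# observation/hidden-state arrays are assumed to be built by the training script,
# matching what `get_action` unpacks above):
#
#   threads = get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv)
#   worker = threads[0]
#   worker.input_queue.put(("get_action", 0, (obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, True)))
#   act, h, c, mem, attn = worker.output_queue.get()
#   close_gputhreads(threads)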
| 2 | 2 |
Scripts/Miscellaneous/Fake_news_web/app.py | valterm/Python_and_the_Web | 1 | 11672 | from flask import Flask, request, render_template
import joblib  # note: sklearn.externals.joblib was removed in newer scikit-learn releases
from feature import *
pipeline = joblib.load('pipeline.sav')
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/api',methods=['POST'])
def get_delay():
result=request.form
query_title = result['title']
query_author = result['author']
query_text = result['maintext']
print(query_text)
query = get_all_query(query_title, query_author, query_text)
##user_input = {'query':query}
pred = pipeline.predict(query)
print(pred)
dic = {1:'real',0:'fake'}
return f'<html><body><h1>{dic[pred[0]]}</h1> <form action="/"> <button type="submit">back </button> </form></body></html>'
if __name__ == '__main__':
app.run(port=8080, debug=True)
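# Minimal client sketch (illustrative only; assumes the server above is running
# locally and the `requests` package is installed):
#
#   import requests
#   resp = requests.post("http://localhost:8080/api",
#                        data={"title": "...", "author": "...", "maintext": "..."})
#   print(resp.text)  # HTML snippet reporting "real" or "fake"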
| 2.546875 | 3 |
1019.next-greater-node-in-linked-list.py | elfgzp/leetCode | 3 | 11673 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def nextLargerNodes(self, head: ListNode) -> List[int]:
nums = []
while head:
nums.append(head.val)
head = head.next
stack = []
res = [0] * len(nums)
for i, n in enumerate(nums):
while stack and nums[stack[-1]] < n:
res[stack.pop()] = n
stack.append(i)
return res
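# Example (values from the problem statement): for the list 2 -> 1 -> 5 the
# expected answer is [5, 5, 0]. Illustrative check, assuming the judge supplies
# the ListNode class commented out above:
#
#   head = ListNode(2)
#   head.next = ListNode(1)
#   head.next.next = ListNode(5)
#   assert Solution().nextLargerNodes(head) == [5, 5, 0]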
| 3.71875 | 4 |
WXApi/WXApi/__init__.py | KEDYY/pyweipi | 1 | 11674 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Create: 2014/5/20
Update: 2017/11/22
"""
from .WXError import *
from .WXMenu import *
from .WXUtils import *
from .event import *
from .request import MPCenter
__date__ = '2017/3/12'
__version__ = '1.0.1'
__license__ = 'The MIT License'
| 1.070313 | 1 |
evaluator_package/Parsing_tools.py | MONICA-Project/GOST-tools | 0 | 11675 | def is_field(token):
"""Checks if the token is a valid ogc type field
"""
return token in ["name", "description", "encodingType", "location", "properties", "metadata",
"definition", "phenomenonTime", "resultTime", "observedArea", "result", "id", "@iot.id",
"resultQuality", "validTime", "time", "parameters", "feature"]
def tokenize_parentheses(tokens):
    """Find tokens containing unparsed parentheses and split them
    (e.g. ['x(y', 'z)'] -> ['x', '(', 'y', 'z', ')']).
    :param tokens: a list of tokens
    :return: the list with previously unparsed parentheses tokenized
"""
for index, token in enumerate(tokens):
if ("(" in token or ")" in token) and len(token) > 1:
parenthesis_index = token.find("(")
parenthesis = "("
if parenthesis_index < 0:
parenthesis_index = token.find(")")
parenthesis = ")"
left_side = token[:parenthesis_index]
right_side = token[parenthesis_index + 1:]
del tokens[index]
if bool(left_side):
tokens.insert(index, left_side)
index += 1
tokens.insert(index, parenthesis)
if bool(right_side):
index += 1
tokens.insert(index, right_side)
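# Usage sketch (mirrors the docstring example):
#
#   tokens = ['x(y', 'z)']
#   tokenize_parentheses(tokens)
#   # tokens is now ['x', '(', 'y', 'z', ')']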
| 3.53125 | 4 |
salt/runners/mine.py | byteskeptical/salt | 12 | 11676 | <gh_stars>10-100
# -*- coding: utf-8 -*-
'''
A runner to access data from the salt mine
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python Libs
import logging
# Import salt libs
import salt.utils.minions
log = logging.getLogger(__name__)
def get(tgt, fun, tgt_type='glob'):
'''
Gathers the data from the specified minions' mine, pass in the target,
function to look up and the target type
CLI Example:
.. code-block:: bash
salt-run mine.get '*' network.interfaces
'''
ret = salt.utils.minions.mine_get(tgt, fun, tgt_type, __opts__)
return ret
def update(tgt,
tgt_type='glob',
clear=False,
mine_functions=None):
'''
.. versionadded:: 2017.7.0
Update the mine data on a certain group of minions.
tgt
Which minions to target for the execution.
tgt_type: ``glob``
The type of ``tgt``.
clear: ``False``
Boolean flag specifying whether updating will clear the existing
mines, or will update. Default: ``False`` (update).
mine_functions
Update the mine data on certain functions only.
This feature can be used when updating the mine for functions
that require refresh at different intervals than the rest of
the functions specified under ``mine_functions`` in the
minion/master config or pillar.
CLI Example:
.. code-block:: bash
salt-run mine.update '*'
salt-run mine.update 'juniper-edges' tgt_type='nodegroup'
'''
ret = __salt__['salt.execute'](tgt,
'mine.update',
tgt_type=tgt_type,
clear=clear,
mine_functions=mine_functions)
return ret
| 2.25 | 2 |
app.py | rhedgeco/test_plaid_webapp | 0 | 11677 | from plaid import Client
from backend.link_token import LinkToken
from general_falcon_webserver import WebApp
client = Client(client_id='5e2e3527dd6924001167e8e8', secret='<KEY>', environment='sandbox')
app = WebApp()
app.add_route('link', LinkToken(client))
app.launch_webserver()
| 1.984375 | 2 |
examples/circuitplayground_light_plotter.py | sommersoft/Adafruit_CircuitPython_CircuitPlayground | 0 | 11678 | <reponame>sommersoft/Adafruit_CircuitPython_CircuitPlayground
"""If you're using Mu, this example will plot the light levels from the light sensor (located next
to the eye) on your Circuit Playground. Try shining a flashlight on your Circuit Playground, or
covering the light sensor to see the plot increase and decrease."""
import time
from adafruit_circuitplayground import cp
while True:
print("Light:", cp.light)
print((cp.light,))
time.sleep(0.1)
| 2.953125 | 3 |
kickeststats/exceptions.py | antimaLinux/kickscarper | 0 | 11679 | <filename>kickeststats/exceptions.py<gh_stars>0
"""Exception utilities."""
class ParsingException(Exception):
pass
class EnvVariableNotSet(Exception):
def __init__(self, varname: str) -> None:
super(EnvVariableNotSet, self).__init__(f"Env variable [{varname}] not set.")
class InvalidLineUp(Exception):
pass
class UnsupportedLineUp(Exception):
def __init__(self, line_up_name: str) -> None:
super(UnsupportedLineUp, self).__init__(
f"Line-up [{line_up_name}] is not supported."
)
class InvalidTeamLineup(Exception):
pass
| 2.40625 | 2 |
helpers/HurstEstimationNumerics.py | Baozhen-Li/SurfaceTopography | 0 | 11680 | #
# Copyright 2018, 2020 <NAME>
# 2019-2020 <NAME>
# 2015-2016 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Tests to understand the difficulties in extracting the Hurst exponent from noisy data
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import PyCo.Tools as Tools
import SurfaceTopography as Surf
def plot_naive(surface, lam_max):
fig = plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
surf = Tools.CharacterisePeriodicSurface(surface)
q = surf.q
C = surf.C
H, alpha = surf.estimate_hurst_naive(lambda_max=lam_max, full_output=True)
print("H = {}, alpha = {}".format(H, alpha))
ax.loglog(q, C, alpha=.1)
mean, err, q_g = surf.grouped_stats(100)
mask = np.isfinite(mean)
mean = mean[mask]
err = err[:, mask]
q_g = q_g[mask]
ax.errorbar(q_g, mean, yerr=err)
ax.set_title("Naive: H={:.2f}, h_rms={:.2e}".format(H, np.sqrt((surface.heights() ** 2).mean())))
a, b = np.polyfit(np.log(q), np.log(C), 1)
ax.plot(q, q**(-2-2*H)*alpha, label="{}, H={:.2f}".format('fit', H))
ax.legend(loc='best')
def plot_grad_C0(surface, H_in, lam_max):
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q[sl]
C = surf.C[sl]
dim = 2
def C0_of_H(H):
return ((q**(-3-2*H)).sum() /
(q**(-5-4*H)/C).sum())
def objective(H, C0):
return ((1 - C0*q**(-2*H-2)/C)**2 /
q**(dim-1)).sum()
C0 = C0_of_H(H_in)
O0 = objective(H_in, C0)
c_s = np.linspace(0, 2*C0, 51)
o_s = np.zeros_like(c_s)
for i, c in enumerate(c_s):
o_s[i] = objective(H_in, c)
fig = plt.figure()
ax=fig.add_subplot(111)
fig.suptitle('grad(C0)')
ax.plot(c_s, o_s, marker= '+')
ax.scatter(C0, O0, marker='x', label = 'root', c='r')
ax.grid(True)
print("C0 = {}, obj0 = {}".format(C0, O0))
return C0
def plot_grad_H(surface, lam_max):
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q[sl]# np.array(surf.q[sl][0], surf.q[sl][-1])
C = surf.C[sl]# np.array(surf.C[sl][0], surf.C[sl][-1])
dim = 2
def C0_of_H(H):
return ((C**2/q**(-5-dim-4*H)).sum() /
(C/q**(-3-dim-2*H)).sum())
def grad_h(H, C0):
return (4*C0/C*np.log(q)*q**(-1-2*H-dim)*(1 - C0*q**(-2-2*H)/C)).sum()
def objective(H, C0):
return ((c/q**(-2*H-2) - C0)**2 /
q**(dim-1)).sum()
def full_obj(H):
C0 = C0_of_H(H)
return ((1 - C0/C*q**(-2*H-2))**2 /
q**(dim-1)).sum()
h_s = np.linspace(.0, 2., 51)
o_s = np.zeros_like(h_s)
g_s = np.zeros_like(h_s)
for i, h in enumerate(h_s):
c = C0_of_H(h)
o_s[i] = objective(h, c)
g_s[i] = grad_h(h, c)
H_opt, obj_opt, err, nfeq = scipy.optimize.fminbound(full_obj, 0, 2, full_output=True)
if err != 0:
raise Exception()
fig = plt.figure()
ax=fig.add_subplot(211)
ax.set_xlim(h_s[0], h_s[-1])
fig.suptitle('grad(H)')
ax.plot(h_s, o_s, marker= '+')
ax.grid(True)
ax.scatter(H_opt, obj_opt, marker='x', label = 'root', c='r')
ax=fig.add_subplot(212)
ax.set_xlim(h_s[0], h_s[-1])
ax.plot(h_s, g_s, marker= '+')
grad_opt = grad_h(H_opt, C0_of_H(H_opt))
ax.scatter(H_opt, grad_opt, marker='x', label = 'root', c='r')
#res = scipy.optimize.fmin
#print("H_out = {}, obj0 = {}".format(C0, O0))
ax.grid(True)
return H_opt, C0_of_H(H_opt)
def compare_to_PyPy(surface, lam_max, H_ref, C0_ref):
fig = plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q
C = surf.C
H, alpha, res = surf.estimate_hurst_alt(lambda_max=lam_max, full_output=True)
print("H = {}, alpha = {}".format(H, alpha))
ax.loglog(q, C, alpha=.1)
mean, err, q_g = surf.grouped_stats(100)
mask = np.isfinite(mean)
mean = mean[mask]
err = err[:, mask]
q_g = q_g[mask]
ax.errorbar(q_g, mean, yerr=err)
ax.set_title("New: H_pypy={:.2f}, H_ref = {:.2f}, h_rms={:.2e}".format(H, H_ref, np.sqrt((surface.heights() ** 2).mean())))
ax.plot(q[sl], q[sl]**(-2-2*H)*alpha, label="{}, H={:.4f}".format('fit', H), lw = 3)
ax.plot(q[sl], q[sl]**(-2-2*H_ref)*C0_ref, label="{}, H={:.4f}".format('ref_fit', H_ref), lw = 3)
ax.legend(loc='best')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.loglog(q[sl], C[sl]/(q[sl]**(-2-2*H_ref)*C0_ref), alpha=.1)
ax.errorbar(q_g, mean/(q_g**(-2-2*H_ref)*C0_ref), yerr=err/(q_g**(-2-2*H_ref)*C0_ref))
def main():
siz = 2000e-9
lam_max = .2*siz
size = (siz, siz)
hurst = .75
h_rms = 3.24e-8
res = 128
nb_grid_pts = (res, res)
seed = 2
surface = Tools.RandomSurfaceGaussian(
nb_grid_pts, size, hurst, h_rms, lambda_max=lam_max, seed=seed).get_surface()
plot_naive(surface, lam_max)
plot_grad_C0(surface, hurst, lam_max)
H, C0 = plot_grad_H(surface, lam_max)
print("H_ref = {}, C0_ref = {}".format(H, C0))
compare_to_PyPy(surface, lam_max, H, C0)
if __name__ == "__main__":
main()
plt.show()
| 1.90625 | 2 |
test/serverless_mock_test.py | zhangyuan/serverless-mock-python | 5 | 11681 | import threading
import requests
import json
import os
from nose.tools import *
from server import Httpd
app_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app")
class TestServerlessMock(object):
def test_ok(self):
ok_(True)
def setUp(self):
self.httpd = Httpd(app_path, 0)
thread = threading.Thread(target=self.httpd.serve, args=())
thread.daemon = True
thread.start()
self.prefix = "http://localhost:%d" % self.httpd.port
def tearDown(self):
self.httpd.shutdown()
def test_return_hello_world(self):
response = requests.get(self.url(""))
eq_("Hello World", response.text)
def test_simple_get(self):
response = requests.get(self.url("/simple_get"))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
body = json.loads(data.get("body"))
eq_("Go Serverless v1.0! Your function executed successfully!", body.get("message"))
def test_simple_get_and_ignore_query_string(self):
response = requests.get(self.url("/simple_get?status=unknown"))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
body = json.loads(data.get("body"))
eq_("Go Serverless v1.0! Your function executed successfully!", body.get("message"))
def test_simple_post(self):
response = requests.post(self.url("/simple_post"))
eq_(200, response.status_code)
data = response.json()
eq_(201, data.get("statusCode"))
def test_post_with_payload(self):
response = requests.post(self.url("/post_with_payload"), data=json.dumps({"id" : 123}))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
eq_({"id" : 123}, data.get("body"))
def test_post_with_payload_and_template(self):
response = requests.post(self.url("/post_with_payload_and_template"), data=json.dumps({"id" : 123}))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
eq_({"body" : {"id" : 123}}, data.get("body"))
def test_post_with_payload_and_template_without_any_function(self):
response = requests.post(self.url("/post_with_payload_and_template_without_any_function"), data=json.dumps({"id" : 123}))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
eq_({"action" : "trigger"}, data.get("body"))
def url(self, path):
return "%s%s" % (self.prefix, path)
| 2.375 | 2 |
setup.py | holoyan/python-data-validation | 3 | 11682 | <reponame>holoyan/python-data-validation
from setuptools import setup, find_packages
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyva',
packages=find_packages(),
version='0.4.1',
license='MIT',
description='Simple and flexible python data validation library',
long_description=long_description,
long_description_content_type='text/markdown',
author='Artak',
author_email='<EMAIL>',
url='https://github.com/holoyan/python-data-validation',
keywords=['data', 'validation', 'validator', 'data validator'],
    install_requires=[  # runtime dependencies
'python-dateutil',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| 1.835938 | 2 |
bus_system/apps/trip/migrations/0007_auto_20210624_1812.py | pygabo/bus_system | 0 | 11683 | # Generated by Django 3.1.12 on 2021-06-24 18:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('trip', '0006_remove_travelmodel_driver'),
]
operations = [
migrations.AddField(
model_name='tripmodel',
name='tickets_sold',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AlterField(
model_name='travelmodel',
name='trip',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='travel_trip_set', related_query_name='travel_trip_set', to='trip.tripmodel'),
),
]
| 1.617188 | 2 |
geoscilabs/dcip/DCWidgetPlate2_5D.py | lheagy/geosci-labs | 1 | 11684 | <filename>geoscilabs/dcip/DCWidgetPlate2_5D.py
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from scipy.constants import epsilon_0
from scipy.ndimage.measurements import center_of_mass
from ipywidgets import IntSlider, FloatSlider, FloatText, ToggleButtons
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import LogFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
from pymatsolver import Pardiso
from discretize import TensorMesh
from SimPEG import maps, SolverLU, utils
from SimPEG.utils import ExtractCoreMesh
from SimPEG.electromagnetics.static import resistivity as DC
from ..base import widgetify
# Mesh, mapping can be globals global
npad = 15
growrate = 2.0
cs = 0.5
hx = [(cs, npad, -growrate), (cs, 200), (cs, npad, growrate)]
hy = [(cs, npad, -growrate), (cs, 100)]
mesh = TensorMesh([hx, hy], "CN")
expmap = maps.ExpMap(mesh)
mapping = expmap
dx = 5
xr = np.arange(-40, 41, dx)
dxr = np.diff(xr)
xmin = -40.0
xmax = 40.0
ymin = -40.0
ymax = 8.0
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
indCC, meshcore = ExtractCoreMesh(xylim, mesh)
indx = (
(mesh.gridFx[:, 0] >= xmin)
& (mesh.gridFx[:, 0] <= xmax)
& (mesh.gridFx[:, 1] >= ymin)
& (mesh.gridFx[:, 1] <= ymax)
)
indy = (
(mesh.gridFy[:, 0] >= xmin)
& (mesh.gridFy[:, 0] <= xmax)
& (mesh.gridFy[:, 1] >= ymin)
& (mesh.gridFy[:, 1] <= ymax)
)
indF = np.concatenate((indx, indy))
_cache = {
"A": None,
"B": None,
"dx": None,
"dz": None,
"xc": None,
"zc": None,
"rotAng": None,
"sigplate": None,
"sighalf": None,
}
def plate_fields(A, B, dx, dz, xc, zc, rotAng, sigplate, sighalf):
re_run = (
_cache["A"] != A
or _cache["B"] != B
or _cache["dx"] != dx
or _cache["dz"] != dz
or _cache["xc"] != xc
or _cache["zc"] != zc
or _cache["rotAng"] != rotAng
or _cache["sigplate"] != sigplate
or _cache["sighalf"] != sighalf
)
if re_run:
# Create halfspace model
mhalf = np.log(sighalf * np.ones([mesh.nC]))
# Create true model with plate
mtrue = createPlateMod(xc, zc, dx, dz, rotAng, sigplate, sighalf)
if B == []:
src = DC.sources.Pole([], np.r_[A, 0.0])
else:
src = DC.sources.Dipole([], np.r_[A, 0.0], np.r_[B, 0.0])
survey = DC.survey.Survey([src])
problem = DC.Simulation2DCellCentered(
mesh, survey=survey, sigmaMap=mapping, solver=Pardiso, bc_type='Dirichlet'
)
problem_prim = DC.Simulation2DCellCentered(
mesh, survey=survey, sigmaMap=mapping, solver=Pardiso, bc_type='Dirichlet'
)
total_field = problem.fields(mtrue)
primary_field = problem_prim.fields(mhalf)
_cache["A"] = A
_cache["B"] = B
_cache["dx"] = dx
_cache["dz"] = dz
_cache["xc"] = xc
_cache["zc"] = zc
_cache["rotAng"] = rotAng
_cache["sigplate"] = sigplate
_cache["sighalf"] = sighalf
_cache["mtrue"] = mtrue
_cache["mhalf"] = mhalf
_cache["src"] = src
_cache["primary_field"] = primary_field
_cache["total_field"] = total_field
else:
mtrue = _cache["mtrue"]
mhalf = _cache["mhalf"]
src = _cache["src"]
primary_field = _cache["primary_field"]
total_field = _cache["total_field"]
return mtrue, mhalf, src, primary_field, total_field
def getPlateCorners(xc, zc, dx, dz, rotAng):
# Form rotation matix
rotMat = np.array(
[
[np.cos(rotAng * (np.pi / 180.0)), -np.sin(rotAng * (np.pi / 180.0))],
[np.sin(rotAng * (np.pi / 180.0)), np.cos(rotAng * (np.pi / 180.0))],
]
)
originCorners = np.array(
[
[-0.5 * dx, 0.5 * dz],
[0.5 * dx, 0.5 * dz],
[-0.5 * dx, -0.5 * dz],
[0.5 * dx, -0.5 * dz],
]
)
rotPlateCorners = np.dot(originCorners, rotMat)
plateCorners = rotPlateCorners + np.hstack(
[np.repeat(xc, 4).reshape([4, 1]), np.repeat(zc, 4).reshape([4, 1])]
)
return plateCorners
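# Quick illustration (no rotation): a 10 m x 5 m plate centred at (0, -10)
# has corners at x = +/-5 and z = -10 +/- 2.5:
#
#   getPlateCorners(xc=0., zc=-10., dx=10., dz=5., rotAng=0.)
#   # -> array([[-5., -7.5], [5., -7.5], [-5., -12.5], [5., -12.5]])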
def createPlateMod(xc, zc, dx, dz, rotAng, sigplate, sighalf):
# use matplotlib paths to find CC inside of polygon
plateCorners = getPlateCorners(xc, zc, dx, dz, rotAng)
verts = [
(plateCorners[0, :]), # left, top
(plateCorners[1, :]), # right, top
(plateCorners[3, :]), # right, bottom
(plateCorners[2, :]), # left, bottom
(plateCorners[0, :]), # left, top (closes polygon)
]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(verts, codes)
CCLocs = mesh.gridCC
insideInd = np.where(path.contains_points(CCLocs))
# Check selected cell centers by plotting
# print insideInd
# fig = plt.figure()
# ax = fig.add_subplot(111)
# patch = patches.PathPatch(path, facecolor='none', lw=2)
# ax.add_patch(patch)
# plt.scatter(CCLocs[insideInd,0],CCLocs[insideInd,1])
# ax.set_xlim(-10,10)
# ax.set_ylim(-20,0)
# plt.axes().set_aspect('equal')
# plt.show()
mtrue = sighalf * np.ones([mesh.nC])
mtrue[insideInd] = sigplate
mtrue = np.log(mtrue)
return mtrue
def get_Surface_Potentials(survey, src, field_obj):
phi = field_obj[src, "phi"]
CCLoc = mesh.gridCC
zsurfaceLoc = np.max(CCLoc[:, 1])
surfaceInd = np.where(CCLoc[:, 1] == zsurfaceLoc)
xSurface = CCLoc[surfaceInd, 0].T
phiSurface = phi[surfaceInd]
phiScale = 0.0
if survey == "Pole-Dipole" or survey == "Pole-Pole":
refInd = utils.closestPoints(mesh, [xmax + 60.0, 0.0], gridLoc="CC")
# refPoint = CCLoc[refInd]
# refSurfaceInd = np.where(xSurface == refPoint[0])
# phiScale = np.median(phiSurface)
phiScale = phi[refInd]
phiSurface = phiSurface - phiScale
return xSurface, phiSurface, phiScale
def sumPlateCharges(xc, zc, dx, dz, rotAng, qSecondary):
# plateCorners = getPlateCorners(xc,zc,dx,dz,rotAng)
chargeRegionCorners = getPlateCorners(xc, zc, dx + 1.0, dz + 1.0, rotAng)
# plateVerts = [
# (plateCorners[0,:]), # left, top
# (plateCorners[1,:]), # right, top
# (plateCorners[3,:]), # right, bottom
# (plateCorners[2,:]), # left, bottom
# (plateCorners[0,:]), # left, top (closes polygon)
# ]
chargeRegionVerts = [
(chargeRegionCorners[0, :]), # left, top
(chargeRegionCorners[1, :]), # right, top
(chargeRegionCorners[3, :]), # right, bottom
(chargeRegionCorners[2, :]), # left, bottom
(chargeRegionCorners[0, :]), # left, top (closes polygon)
]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
# platePath = Path(plateVerts, codes)
chargeRegionPath = Path(chargeRegionVerts, codes)
CCLocs = mesh.gridCC
# plateInsideInd = np.where(platePath.contains_points(CCLocs))
chargeRegionInsideInd = np.where(chargeRegionPath.contains_points(CCLocs))
plateChargeLocs = CCLocs[chargeRegionInsideInd]
plateCharge = qSecondary[chargeRegionInsideInd]
posInd = np.where(plateCharge >= 0)
negInd = np.where(plateCharge < 0)
qPos = utils.mkvc(plateCharge[posInd])
qNeg = utils.mkvc(plateCharge[negInd])
qPosLoc = plateChargeLocs[posInd, :][0]
qNegLoc = plateChargeLocs[negInd, :][0]
# qPosData = np.vstack([qPosLoc[:, 0], qPosLoc[:, 1], qPos]).T
# qNegData = np.vstack([qNegLoc[:, 0], qNegLoc[:, 1], qNeg]).T
if qNeg.shape == (0,) or qPos.shape == (0,):
qNegAvgLoc = np.r_[-10, -10]
qPosAvgLoc = np.r_[+10, -10]
else:
qNegAvgLoc = np.average(qNegLoc, axis=0, weights=qNeg)
qPosAvgLoc = np.average(qPosLoc, axis=0, weights=qPos)
qPosSum = np.sum(qPos)
qNegSum = np.sum(qNeg)
# # Check things by plotting
# fig = plt.figure()
# ax = fig.add_subplot(111)
# platePatch = patches.PathPatch(platePath, facecolor='none', lw=2)
# ax.add_patch(platePatch)
# chargeRegionPatch = patches.PathPatch(chargeRegionPath, facecolor='none', lw=2)
# ax.add_patch(chargeRegionPatch)
# plt.scatter(qNegAvgLoc[0],qNegAvgLoc[1],color='b')
# plt.scatter(qPosAvgLoc[0],qPosAvgLoc[1],color='r')
# ax.set_xlim(-15,5)
# ax.set_ylim(-25,-5)
# plt.axes().set_aspect('equal')
# plt.show()
return qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc
# The only thing we need to make it work is a 2.5D field object in SimPEG
def getSensitivity(survey, A, B, M, N, model):
src_type, rx_type = survey.split("-")
if rx_type == "Pole":
rx = DC.receivers.Pole(np.r_[M, 0.0])
else:
rx = DC.receivers.Dipole(np.r_[M, 0.0], np.r_[N, 0.0])
if src_type == "Pole":
src = DC.sources.Pole([rx], np.r_[A, 0.0])
else:
src = DC.sources.Dipole([rx], np.r_[A, 0.0], np.r_[B, 0.0])
# Model mappings
expmap = maps.ExpMap(mesh)
mapping = expmap
survey = DC.Survey([src])
sim = DC.Simulation3DCellCentered(
mesh, sigmaMap=mapping, solver=Pardiso, survey=survey
)
J = sim.getJ(model)[0]
return J
def calculateRhoA(survey, VM, VN, A, B, M, N):
eps = 1e-9 # to stabilize division
if survey == "Dipole-Dipole":
G = 1.0 / (
1.0 / (np.abs(A - M) + eps)
- 1.0 / (np.abs(M - B) + eps)
- 1.0 / (np.abs(N - A) + eps)
+ 1.0 / (np.abs(N - B) + eps)
)
rho_a = (VM - VN) * 2.0 * np.pi * G
elif survey == "Pole-Dipole":
G = 1.0 / (1.0 / (np.abs(A - M) + eps) - 1.0 / (np.abs(N - A) + eps))
rho_a = (VM - VN) * 2.0 * np.pi * G
elif survey == "Dipole-Pole":
G = 1.0 / (1.0 / (np.abs(A - M) + eps) - 1.0 / (np.abs(M - B) + eps))
rho_a = (VM) * 2.0 * np.pi * G
elif survey == "Pole-Pole":
G = 1.0 / (1.0 / (np.abs(A - M) + eps))
rho_a = (VM) * 2.0 * np.pi * G
return rho_a
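# Sanity-check sketch (homogeneous half-space, unit current, illustrative only):
# the analytic pole potential is V = rho / (2 * pi * r), so a "Pole-Pole"
# measurement with r = |A - M| should recover rho itself:
#
#   rho, A, M = 100.0, 0.0, 10.0
#   VM = rho / (2 * np.pi * abs(A - M))
#   calculateRhoA("Pole-Pole", VM, 0.0, A, [], M, [])   # ~ 100.0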
def PLOT(
survey, A, B, M, N, dx, dz, xc, zc, rotAng, rhohalf, rhoplate, Field, Type, Scale
):
labelsize = 16.0
ticksize = 16.0
sigplate = 1.0 / rhoplate
sighalf = 1.0 / rhohalf
if survey == "Pole-Dipole" or survey == "Pole-Pole":
B = []
mtrue, mhalf, src, primary_field, total_field = plate_fields(
A, B, dx, dz, xc, zc, rotAng, sigplate, sighalf
)
fig, ax = plt.subplots(2, 1, figsize=(9 * 1.5, 9 * 1.8), sharex=True)
fig.subplots_adjust(right=0.8, wspace=0.05, hspace=0.05)
xSurface, phiTotalSurface, phiScaleTotal = get_Surface_Potentials(
survey, src, total_field
)
xSurface, phiPrimSurface, phiScalePrim = get_Surface_Potentials(
survey, src, primary_field
)
ylim = np.r_[-1.0, 1.0] * np.max(np.abs(phiTotalSurface))
xlim = np.array([-40, 40])
if survey == "Dipole-Pole" or survey == "Pole-Pole":
MInd = np.where(xSurface == M)
N = []
VM = phiTotalSurface[MInd[0]]
VN = 0.0
VMprim = phiPrimSurface[MInd[0]]
VNprim = 0.0
else:
MInd = np.where(xSurface == M)
NInd = np.where(xSurface == N)
VM = phiTotalSurface[MInd[0]]
VN = phiTotalSurface[NInd[0]]
VMprim = phiPrimSurface[MInd[0]]
VNprim = phiPrimSurface[NInd[0]]
# 2D geometric factor
G2D = rhohalf / (calculateRhoA(survey, VMprim, VNprim, A, B, M, N))
# Subplot 1: Full set of surface potentials
ax[0].plot(xSurface, phiTotalSurface, color=[0.1, 0.5, 0.1], linewidth=2)
ax[0].plot(xSurface, phiPrimSurface, linestyle="dashed", linewidth=0.5, color="k")
ax[0].grid(
which="both", linestyle="-", linewidth=0.5, color=[0.2, 0.2, 0.2], alpha=0.5
)
if survey == "Pole-Dipole" or survey == "Pole-Pole":
ax[0].plot(A, 0, "+", markersize=12, markeredgewidth=3, color=[1.0, 0.0, 0])
else:
ax[0].plot(A, 0, "+", markersize=12, markeredgewidth=3, color=[1.0, 0.0, 0])
ax[0].plot(B, 0, "_", markersize=12, markeredgewidth=3, color=[0.0, 0.0, 1.0])
ax[0].set_ylabel("Potential, (V)", fontsize=labelsize)
ax[0].set_xlabel("x (m)", fontsize=labelsize)
ax[0].set_xlim(xlim)
ax[0].set_ylim(ylim)
if survey == "Dipole-Pole" or survey == "Pole-Pole":
ax[0].plot(M, VM, "o", color="k")
xytextM = (M + 0.5, max(min(VM, ylim.max()), ylim.min()) + 10)
ax[0].annotate("%2.1e" % (VM), xy=xytextM, xytext=xytextM, fontsize=labelsize)
else:
ax[0].plot(M, VM, "o", color="k")
ax[0].plot(N, VN, "o", color="k")
xytextM = (M + 0.5, max(min(VM, ylim.max()), ylim.min()) + 10)
xytextN = (N + 0.5, max(min(VN, ylim.max()), ylim.min()) + 10)
ax[0].annotate("%2.1e" % (VM), xy=xytextM, xytext=xytextM, fontsize=labelsize)
ax[0].annotate("%2.1e" % (VN), xy=xytextN, xytext=xytextN, fontsize=labelsize)
ax[0].tick_params(axis="both", which="major", labelsize=ticksize)
props = dict(boxstyle="round", facecolor="grey", alpha=0.4)
ax[0].text(
xlim.max() + 1,
ylim.max() - 0.1 * ylim.max(),
"$\\rho_a$ = %2.2f" % (G2D * calculateRhoA(survey, VM, VN, A, B, M, N)),
verticalalignment="bottom",
bbox=props,
fontsize=labelsize,
)
ax[0].legend(["Model Potential", "Half-Space Potential"], loc=3, fontsize=labelsize)
# # Subplot 2: Surface potentials with gaps around current electrodes
# # Select points more than 5m from Tx electrodes of plotting
# xSurface_AInd = np.where(np.abs(xSurface - A) >= 5.)[0]
# xSurface_BInd = np.where(np.abs(xSurface - B) >= 5.)[0]
# xSurfaceTxGapInd = list(set(xSurface_AInd).intersection(xSurface_BInd))
# xSurface_TxGap = xSurface[xSurfaceTxGapInd]
# phiTotalSurface_TxGap = phiTotalSurface[xSurfaceTxGapInd]
# phiPrimSurface_TxGap = phiPrimSurface[xSurfaceTxGapInd]
# ylim = np.r_[-1., 1.]*(np.max(np.abs(phiTotalSurface_TxGap)) - 0.05*np.max(np.abs(phiTotalSurface_TxGap)))
# ax[1].plot(xSurface_TxGap,phiTotalSurface_TxGap ,color=[0.1,0.5,0.1],linewidth=2)
# ax[1].plot(xSurface_TxGap,phiPrimSurface_TxGap ,linestyle='dashed',linewidth=0.5,color='k')
# ax[1].grid(which='both',linestyle='-',linewidth=0.5,color=[0.2,0.2,0.2],alpha=0.5)
# ax[1].plot(A,0,'+',markersize = 12, markeredgewidth = 3, color=[1.,0.,0])
# ax[1].plot(B,0,'_',markersize = 12, markeredgewidth = 3, color=[0.,0.,1.])
# ax[1].set_ylabel('Potential, (V)',fontsize = labelsize)
# ax[1].set_xlabel('x (m)',fontsize = labelsize)
# ax[1].set_xlim(xlim)
# ax[1].set_ylim(ylim)
# ax[1].plot(M,VM,'o',color='k')
# ax[1].plot(N,VN,'o',color='k')
# ax[1].annotate('%2.1e'%(VM), xy=xytextM, xytext=xytextM,fontsize = labelsize)
# ax[1].annotate('%2.1e'%(VN), xy=xytextN, xytext=xytextN,fontsize = labelsize)
# ax[1].tick_params(axis='both', which='major', labelsize=ticksize)
# props = dict(boxstyle='round', facecolor='grey', alpha=0.4)
# ax[1].text(xlim.max()+1,ylim.max()-0.1*ylim.max(),'$\\rho_a$ = %2.2f'%(G2D*rho_a(VM,VN,A,B,M,N)),
# verticalalignment='bottom', bbox=props, fontsize = labelsize)
# ax[1].legend(['Model Potential','Half-Space Potential'], loc=3, fontsize = labelsize)
if Field == "Model":
label = "Resisitivity (ohm-m)"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
formatter = "%.1e"
pcolorOpts = {"cmap": "jet_r"}
if Scale == "Log":
pcolorOpts = {"norm": matplotlib.colors.LogNorm(), "cmap": "jet_r"}
if Type == "Total":
u = 1.0 / (mapping * mtrue)
elif Type == "Primary":
u = 1.0 / (mapping * mhalf)
elif Type == "Secondary":
u = 1.0 / (mapping * mtrue) - 1.0 / (mapping * mhalf)
if Scale == "Log":
linthresh = 10.0
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(
linthresh=linthresh, linscale=0.2
),
"cmap": "jet_r",
}
elif Field == "Potential":
label = "Potential (V)"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
formatter = "%.1e"
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
linthresh = 10.0
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2),
"cmap": "viridis",
}
if Type == "Total":
# formatter = LogFormatter(10, labelOnlyBase=False)
# pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=10, linscale=0.1)}
u = total_field[src, "phi"] - phiScaleTotal
elif Type == "Primary":
# formatter = LogFormatter(10, labelOnlyBase=False)
# pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=10, linscale=0.1)}
u = primary_field[src, "phi"] - phiScalePrim
elif Type == "Secondary":
# formatter = None
# pcolorOpts = {"cmap":"viridis"}
uTotal = total_field[src, "phi"] - phiScaleTotal
uPrim = primary_field[src, "phi"] - phiScalePrim
u = uTotal - uPrim
elif Field == "E":
label = "Electric Field (V/m)"
xtype = "F"
view = "vec"
streamOpts = {"color": "w"}
ind = indF
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
pcolorOpts = {"norm": matplotlib.colors.LogNorm(), "cmap": "viridis"}
formatter = "%.1e"
if Type == "Total":
u = total_field[src, "e"]
elif Type == "Primary":
u = primary_field[src, "e"]
elif Type == "Secondary":
uTotal = total_field[src, "e"]
uPrim = primary_field[src, "e"]
u = uTotal - uPrim
elif Field == "J":
label = "Current density ($A/m^2$)"
xtype = "F"
view = "vec"
streamOpts = {"color": "w"}
ind = indF
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
pcolorOpts = {"norm": matplotlib.colors.LogNorm(), "cmap": "viridis"}
formatter = "%.1e"
if Type == "Total":
u = total_field[src, "j"]
elif Type == "Primary":
u = primary_field[src, "j"]
elif Type == "Secondary":
uTotal = total_field[src, "j"]
uPrim = primary_field[src, "j"]
u = uTotal - uPrim
elif Field == "Charge":
label = "Charge Density ($C/m^2$)"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "RdBu_r"}
if Scale == "Log":
linthresh = 1e-12
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2),
"cmap": "RdBu_r",
}
formatter = "%.1e"
if Type == "Total":
u = total_field[src, "charge"]
elif Type == "Primary":
u = primary_field[src, "charge"]
elif Type == "Secondary":
uTotal = total_field[src, "charge"]
uPrim = primary_field[src, "charge"]
u = uTotal - uPrim
elif Field == "Sensitivity":
label = "Sensitivity"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
# formatter = None
# pcolorOpts = {"cmap":"viridis"}
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
linthresh = 1e-4
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2),
"cmap": "viridis",
}
# formatter = formatter = "$10^{%.1f}$"
formatter = "%.1e"
if Type == "Total":
u = getSensitivity(survey, A, B, M, N, mtrue)
elif Type == "Primary":
u = getSensitivity(survey, A, B, M, N, mhalf)
elif Type == "Secondary":
uTotal = getSensitivity(survey, A, B, M, N, mtrue)
uPrim = getSensitivity(survey, A, B, M, N, mhalf)
u = uTotal - uPrim
# u = np.log10(abs(u))
if Scale == "Log":
eps = 1e-16
else:
eps = 0.0
dat = meshcore.plotImage(
u[ind] + eps,
v_type=xtype,
ax=ax[1],
grid=False,
view=view,
stream_opts=streamOpts,
pcolor_opts=pcolorOpts,
) # gridOpts={'color':'k', 'alpha':0.5}
# Get plate corners
plateCorners = getPlateCorners(xc, zc, dx, dz, rotAng)
if rhoplate != rhohalf:
# plot top of plate outline
ax[1].plot(
plateCorners[[0, 1], 0],
plateCorners[[0, 1], 1],
linestyle="dashed",
color="k",
)
# plot east side of plate outline
ax[1].plot(
plateCorners[[1, 3], 0],
plateCorners[[1, 3], 1],
linestyle="dashed",
color="k",
)
# plot bottom of plate outline
ax[1].plot(
plateCorners[[2, 3], 0],
plateCorners[[2, 3], 1],
linestyle="dashed",
color="k",
)
# plot west side of plate outline
ax[1].plot(
plateCorners[[0, 2], 0],
plateCorners[[0, 2], 1],
linestyle="dashed",
color="k",
)
if (Field == "Charge") and (Type != "Primary") and (Type != "Total"):
qTotal = total_field[src, "charge"]
qPrim = primary_field[src, "charge"]
qSecondary = qTotal - qPrim
qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc = sumPlateCharges(
xc, zc, dx, dz, rotAng, qSecondary
)
ax[1].plot(
qPosAvgLoc[0],
qPosAvgLoc[1],
marker=".",
color="black",
markersize=labelsize,
)
ax[1].plot(
qNegAvgLoc[0],
qNegAvgLoc[1],
marker=".",
color="black",
markersize=labelsize,
)
if qPosAvgLoc[0] > qNegAvgLoc[0]:
xytext_qPos = (qPosAvgLoc[0] + 1.0, qPosAvgLoc[1] - 1)
xytext_qNeg = (qNegAvgLoc[0] - 15.0, qNegAvgLoc[1] - 1)
else:
xytext_qPos = (qPosAvgLoc[0] - 15.0, qPosAvgLoc[1] - 1)
xytext_qNeg = (qNegAvgLoc[0] + 1.0, qNegAvgLoc[1] - 1)
ax[1].annotate(
"+Q = %2.1e" % (qPosSum),
xy=xytext_qPos,
xytext=xytext_qPos,
fontsize=labelsize,
)
ax[1].annotate(
"-Q = %2.1e" % (qNegSum),
xy=xytext_qNeg,
xytext=xytext_qNeg,
fontsize=labelsize,
)
ax[1].set_xlabel("x (m)", fontsize=labelsize)
ax[1].set_ylabel("z (m)", fontsize=labelsize)
if survey == "Dipole-Dipole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(B, 1.0, marker="v", color="blue", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
ax[1].plot(N, 1.0, marker="^", color="green", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextB1 = (B - 0.5, 3)
xytextM1 = (M - 0.5, 3)
xytextN1 = (N - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("B", xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].annotate("N", xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
elif survey == "Pole-Dipole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
ax[1].plot(N, 1.0, marker="^", color="green", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextM1 = (M - 0.5, 3)
xytextN1 = (N - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].annotate("N", xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
elif survey == "Dipole-Pole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(B, 1.0, marker="v", color="blue", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextB1 = (B - 0.5, 3)
xytextM1 = (M - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("B", xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
elif survey == "Pole-Pole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextM1 = (M - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].tick_params(axis="both", which="major", labelsize=ticksize)
cbar_ax = fig.add_axes([0.8, 0.05, 0.08, 0.5])
cbar_ax.axis("off")
vmin, vmax = dat[0].get_clim()
if Scale == "Log":
if (Field == "E") or (Field == "J"):
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.logspace(np.log10(vmin), np.log10(vmax), 5),
)
elif Field == "Model":
if Type == "Secondary":
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.r_[np.minimum(0.0, vmin), np.maximum(0.0, vmax)],
)
else:
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.logspace(np.log10(vmin), np.log10(vmax), 5),
)
else:
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.r_[
-1.0
* np.logspace(np.log10(-vmin - eps), np.log10(linthresh), 3)[:-1],
0.0,
np.logspace(np.log10(linthresh), np.log10(vmax), 3)[1:],
],
)
else:
if (Field == "Model") and (Type == "Secondary"):
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.r_[np.minimum(0.0, vmin), np.maximum(0.0, vmax)],
)
else:
cb = plt.colorbar(
dat[0], ax=cbar_ax, format=formatter, ticks=np.linspace(vmin, vmax, 5)
)
cb.ax.tick_params(labelsize=ticksize)
cb.set_label(label, fontsize=labelsize)
ax[1].set_xlim([-40.0, 40.0])
ax[1].set_ylim([-40.0, 8.0])
# ax[1].set_aspect('equal')
plt.show()
# return fig, ax
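# A note on the Log-scale colorbar above (descriptive sketch, assuming
# vmin < 0 < vmax so both log10() calls are defined): for the SymLogNorm case
# the tick locations are assembled roughly as
#   np.r_[-1.0 * np.logspace(np.log10(-vmin - eps), np.log10(linthresh), 3)[:-1],
#         0.0,
#         np.logspace(np.log10(linthresh), np.log10(vmax), 3)[1:]]
# i.e. a few logarithmically spaced negative ticks, a single 0.0 inside the
# linear region, and a few positive ticks, with the duplicate ticks at
# +/- linthresh sliced away.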
def plate_app():
app = widgetify(
PLOT,
survey=ToggleButtons(
options=["Dipole-Dipole", "Dipole-Pole", "Pole-Dipole", "Pole-Pole"],
value="Dipole-Dipole",
),
dx=FloatSlider(
min=1.0, max=1000.0, step=1.0, value=10.0, continuous_update=False
),
dz=FloatSlider(
min=1.0, max=200.0, step=1.0, value=10.0, continuous_update=False
),
xc=FloatSlider(
min=-30.0, max=30.0, step=1.0, value=0.0, continuous_update=False
),
zc=FloatSlider(
min=-30.0, max=0.0, step=1.0, value=-10.0, continuous_update=False
),
rotAng=FloatSlider(
min=-90.0,
max=90.0,
step=1.0,
value=0.0,
continuous_update=False,
description="$\\theta$",
),
rhoplate=FloatText(
min=1e-8,
max=1e8,
value=500.0,
continuous_update=False,
description="$\\rho_2$",
),
rhohalf=FloatText(
min=1e-8,
max=1e8,
value=500.0,
continuous_update=False,
description="$\\rho_1$",
),
A=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=-30.25, continuous_update=False
),
B=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=30.25, continuous_update=False
),
M=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=-10.25, continuous_update=False
),
N=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=10.25, continuous_update=False
),
Field=ToggleButtons(
options=["Model", "Potential", "E", "J", "Charge", "Sensitivity"],
value="Model",
),
Type=ToggleButtons(options=["Total", "Primary", "Secondary"], value="Total"),
Scale=ToggleButtons(options=["Linear", "Log"], value="Linear"),
)
return app
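# Usage sketch (an assumption, not part of this module: it presumes the app is
# built inside a Jupyter notebook and that widgetify() returns an
# ipywidgets-compatible widget that can simply be displayed):
#
#   from IPython.display import display
#   app = plate_app()
#   display(app)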
| 1.71875 | 2 |
retratodefases/phase_diagrams/__init__.py | Loracio/retrato-de-fases | 3 | 11685 | <gh_stars>1-10
try:
__PHASE_DIAGRAMS_IMPORTED__
except NameError:
__PHASE_DIAGRAMS_IMPORTED__= False
if not __PHASE_DIAGRAMS_IMPORTED__:
from .phase_portrait import PhasePortrait
from .funcion1D import Funcion1D
from .nullclines import Nullcline2D
__PHASE_DIAGRAMS_IMPORTED__ = True | 1.257813 | 1 |
P13pt/spectrumfitter/spectrumfitter.py | green-mercury/P13pt | 3 | 11686 | #!/usr/bin/python
import sys
import os
import shutil
from glob import glob
from PyQt5.QtCore import (Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg,
QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl)
from PyQt5.QtGui import QIcon, QDesktopServices
from PyQt5.QtWidgets import (QApplication, QMessageBox, QMainWindow, QDockWidget, QAction,
QFileDialog, QProgressDialog)
from P13pt.spectrumfitter.dataloader import DataLoader
from P13pt.spectrumfitter.navigator import Navigator
from P13pt.spectrumfitter.fitter import Fitter
from P13pt.spectrumfitter.plotter import Plotter
from P13pt.spectrumfitter.load_fitresults import load_fitresults
from P13pt.params_from_filename import params_from_filename
class MainWindow(QMainWindow):
session_file = None
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.settings = QSettings("Mercury", "SpectrumFitter")
# set up data loading area
self.dock_loader = QDockWidget('Data loading', self)
self.dock_loader.setObjectName('loader')
self.loader = DataLoader()
self.dock_loader.setWidget(self.loader)
# set up data navigator
self.dock_navigator = QDockWidget('Data navigation', self)
self.dock_navigator.setObjectName('navigator')
self.navigator = Navigator()
self.dock_navigator.setWidget(self.navigator)
# set up plotter
self.plotter = Plotter()
self.setCentralWidget(self.plotter)
# set up fitter
self.dock_fitter = QDockWidget('Fitting', self)
self.dock_fitter.setObjectName('fitter')
self.fitter = Fitter()
self.dock_fitter.setWidget(self.fitter)
# set up the dock positions
self.addDockWidget(Qt.TopDockWidgetArea, self.dock_loader)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dock_navigator)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock_fitter)
# set up menus
fileMenu = self.menuBar().addMenu('File')
self.act_new_session = QAction('New session', self)
self.act_load_session = QAction('Load session', self)
self.act_save_session = QAction('Save session', self)
self.act_save_session_as = QAction('Save session as...', self)
for a in [self.act_new_session, self.act_load_session, self.act_save_session, self.act_save_session_as]:
fileMenu.addAction(a)
self.recent_menu = fileMenu.addMenu('Recent sessions')
self.update_recent_list()
fileMenu.addSeparator()
self.act_save_image = QAction('Save spectrum as image', self)
self.act_save_allimages = QAction('Save all spectra as images', self)
for a in [self.act_save_image, self.act_save_allimages]:
fileMenu.addAction(a)
viewMenu = self.menuBar().addMenu('View')
for w in [self.dock_loader, self.dock_navigator, self.dock_fitter]:
viewMenu.addAction(w.toggleViewAction())
self.act_restore_default_view = QAction('Restore default', self)
viewMenu.addAction(self.act_restore_default_view)
self.act_toggle_display_style = QAction('Toggle display style', self)
self.act_toggle_display_style.setShortcut(Qt.Key_F8)
viewMenu.addAction(self.act_toggle_display_style)
toolsMenu = self.menuBar().addMenu('Tools')
self.act_install_builtin_models = QAction('Install built-in models', self)
toolsMenu.addAction(self.act_install_builtin_models)
self.act_open_model_folder = QAction('Open model folder', self)
toolsMenu.addAction(self.act_open_model_folder)
# make connections
self.loader.dataset_changed.connect(self.dataset_changed)
self.loader.new_file_in_dataset.connect(self.navigator.new_file_in_dataset)
self.loader.deembedding_changed.connect(self.deembedding_changed)
self.navigator.selection_changed.connect(self.selection_changed)
self.fitter.fit_changed.connect(lambda: self.plotter.plot_fit(self.fitter.model))
self.fitter.fitted_param_changed.connect(self.plotter.fitted_param_changed)
self.fitter.btn_fitall.clicked.connect(self.fit_all)
self.act_new_session.triggered.connect(self.new_session)
self.act_load_session.triggered.connect(self.load_session)
self.act_save_session.triggered.connect(self.save_session)
self.act_save_session_as.triggered.connect(self.save_session_as)
self.act_save_image.triggered.connect(self.save_image)
self.act_save_allimages.triggered.connect(self.save_all_images)
self.act_restore_default_view.triggered.connect(lambda: self.restoreState(self.default_state))
self.act_toggle_display_style.triggered.connect(self.toggle_display_style)
self.act_install_builtin_models.triggered.connect(self.install_builtin_models)
self.act_open_model_folder.triggered.connect(self.open_model_folder)
# set up fitted parameter (this has to be done after making connections, so that fitter and plotter sync)
self.fitter.fitted_param = '-Y12' # default value
# create new session
self.new_session()
# show window
self.show()
self.default_state = self.saveState()
# restore layout from config (this has to be done AFTER self.show())
if self.settings.contains('geometry'):
self.restoreGeometry(self.settings.value("geometry"))
if self.settings.contains('windowState'):
self.restoreState(self.settings.value("windowState"))
def closeEvent(self, event):
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("windowState", self.saveState())
super(MainWindow, self).closeEvent(event)
def dataset_changed(self):
self.fitter.empty_cache()
self.navigator.update_file_list(self.loader.dut_files)
for a in [self.act_save_session, self.act_save_session_as, self.act_save_image, self.act_save_allimages]:
a.setEnabled(True)
def toggle_display_style(self):
if self.plotter.display_style == 'MP':
self.plotter.display_style = 'RI'
else:
self.plotter.display_style = 'MP'
self.deembedding_changed() # TODO: rename and/or remove redundancies, c.f. deembedding_changed()
def deembedding_changed(self):
# TODO: reduce redundancy with selection_changed()
i = self.navigator.file_list.currentRow()
spectrum = self.loader.get_spectrum(i)
if spectrum is not None:
#TODO: show parameters on plot
self.plotter.plot(spectrum, {})
else:
self.plotter.clear()
self.fitter.update_network(spectrum, self.loader.dut_files[i])
def selection_changed(self, i):
if i < 0: # when file_list is cleared:
return
QApplication.setOverrideCursor(Qt.WaitCursor)
# TODO: the argument here should be a filename, not the index
spectrum = self.loader.get_spectrum(i)
if spectrum is not None:
self.plotter.plot(spectrum, params_from_filename(self.loader.dut_files[i]))
else:
self.plotter.clear()
self.fitter.update_network(spectrum, self.loader.dut_files[i])
QApplication.restoreOverrideCursor()
def new_session(self):
self.session_file = None
self.setWindowTitle('Spectrum Fitter - New session')
self.fitter.unload_model()
self.loader.clear()
self.navigator.clear()
self.plotter.clear()
for a in [self.act_save_session, self.act_save_session_as, self.act_save_image, self.act_save_allimages]:
a.setEnabled(False)
@pyqtSlot()
def save_session_as(self, res_file=None):
if not res_file:
res_file, filter = QFileDialog.getSaveFileName(self, 'Fit results file', filter='*.txt')
if not res_file:
return
res_folder = os.path.dirname(res_file)
try:
with open(res_file, 'w') as f:
# write the header
f.write('# fitting results generated by P13pt spectrum fitter\n')
if len(self.loader.dut_files) == 1:
f.write('# dut: ' +
os.path.join(
os.path.relpath(self.loader.dut_folder, res_folder),
self.loader.dut_files[0]
).replace('\\', '/') + '\n')
else:
f.write('# dut: ' + os.path.relpath(self.loader.dut_folder, res_folder).replace('\\', '/') + '\n')
if self.loader.thru and self.loader.thru_toggle_status:
f.write('# thru: ' + os.path.relpath(self.loader.thru_file, res_folder).replace('\\', '/') + '\n')
if self.loader.dummy and self.loader.dummy_toggle_status:
f.write('# dummy: ' + os.path.relpath(self.loader.dummy_file, res_folder).replace('\\', '/') + '\n')
f.write('# fitted_param: ' + self.plotter.fitted_param + '\n')
try:
ra = float(self.loader.txt_ra.text())
except:
ra = 0.
if not ra == 0:
f.write('# ra: ' + str(ra) + '\n')
if self.fitter.model:
f.write('# model: ' + os.path.basename(self.fitter.model_file).replace('\\', '/') + '\n')
f.write('# model_func: ' + self.fitter.cmb_modelfunc.currentText() + '\n')
# TODO: this all could clearly be done in a more elegant way
if self.fitter.cmb_fitmethod.currentText() != 'No fit methods found':
f.write('# fit_method: ' + self.fitter.cmb_fitmethod.currentText() + '\n')
# determine columns
f.write('# filename\t')
for p in params_from_filename(self.loader.dut_files[0]):
f.write(p + '\t')
f.write('\t'.join([p for p in self.fitter.model.params]))
f.write('\n')
# write data
filelist = sorted([filename for filename in self.fitter.model_params])
for filename in filelist:
f.write(filename + '\t')
# TODO: what if some filenames do not contain all parameters? should catch exceptions
for p in params_from_filename(self.loader.dut_files[0]):
f.write(str(params_from_filename(filename)[p]) + '\t')
f.write('\t'.join([str(self.fitter.model_params[filename][p]) for p in self.fitter.model.params]))
f.write('\n')
except EnvironmentError as e:
QMessageBox.critical(self, 'Error', 'Could not save session: '+str(e))
return
self.update_recent_list(res_file)
self.setWindowTitle('Spectrum Fitter - '+res_file)
self.session_file = res_file
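    # Illustrative sketch of a session file produced by save_session_as()
    # (hypothetical names and values -- only the header keywords and the
    # tab-separated layout are taken from the code above; the thru/dummy/ra/
    # model lines are optional and only written when set):
    #
    #   # fitting results generated by P13pt spectrum fitter
    #   # dut: ../data/dut
    #   # thru: ../data/thru.txt
    #   # fitted_param: -Y12
    #   # ra: 25.0
    #   # model: some_model.py
    #   # model_func: some_func
    #   # fit_method: some_fit_method
    #   # filename  Vg  param1  param2      (columns are tab-separated)
    #   spectrum_Vg=1.0.txt  1.0  3.1e-15  42.0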
def save_session(self):
self.save_session_as(self.session_file)
@pyqtSlot()
def load_session(self, res_file=None):
if not res_file:
res_file, filter = QFileDialog.getOpenFileName(self, 'Fit results file', filter='*.txt')
if not res_file:
return
res_folder = os.path.dirname(res_file)
self.new_session()
# read the data
try:
data, dut, thru, dummy, ra, fitter_info = load_fitresults(res_file, readfilenameparams=False, extrainfo=True)
except IOError as e:
QMessageBox.warning(self, 'Error', 'Could not load data: '+str(e))
return
# using os.path.realpath to get rid of relative path remainders ("..")
self.loader.load_dataset(dut=os.path.realpath(os.path.join(res_folder, dut)) if dut else None,
thru=os.path.realpath(os.path.join(res_folder, thru)) if thru else None,
dummy=os.path.realpath(os.path.join(res_folder, dummy)) if dummy else None,
ra=ra if ra else None)
# if a fitted_param was provided in the session file, set it up
if 'fitted_param' in fitter_info:
self.fitter.fitted_param = fitter_info['fitted_param']
# if a model was provided in the session file, load this model and the provided data
if 'model' in fitter_info:
self.fitter.load_model(filename=fitter_info['model'],
info=fitter_info,
data=data if data else None)
# update the fitter with the first spectrum in the list
self.fitter.update_network(self.loader.get_spectrum(0), self.loader.dut_files[0])
self.update_recent_list(res_file)
self.setWindowTitle('Spectrum Fitter - '+res_file)
self.session_file = res_file
#TODO: this is not really in the right place
@pyqtSlot()
def fit_all(self):
totalnum = len(self.loader.dut_files)
progressdialog = QProgressDialog('Fitting all spectra...', 'Cancel', 0, totalnum-1, self)
progressdialog.setWindowTitle('Progress')
progressdialog.setModal(True)
progressdialog.setAutoClose(True)
progressdialog.show()
for i in range(totalnum):
QApplication.processEvents()
if progressdialog.wasCanceled():
break
self.navigator.file_list.setCurrentRow(i)
self.fitter.fit_model()
progressdialog.setValue(i)
def save_image(self):
basename, ext = os.path.splitext(self.loader.dut_files[self.navigator.file_list.currentRow()])
filename, filter = QFileDialog.getSaveFileName(self, 'Choose file',
os.path.join(self.loader.dut_folder, basename+'.png'),
filter='*.png;;*.jpg;;*.eps')
if filename:
self.plotter.save_fig(filename)
def save_all_images(self):
foldername = QFileDialog.getExistingDirectory(self, 'Choose folder',
self.loader.dut_folder)
totalnum = len(self.loader.dut_files)
progressdialog = QProgressDialog('Saving all images...', 'Cancel', 0, totalnum - 1, self)
progressdialog.setWindowTitle('Progress')
progressdialog.setModal(True)
progressdialog.setAutoClose(True)
progressdialog.show()
for i in range(totalnum):
QApplication.processEvents()
if progressdialog.wasCanceled():
break
self.navigator.file_list.setCurrentRow(i)
basename, ext = os.path.splitext(self.loader.dut_files[self.navigator.file_list.currentRow()])
self.plotter.save_fig(os.path.join(foldername, basename+'.png'))
progressdialog.setValue(i)
def load_recent(self):
action = self.sender()
self.load_session(action.text())
def update_recent_list(self, filename=None):
recentlist = list(self.settings.value('recentSessions')) if self.settings.contains('recentSessions') \
else []
if filename:
if filename in recentlist:
recentlist.remove(filename)
recentlist.insert(0, filename)
recentlist = recentlist[0:5]
self.settings.setValue('recentSessions', recentlist)
self.recent_menu.clear()
for r in recentlist:
a = QAction(r, self)
self.recent_menu.addAction(a)
a.triggered.connect(self.load_recent)
def install_builtin_models(self):
builtin_folder = os.path.join(os.path.dirname(__file__), 'models')
for filename in sorted(glob(os.path.join(builtin_folder, '*.py'))):
# check if the file already exists in the models folder
if os.path.exists(os.path.join(self.fitter.models_dir, os.path.basename(filename))):
answer = QMessageBox.question(self, 'File already exists', 'The file: '+os.path.basename(filename)+
'already exists in your models folder. Would you like to replace it?')
if answer != QMessageBox.Yes:
continue
# if file does not exist or user does not mind replacing it, let's copy:
shutil.copyfile(filename, os.path.join(self.fitter.models_dir, os.path.basename(filename)))
def open_model_folder(self):
QDesktopServices.openUrl(QUrl.fromLocalFile(self.fitter.models_dir))
def msghandler(type, context, message):
if type == QtInfoMsg:
QMessageBox.information(None, 'Info', message)
elif type == QtDebugMsg:
QMessageBox.information(None, 'Debug', message)
elif type == QtCriticalMsg:
QMessageBox.critical(None, 'Critical', message)
elif type == QtWarningMsg:
QMessageBox.warning(None, 'Warning', message)
elif type == QtFatalMsg:
QMessageBox.critical(None, 'Fatal error', message)
def main():
qInstallMessageHandler(msghandler)
# CD into directory where this script is saved
d = os.path.dirname(__file__)
if d != '': os.chdir(d)
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('audacity.png'))
mainwindow = MainWindow()
# Start the main loop.
ret = app.exec_()
sys.exit(ret)
if __name__ == '__main__':
main()
| 1.84375 | 2 |
subversion/tests/cmdline/lock_tests.py | centic9/subversion-ppa | 0 | 11687 | <gh_stars>0
#!/usr/bin/env python
# encoding=utf-8
#
# lock_tests.py: testing lock behavior
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import re, os, stat, logging
logger = logging.getLogger()
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Helpers
def check_writability(path, writable):
bits = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
mode = os.stat(path)[0]
if bool(mode & bits) != writable:
raise svntest.Failure("path '%s' is unexpectedly %s (mode %o)"
% (path, ["writable", "read-only"][writable], mode))
def is_writable(path):
"Raise if PATH is not writable."
check_writability(path, True)
def is_readonly(path):
"Raise if PATH is not readonly."
check_writability(path, False)
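# Usage sketch (an assumption -- the tests below mostly check file modes with
# os.stat() directly, but the helpers above could be used the same way):
#
#   is_readonly(sbox.ospath('iota'))   # raises svntest.Failure if iota is writable
#   is_writable(sbox.ospath('iota'))   # raises svntest.Failure if iota is read-only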
######################################################################
# Tests
#----------------------------------------------------------------------
# Each test refers to a section in
# notes/locking/locking-functional-spec.txt
# II.A.2, II.C.2.a: Lock a file in wc A as user FOO and make sure we
# have a representation of it. Checkout wc B as user BAR. Verify
# that user BAR cannot commit changes to the file nor its properties.
def lock_file(sbox):
"lock a file and verify that it's locked"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
err_re = "(svn\: E195022\: File '.*iota' is locked in another)|" + \
"(svn\: E160039: User '?jconstant'? does not own lock on path.*iota')"
svntest.main.run_svn(None, 'update', wc_b)
# -- Try to change a file --
# change the locked file
svntest.main.file_append(file_path_b, "Covert tweak\n")
# attempt (and fail) to commit as user Sally
svntest.actions.run_and_verify_commit(wc_b, None, None, err_re,
'--username',
svntest.main.wc_author2,
'-m', '', file_path_b)
# Revert our change that we failed to commit
svntest.main.run_svn(None, 'revert', file_path_b)
# -- Try to change a property --
# change the locked file's properties
svntest.main.run_svn(None, 'propset', 'sneakyuser', 'Sally', file_path_b)
err_re = "(svn\: E195022\: File '.*iota' is locked in another)|" + \
"(svn\: E160039\: User '?jconstant'? does not own lock on path)"
# attempt (and fail) to commit as user Sally
svntest.actions.run_and_verify_commit(wc_b, None, None, err_re,
'--username',
svntest.main.wc_author2,
'-m', '', file_path_b)
#----------------------------------------------------------------------
# II.C.2.b.[12]: Lock a file and commit using the lock. Make sure the
# lock is released. Repeat, but request that the lock not be
# released. Make sure the lock is retained.
def commit_file_keep_lock(sbox):
"commit a file and keep lock"
sbox.build()
wc_dir = sbox.wc_dir
# lock 'A/mu' as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'))
# make a change and commit it, holding lock
sbox.simple_append('A/mu', 'Tweak!\n')
svntest.main.run_svn(None, 'commit', '-m', '', '--no-unlock',
sbox.ospath('A/mu'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2, writelocked='K')
# Make sure the file is still locked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def commit_file_unlock(sbox):
"commit a file and release lock"
sbox.build()
wc_dir = sbox.wc_dir
# lock A/mu and iota as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'),
sbox.ospath('iota'))
# make a change and commit it, allowing lock to be released
sbox.simple_append('A/mu', 'Tweak!\n')
sbox.simple_commit()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2)
expected_status.tweak('iota', wc_rev=2)
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def commit_propchange(sbox):
"commit a locked file with a prop change"
sbox.build()
wc_dir = sbox.wc_dir
# lock A/mu as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'))
# make a property change and commit it, allowing lock to be released
sbox.simple_propset('blue', 'azul', 'A/mu')
sbox.simple_commit('A/mu')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2)
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# II.C.2.c: Lock a file in wc A as user FOO. Attempt to unlock same
# file in same wc as user BAR. Should fail.
#
# Attempt again with --force. Should succeed.
#
# II.C.2.c: Lock a file in wc A as user FOO. Attempt to unlock same
# file in wc B as user FOO. Should fail.
#
# Attempt again with --force. Should succeed.
def break_lock(sbox):
"lock a file and verify lock breaking behavior"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.main.run_svn(None, 'update', wc_b)
# attempt (and fail) to unlock file
  # This should give a "'iota' is not locked in this working copy" error
svntest.actions.run_and_verify_svn(None, None, ".*not locked",
'unlock',
file_path_b)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [],
'unlock', '--force',
file_path_b)
#----------------------------------------------------------------------
# II.C.2.d: Lock a file in wc A as user FOO. Attempt to lock same
# file in wc B as user BAR. Should fail.
#
# Attempt again with --force. Should succeed.
#
# II.C.2.d: Lock a file in wc A as user FOO. Attempt to lock same
# file in wc B as user FOO. Should fail.
#
# Attempt again with --force. Should succeed.
def steal_lock(sbox):
"lock a file and verify lock stealing behavior"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.main.run_svn(None, 'update', wc_b)
# attempt (and fail) to lock file
  # This should give an "'iota' is already locked..." error, but exits 0.
svntest.actions.run_and_verify_svn2(None, None,
".*already locked", 0,
'lock',
'-m', 'trying to break', file_path_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', '--force',
'-m', 'trying to break', file_path_b)
#----------------------------------------------------------------------
# II.B.2, II.C.2.e: Lock a file in wc A. Query wc for the
# lock and verify that all lock fields are present and correct.
def examine_lock(sbox):
"examine the fields of a lockfile for correctness"
sbox.build()
# lock a file as wc_author
svntest.actions.run_and_validate_lock(sbox.ospath('iota'),
svntest.main.wc_author)
#----------------------------------------------------------------------
# II.C.1: Lock a file in wc A. Check out wc B. Break the lock in wc
# B. Verify that wc A gracefully cleans up the lock via update as
# well as via commit.
def handle_defunct_lock(sbox):
"verify behavior when a lock in a wc is defunct"
sbox.build()
wc_dir = sbox.wc_dir
# set up our expected status
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# lock the file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', sbox.ospath('iota'))
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
# --- Meanwhile, in our other working copy... ---
# Try unlocking the file in the second wc.
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_path_b)
# update the 1st wc, which should clear the lock there
sbox.simple_update()
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# II.B.1: Set "svn:needs-lock" property on file in wc A. Checkout wc
# B and verify that that file is set as read-only.
#
# Tests propset, propdel, lock, and unlock
def enforce_lock(sbox):
"verify svn:needs-lock read-only behavior"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
lambda_path = sbox.ospath('A/B/lambda')
mu_path = sbox.ospath('A/mu')
# svn:needs-lock value should be forced to a '*'
svntest.actions.set_prop('svn:needs-lock', 'foo', iota_path)
svntest.actions.set_prop('svn:needs-lock', '*', lambda_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.set_prop('svn:needs-lock', ' ', mu_path, expected_err)
# Check svn:needs-lock
svntest.actions.check_prop('svn:needs-lock', iota_path, ['*'])
svntest.actions.check_prop('svn:needs-lock', lambda_path, ['*'])
svntest.actions.check_prop('svn:needs-lock', mu_path, ['*'])
svntest.main.run_svn(None, 'commit',
'-m', '', iota_path, lambda_path, mu_path)
# Now make sure that the perms were flipped on all files
if os.name == 'posix':
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
if ((os.stat(iota_path)[0] & mode)
or (os.stat(lambda_path)[0] & mode)
or (os.stat(mu_path)[0] & mode)):
logger.warn("Setting 'svn:needs-lock' property on a file failed to set")
logger.warn("file mode to read-only.")
raise svntest.Failure
# obtain a lock on one of these files...
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
# ...and verify that the write bit gets set...
if not (os.stat(iota_path)[0] & mode):
logger.warn("Locking a file with 'svn:needs-lock' failed to set write bit.")
raise svntest.Failure
# ...and unlock it...
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
iota_path)
# ...and verify that the write bit gets unset
if (os.stat(iota_path)[0] & mode):
logger.warn("Unlocking a file with 'svn:needs-lock' failed to unset write bit.")
raise svntest.Failure
# Verify that removing the property restores the file to read-write
svntest.main.run_svn(None, 'propdel', 'svn:needs-lock', iota_path)
if not (os.stat(iota_path)[0] & mode):
logger.warn("Deleting 'svn:needs-lock' failed to set write bit.")
raise svntest.Failure
#----------------------------------------------------------------------
# Test that updating a file with the "svn:needs-lock" property works,
# especially on Windows, where renaming A to B fails if B already
# exists and has its read-only bit set. See also issue #2278.
@Issue(2278)
def update_while_needing_lock(sbox):
"update handles svn:needs-lock correctly"
sbox.build()
sbox.simple_propset('svn:needs-lock', 'foo', 'iota')
sbox.simple_commit('iota')
sbox.simple_update()
# Lock, modify, commit, unlock, to create r3.
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', sbox.ospath('iota'))
sbox.simple_append('iota', 'This line added in r2.\n')
sbox.simple_commit('iota') # auto-unlocks
# Backdate to r2.
sbox.simple_update(revision=2)
# Try updating forward to r3 again. This is where the bug happened.
sbox.simple_update(revision=3)
#----------------------------------------------------------------------
# Tests svn:needs-lock behavior when the lock is broken/stolen elsewhere
def defunct_lock(sbox):
"verify svn:needs-lock behavior with defunct lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
iota_path = sbox.ospath('iota')
iota_path_b = sbox.ospath('iota', wc_dir=wc_b)
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
# Set the prop in wc a
sbox.simple_propset('svn:needs-lock', 'foo', 'iota')
# commit r2
sbox.simple_commit('iota')
# update wc_b
svntest.main.run_svn(None, 'update', wc_b)
# lock iota in wc_b
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path_b)
# break the lock iota in wc a
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock', '--force',
'-m', '', iota_path)
# update wc_b
svntest.main.run_svn(None, 'update', wc_b)
# make sure that iota got set to read-only
if (os.stat(iota_path_b)[0] & mode):
logger.warn("Upon removal of a defunct lock, a file with 'svn:needs-lock'")
logger.warn("was not set back to read-only")
raise svntest.Failure
#----------------------------------------------------------------------
# Tests dealing with a lock on a deleted path
def deleted_path_lock(sbox):
"verify lock removal on a deleted path"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
iota_url = sbox.repo_url + '/iota'
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
sbox.simple_rm('iota')
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'--no-unlock',
'-m', '', iota_path)
# Now make sure that we can delete the lock from iota via a URL
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
iota_url)
#----------------------------------------------------------------------
# Tests dealing with locking and unlocking
def lock_unlock(sbox):
"lock and unlock some files"
sbox.build()
wc_dir = sbox.wc_dir
pi_path = sbox.ospath('A/D/G/pi')
rho_path = sbox.ospath('A/D/G/rho')
tau_path = sbox.ospath('A/D/G/tau')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path, rho_path, tau_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_status.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', writelocked=None)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
pi_path, rho_path, tau_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Tests dealing with directory deletion and locks
def deleted_dir_lock(sbox):
"verify removal of a directory with locks inside"
sbox.build()
wc_dir = sbox.wc_dir
pi_path = sbox.ospath('A/D/G/pi')
rho_path = sbox.ospath('A/D/G/rho')
tau_path = sbox.ospath('A/D/G/tau')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path, rho_path, tau_path)
sbox.simple_rm('A/D/G') # the parent directory
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'--no-unlock',
'-m', '', sbox.ospath('A/D/G'))
#----------------------------------------------------------------------
# III.c : Lock a file and check the output of 'svn stat' from the same
# working copy and another.
def lock_status(sbox):
"verify status of lock in working copy"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
sbox.simple_append('iota', "This is a spreadsheet\n")
sbox.simple_commit('iota')
svntest.main.run_svn(None, 'lock', '-m', '', sbox.ospath('iota'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', wc_rev=2, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Verify status again after modifying the file
sbox.simple_append('iota', 'check stat output after mod')
expected_status.tweak('iota', status='M ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Verify status of lock from another working copy
svntest.main.run_svn(None, 'update', wc_b)
expected_status = svntest.actions.get_virginal_state(wc_b, 2)
expected_status.tweak('iota', writelocked='O')
svntest.actions.run_and_verify_status(wc_b, expected_status)
#----------------------------------------------------------------------
# III.c : Steal lock on a file from another working copy with 'svn lock
# --force', and check the status of lock in the repository from the
# working copy in which the file was initially locked.
def stolen_lock_status(sbox):
"verify status of stolen lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
svntest.main.file_append(file_path, "This is a spreadsheet\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.main.run_svn(None, 'lock',
'-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, wc_rev=2)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Forcibly lock same file (steal lock) from another working copy
svntest.main.run_svn(None, 'update', wc_b)
svntest.main.run_svn(None, 'lock',
'-m', '', '--force', file_path_b)
# Verify status from working copy where file was initially locked
expected_status.tweak(fname, writelocked='T')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# III.c : Break lock from another working copy with 'svn unlock --force'
# and verify the status of the lock in the repository with 'svn stat -u'
# from the working copy in which the file was initially locked
def broken_lock_status(sbox):
"verify status of broken lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
svntest.main.file_append(file_path, "This is a spreadsheet\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.main.run_svn(None, 'lock',
'-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, wc_rev=2)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Forcibly unlock the same file (break lock) from another working copy
svntest.main.run_svn(None, 'update', wc_b)
svntest.main.run_svn(None, 'unlock',
'--force', file_path_b)
# Verify status from working copy where file was initially locked
expected_status.tweak(fname, writelocked='B')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Invalid input test - lock non-existent file
def lock_non_existent_file(sbox):
"verify error on locking non-existent file"
sbox.build()
fname = 'A/foo'
file_path = os.path.join(sbox.wc_dir, fname)
exit_code, output, error = svntest.main.run_svn(1, 'lock',
'-m', '', file_path)
error_msg = "The node '%s' was not found." % os.path.abspath(file_path)
for line in error:
if line.find(error_msg) != -1:
break
else:
logger.warn("Error: %s : not found in: %s" % (error_msg, error))
raise svntest.Failure
#----------------------------------------------------------------------
# Check that locking an out-of-date file fails.
def out_of_date(sbox):
"lock an out-of-date file and ensure failure"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
# Make a new revision of the file in the first WC.
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.actions.run_and_verify_svn2(None, None,
".*newer version of '/iota' exists", 0,
'lock',
'--username', svntest.main.wc_author2,
'-m', '', file_path_b)
#----------------------------------------------------------------------
# Tests reverting a svn:needs-lock file
def revert_lock(sbox):
"verify svn:needs-lock behavior with revert"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
# set the prop in wc
svntest.actions.run_and_verify_svn(None, None, [], 'propset',
'svn:needs-lock', 'foo', iota_path)
# commit r2
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', iota_path)
# make sure that iota got set to read-only
if (os.stat(iota_path)[0] & mode):
logger.warn("Committing a file with 'svn:needs-lock'")
logger.warn("did not set the file to read-only")
raise svntest.Failure
# verify status is as we expect
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', wc_rev=2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# remove read-only-ness
svntest.actions.run_and_verify_svn(None, None, [], 'propdel',
'svn:needs-lock', iota_path)
# make sure that iota got read-only-ness removed
if (os.stat(iota_path)[0] & mode == 0):
logger.warn("Deleting the 'svn:needs-lock' property ")
logger.warn("did not remove read-only-ness")
raise svntest.Failure
# revert the change
svntest.actions.run_and_verify_svn(None, None, [], 'revert', iota_path)
# make sure that iota got set back to read-only
if (os.stat(iota_path)[0] & mode):
logger.warn("Reverting a file with 'svn:needs-lock'")
logger.warn("did not set the file back to read-only")
raise svntest.Failure
# try propdel and revert from a different directory so
# full filenames are used
extra_name = 'xx'
# now lock the file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
# modify it
svntest.main.file_append(iota_path, "This line added\n")
expected_status.tweak(wc_rev=1)
expected_status.tweak('iota', wc_rev=2)
expected_status.tweak('iota', status='M ', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# revert it
svntest.actions.run_and_verify_svn(None, None, [], 'revert', iota_path)
# make sure it is still writable since we have the lock
if (os.stat(iota_path)[0] & mode == 0):
logger.warn("Reverting a 'svn:needs-lock' file (with lock in wc) ")
logger.warn("did not leave the file writable")
raise svntest.Failure
#----------------------------------------------------------------------
def examine_lock_via_url(sbox):
"examine the fields of a lock from a URL"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'iota'
comment = 'This is a lock test.'
file_path = os.path.join(sbox.wc_dir, fname)
file_url = sbox.repo_url + '/' + fname
# lock the file url and check the contents of lock
svntest.actions.run_and_validate_lock(file_url,
svntest.main.wc_author2)
#----------------------------------------------------------------------
def lock_several_files(sbox):
"lock/unlock several files in one go"
sbox.build()
wc_dir = sbox.wc_dir
# Deliberately have no direct child of A as a target
iota_path = os.path.join(sbox.wc_dir, 'iota')
lambda_path = os.path.join(sbox.wc_dir, 'A', 'B', 'lambda')
alpha_path = os.path.join(sbox.wc_dir, 'A', 'B', 'E', 'alpha')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'-m', 'lock several',
iota_path, lambda_path, alpha_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
'--username', svntest.main.wc_author2,
iota_path, lambda_path, alpha_path)
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_switched_files(sbox):
"lock/unlock switched files"
sbox.build()
wc_dir = sbox.wc_dir
gamma_path = sbox.ospath('A/D/gamma')
lambda_path = sbox.ospath('A/B/lambda')
iota_URL = sbox.repo_url + '/iota'
alpha_URL = sbox.repo_url + '/A/B/E/alpha'
svntest.actions.run_and_verify_svn(None, None, [], 'switch',
iota_URL, gamma_path,
'--ignore-ancestry')
svntest.actions.run_and_verify_svn(None, None, [], 'switch',
alpha_URL, lambda_path,
'--ignore-ancestry')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/gamma', 'A/B/lambda', switched='S')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'lock several',
gamma_path, lambda_path)
expected_status.tweak('A/D/gamma', 'A/B/lambda', writelocked='K')
# In WC-NG locks are kept per working copy, not per file
expected_status.tweak('A/B/E/alpha', 'iota', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
gamma_path, lambda_path)
expected_status.tweak('A/D/gamma', 'A/B/lambda', writelocked=None)
expected_status.tweak('A/B/E/alpha', 'iota', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def lock_uri_encoded(sbox):
"lock and unlock a file with an URI-unsafe name"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file as wc_author
fname = 'amazing space'
file_path = sbox.ospath(fname)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.actions.run_and_verify_svn(None, None, [], "add", file_path)
expected_output = svntest.wc.State(wc_dir, {
fname : Item(verb='Adding'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({ fname: Item(wc_rev=2, status=' ') })
# Commit the file.
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# Make sure that the file was locked.
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_path)
# Make sure it was successfully unlocked again.
expected_status.tweak(fname, writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# And now the URL case.
file_url = sbox.repo_url + '/' + fname
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_url)
# Make sure that the file was locked.
expected_status.tweak(fname, writelocked='O')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_url)
# Make sure it was successfully unlocked again.
expected_status.tweak(fname, writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# A regression test for a bug when svn:needs-lock and svn:executable
# interact badly. The bug was fixed in trunk @ r854933.
@SkipUnless(svntest.main.is_posix_os)
def lock_and_exebit1(sbox):
"svn:needs-lock and svn:executable, part I"
mode_w = stat.S_IWUSR
mode_x = stat.S_IXUSR
mode_r = stat.S_IRUSR
sbox.build()
wc_dir = sbox.wc_dir
gamma_path = sbox.ospath('A/D/gamma')
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:needs-lock', ' ', gamma_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:executable property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:executable', ' ', gamma_path)
# commit
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Committing a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
# lock
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', gamma_path)
# mode should be +r, +w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or not gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Locking a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-write, executable")
raise svntest.Failure
# modify
svntest.main.file_append(gamma_path, "check stat output after mod & unlock")
# unlock
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
gamma_path)
# Mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Unlocking a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
# ci
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# Mode should be still +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Commiting a file with 'svn:needs-lock, svn:executable'")
logger.warn("after unlocking modified file's permissions")
raise svntest.Failure
#----------------------------------------------------------------------
# A variant of lock_and_exebit1: same test without unlock
@SkipUnless(svntest.main.is_posix_os)
def lock_and_exebit2(sbox):
"svn:needs-lock and svn:executable, part II"
mode_w = stat.S_IWUSR
mode_x = stat.S_IXUSR
mode_r = stat.S_IRUSR
sbox.build()
wc_dir = sbox.wc_dir
gamma_path = sbox.ospath('A/D/gamma')
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:needs-lock', ' ', gamma_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:executable property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:executable', ' ', gamma_path)
# commit
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Committing a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
# lock
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', gamma_path)
# mode should be +r, +w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or not gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Locking a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-write, executable")
raise svntest.Failure
# modify
svntest.main.file_append(gamma_path, "check stat output after mod & unlock")
# commit
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# Mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Commiting a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
def commit_xml_unsafe_file_unlock(sbox):
"commit file with xml-unsafe name and release lock"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'foo & bar'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.main.file_append(file_path, "Initial data.\n")
svntest.main.run_svn(None, 'add', file_path)
svntest.main.run_svn(None,
'commit', '-m', '', file_path)
# lock fname as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment', file_path)
# make a change and commit it, allowing lock to be released
svntest.main.file_append(file_path, "Followup data.\n")
svntest.main.run_svn(None,
'commit', '-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({ fname : Item(status=' ', wc_rev=3), })
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def repos_lock_with_info(sbox):
"verify info path@X or path -rY return repos lock"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'iota'
comment = 'This is a lock test.'
file_path = os.path.join(sbox.wc_dir, fname)
file_url = sbox.repo_url + '/' + fname
# lock wc file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'-m', comment, file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Steal lock on wc file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'--force',
'-m', comment, file_url)
expected_status.tweak(fname, writelocked='T')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Get repository lock token
repos_lock_token \
= svntest.actions.run_and_parse_info(file_url)[0]['Lock Token']
# info with revision option
expected_infos = [
{ 'Lock Token' : repos_lock_token },
]
svntest.actions.run_and_verify_info(expected_infos, file_path, '-r1')
# info with peg revision
svntest.actions.run_and_verify_info(expected_infos, file_path + '@1')
#----------------------------------------------------------------------
@Issue(4126)
def unlock_already_unlocked_files(sbox):
"(un)lock set of files, one already (un)locked"
sbox.build()
wc_dir = sbox.wc_dir
# Deliberately have no direct child of A as a target
iota_path = sbox.ospath('iota')
lambda_path = sbox.ospath('A/B/lambda')
alpha_path = sbox.ospath('A/B/E/alpha')
gamma_path = sbox.ospath('A/D/gamma')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'-m', 'lock several',
iota_path, lambda_path, alpha_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
error_msg = ".*Path '/A/B/E/alpha' is already locked by user '" + \
svntest.main.wc_author2 + "'.*"
svntest.actions.run_and_verify_svn2(None, None, error_msg, 0,
'lock',
'--username', svntest.main.wc_author2,
alpha_path, gamma_path)
expected_status.tweak('A/D/gamma', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
'--username', svntest.main.wc_author2,
lambda_path)
expected_status.tweak('A/B/lambda', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
error_msg = "(.*No lock on path '/A/B/lambda'.*)" + \
"|(.*'A/B/lambda' is not locked.*)"
svntest.actions.run_and_verify_svn2(None, None, error_msg, 0,
'unlock',
'--username', svntest.main.wc_author2,
'--force',
iota_path, lambda_path, alpha_path)
expected_status.tweak('iota', 'A/B/E/alpha', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def info_moved_path(sbox):
"show correct lock info on moved path"
sbox.build()
wc_dir = sbox.wc_dir
fname = sbox.ospath("iota")
fname2 = sbox.ospath("iota2")
# Move iota, creating r2.
svntest.actions.run_and_verify_svn(None, None, [],
"mv", fname, fname2)
expected_output = svntest.wc.State(wc_dir, {
'iota2' : Item(verb='Adding'),
'iota' : Item(verb='Deleting'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
"iota2" : Item(status=' ', wc_rev=2)
})
expected_status.remove("iota")
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir)
# Create a new, unrelated iota, creating r3.
svntest.main.file_append(fname, "Another iota")
svntest.actions.run_and_verify_svn(None, None, [],
"add", fname)
expected_output = svntest.wc.State(wc_dir, {
'iota' : Item(verb='Adding'),
})
expected_status.add({
"iota" : Item(status=' ', wc_rev=3)
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir)
# Lock the new iota.
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
"lock", fname)
expected_status.tweak("iota", writelocked="K")
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Get info for old iota at r1. This shouldn't give us any lock info.
expected_infos = [
{ 'URL' : '.*' ,
'Lock Token' : None },
]
svntest.actions.run_and_verify_info(expected_infos, fname2, '-r1')
#----------------------------------------------------------------------
def ls_url_encoded(sbox):
"ls locked path needing URL encoding"
sbox.build()
wc_dir = sbox.wc_dir
dirname = sbox.ospath("space dir")
fname = os.path.join(dirname, "f")
# Create a dir with a space in its name and a file therein.
svntest.actions.run_and_verify_svn(None, None, [],
"mkdir", dirname)
svntest.main.file_append(fname, "someone was here")
svntest.actions.run_and_verify_svn(None, None, [],
"add", fname)
expected_output = svntest.wc.State(wc_dir, {
'space dir' : Item(verb='Adding'),
'space dir/f' : Item(verb='Adding'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
"space dir" : Item(status=' ', wc_rev=2),
"space dir/f" : Item(status=' ', wc_rev=2),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir)
# Lock the file.
svntest.actions.run_and_verify_svn("Lock space dir/f", ".*locked by user",
[], "lock", fname)
# Make sure ls shows it being locked.
expected_output = " +2 " + re.escape(svntest.main.wc_author) + " +O .+f|" \
" +2 " + re.escape(svntest.main.wc_author) + " .+\./"
svntest.actions.run_and_verify_svn("List space dir",
expected_output, [],
"list", "-v", dirname)
#----------------------------------------------------------------------
# Make sure unlocking a path with the wrong lock token fails.
@Issue(3794)
def unlock_wrong_token(sbox):
"verify unlocking with wrong lock token"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_url = sbox.repo_url + "/iota"
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
file_path)
# Steal the lock as the same author, but using a URL to keep the old token
# in the WC.
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
"--force", file_url)
# Then, unlocking the WC path should fail.
### The error message returned is actually this, but let's worry about that
### another day...
svntest.actions.run_and_verify_svn2(
None, None, ".*((No lock on path)|(400 Bad Request))", 0,
'unlock', file_path)
#----------------------------------------------------------------------
# Verify that info shows lock info for locked files with URI-unsafe names
# when run in recursive mode.
def examine_lock_encoded_recurse(sbox):
"verify recursive info shows lock info"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'A/B/F/one iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.actions.run_and_verify_svn(None, None, [], "add", file_path)
expected_output = svntest.wc.State(wc_dir, {
fname : Item(verb='Adding'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({ fname: Item(wc_rev=2, status=' ') })
# Commit the file.
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
file_path)
# lock the file and validate the contents
svntest.actions.run_and_validate_lock(file_path,
svntest.main.wc_author)
# Trying to unlock someone else's lock without --force should fail.
@Issue(3801)
def unlocked_lock_of_other_user(sbox):
"unlock file locked by other user"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file with user jrandom
pi_path = sbox.ospath('A/D/G/pi')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# now try to unlock with user jconstant, should fail but exit 0.
if sbox.repo_url.startswith("http"):
expected_err = ".*403 Forbidden.*"
else:
expected_err = "svn: warning: W160039: User '%s' is trying to use a lock owned by "\
"'%s'.*" % (svntest.main.wc_author2, svntest.main.wc_author)
svntest.actions.run_and_verify_svn2(None, [], expected_err, 0,
'unlock',
'--username', svntest.main.wc_author2,
pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_funky_comment_chars(sbox):
"lock a file using a comment with xml special chars"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'lock & load', file_path)
#----------------------------------------------------------------------
# Check that the svn:needs-lock usage applies to a specific location
# in a working copy, not to the working copy overall.
def lock_twice_in_one_wc(sbox):
"try to lock a file twice in one working copy"
sbox.build()
wc_dir = sbox.wc_dir
mu_path = sbox.ospath('A/mu')
mu2_path = sbox.ospath('A/B/mu')
# Create a needs-lock file
svntest.actions.set_prop('svn:needs-lock', '*', mu_path)
svntest.actions.run_and_verify_svn(None, None, [],
'commit', wc_dir, '-m', '')
# Mark the file readonly
svntest.actions.run_and_verify_svn(None, None, [],
'update', wc_dir)
# Switch a second location for the same file in the same working copy
svntest.actions.run_and_verify_svn(None, None, [],
'switch', sbox.repo_url + '/A',
sbox.ospath('A/B'),
'--ignore-ancestry')
# Lock location 1
svntest.actions.run_and_verify_svn(None, None, [],
'lock', mu_path, '-m', 'Locked here')
# Locking in location 2 should fail ### Currently returns exitcode 0
svntest.actions.run_and_verify_svn2(None, None, ".*is already locked.*", 0,
'lock', '-m', '', mu2_path)
# Change the file anyway
os.chmod(mu2_path, 0700)
svntest.main.file_append(mu2_path, "Updated text")
# Commit will just succeed as the DB owns the lock. It's a user decision
# to commit the other target instead of the one originally locked
svntest.actions.run_and_verify_svn(None, None, [],
'commit', mu2_path, '-m', '')
#----------------------------------------------------------------------
# Test for issue #3524 'Locking path via ra_serf which doesn't exist in
# HEAD triggers assert'
@Issue(3524)
def lock_path_not_in_head(sbox):
"lock path that does not exist in HEAD"
sbox.build()
wc_dir = sbox.wc_dir
D_path = sbox.ospath('A/D')
lambda_path = sbox.ospath('A/B/lambda')
# Commit deletion of A/D and A/B/lambda as r2, then update the WC
# back to r1. Then attempt to lock some paths that no longer exist
# in HEAD. These should fail gracefully.
svntest.actions.run_and_verify_svn(None, None, [],
'delete', lambda_path, D_path)
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', 'Some deletions', wc_dir)
svntest.actions.run_and_verify_svn(None, None, [], 'up', '-r1', wc_dir)
expected_lock_fail_err_re = "svn: warning: W160042: " \
"((Path .* doesn't exist in HEAD revision)" \
"|(L(ock|OCK) request (on '.*' )?failed: 405 Method Not Allowed))"
  # Issue #3524 These lock attempts were triggering an assert over ra_serf:
#
# working_copies\lock_tests-37>svn lock A\D
# ..\..\..\subversion\libsvn_client\ra.c:275: (apr_err=235000)
# svn: In file '..\..\..\subversion\libsvn_ra_serf\util.c' line 1120:
# assertion failed (ctx->status_code)
#
# working_copies\lock_tests-37>svn lock A\B\lambda
# ..\..\..\subversion\libsvn_client\ra.c:275: (apr_err=235000)
# svn: In file '..\..\..\subversion\libsvn_ra_serf\util.c' line 1120:
# assertion failed (ctx->status_code)
svntest.actions.run_and_verify_svn2(None, None, expected_lock_fail_err_re,
0, 'lock', lambda_path)
expected_err = 'svn: E155008: The node \'.*D\' is not a file'
svntest.actions.run_and_verify_svn(None, None, expected_err,
'lock', D_path)
#----------------------------------------------------------------------
def verify_path_escaping(sbox):
"verify escaping of lock paths"
sbox.build()
wc_dir = sbox.wc_dir
# Add test paths using two characters that need escaping in a url, but
# are within the normal ascii range
file1 = sbox.ospath('file #1')
file2 = sbox.ospath('file #2')
file3 = sbox.ospath('file #3')
svntest.main.file_write(file1, 'File 1')
svntest.main.file_write(file2, 'File 2')
svntest.main.file_write(file3, 'File 3')
svntest.main.run_svn(None, 'add', file1, file2, file3)
svntest.main.run_svn(None, 'ci', '-m', 'commit', wc_dir)
svntest.main.run_svn(None, 'lock', '-m', 'lock 1', file1)
svntest.main.run_svn(None, 'lock', '-m', 'lock 2', sbox.repo_url + '/file%20%232')
svntest.main.run_svn(None, 'lock', '-m', 'lock 3', file3)
svntest.main.run_svn(None, 'unlock', sbox.repo_url + '/file%20%233')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add(
{
'file #1' : Item(status=' ', writelocked='K', wc_rev='2'),
'file #2' : Item(status=' ', writelocked='O', wc_rev='2'),
'file #3' : Item(status=' ', writelocked='B', wc_rev='2')
})
# Make sure the file locking is reported correctly
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Issue #3674: Replace + propset of locked file fails over DAV
@Issue(3674)
def replace_and_propset_locked_path(sbox):
"test replace + propset of locked file"
sbox.build()
wc_dir = sbox.wc_dir
mu_path = sbox.ospath('A/mu')
G_path = sbox.ospath('A/D/G')
rho_path = sbox.ospath('A/D/G/rho')
# Lock mu and A/D/G/rho.
svntest.actions.run_and_verify_svn(None, None, [],
'lock', mu_path, rho_path,
'-m', 'Locked')
# Now replace and propset on mu.
svntest.actions.run_and_verify_svn(None, None, [],
'rm', '--keep-local', mu_path)
svntest.actions.run_and_verify_svn(None, None, [],
'add', mu_path)
svntest.actions.run_and_verify_svn(None, None, [],
'propset', 'foo', 'bar', mu_path)
# Commit mu.
svntest.actions.run_and_verify_svn(None, None, [],
'commit', '-m', '', mu_path)
# Let's try this again where directories are involved, shall we?
# Replace A/D/G and A/D/G/rho, propset on A/D/G/rho.
svntest.actions.run_and_verify_svn(None, None, [],
'rm', G_path)
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', G_path)
svntest.main.file_append(rho_path, "This is the new file 'rho'.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'add', rho_path)
svntest.actions.run_and_verify_svn(None, None, [],
'propset', 'foo', 'bar', rho_path)
# And commit G.
svntest.actions.run_and_verify_svn(None, None, [],
'commit', '-m', '', G_path)
#----------------------------------------------------------------------
def cp_isnt_ro(sbox):
"uncommitted svn:needs-lock add/cp not read-only"
sbox.build()
wc_dir = sbox.wc_dir
mu_URL = sbox.repo_url + '/A/mu'
mu_path = sbox.ospath('A/mu')
mu2_path = sbox.ospath('A/mu2')
mu3_path = sbox.ospath('A/mu3')
kappa_path = sbox.ospath('kappa')
open(kappa_path, 'w').write("This is the file 'kappa'.\n")
## added file
sbox.simple_add('kappa')
svntest.actions.set_prop('svn:needs-lock', 'yes', kappa_path)
is_writable(kappa_path)
sbox.simple_commit('kappa')
is_readonly(kappa_path)
## versioned file
svntest.actions.set_prop('svn:needs-lock', 'yes', mu_path)
is_writable(mu_path)
sbox.simple_commit('A/mu')
is_readonly(mu_path)
# At this point, mu has 'svn:needs-lock' set
## wc->wc copied file
svntest.main.run_svn(None, 'copy', mu_path, mu2_path)
is_writable(mu2_path)
sbox.simple_commit('A/mu2')
is_readonly(mu2_path)
## URL->wc copied file
svntest.main.run_svn(None, 'copy', mu_URL, mu3_path)
is_writable(mu3_path)
sbox.simple_commit('A/mu3')
is_readonly(mu3_path)
#----------------------------------------------------------------------
# Issue #3525: Locked file which is scheduled for delete causes tree
# conflict
@Issue(3525)
def update_locked_deleted(sbox):
"updating locked scheduled-for-delete file"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
mu_path = sbox.ospath('A/mu')
alpha_path = sbox.ospath('A/B/E/alpha')
svntest.main.run_svn(None, 'lock', '-m', 'locked', mu_path, iota_path,
alpha_path)
sbox.simple_rm('iota')
sbox.simple_rm('A/mu')
sbox.simple_rm('A/B/E')
# Create expected output tree for an update.
expected_output = svntest.wc.State(wc_dir, {
})
# Create expected status tree for the update.
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B/E', status='D ')
expected_status.tweak('iota', 'A/mu', 'A/B/E/alpha',
status='D ', writelocked='K')
expected_status.tweak('A/B/E/beta', status='D ')
svntest.actions.run_and_verify_update(wc_dir, expected_output,
None, expected_status)
# Now we steal the lock of iota and A/mu via URL and retry
svntest.main.run_svn(None, 'lock', '-m', 'locked', sbox.repo_url + '/iota',
'--force', sbox.repo_url + '/A/mu',
sbox.repo_url + '/A/B/E/alpha')
expected_status.tweak('iota', 'A/mu', 'A/B/E/alpha',
status='D ', writelocked='O')
expected_output = svntest.wc.State(wc_dir, {
'A/mu' : Item(status='B '),
'A/B/E/alpha' : Item(status='B '),
'iota' : Item(status='B '),
})
svntest.actions.run_and_verify_update(wc_dir, expected_output,
None, expected_status)
#----------------------------------------------------------------------
def block_unlock_if_pre_unlock_hook_fails(sbox):
"block unlock operation if pre-unlock hook fails"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
svntest.actions.create_failing_hook(repo_dir, "pre-unlock", "error text")
# lock a file.
pi_path = sbox.ospath('A/D/G/pi')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Make sure the unlock operation fails as pre-unlock hook blocks it.
expected_unlock_fail_err_re = ".*error text|.*500 Internal Server Error"
svntest.actions.run_and_verify_svn2(None, None, expected_unlock_fail_err_re,
1, 'unlock', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_invalid_token(sbox):
"verify pre-lock hook returning invalid token"
sbox.build()
hook_path = os.path.join(sbox.repo_dir, 'hooks', 'pre-lock')
svntest.main.create_python_hook_script(hook_path,
'# encoding=utf-8\n'
'import sys\n'
'sys.stdout.write("тест")\n'
'sys.exit(0)\n')
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.actions.run_and_verify_svn2(None, None,
"svn: warning: W160037: " \
".*scheme.*'opaquelocktoken'", 0,
'lock', '-m', '', file_path)
@Issue(3105)
def lock_multi_wc(sbox):
"obtain locks in multiple working copies in one go"
sbox.build()
sbox2 = sbox.clone_dependent(copy_wc=True)
wc_name = os.path.basename(sbox.wc_dir)
wc2_name = os.path.basename(sbox2.wc_dir)
expected_output = svntest.verify.UnorderedOutput([
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join(wc_name, 'iota'),
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join(wc2_name, 'A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('iota'),
sbox2.ospath('A/mu'))
expected_output = svntest.verify.UnorderedOutput([
'\'%s\' unlocked.\n' % os.path.join(wc_name, 'iota'),
'\'%s\' unlocked.\n' % os.path.join(wc2_name, 'A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'unlock', sbox.ospath('iota'),
sbox2.ospath('A/mu'))
@Issue(3378)
def locks_stick_over_switch(sbox):
"locks are kept alive over switching"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
svntest.actions.run_and_verify_svn(None, None, [],
'cp', sbox.ospath('A'), repo_url + '/AA',
'-m', '')
expected_output = svntest.verify.UnorderedOutput([
'\'iota\' locked by user \'jrandom\'.\n',
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join('A', 'D', 'H', 'chi'),
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join('A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('A/D/H/chi'),
sbox.ospath('A/mu'),
sbox.ospath('iota'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/H/chi', 'A/mu', 'iota', writelocked='K')
# Make sure the file is still locked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.wc.State(wc_dir, {
})
expected_status.tweak(wc_rev=2)
expected_status.tweak('', wc_rev=1)
expected_status.tweak('iota', writelocked='K', wc_rev=1)
switched_status = expected_status.copy()
switched_status.tweak(writelocked=None)
switched_status.tweak('iota', writelocked='K')
switched_status.tweak('A', switched='S')
svntest.actions.run_and_verify_switch(wc_dir, sbox.ospath('A'),
repo_url + '/AA',
expected_output, None, switched_status)
# And now switch back to verify that the locks reappear
expected_output = svntest.wc.State(wc_dir, {
})
svntest.actions.run_and_verify_switch(wc_dir, sbox.ospath('A'),
repo_url + '/A',
expected_output, None, expected_status)
@Issue(4304)
def lock_unlock_deleted(sbox):
"lock/unlock a deleted file"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'rm', sbox.ospath('A/mu'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = '\'mu\' locked by user \'jrandom\'.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('A/mu'))
expected_status.tweak('A/mu', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = '\'mu\' unlocked.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'unlock', sbox.ospath('A/mu'))
expected_status.tweak('A/mu', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(4369)
def commit_stolen_lock(sbox):
"commit with a stolen lock"
sbox.build()
wc_dir = sbox.wc_dir
sbox.simple_append('A/mu', 'zig-zag')
sbox.simple_lock('A/mu')
expected_output = '\'mu\' locked by user \'jrandom\'.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', '--force',
sbox.repo_url + '/A/mu')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status='M ', writelocked='T')
err_re = "(.*E160037: Cannot verify lock on path '/A/mu')|" + \
"(.*E160038: '/.*/A/mu': no lock token available)"
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
err_re,
wc_dir)
# When removing directories, the locks of contained files were not
# correctly removed from the working copy database, thus they later
# magically reappeared when new files or directories with the same
# paths were added.
@Issue(4364)
def drop_locks_on_parent_deletion(sbox):
"drop locks when the parent is deleted"
sbox.build()
wc_dir = sbox.wc_dir
# lock some files, and remove them.
sbox.simple_lock('A/B/lambda')
sbox.simple_lock('A/B/E/alpha')
sbox.simple_lock('A/B/E/beta')
sbox.simple_rm('A/B')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.remove_subtree('A/B')
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
None,
wc_dir)
  # now re-add entities to the deleted paths.
sbox.simple_mkdir('A/B')
sbox.simple_add_text('new file replacing old file', 'A/B/lambda')
sbox.simple_add_text('file replacing former dir', 'A/B/F')
# The bug also resurrected locks on directories when their path
# matched a former file.
sbox.simple_mkdir('A/B/E', 'A/B/E/alpha')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B',
'A/B/E',
'A/B/E/alpha',
'A/B/F',
'A/B/lambda',
wc_rev='3')
expected_status.remove('A/B/E/beta')
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
None,
wc_dir)
@SkipUnless(svntest.main.is_ra_type_dav)
def dav_lock_timeout(sbox):
"unlock a lock with timeout"
import httplib
from urlparse import urlparse
import base64
sbox.build()
loc = urlparse(sbox.repo_url)
if loc.scheme == 'http':
h = httplib.HTTPConnection(loc.hostname, loc.port)
else:
h = httplib.HTTPSConnection(loc.hostname, loc.port)
lock_body = '<?xml version="1.0" encoding="utf-8" ?>' \
'<D:lockinfo xmlns:D="DAV:">' \
' <D:lockscope><D:exclusive/></D:lockscope>' \
' <D:locktype><D:write/></D:locktype>' \
' <D:owner>' \
' <D:href>http://a/test</D:href>' \
' </D:owner>' \
'</D:lockinfo>'
lock_headers = {
'Authorization': 'Basic ' + base64.b64encode('jconstant:rayjandom'),
'Timeout': 'Second-86400'
}
# Enabling the following line makes this test easier to debug
  # h.set_debuglevel(9)
h.request('LOCK', sbox.repo_url + '/iota', lock_body, lock_headers)
r = h.getresponse()
# Verify that there is a lock, by trying to obtain one
svntest.actions.run_and_verify_svn2(None, None, ".*locked by user", 0,
'lock', '-m', '', sbox.ospath('iota'))
# Before this patch this used to fail with a parse error of the timeout
svntest.actions.run_and_verify_svn2(None, None, ".*W160039.*Unlock.*403", 0,
'unlock', sbox.repo_url + '/iota')
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.ospath('iota'), '--force')
def non_root_locks(sbox):
"locks for working copies not at repos root"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'cp', sbox.repo_url, sbox.repo_url + '/X',
'-m', 'copy greek tree')
sbox.simple_switch(sbox.repo_url + '/X')
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Lock a file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', sbox.ospath('A/D/G/pi'),
'-m', '')
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Updates don't break the lock
sbox.simple_update('A/D')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
sbox.simple_update('')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Break the lock
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.repo_url + '/X/A/D/G/pi')
# Subdir update reports the break
sbox.simple_update('A/D')
expected_status.tweak('A/D/G/pi', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Relock and break
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', sbox.ospath('A/D/G/pi'),
'-m', '')
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.repo_url + '/X/A/D/G/pi')
# Root update reports the break
sbox.simple_update('')
expected_status.tweak('A/D/G/pi', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3515)
@SkipUnless(svntest.main.is_ra_type_dav)
def dav_lock_refresh(sbox):
"refresh timeout of DAV lock"
import httplib
from urlparse import urlparse
import base64
sbox.build(create_wc = False)
# Acquire lock on 'iota'
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
sbox.repo_url + '/iota')
# Try to refresh lock using 'If' header
loc = urlparse(sbox.repo_url)
if loc.scheme == 'http':
h = httplib.HTTPConnection(loc.hostname, loc.port)
else:
h = httplib.HTTPSConnection(loc.hostname, loc.port)
lock_token = svntest.actions.run_and_parse_info(sbox.repo_url + '/iota')[0]['Lock Token']
lock_headers = {
'Authorization': 'Basic ' + base64.b64encode('jrandom:rayjandom'),
'If': '(<' + lock_token + '>)',
'Timeout': 'Second-7200'
}
# Enabling the following line makes this test easier to debug
  # h.set_debuglevel(9)
h.request('LOCK', sbox.repo_url + '/iota', '', lock_headers)
# XFAIL Refreshing of DAV lock fails with error '412 Precondition Failed'
r = h.getresponse()
if r.status != httplib.OK:
raise svntest.Failure('Lock refresh failed: %d %s' % (r.status, r.reason))
@SkipUnless(svntest.main.is_ra_type_dav)
def delete_locked_file_with_percent(sbox):
"lock and delete a file called 'a %( ) .txt'"
sbox.build()
locked_filename = 'a %( ) .txt'
locked_path = sbox.ospath(locked_filename)
svntest.main.file_write(locked_path, "content\n")
sbox.simple_add(locked_filename)
sbox.simple_commit()
sbox.simple_lock(locked_filename)
sbox.simple_rm(locked_filename)
# XFAIL: With a 1.8.x client, this commit fails with:
# svn: E175002: Unexpected HTTP status 400 'Bad Request' on '/svn-test-work/repositories/lock_tests-52/!svn/txr/2-2/a%20%25(%20)%20.txt'
# and the following error in the httpd error log:
# Invalid percent encoded URI in tagged If-header [400, #104]
sbox.simple_commit()
@Issue(4557)
@XFail(svntest.main.is_ra_type_dav)
def delete_dir_with_lots_of_locked_files(sbox):
"delete a directory containing lots of locked files"
sbox.build()
wc_dir = sbox.wc_dir
# A lot of paths.
nfiles = 75 # NOTE: test XPASSES with 50 files!!!
locked_paths = []
for i in range(nfiles):
locked_paths.append(sbox.ospath("A/locked_files/file-%i" % i))
# Create files at these paths
os.mkdir(sbox.ospath("A/locked_files"))
for file_path in locked_paths:
svntest.main.file_write(file_path, "This is '%s'.\n" % (file_path,))
sbox.simple_add("A/locked_files")
sbox.simple_commit()
sbox.simple_update()
# lock all the files
svntest.actions.run_and_verify_svn(None, None, [], 'lock',
'-m', 'All locks',
*locked_paths)
# Locally delete A (regression against earlier versions, which
# always used a special non-standard request)
sbox.simple_rm("A")
# But a further replacement never worked
sbox.simple_mkdir("A")
# And an additional propset didn't work either
# (but doesn't require all lock tokens recursively)
sbox.simple_propset("k", "v", "A")
# Commit the deletion
# XFAIL: As of 1.8.10, this commit fails with:
# svn: E175002: Unexpected HTTP status 400 'Bad Request' on '<path>'
# and the following error in the httpd error log:
# request failed: error reading the headers
# This problem was introduced on the 1.8.x branch in r1606976.
sbox.simple_commit()
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
lock_file,
commit_file_keep_lock,
commit_file_unlock,
commit_propchange,
break_lock,
steal_lock,
examine_lock,
handle_defunct_lock,
enforce_lock,
defunct_lock,
deleted_path_lock,
lock_unlock,
deleted_dir_lock,
lock_status,
stolen_lock_status,
broken_lock_status,
lock_non_existent_file,
out_of_date,
update_while_needing_lock,
revert_lock,
examine_lock_via_url,
lock_several_files,
lock_switched_files,
lock_uri_encoded,
lock_and_exebit1,
lock_and_exebit2,
commit_xml_unsafe_file_unlock,
repos_lock_with_info,
unlock_already_unlocked_files,
info_moved_path,
ls_url_encoded,
unlock_wrong_token,
examine_lock_encoded_recurse,
unlocked_lock_of_other_user,
lock_funky_comment_chars,
lock_twice_in_one_wc,
lock_path_not_in_head,
verify_path_escaping,
replace_and_propset_locked_path,
cp_isnt_ro,
update_locked_deleted,
block_unlock_if_pre_unlock_hook_fails,
lock_invalid_token,
lock_multi_wc,
locks_stick_over_switch,
lock_unlock_deleted,
commit_stolen_lock,
drop_locks_on_parent_deletion,
dav_lock_timeout,
non_root_locks,
dav_lock_refresh,
delete_locked_file_with_percent,
delete_dir_with_lots_of_locked_files,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| 1.679688 | 2 |
tests/testresourcemap.py | rayvnekieron/regionator | 0 | 11688 | <reponame>rayvnekieron/regionator
#!/usr/bin/env python
"""
Copyright (C) 2006 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
$URL$
$Revision$
$Date$
"""
import unittest
import kml.model
import kml.resourcemap
import xml.dom.minidom
class SimpleResourceMapItemTestCase(unittest.TestCase):
def runTest(self):
rmi = kml.resourcemap.ResourceMapItem()
rmi.ParseTexturesTxtLine('<gp><kp><mid>')
(gp,kp,mid) = rmi.Mapping()
assert gp == 'gp','bad geom path in ResourceMapItem'
assert kp == 'kp','bad kmz path in ResourceMapItem'
assert mid == 'mid','bad mid in ResourceMapItem'
class NoMidResourceMapItemTestCase(unittest.TestCase):
def runTest(self):
rmi = kml.resourcemap.ResourceMapItem()
rmi.ParseTexturesTxtLine('<../goo/hi.jpg> <../koo/bye.jpg>')
(gp,kp,mid) = rmi.Mapping()
assert gp == '../goo/hi.jpg','bad geom path in ResourceMapItem'
assert kp == '../koo/bye.jpg','bad kmz path in ResourceMapItem'
assert mid == None,'non None mid in non-mid ResourceMapItem'
class ResourceMapTestCase(unittest.TestCase):
def setUp(self):
self.__model = kml.model.Model()
self.__model.Parse('London_house.kmz')
textures_txt_data = self.__model.ReadFileData('textures.txt')
self.__rmap = kml.resourcemap.ResourceMap()
self.__rmap.ParseTexturesTxt(textures_txt_data)
def testResourceMapSize(self):
assert self.__rmap.Size() == 3,'ResourceMap.Size() bad'
def testResourceMapIterator(self):
m = []
for rmap_item in self.__rmap:
m.append(rmap_item.Mapping())
assert m[0][0] == '../images/Building3.JPG','textures.txt parse failed 0'
assert m[2][1] == '../images/GAF_Marquis.jpg','textures.txt parse failed 2'
def testResourceMapLookup(self):
kp = self.__rmap.GetKmzPath('../images/Building3.JPG')
assert kp == '../images/Building3.JPG','rmap kmz lookup failed'
gp = self.__rmap.GetGeomPath('../images/GAF_Marquis.jpg')
assert gp == '../images/GAF_Marquis.jpg','rmap geom lookup failed'
class TexturesTxtTestCase(unittest.TestCase):
def setUp(self):
f = open('textures.txt','r')
textures_txt_data = f.read()
f.close()
self.__rmap = kml.resourcemap.ResourceMap()
self.__rmap.ParseTexturesTxt(textures_txt_data)
def testSize(self):
assert self.__rmap.Size() == 66,'textures txt rmap size bad'
def testGeomLookup(self):
gp = ('../geom/north-face-10noCulling.jpg')
rmi = self.__rmap.LookupByGeomPath(gp)
(got_gp,got_kp,got_mid) = rmi.Mapping()
want_kp = '../kmz/north-face-10noCulling.jpg'
assert got_kp == want_kp, 'geom lookup failed'
def testKmzLookup(self):
rmi = self.__rmap.LookupByKmzPath('../kmz/east-face-1_1.jpg')
(got_gp, got_kp, id) = rmi.Mapping()
want_gp = '../geom/east-face-1_1.jpg'
assert got_gp == want_gp,'kmz lookup failed'
def testLookupAll(self):
for rmap_item in self.__rmap:
(gp,kp,mid) = rmap_item.Mapping()
assert gp == self.__rmap.GetGeomPath(kp),'GetGeomPath() failed'
assert kp == self.__rmap.GetKmzPath(gp),'GetKmzPath() failed'
class ResourceMapAddTestCase(unittest.TestCase):
def runTest(self):
rmap = kml.resourcemap.ResourceMap()
rmap.AddResourceMapItem('gpath0','kpath0','mid0')
rmap.AddResourceMapItem('gpath1','kpath1','mid1')
got_tt = rmap.Serialize()
    want_tt = ['<gpath0> <kpath0> <mid0>',
               '<gpath1> <kpath1> <mid1>']
assert got_tt == "\n".join(want_tt), 'resource map serialize failed'
class TestConvertTexturesTxt(unittest.TestCase):
def runTest(self):
rmap_kml = kml.resourcemap.ConvertTexturesTxt('textures.txt')
rmap_node = xml.dom.minidom.parseString(rmap_kml)
alias_nodelist = rmap_node.getElementsByTagName('Alias')
targethref_nodelist = rmap_node.getElementsByTagName('targetHref')
sourcehref_nodelist = rmap_node.getElementsByTagName('sourceHref')
assert 66 == len(alias_nodelist) == len(sourcehref_nodelist) == \
len(targethref_nodelist)
assert '../kmz/east-face-10noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[0], 'targetHref')
assert '../geom/east-face-10noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[0], 'sourceHref')
assert '../kmz/west-face-9noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[65], 'targetHref')
assert '../geom/west-face-9noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[65], 'sourceHref')
def suite():
suite = unittest.TestSuite()
suite.addTest(SimpleResourceMapItemTestCase())
suite.addTest(NoMidResourceMapItemTestCase())
suite.addTest(ResourceMapTestCase("testResourceMapSize"))
suite.addTest(ResourceMapTestCase("testResourceMapIterator"))
suite.addTest(ResourceMapTestCase("testResourceMapLookup"))
suite.addTest(TexturesTxtTestCase("testSize"))
suite.addTest(TexturesTxtTestCase("testGeomLookup"))
suite.addTest(TexturesTxtTestCase("testKmzLookup"))
suite.addTest(TexturesTxtTestCase("testLookupAll"))
suite.addTest(TestConvertTexturesTxt())
return suite
runner = unittest.TextTestRunner()
runner.run(suite())
| 1.953125 | 2 |
pycket/base.py | krono/pycket | 0 | 11689 | from pycket.error import SchemeException
from rpython.tool.pairtype import extendabletype
from rpython.rlib import jit, objectmodel
class W_ProtoObject(object):
""" abstract base class of both actual values (W_Objects) and multiple
return values (Values)"""
_attrs_ = []
_settled_ = True
def as_real_value(self):
raise NotImplementedError("not a real value!")
def num_values(val):
raise NotImplementedError("not a real value!")
def get_value(val, index):
raise NotImplementedError("not a real value!")
def get_all_values(self):
raise NotImplementedError("not a real value!")
class W_Object(W_ProtoObject):
__metaclass__ = extendabletype
_attrs_ = []
errorname = "%%%%unreachable%%%%"
def __init__(self):
raise NotImplementedError("abstract base class")
def num_values(self):
return 1
def get_value(self, index):
assert index == 0
return self
def get_all_values(self):
return [self]
def iscallable(self):
return False
def call(self, args, env, cont):
raise SchemeException("%s is not callable" % self.tostring())
def call_with_extra_info(self, args, env, cont, calling_app):
return self.call(args, env, cont)
def enable_jitting(self):
pass # need to override in callables that are based on an AST
# an arity is a pair of a list of numbers and either -1 or a non-negative integer
def get_arity(self):
from pycket.interpreter import Arity
if self.iscallable():
return Arity.unknown
else:
raise SchemeException("%s does not have arity" % self.tostring())
def is_proper_list(self):
return False
def is_impersonator(self):
return self.is_chaperone()
def is_chaperone(self):
return False
def is_proxy(self):
return self.is_chaperone() or self.is_impersonator()
def get_proxied(self):
return self
def get_properties(self):
return {}
def is_non_interposing_chaperone(self):
return False
def immutable(self):
return False
def equal(self, other):
return self is other # default implementation
def eqv(self, other):
return self is other # default implementation
def hash_equal(self):
return objectmodel.compute_hash(self) # default implementation
hash_eqv = hash_equal
def tostring(self):
return str(self)
# for expose
@classmethod
def make_unwrapper(cls):
if cls is W_Object:
return lambda x: x, ''
def unwrap(w_object):
if isinstance(w_object, cls):
return w_object
return None
return unwrap, cls.errorname
class SingletonMeta(type):
def __new__(cls, name, bases, dct):
result = type.__new__(cls, name, bases, dct)
result.singleton = result()
return result
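# Illustrative sketch, not part of pycket: how SingletonMeta and
# W_Object.make_unwrapper are meant to be used. The Demo/W_Demo class
# names below are hypothetical and exist only for this demonstration.
if __name__ == '__main__':
    class Demo(object):
        # SingletonMeta builds the class and immediately attaches one
        # shared instance as the `singleton` attribute.
        __metaclass__ = SingletonMeta
    assert Demo.singleton is Demo.singleton
    class W_Demo(W_Object):
        errorname = "demo"
        def __init__(self):
            pass
    # make_unwrapper returns a checker that yields the object itself for
    # instances of the class and None otherwise, plus the errorname.
    unwrap, errorname = W_Demo.make_unwrapper()
    w = W_Demo()
    assert unwrap(w) is w and unwrap(42) is None and errorname == "demo"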
| 2.515625 | 3 |
src/teacher/flake_approx/teacher_env.py | jainraj/CISR_NeurIPS20 | 16 | 11690 | import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.a2c.utils import conv, linear, conv_to_fc
from src.envs import CMDP, FrozenLakeEnvCustomMap
from src.envs.frozen_lake.frozen_maps import MAPS
from src.students import LagrangianStudent, identity_transfer
from src.online_learning import ExponetiatedGradient
from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, \
create_intervention, SmallFrozenTeacherEnv
from src.teacher.frozen_lake_env import SmallFrozenTrainingObservation, SmallFrozenNonStationaryBandits
from src.envs.frozen_lake.utils import create_intervention_from_map, \
OptimalAgent, add_teacher
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
__all__ = ['create_teacher_env', 'small_base_cenv_fn']
def constraint(info=None, **kwargs):
return {'g': float(info['next_state_type'] in 'H')}
def small_base_env_fn():
# Base MDP
world_map = MAPS['small']
not_slipping_prob = 0.8
env_kwargs = dict(desc=world_map,
not_slipping_prob=not_slipping_prob,
base_r_mapping=None,
timeout=200)
return FrozenLakeEnvCustomMap(**env_kwargs)
# Base CMDP
def small_base_cenv_fn():
return CMDP(small_base_env_fn(), constraint,
constraints_values=[0],
n_constraints=1,
avg_constraint=True)
def make_base_small_cenvs():
# Base MDP
world_map = MAPS['small']
# # 2 interventions
# dist = [1, 1]
# tau = [0.1, 0]
# buff_size = [1, 0]
# avg_constraint = [True, True]
# 3 Interventions
dist = [2, 1, 1]
tau = [0.1, 0.1, 0]
buff_size = [1, 1, 0]
avg_constraint = [True, True, True]
interventions = []
for d, t, b, avg in zip(dist, tau, buff_size, avg_constraint):
interventions.append(
create_intervention(
small_base_cenv_fn,
create_intervention_from_map(add_teacher(world_map, d)),
[t], b, use_vec=True, avg_constraint=avg)
)
assert callable(interventions[0])
test_env = create_intervention(
small_base_cenv_fn(), create_intervention_from_map(add_teacher(
world_map)),
[0.0], 0, avg_constraint=True)
return interventions, test_env
############################## TEACHER ENV ###################################
def my_small_cnn(scaled_images, **kwargs):
activ = tf.nn.relu
layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=3,
stride=1, **kwargs))
layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=3,
stride=1, **kwargs))
layer_3 = conv_to_fc(layer_2)
return activ(
linear(layer_3, 'fc1', n_hidden=32, init_scale=np.sqrt(2)))
def create_teacher_env(new_br_kwargs={}, new_online_kwargs={},
original=False, obs_from_training=False,
non_stationary_bandit=False):
# Student definition
br_kwargs = dict(policy=CnnPolicy, verbose=0, n_steps=128,
ent_coef=0.05, cliprange=0.2, learning_rate=1e-3,
noptepochs=9,
policy_kwargs={'cnn_extractor': my_small_cnn})
br_kwargs.update(new_br_kwargs)
# Define online kwargs
online_kwargs = dict(B=0.5, eta=1.0)
online_kwargs.update(new_online_kwargs)
student_cls = LagrangianStudent
n_envs = 4
use_sub_proc_env = False
student_default_kwargs = {'env': None,
'br_algo': PPO2,
'online_algo': ExponetiatedGradient,
'br_kwargs': br_kwargs,
'online_kwargs': online_kwargs,
'lagrangian_ronuds': 2,
'curriculum_transfer': identity_transfer,
'br_uses_vec_env': True,
'use_sub_proc_env': use_sub_proc_env,
'n_envs': n_envs,
}
student_ranges_dict = {}
# Teacher interventions
if original:
# To preserve the teacher env interface while training in the
# original environment, we introduce a dummy intervention
# condition that is always False.
def dummy_intervention(**kwargs):
return 0
_, test_env = make_base_small_cenvs()
intervention = create_intervention(
base_cenv=small_base_cenv_fn,
interventions=[dummy_intervention], taus=[0], buf_size=0,
use_vec=True, avg_constraint=True)
interventions = [intervention]
else:
interventions, test_env = make_base_small_cenvs()
learning_steps = 4800 * 2
time_steps_lim = learning_steps * 10
test_episode_timeout = 200
test_episode_number = 5
if obs_from_training:
env_cls = SmallFrozenTrainingObservation
elif non_stationary_bandit:
env_cls = SmallFrozenNonStationaryBandits
else:
env_cls = SmallFrozenTeacherEnv
return env_cls(student_cls=student_cls,
student_default_kwargs=student_default_kwargs,
interventions=interventions,
final_env=test_env,
logger_cls=FrozenLakeEvaluationLogger,
student_ranges_dict=student_ranges_dict,
learning_steps=learning_steps,
test_episode_number=test_episode_number,
test_episode_timeout=test_episode_timeout,
time_steps_lim=time_steps_lim,
normalize_obs=False)
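# Minimal usage sketch (illustrative, not part of the original module): the
# factory above is normally driven by an outer training loop; here we only
# show how the different teacher-environment flavours are constructed.
if __name__ == '__main__':
    # CNN-PPO Lagrangian student with the three-intervention curriculum.
    curriculum_env = create_teacher_env()
    # Same interface, but the student trains on the original CMDP only.
    original_env = create_teacher_env(original=True)
    # Non-stationary bandit variant of the teacher observations/actions.
    bandit_env = create_teacher_env(non_stationary_bandit=True)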
| 1.609375 | 2 |
scanapi/__init__.py | rajarshig/scanapi | 1 | 11691 | name = "scanapi"
import click
import logging
from scanapi.tree.api_tree import APITree
from scanapi.reporter import Reporter
from scanapi.requests_maker import RequestsMaker
from scanapi.settings import SETTINGS
from scanapi.yaml_loader import load_yaml
@click.command()
@click.option(
"-s",
"--spec-path",
"spec_path",
type=click.Path(exists=True),
default=SETTINGS["spec_path"],
)
@click.option("-o", "--output-path", "output_path")
@click.option(
"-r",
"--reporter",
"reporter",
type=click.Choice(["console", "markdown", "html"]),
default=SETTINGS["reporter"],
)
@click.option("-t", "--template", "template")
@click.option(
"--log-level",
"log_level",
type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
default="INFO",
)
def scan(spec_path, output_path, reporter, template, log_level):
"""Automated Testing and Documentation for your REST API."""
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
SETTINGS.update({"spec_path": spec_path, "output_path": output_path})
# custom templates to be implemented later
if template is not None:
        logger.warning("Custom templates are not supported yet, but will be soon. Hang tight.")
spec_path = SETTINGS["spec_path"]
try:
api_spec = load_yaml(spec_path)
except FileNotFoundError as e:
error_message = f"Could not find spec file: {spec_path}. {str(e)}"
logger.error(error_message)
return
try:
api_tree = APITree(api_spec)
except Exception as e:
error_message = "Error loading API spec."
error_message = "{} {}".format(error_message, str(e))
logger.error(error_message)
return
RequestsMaker(api_tree).make_all()
Reporter(output_path, reporter, template).write(api_tree.responses.values())
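# Entry point sketch (not in the original file): lets the module be executed
# directly; click handles the argument parsing declared on `scan` above,
# e.g. an invocation equivalent to `scanapi -s api.yaml -r html`.
if __name__ == "__main__":
    scan()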
| 2.15625 | 2 |
breadth first search/level order successor.py | JoanWu5/Grokking-the-coding-interview | 0 | 11692 | <reponame>JoanWu5/Grokking-the-coding-interview<filename>breadth first search/level order successor.py
# Given a binary tree and a node, find the level order successor of the given node in the tree.
# The level order successor is the node that appears right after the given node in the level order traversal.
from collections import deque
class TreeNode:
def __init__(self, value) -> None:
self.value = value
self.left = None
self.right = None
def level_order_successor(root, key):
if root is None:
return None
queue = deque()
queue.append(root)
while queue:
current_node = queue.popleft()
if current_node.left:
queue.append(current_node.left)
if current_node.right:
queue.append(current_node.right)
if current_node.value == key:
break
return queue[0].value if queue else None
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print(level_order_successor(root, 12))
print(level_order_successor(root, 9))
| 3.84375 | 4 |
npbench/benchmarks/cavity_flow/cavity_flow_legate.py | frahlg/npbench | 27 | 11693 | <reponame>frahlg/npbench
# Barba, <NAME>., and Forsyth, <NAME>. (2018).
# CFD Python: the 12 steps to Navier-Stokes equations.
# Journal of Open Source Education, 1(9), 21,
# https://doi.org/10.21105/jose.00021
# TODO: License
# (c) 2017 <NAME>, <NAME>.
# All content is under Creative Commons Attribution CC-BY 4.0,
# and all code is under BSD-3 clause (previously under MIT, and changed on March 8, 2018).
import legate.numpy as np
def build_up_b(b, rho, dt, u, v, dx, dy):
b[1:-1,
1:-1] = (rho * (1 / dt * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx) +
(v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy)) -
((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2 - 2 *
((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy) *
(v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx)) -
((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2))
def pressure_poisson(nit, p, dx, dy, b):
pn = np.empty_like(p)
pn = p.copy()
for q in range(nit):
pn = p.copy()
p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
(pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
(2 * (dx**2 + dy**2)) - dx**2 * dy**2 /
(2 * (dx**2 + dy**2)) * b[1:-1, 1:-1])
p[:, -1] = p[:, -2] # dp/dx = 0 at x = 2
p[0, :] = p[1, :] # dp/dy = 0 at y = 0
p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
p[-1, :] = 0 # p = 0 at y = 2
def cavity_flow(nx, ny, nt, nit, u, v, dt, dx, dy, p, rho, nu):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
for n in range(nt):
un = u.copy()
vn = v.copy()
build_up_b(b, rho, dt, u, v, dx, dy)
pressure_poisson(nit, p, dx, dy, b)
u[1:-1,
1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
(un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * dt / dy *
(un[1:-1, 1:-1] - un[0:-2, 1:-1]) - dt / (2 * rho * dx) *
(p[1:-1, 2:] - p[1:-1, 0:-2]) + nu *
(dt / dx**2 *
(un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
dt / dy**2 *
(un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])))
v[1:-1,
1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
(vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * dt / dy *
(vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) - dt / (2 * rho * dy) *
(p[2:, 1:-1] - p[0:-2, 1:-1]) + nu *
(dt / dx**2 *
(vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
dt / dy**2 *
(vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))
u[0, :] = 0
u[:, 0] = 0
u[:, -1] = 0
u[-1, :] = 1 # set velocity on cavity lid equal to 1
v[0, :] = 0
v[-1, :] = 0
v[:, 0] = 0
v[:, -1] = 0
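# Illustrative driver, not part of the npbench benchmark: sets up a small
# lid-driven cavity problem on a 2x2 domain and runs the solver above. The
# parameter values (rho=1, nu=0.1, dt=0.001) follow Barba's original
# "12 steps to Navier-Stokes" tutorial; the grid size is only an example.
if __name__ == '__main__':
    nx, ny, nt, nit = 41, 41, 100, 50
    dx, dy = 2 / (nx - 1), 2 / (ny - 1)
    u = np.zeros((ny, nx))
    v = np.zeros((ny, nx))
    p = np.zeros((ny, nx))
    cavity_flow(nx, ny, nt, nit, u, v, .001, dx, dy, p, 1, .1)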
| 2.640625 | 3 |
Lib/gds/burp/config.py | mwielgoszewski/jython-burp-api | 134 | 11694 | <reponame>mwielgoszewski/jython-burp-api
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from ConfigParser import ConfigParser
from copy import deepcopy
from inspect import cleandoc
import os.path
from .core import ExtensionPoint
__all__ = ['Configuration', 'ConfigSection', 'Option', 'BoolOption',
'IntOption', 'FloatOption', 'ListOption',
'OrderedExtensionsOption']
_use_default = object()
def as_bool(value):
"""Convert the given value to a `bool`.
If `value` is a string, return `True` for any of "yes", "true", "enabled",
"on" or non-zero numbers, ignoring case. For non-string arguments, return
the argument converted to a `bool`, or `False` if the conversion fails.
"""
if isinstance(value, basestring):
try:
return bool(float(value))
except ValueError:
return value.strip().lower() in ('yes', 'true', 'enabled', 'on')
try:
return bool(value)
except (TypeError, ValueError):
return False
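# For example (illustrative): as_bool('Yes') and as_bool(2) are True, while
# as_bool('0'), as_bool('off') and as_bool(None) are False.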
def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8')
except UnicodeDecodeError:
return unicode(text, 'latin1')
elif isinstance(text, Exception):
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text)
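# For example (illustrative): to_unicode('caf\xc3\xa9') decodes the bytes as
# UTF-8 and yields u'caf\xe9', while a byte string that is not valid UTF-8,
# such as '\xe9', falls back to latin-1 and still yields u'\xe9'.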
def _to_utf8(basestr):
return to_unicode(basestr, 'utf-8').encode('utf-8')
class Configuration(object):
"""Thin layer over `ConfigParser` from the Python standard library.
In addition to providing some convenience methods, the class remembers
the last modification time of the configuration file, and reparses it
when the file has changed.
"""
def __init__(self, filename, params={}):
self.filename = filename
self.parser = ConfigParser()
self.parser.optionxform = str
self._old_sections = {}
self.parents = []
self._lastmtime = 0
self._sections = {}
self.parser.read(filename)
def __contains__(self, name):
"""Return whether the configuration contains a section of the given
name.
"""
return name in self.sections()
def __getitem__(self, name):
"""Return the configuration section with the specified name."""
if name not in self._sections:
self._sections[name] = Section(self, name)
return self._sections[name]
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def get(self, section, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
return self[section].get(key, default)
def getbool(self, section, key, default=''):
"""Return the specified option as boolean value.
If the value of the option is one of "yes", "true", "enabled", "on",
or "1", this method wll return `True`, otherwise `False`.
Valid default input is a string or a bool. Returns a bool.
"""
return self[section].getbool(key, default)
def getint(self, section, key, default=''):
"""Return the value of the specified option as integer.
Valid default input is a string or an int. Returns an int.
"""
return self[section].getint(key, default)
def getfloat(self, section, key, default=''):
"""Return the value of the specified option as float.
Valid default input is a string, float or int. Returns a float.
"""
return self[section].getfloat(key, default)
def getlist(self, section, key, default='', sep=',', keep_empty=False):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `True`, empty elements are
included in the list.
Valid default input is a string or a list. Returns a string.
"""
return self[section].getlist(key, default, sep, keep_empty)
def getpath(self, section, key, default=''):
"""Return a configuration value as an absolute path.
Relative paths are resolved relative to the location of this
configuration file.
Valid default input is a string. Returns a normalized path.
"""
return self[section].getpath(key, default)
def defaults(self, compmgr=None):
"""Returns a dictionary of the default configuration values
If `compmgr` is specified, return only options declared in components
that are enabled in the given `ComponentManager`.
"""
defaults = {}
for (section, key), option in Option.get_registry(compmgr).items():
defaults.setdefault(section, {})[key] = option.default
return defaults
def options(self, section, compmgr=None):
"""Return a list of `(name, value)` tuples for every option in the
specified section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
return self[section].options(compmgr)
def remove(self, section, key):
"""Remove the specified option."""
self[section].remove(key)
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set([to_unicode(s) for s in self.parser.sections()])
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
def has_option(self, section, option, defaults=True):
"""Returns True if option exists in section in either the project
burp.ini or one of the parents, or is available through the Option
registry.
"""
section_str = _to_utf8(section)
if self.parser.has_section(section_str):
if _to_utf8(option) in self.parser.options(section_str):
return True
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
return True
return defaults and (section, option) in Option.registry
def parse_if_needed(self, force=False):
if not self.filename or not os.path.isfile(self.filename):
return False
changed = False
modtime = os.path.getmtime(self.filename)
if force or modtime > self._lastmtime:
self._sections = {}
self.parser._sections = {}
if not self.parser.read(self.filename):
raise IOError("Error reading '%(file)s', make sure it is "
"readable." % (self.filename, ))
self._lastmtime = modtime
self._old_sections = deepcopy(self.parser._sections)
changed = True
if changed:
self.parents = []
if self.parser.has_option('inherit', 'file'):
for filename in self.parser.get('inherit', 'file').split(','):
filename = to_unicode(filename.strip())
if not os.path.isabs(filename):
filename = os.path.join(os.path.dirname(self.filename),
filename)
self.parents.append(Configuration(filename))
else:
for parent in self.parents:
changed |= parent.parse_if_needed(force=force)
if changed:
self._cache = {}
return changed
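# Illustrative usage of the Configuration/Section API defined above; the
# section and option names are hypothetical:
#
#     config = Configuration('burp.ini')
#     if 'proxy' in config:
#         timeout = config.getint('proxy', 'timeout', '30')
#         hosts = config.getlist('scope', 'hosts', 'example.com,example.org')
#         verbose = config.getbool('logging', 'verbose', 'false')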
class Section(object):
"""Proxy for a specific configuration section.
Objects of this class should not be instantiated directly.
"""
__slots__ = ['config', 'name', 'overridden', '_cache']
def __init__(self, config, name):
self.config = config
self.name = name
self.overridden = {}
self._cache = {}
def contains(self, key, defaults=True):
if self.config.parser.has_option(_to_utf8(self.name), _to_utf8(key)):
return True
for parent in self.config.parents:
if parent[self.name].contains(key, defaults=False):
return True
        return defaults and (self.name, key) in Option.registry
__contains__ = contains
def iterate(self, compmgr=None, defaults=True):
"""Iterate over the options in this section.
If `compmgr` is specified, only return default option values for
components that are enabled in the given `ComponentManager`.
"""
options = set()
name_str = _to_utf8(self.name)
if self.config.parser.has_section(name_str):
for option_str in self.config.parser.options(name_str):
option = to_unicode(option_str)
options.add(option.lower())
yield option
for parent in self.config.parents:
for option in parent[self.name].iterate(defaults=False):
loption = option.lower()
if loption not in options:
options.add(loption)
yield option
if defaults:
for section, option in Option.get_registry(compmgr).keys():
if section == self.name and option.lower() not in options:
yield option
__iter__ = iterate
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
def get(self, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
cached = self._cache.get(key, _use_default)
if cached is not _use_default:
return cached
name_str = _to_utf8(self.name)
key_str = _to_utf8(key)
if self.config.parser.has_option(name_str, key_str):
value = self.config.parser.get(name_str, key_str)
else:
for parent in self.config.parents:
value = parent[self.name].get(key, _use_default)
if value is not _use_default:
break
else:
if default is not _use_default:
option = Option.registry.get((self.name, key))
value = option.default if option else _use_default
else:
value = _use_default
if value is _use_default:
return default
if not value:
value = u''
elif isinstance(value, basestring):
value = to_unicode(value)
self._cache[key] = value
return value
def getbool(self, key, default=''):
"""Return the value of the specified option as boolean.
This method returns `True` if the option value is one of "yes", "true",
"enabled", "on", or non-zero numbers, ignoring case. Otherwise `False`
is returned.
Valid default input is a string or a bool. Returns a bool.
"""
return as_bool(self.get(key, default))
def getint(self, key, default=''):
"""Return the value of the specified option as integer.
Valid default input is a string or an int. Returns an int.
"""
value = self.get(key, default)
if not value:
return 0
return int(value)
def getfloat(self, key, default=''):
"""Return the value of the specified option as float.
Valid default input is a string, float or int. Returns a float.
"""
value = self.get(key, default)
if not value:
return 0.0
return float(value)
def getlist(self, key, default='', sep=',', keep_empty=True):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `False`, empty elements are
omitted from the list.
Valid default input is a string or a list. Returns a list.
"""
value = self.get(key, default)
if not value:
return []
if isinstance(value, basestring):
items = [item.strip() for item in value.split(sep)]
else:
items = list(value)
if not keep_empty:
items = filter(None, items)
return items
def getpath(self, key, default=''):
"""Return the value of the specified option as a path, relative to
the location of this configuration file.
Valid default input is a string. Returns a normalized path.
"""
path = self.get(key, default)
if not path:
return default
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.config.filename), path)
return os.path.normcase(os.path.realpath(path))
def options(self, compmgr=None):
"""Return `(key, value)` tuples for every option in the section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
for key in self.iterate(compmgr):
yield key, self.get(key)
def _get_registry(cls, compmgr=None):
"""Return the descriptor registry.
If `compmgr` is specified, only return descriptors for components that
are enabled in the given `ComponentManager`.
"""
if compmgr is None:
return cls.registry
from .core import ComponentMeta
components = {}
for comp in ComponentMeta._components:
for attr in comp.__dict__.itervalues():
if isinstance(attr, cls):
components[attr] = comp
return dict(each for each in cls.registry.iteritems()
if each[1] not in components
or compmgr.is_enabled(components[each[1]]))
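# Hedged usage sketch (added for illustration, not part of the original
# module): how the typed accessors defined on Section are normally consumed.
# The section and option names below are hypothetical.
def _example_section_lookup(config):
    section = config['logging']
    log_type = section.get('log_type', 'file')                     # plain string
    debug = section.getbool('debug', 'false')                      # "yes"/"true"/"on"/non-zero -> True
    retries = section.getint('retries', '3')                       # empty value -> 0
    dirs = section.getlist('log_dirs', sep=',', keep_empty=False)  # "a,,b" -> ['a', 'b']
    return log_type, debug, retries, dirs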
class ConfigSection(object):
"""Descriptor for configuration sections."""
registry = {}
@staticmethod
def get_registry(compmgr=None):
"""Return the section registry, as a `dict` mapping section names to
`ConfigSection` objects.
If `compmgr` is specified, only return sections for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(ConfigSection, compmgr)
def __init__(self, name, doc, doc_domain='burpini'):
"""Create the configuration section."""
self.name = name
self.registry[self.name] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
return config[self.name]
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
class Option(object):
"""Descriptor for configuration options."""
registry = {}
accessor = Section.get
@staticmethod
def get_registry(compmgr=None):
"""Return the option registry, as a `dict` mapping `(section, key)`
tuples to `Option` objects.
If `compmgr` is specified, only return options for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(Option, compmgr)
def __init__(self, section, name, default=None, doc='',
doc_domain='burpini'):
"""Create the configuration option.
:param section: the name of the configuration section this option
belongs to
:param name: the name of the option
:param default: the default value for the option
:param doc: documentation of the option
"""
self.section = section
self.name = name
self.default = default
self.registry[(self.section, self.name)] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
section = config[self.section]
value = self.accessor(section, self.name, self.default)
return value
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __repr__(self):
return '<%s [%s] "%s">' % (self.__class__.__name__, self.section,
self.name)
class BoolOption(Option):
"""Descriptor for boolean configuration options."""
accessor = Section.getbool
class IntOption(Option):
"""Descriptor for integer configuration options."""
accessor = Section.getint
class FloatOption(Option):
"""Descriptor for float configuration options."""
accessor = Section.getfloat
class ListOption(Option):
"""Descriptor for configuration options that contain multiple values
separated by a specific character.
"""
def __init__(self, section, name, default=None, sep=',', keep_empty=False,
doc='', doc_domain='burpini'):
Option.__init__(self, section, name, default, doc, doc_domain)
self.sep = sep
self.keep_empty = keep_empty
def accessor(self, section, name, default):
return section.getlist(name, default, self.sep, self.keep_empty)
class OrderedExtensionsOption(ListOption):
"""A comma separated, ordered, list of components implementing `interface`.
Can be empty.
If `include_missing` is true (the default) all components implementing
the interface are returned, with those specified by the option ordered
first."""
def __init__(self, section, name, interface, default=None,
include_missing=True, doc='', doc_domain='burpini'):
ListOption.__init__(self, section, name, default, doc=doc,
doc_domain=doc_domain)
self.xtnpt = ExtensionPoint(interface)
self.include_missing = include_missing
def __get__(self, instance, owner):
if instance is None:
return self
order = ListOption.__get__(self, instance, owner)
components = []
for impl in self.xtnpt.extensions(instance):
if self.include_missing or impl.__class__.__name__ in order:
components.append(impl)
def compare(x, y):
x, y = x.__class__.__name__, y.__class__.__name__
if x not in order:
return int(y in order)
if y not in order:
return -int(x in order)
return cmp(order.index(x), order.index(y))
components.sort(compare)
return components
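# Hedged sketch (added, not part of the original file): enumerate every
# registered option together with its default value, the way a documentation
# generator or admin panel might.
def _example_registry_dump(compmgr=None):
    for (section, key), option in sorted(Option.get_registry(compmgr).items()):
        print('[%s] %s = %r' % (section, key, option.default))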
| 2.171875 | 2 |
src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 0 | 11695 | <reponame>antont/tundra
import circuits
from PythonQt.QtGui import QQuaternion as Quat
from PythonQt.QtGui import QVector3D as Vec
import naali
COMPNAME = "rotation"
class RotationHandler(circuits.BaseComponent):
def __init__(self, entity=None, comp=None, changetype=None):
circuits.BaseComponent.__init__(self)
self.entity = entity
self.comp = comp
if self.comp is not None: #normal run, check for nonEC run now
# Todo: OnChanged() is deprecated
comp.connect("OnChanged()", self.onChanged)
self.rot = Quat.fromAxisAndAngle(Vec(0, 1, 0), 1)
def onChanged(self):
y = self.comp.GetAttribute('y')
self.rot = Quat.fromAxisAndAngle(Vec(0, y, 0), 1)
#print self.rot, y
@circuits.handler("update")
def update(self, frametime):
if self.entity is not None:
p = self.entity.placeable
ort = p.Orientation
ort *= self.rot
p.Orientation = ort
# else: #testing without EC, as a autoloaded module
# entid = 2088826547
# try:
# self.entity = naali.getEntity(entid)
# except:
# pass #not there (yet)
# else:
# self.entity.createComponent("EC_DynamicComponent")
# oldent = r.getEntity(ent.id)
# self.comp = oldent.dynamic
@circuits.handler("on_logout")
def on_logout(self, evid):
self.entity = None #XXX figure out proper unregistering, preferably in componenthandler.py / EC_Script biz
| 2.34375 | 2 |
pyqubo/package_info.py | caja-matematica/pyqubo | 1 | 11696 | <gh_stars>1-10
# (major, minor, patch, prerelease)
VERSION = (0, 0, 6, "")
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pyqubo'
__contact_names__ = 'Recruit Communications Co., Ltd.'
__contact_emails__ = '<EMAIL>'
__homepage__ = 'https://pyqubo.readthedocs.io/en/latest/'
__repository_url__ = 'https://github.com/recruit-communications/pyqubo'
__download_url__ = 'https://github.com/recruit-communications/pyqubo'
__description__ = 'PyQUBO allows you to create QUBOs or Ising models from mathematical expressions.'
__license__ = 'Apache 2.0'
__keywords__ = 'QUBO, quantum annealing, annealing machine, ising model, optimization'
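# Added illustrative note: with VERSION = (0, 0, 6, ""), __shortversion__ and
# __version__ both resolve to '0.0.6'; a prerelease tag such as "rc1" in the
# fourth slot would give __version__ == '0.0.6rc1'.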
| 1.804688 | 2 |
bin/DBImportOperation/etl_operations.py | karlam123/DBImport | 0 | 11697 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import subprocess
import errno, os, pty
import shlex
from subprocess import Popen, PIPE
from ConfigReader import configuration
import mysql.connector
from mysql.connector import errorcode
from common.Singleton import Singleton
from DBImportConfig import import_config
from DBImportOperation import common_operations
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import time
class operation(object, metaclass=Singleton):
def __init__(self, Hive_DB=None, Hive_Table=None):
logging.debug("Executing etl_operations.__init__()")
self.Hive_DB = None
self.Hive_Table = None
self.mysql_conn = None
self.mysql_cursor = None
self.startDate = None
self.common_operations = common_operations.operation(Hive_DB, Hive_Table)
self.import_config = import_config.config(Hive_DB, Hive_Table)
if Hive_DB != None and Hive_Table != None:
self.setHiveTable(Hive_DB, Hive_Table)
else:
# If the class already is initialized, we just pull the parameters and set them here
self.Hive_DB = self.common_operations.Hive_DB
self.Hive_Table = self.common_operations.Hive_Table
self.startDate = self.import_config.startDate
logging.debug("Executing etl_operations.__init__() - Finished")
def setHiveTable(self, Hive_DB, Hive_Table):
""" Sets the parameters to work against a new Hive database and table """
self.Hive_DB = Hive_DB.lower()
self.Hive_Table = Hive_Table.lower()
self.common_operations.setHiveTable(self.Hive_DB, self.Hive_Table)
self.import_config.setHiveTable(self.Hive_DB, self.Hive_Table)
try:
self.import_config.getImportConfig()
self.startDate = self.import_config.startDate
self.import_config.lookupConnectionAlias()
except:
self.import_config.remove_temporary_files()
sys.exit(1)
def remove_temporary_files(self):
self.import_config.remove_temporary_files()
def connectToHive(self,):
logging.debug("Executing etl_operations.connectToHive()")
try:
self.common_operations.connectToHive()
except Exception as ex:
logging.error(ex)
self.import_config.remove_temporary_files()
sys.exit(1)
logging.debug("Executing etl_operations.connectToHive() - Finished")
def mergeHiveTables(self, sourceDB, sourceTable, targetDB, targetTable, historyDB = None, historyTable=None, targetDeleteDB = None, targetDeleteTable=None, createHistoryAudit=False, sourceIsIncremental=False, sourceIsImportTable=False, softDelete=False, mergeTime=datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), datalakeSource=None, PKColumns=None, hiveMergeJavaHeap=None, oracleFlashbackSource=False, mssqlChangeTrackingSource=False, deleteNotUpdatedRows=False, oracleFlashbackImportTable=None, mssqlChangeTrackingImportTable=None):
""" Merge source table into Target table. Also populate a History Audit table if selected """
logging.debug("Executing etl_operations.mergeHiveTables()")
targetColumns = self.common_operations.getHiveColumns(hiveDB=targetDB, hiveTable=targetTable, includeType=False, includeComment=False)
columnMerge = self.common_operations.getHiveColumnNameDiff(sourceDB=sourceDB, sourceTable=sourceTable, targetDB=targetDB, targetTable=targetTable, importTool = self.import_config.importTool, sourceIsImportTable=True)
if PKColumns == None:
PKColumns = self.common_operations.getPKfromTable(hiveDB=targetDB, hiveTable=targetTable, quotedColumns=False)
datalakeIUDExists = False
datalakeInsertExists = False
datalakeUpdateExists = False
datalakeDeleteExists = False
datalakeSourceExists = False
for index, row in targetColumns.iterrows():
if row['name'] == "datalake_iud": datalakeIUDExists = True
if row['name'] == "datalake_insert": datalakeInsertExists = True
if row['name'] == "datalake_update": datalakeUpdateExists = True
if row['name'] == "datalake_delete": datalakeDeleteExists = True
if row['name'] == "datalake_source": datalakeSourceExists = True
if hiveMergeJavaHeap != None:
query = "set hive.tez.container.size=%s"%(hiveMergeJavaHeap)
self.common_operations.executeHiveQuery(query)
query = "merge into `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "using `%s`.`%s` as S \n"%(sourceDB, sourceTable)
query += "on \n"
for i, targetColumn in enumerate(PKColumns.split(",")):
try:
sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']
except IndexError:
logging.error("Primary Key cant be found in the source target table. Please check PK override")
self.import_config.remove_temporary_files()
sys.exit(1)
if sourceColumn == None:
logging.error("ERROR: Problem determine column name in source table for primary key column '%s'"%(targetColumn))
self.import_config.remove_temporary_files()
sys.exit(1)
if i == 0:
query += " T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
else:
query += "and\n T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
query += "\n"
query += "when matched "
if sourceIsIncremental == False:
# If the source is not incremental, it means that we need to check all the values in
# all columns as we don't know if the row has changed or not
query += "and (\n"
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
foundPKcolumn = False
for column in PKColumns.split(","):
if row['targetName'] == column:
foundPKcolumn = True
if foundPKcolumn == False:
if firstIteration == True:
query += " "
firstIteration = False
else:
query += " or "
query += "T.`%s` != S.`%s` "%(row['targetName'], row['sourceName'])
query += "or ( T.`%s` is null and S.`%s` is not null ) "%(row['targetName'], row['sourceName'])
query += "or ( T.`%s` is not null and S.`%s` is null ) "%(row['targetName'], row['sourceName'])
query += "\n"
if softDelete == True and datalakeIUDExists == True:
# If a row is deleted and then inserted again with the same values in all fields, this will still trigger an update
query += " or T.datalake_iud = 'D' \n"
query += ") \n"
if oracleFlashbackSource == True:
query += "and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \n"
if mssqlChangeTrackingSource == True:
query += "and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \n"
query += "then update set "
firstIteration = True
nonPKcolumnFound = False
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
foundPKcolumn = False
for column in PKColumns.split(","):
if row['targetName'] == column:
foundPKcolumn = True
if foundPKcolumn == False:
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s` = S.`%s`"%(row['targetName'], row['sourceName'])
nonPKcolumnFound = True
if nonPKcolumnFound == False:
# This will happen if there are only columns that are part of the PK in the table. Impossible to merge it with full history
logging.error("This table only have columns that is part of the PrimaryKey. Merge operations cant be used")
self.import_config.remove_temporary_files()
sys.exit(1)
if datalakeIUDExists == True: query += ", \n `datalake_iud` = 'U'"
if datalakeUpdateExists == True: query += ", \n `datalake_update` = '%s'"%(mergeTime)
if datalakeSourceExists == True and datalakeSource != None: query += ", \n `datalake_source` = '%s'"%(datalakeSource)
query += " \n"
if oracleFlashbackSource == True:
query += "when matched and S.datalake_flashback_operation = 'D' then delete \n"
if mssqlChangeTrackingSource == True:
query += "when matched and S.datalake_mssql_changetrack_operation = 'D' then delete \n"
query += "when not matched "
if oracleFlashbackSource == True:
query += "and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \n"
if mssqlChangeTrackingSource == True:
query += "and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \n"
query += "then insert values ( "
firstIteration = True
for index, row in targetColumns.iterrows():
ColumnName = row['name']
sourceColumnName = columnMerge.loc[columnMerge['targetName'] == ColumnName]['sourceName'].fillna('').iloc[0]
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
if sourceColumnName != "":
query += " S.`%s`"%(sourceColumnName)
elif ColumnName == "datalake_iud":
query += " 'I'"
elif ColumnName == "datalake_insert":
query += " '%s'"%(mergeTime)
elif ColumnName == "datalake_update":
query += " '%s'"%(mergeTime)
elif ColumnName == "datalake_source":
query += " '%s'"%(datalakeSource)
else:
query += " NULL"
query += " \n) \n"
# print("==============================================================")
# print(query)
# self.import_config.remove_temporary_files()
# sys.exit(1)
## query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
if deleteNotUpdatedRows == True:
# This is used by Oracle Flashback and MSSQL Change Tracking imports when doing a reinitialization of the data and we need to
# remove the rows that were not updated
query = "delete from `%s`.`%s` where datalake_update != '%s' "%(targetDB, targetTable, mergeTime)
self.common_operations.executeHiveQuery(query)
# If a row was previously deleted and now inserted again and we are using Soft Delete,
# then the information in the datalake_iud, datalake_insert and datalake_delete is wrong.
if softDelete == True:
query = "update `%s`.`%s` set "%(targetDB, targetTable)
query += " datalake_iud = 'I', "
query += " datalake_insert = datalake_update, "
query += " datalake_delete = null "
query += "where "
query += " datalake_iud = 'U' and "
query += " datalake_delete is not null"
# print("==============================================================")
# print(query)
# query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
# Statement to select all rows that were changed in the Target table and insert them into the History table
if createHistoryAudit == True and historyDB != None and historyTable != None and oracleFlashbackSource == False:
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s'"%(datalakeSource)
query += ",\n `datalake_iud`"
query += ",\n `datalake_update`"
query += "\nfrom `%s`.`%s` \n"%(targetDB, targetTable)
query += "where datalake_update = '%s'"%(mergeTime)
self.common_operations.executeHiveQuery(query)
if sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None:
# Start with truncating the History Delete table as we need to rebuild this one from scratch to determine what rows are deleted
query = "truncate table `%s`.`%s`"%(targetDeleteDB, targetDeleteTable)
self.common_operations.executeHiveQuery(query)
# Insert all rows (PK columns only) that exist in the Target Table but don't exist in the Import table (the ones that were deleted)
query = "insert into table `%s`.`%s` \n(`"%(targetDeleteDB, targetDeleteTable)
query += "`, `".join(PKColumns.split(","))
query += "`) \nselect T.`"
query += "`, T.`".join(PKColumns.split(","))
query += "` \nfrom `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "left outer join `%s`.`%s` as S \n"%(sourceDB, sourceTable)
query += "on \n"
for i, targetColumn in enumerate(PKColumns.split(",")):
sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']
if i == 0:
query += " T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
else:
query += "and\n T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
query += "\nwhere \n"
for i, targetColumn in enumerate(PKColumns.split(",")):
sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']
if i == 0:
query += " S.`%s` is null "%(sourceColumn)
else:
query += "and\n S.`%s` is null "%(sourceColumn)
self.common_operations.executeHiveQuery(query)
if oracleFlashbackSource == True and createHistoryAudit == True:
# If it is a history merge with Oracle Flashback, we need to handle the deletes separately
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s'"%(datalakeSource)
query += ",\n `datalake_flashback_operation` as `datalake_iud`"
query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime)
query += "\nfrom `%s`.`%s`"%(sourceDB, oracleFlashbackImportTable)
self.common_operations.executeHiveQuery(query)
if mssqlChangeTrackingSource == True and createHistoryAudit == True:
# If it is a history merge with MSSQL Change Tracking, we need to handle the deletes separately
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s'"%(datalakeSource)
query += ",\n `datalake_mssql_changetrack_operation` as `datalake_iud`"
query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime)
query += "\nfrom `%s`.`%s`"%(sourceDB, mssqlChangeTrackingImportTable)
self.common_operations.executeHiveQuery(query)
# Insert the deleted rows into the History table. Without this, it's impossible to see what values the columns had before the delete
if sourceIsIncremental == False and createHistoryAudit == True and historyDB != None and historyTable != None and targetDeleteDB != None and targetDeleteTable != None:
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " T.`%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s' as `datalake_source`"%(datalakeSource)
query += ",\n 'D' as `datalake_iud`"
query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime)
query += "\nfrom `%s`.`%s` as D \n"%(targetDeleteDB, targetDeleteTable)
query += "left join `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "on \n"
for i, column in enumerate(PKColumns.split(",")):
if i == 0:
query += " T.`%s` = D.`%s` "%(column, column)
else:
query += "and\n T.`%s` = D.`%s` "%(column, column)
# print("==============================================================")
# print(query)
# query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
if sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None:
# Use the merge command to delete found rows between the Delete Table and the History Table
query = "merge into `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "using `%s`.`%s` as D \n"%(targetDeleteDB, targetDeleteTable)
query += "on \n"
for i, column in enumerate(PKColumns.split(",")):
if i == 0:
query += " T.`%s` = D.`%s` "%(column, column)
else:
query += "and\n T.`%s` = D.`%s` "%(column, column)
if softDelete == True:
query += "and\n T.`datalake_delete` != 'D' "
query += "\n"
if softDelete == False:
query += "when matched then delete \n"
else:
query += "when matched then update set \n"
query += "datalake_iud = 'D', \n"
query += "datalake_update = timestamp('%s'), \n"%(mergeTime)
query += "datalake_delete = timestamp('%s') "%(mergeTime)
# print("==============================================================")
# print(query)
# query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
logging.debug("Executing etl_operations.mergeHiveTables() - Finished")
| 1.796875 | 2 |
grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 1 | 11698 | <reponame>UMCU-RIBS/grvx
from logging import getLogger
from numpy import mean, std
from pickle import dump
from wonambi import Dataset
from wonambi.trans import math, concatenate
from bidso import Task, Electrodes
lg = getLogger(__name__)
def read_ieeg_block(filename, electrode_file, conditions, minimalduration, output_dir):
d = Dataset(filename, bids=True)
markers = d.read_markers()
electrodes = Electrodes(electrode_file)
elec_names = [x['name'] for x in electrodes.electrodes.tsv]
elec_names = [x for x in elec_names if x in d.header['chan_name']] # exclude electrode locations that have no corresponding channel
all_conditions = [x for v in conditions.values() for x in v]
clean_labels = _reject_channels(d, elec_names, all_conditions, minimalduration)
outputs = []
for active_baseline, data_conds in conditions.items():
block_beg = []
block_end = []
for mrk in markers:
if mrk['name'] in data_conds:
dur = (mrk['end'] - mrk['start'])
if dur >= minimalduration:
block_beg.append(mrk['start'])
block_end.append(mrk['end'])
data = d.read_data(begtime=block_beg, endtime=block_end, chan=clean_labels)
output_task = Task(filename)
output_task.extension = '.pkl'
output_task.task += active_baseline
output_file = output_dir / output_task.get_filename()
with output_file.open('wb') as f:
dump(data, f)
outputs.append(output_file)
return outputs
def _reject_channels(d, elec_names, cond, minimalduration):
markers = d.read_markers()
block_beg = []
block_end = []
for mrk in markers:
if mrk['name'] in cond:
dur = (mrk['end'] - mrk['start'])
if dur >= minimalduration:
block_beg.append(mrk['start'])
block_end.append(mrk['end'])
data = d.read_data(chan=elec_names, begtime=block_beg, endtime=block_end)
data = concatenate(data, 'time')
clean_labels = reject_channels(data, 3)
return clean_labels
def reject_channels(dat, reject_chan_thresh):
dat_std = math(dat, operator_name='nanstd', axis='time')
THRESHOLD = reject_chan_thresh
x = dat_std.data[0]
thres = [mean(x) + THRESHOLD * std(x)]
clean_labels = list(dat_std.chan[0][dat_std.data[0] < thres])
return clean_labels
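# Hedged numpy-only sketch (added, not part of the original module): the
# channel-rejection rule above keeps a channel when its temporal standard
# deviation stays below mean + threshold * std of all per-channel values.
# `channel_std` is assumed to be a 1-D numpy array of per-channel std values.
def _example_rejection_mask(channel_std, threshold=3):
    return channel_std < (mean(channel_std) + threshold * std(channel_std))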
| 2.09375 | 2 |
bin/analysis/ipa/constraints/split.py | ncbray/pystream | 6 | 11699 | # Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import ast
from . base import Constraint
from .. calling import cpa
class Splitter(Constraint):
def __init__(self, src):
assert src.isNode(), src
self.src = src
self.dst = []
self.callbacks = []
def addSplitCallback(self, callback):
self.callbacks.append(callback)
if self.objects: callback()
def attach(self):
self.src.addNext(self)
def localName(self):
return 'split_temp'
def makeTarget(self, context):
lcl = context.local(ast.Local(self.localName()))
lcl.addPrev(self)
self.dst.append(lcl)
return lcl
def makeConsistent(self, context):
# Make constraint consistent
if self.src.values:
self.changed(context, self.src, self.src.values)
if self.src.critical.values:
self.criticalChanged(context, self.src, self.src.critical.values)
def criticalChanged(self, context, node, diff):
for dst in self.dst:
dst.critical.updateValues(context, dst, diff)
def doNotify(self):
for callback in self.callbacks:
callback()
def isSplit(self):
return True
class TypeSplitConstraint(Splitter):
def __init__(self, src):
Splitter.__init__(self, src)
self.objects = {}
self.megamorphic = False
def localName(self):
return 'type_split_temp'
def types(self):
return self.objects.keys()
def makeMegamorphic(self):
assert not self.megamorphic
self.megamorphic = True
self.objects.clear()
self.objects[cpa.anyType] = self.src
self.doNotify()
def changed(self, context, node, diff):
if self.megamorphic: return
changed = False
for obj in diff:
cpaType = obj.cpaType()
if cpaType not in self.objects:
if len(self.objects) >= 4:
self.makeMegamorphic()
break
else:
temp = self.makeTarget(context)
self.objects[cpaType] = temp
changed = True
else:
temp = self.objects[cpaType]
temp.updateSingleValue(obj)
else:
if changed: self.doNotify()
# TODO prevent over splitting? All objects with the same qualifier should be grouped?
class ExactSplitConstraint(Splitter):
def __init__(self, src):
Splitter.__init__(self, src)
self.objects = {}
def localName(self):
return 'exact_split_temp'
def changed(self, context, node, diff):
changed = False
for obj in diff:
if obj not in self.objects:
temp = self.makeTarget(context)
self.objects[obj] = temp
changed = True
else:
temp = self.objects[obj]
temp.updateSingleValue(obj)
if changed: self.doNotify()
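# Hedged standalone sketch (added, not part of the original module): mirrors
# the policy in TypeSplitConstraint.changed() -- one target per observed
# cpaType until more than four distinct types are seen, after which the split
# collapses into a single megamorphic target keyed by cpa.anyType.
def _exampleSplitPolicy(cpaTypes, limit=4):
	buckets = set()
	for t in cpaTypes:
		if t not in buckets:
			if len(buckets) >= limit:
				return 'megamorphic'
			buckets.add(t)
	return buckets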
| 2.234375 | 2 |